follow-exec: delete all non-execing threads
[deliverable/binutils-gdb.git] / gdb / btrace.c
CommitLineData
02d27625
MM
1/* Branch trace support for GDB, the GNU debugger.
2
32d0add0 3 Copyright (C) 2013-2015 Free Software Foundation, Inc.
02d27625
MM
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
d41f6d8e 22#include "defs.h"
02d27625
MM
23#include "btrace.h"
24#include "gdbthread.h"
02d27625
MM
25#include "inferior.h"
26#include "target.h"
27#include "record.h"
28#include "symtab.h"
29#include "disasm.h"
30#include "source.h"
31#include "filenames.h"
c12a2917 32#include "xml-support.h"
6e07b1d2 33#include "regcache.h"
02d27625
MM
34
/* Print a record debug message.  Use do ... while (0) to avoid ambiguities
   when used in if statements.  Only prints when the "set debug record"
   flag (record_debug) is non-zero.  */

#define DEBUG(msg, args...)						\
  do									\
    {									\
      if (record_debug != 0)						\
	fprintf_unfiltered (gdb_stdlog,					\
			    "[btrace] " msg "\n", ##args);		\
    }									\
  while (0)

/* Like DEBUG, but additionally tags the message with "[ftrace]" for
   function-trace computation messages.  */
#define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args)
48
02d27625
MM
49/* Return the function name of a recorded function segment for printing.
50 This function never returns NULL. */
51
52static const char *
23a7fe75 53ftrace_print_function_name (const struct btrace_function *bfun)
02d27625
MM
54{
55 struct minimal_symbol *msym;
56 struct symbol *sym;
57
58 msym = bfun->msym;
59 sym = bfun->sym;
60
61 if (sym != NULL)
62 return SYMBOL_PRINT_NAME (sym);
63
64 if (msym != NULL)
efd66ac6 65 return MSYMBOL_PRINT_NAME (msym);
02d27625
MM
66
67 return "<unknown>";
68}
69
70/* Return the file name of a recorded function segment for printing.
71 This function never returns NULL. */
72
73static const char *
23a7fe75 74ftrace_print_filename (const struct btrace_function *bfun)
02d27625
MM
75{
76 struct symbol *sym;
77 const char *filename;
78
79 sym = bfun->sym;
80
81 if (sym != NULL)
08be3fe3 82 filename = symtab_to_filename_for_display (symbol_symtab (sym));
02d27625
MM
83 else
84 filename = "<unknown>";
85
86 return filename;
87}
88
23a7fe75
MM
89/* Return a string representation of the address of an instruction.
90 This function never returns NULL. */
02d27625 91
23a7fe75
MM
92static const char *
93ftrace_print_insn_addr (const struct btrace_insn *insn)
02d27625 94{
23a7fe75
MM
95 if (insn == NULL)
96 return "<nil>";
97
98 return core_addr_to_string_nz (insn->pc);
02d27625
MM
99}
100
/* Print an ftrace debug status message for BFUN, prefixed with PREFIX.
   Shows function name, file, call-stack level, source line range, and the
   half-open instruction number range covered by this segment.  */

static void
ftrace_debug (const struct btrace_function *bfun, const char *prefix)
{
  const char *fun, *file;
  unsigned int ibegin, iend;
  int lbegin, lend, level;

  fun = ftrace_print_function_name (bfun);
  file = ftrace_print_filename (bfun);
  level = bfun->level;

  lbegin = bfun->lbegin;
  lend = bfun->lend;

  /* Instruction numbers: offset of this segment plus the number of
     instructions it contains; printed as [ibegin; iend).  */
  ibegin = bfun->insn_offset;
  iend = ibegin + VEC_length (btrace_insn_s, bfun->insn);

  DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, lines = [%d; %d], "
		"insn = [%u; %u)", prefix, fun, file, level, lbegin, lend,
		ibegin, iend);
}
124
/* Return non-zero if BFUN does not match MFUN and FUN,
   return zero otherwise.
   A NULL symbol on one side only counts as a switch when the other side
   has symbol information (gained/lost symbol cases below).  */

static int
ftrace_function_switched (const struct btrace_function *bfun,
			  const struct minimal_symbol *mfun,
			  const struct symbol *fun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  msym = bfun->msym;
  sym = bfun->sym;

  /* If the minimal symbol changed, we certainly switched functions.  */
  if (mfun != NULL && msym != NULL
      && strcmp (MSYMBOL_LINKAGE_NAME (mfun), MSYMBOL_LINKAGE_NAME (msym)) != 0)
    return 1;

  /* If the symbol changed, we certainly switched functions.  */
  if (fun != NULL && sym != NULL)
    {
      const char *bfname, *fname;

      /* Check the function name.  */
      if (strcmp (SYMBOL_LINKAGE_NAME (fun), SYMBOL_LINKAGE_NAME (sym)) != 0)
	return 1;

      /* Check the location of those functions, as well.  Identically named
	 static functions may live in different files.  */
      bfname = symtab_to_fullname (symbol_symtab (sym));
      fname = symtab_to_fullname (symbol_symtab (fun));
      if (filename_cmp (fname, bfname) != 0)
	return 1;
    }

  /* If we lost symbol information, we switched functions.  */
  if (!(msym == NULL && sym == NULL) && mfun == NULL && fun == NULL)
    return 1;

  /* If we gained symbol information, we switched functions.  */
  if (msym == NULL && sym == NULL && !(mfun == NULL && fun == NULL))
    return 1;

  return 0;
}
170
23a7fe75
MM
171/* Return non-zero if we should skip this file when generating the function
172 call history, zero otherwise.
173 We would want to do that if, say, a macro that is defined in another file
174 is expanded in this function. */
02d27625
MM
175
176static int
23a7fe75 177ftrace_skip_file (const struct btrace_function *bfun, const char *fullname)
02d27625
MM
178{
179 struct symbol *sym;
180 const char *bfile;
181
182 sym = bfun->sym;
23a7fe75
MM
183 if (sym == NULL)
184 return 1;
02d27625 185
08be3fe3 186 bfile = symtab_to_fullname (symbol_symtab (sym));
23a7fe75
MM
187
188 return (filename_cmp (bfile, fullname) != 0);
189}
190
/* Allocate and initialize a new branch trace function segment.
   PREV is the chronologically preceding function segment, or NULL for the
   first segment of the trace.
   MFUN and FUN are the symbol information we have for this function.
   Returns the zero-initialized (via xzalloc) new segment, linked into the
   chronological flow list after PREV.  */

static struct btrace_function *
ftrace_new_function (struct btrace_function *prev,
		     struct minimal_symbol *mfun,
		     struct symbol *fun)
{
  struct btrace_function *bfun;

  bfun = xzalloc (sizeof (*bfun));

  bfun->msym = mfun;
  bfun->sym = fun;
  bfun->flow.prev = prev;

  /* We start with the identities of min and max, respectively, so the
     first ftrace_update_lines call establishes the real range.  */
  bfun->lbegin = INT_MAX;
  bfun->lend = INT_MIN;

  if (prev == NULL)
    {
      /* Start counting at one.  */
      bfun->number = 1;
      bfun->insn_offset = 1;
    }
  else
    {
      /* PREV must be the chronological tail of the trace.  */
      gdb_assert (prev->flow.next == NULL);
      prev->flow.next = bfun;

      bfun->number = prev->number + 1;
      bfun->insn_offset = (prev->insn_offset
			   + VEC_length (btrace_insn_s, prev->insn));
      /* Inherit the call-stack level; callers adjust it as needed.  */
      bfun->level = prev->level;
    }

  return bfun;
}
231
23a7fe75 232/* Update the UP field of a function segment. */
02d27625 233
23a7fe75
MM
234static void
235ftrace_update_caller (struct btrace_function *bfun,
236 struct btrace_function *caller,
237 enum btrace_function_flag flags)
02d27625 238{
23a7fe75
MM
239 if (bfun->up != NULL)
240 ftrace_debug (bfun, "updating caller");
02d27625 241
23a7fe75
MM
242 bfun->up = caller;
243 bfun->flags = flags;
244
245 ftrace_debug (bfun, "set caller");
246}
247
248/* Fix up the caller for all segments of a function. */
249
250static void
251ftrace_fixup_caller (struct btrace_function *bfun,
252 struct btrace_function *caller,
253 enum btrace_function_flag flags)
254{
255 struct btrace_function *prev, *next;
256
257 ftrace_update_caller (bfun, caller, flags);
258
259 /* Update all function segments belonging to the same function. */
260 for (prev = bfun->segment.prev; prev != NULL; prev = prev->segment.prev)
261 ftrace_update_caller (prev, caller, flags);
262
263 for (next = bfun->segment.next; next != NULL; next = next->segment.next)
264 ftrace_update_caller (next, caller, flags);
265}
266
267/* Add a new function segment for a call.
268 CALLER is the chronologically preceding function segment.
269 MFUN and FUN are the symbol information we have for this function. */
270
271static struct btrace_function *
272ftrace_new_call (struct btrace_function *caller,
273 struct minimal_symbol *mfun,
274 struct symbol *fun)
275{
276 struct btrace_function *bfun;
277
278 bfun = ftrace_new_function (caller, mfun, fun);
279 bfun->up = caller;
31fd9caa 280 bfun->level += 1;
23a7fe75
MM
281
282 ftrace_debug (bfun, "new call");
283
284 return bfun;
285}
286
287/* Add a new function segment for a tail call.
288 CALLER is the chronologically preceding function segment.
289 MFUN and FUN are the symbol information we have for this function. */
290
291static struct btrace_function *
292ftrace_new_tailcall (struct btrace_function *caller,
293 struct minimal_symbol *mfun,
294 struct symbol *fun)
295{
296 struct btrace_function *bfun;
02d27625 297
23a7fe75
MM
298 bfun = ftrace_new_function (caller, mfun, fun);
299 bfun->up = caller;
31fd9caa 300 bfun->level += 1;
23a7fe75 301 bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;
02d27625 302
23a7fe75
MM
303 ftrace_debug (bfun, "new tail call");
304
305 return bfun;
306}
307
308/* Find the innermost caller in the back trace of BFUN with MFUN/FUN
309 symbol information. */
310
311static struct btrace_function *
312ftrace_find_caller (struct btrace_function *bfun,
313 struct minimal_symbol *mfun,
314 struct symbol *fun)
315{
316 for (; bfun != NULL; bfun = bfun->up)
317 {
318 /* Skip functions with incompatible symbol information. */
319 if (ftrace_function_switched (bfun, mfun, fun))
320 continue;
321
322 /* This is the function segment we're looking for. */
323 break;
324 }
325
326 return bfun;
327}
328
/* Find the innermost caller in the back trace of BFUN, skipping all
   function segments that do not end with a call instruction (e.g.
   tail calls ending with a jump).  Returns NULL if there is none.  */

static struct btrace_function *
ftrace_find_call (struct btrace_function *bfun)
{
  for (; bfun != NULL; bfun = bfun->up)
    {
      struct btrace_insn *last;

      /* Skip gaps.  Gap segments have no instructions, so VEC_last below
	 must not be applied to them.  */
      if (bfun->errcode != 0)
	continue;

      last = VEC_last (btrace_insn_s, bfun->insn);

      if (last->iclass == BTRACE_INSN_CALL)
	break;
    }

  return bfun;
}
352
/* Add a continuation segment for a function into which we return.
   PREV is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_return (struct btrace_function *prev,
		   struct minimal_symbol *mfun,
		   struct symbol *fun)
{
  struct btrace_function *bfun, *caller;

  bfun = ftrace_new_function (prev, mfun, fun);

  /* It is important to start at PREV's caller.  Otherwise, we might find
     PREV itself, if PREV is a recursive function.  */
  caller = ftrace_find_caller (prev->up, mfun, fun);
  if (caller != NULL)
    {
      /* The caller of PREV is the preceding btrace function segment in this
	 function instance.  */
      gdb_assert (caller->segment.next == NULL);

      caller->segment.next = bfun;
      bfun->segment.prev = caller;

      /* Maintain the function level.  */
      bfun->level = caller->level;

      /* Maintain the call stack.  */
      bfun->up = caller->up;
      bfun->flags = caller->flags;

      ftrace_debug (bfun, "new return");
    }
  else
    {
      /* We did not find a caller.  This could mean that something went
	 wrong or that the call is simply not included in the trace.  */

      /* Let's search for some actual call.  */
      caller = ftrace_find_call (prev->up);
      if (caller == NULL)
	{
	  /* There is no call in PREV's back trace.  We assume that the
	     branch trace did not include it.  */

	  /* Let's find the topmost call function - this skips tail calls.  */
	  while (prev->up != NULL)
	    prev = prev->up;

	  /* We maintain levels for a series of returns for which we have
	     not seen the calls.
	     We start at the preceding function's level in case this has
	     already been a return for which we have not seen the call.
	     We start at level 0 otherwise, to handle tail calls correctly.  */
	  bfun->level = min (0, prev->level) - 1;

	  /* Fix up the call stack for PREV.  */
	  ftrace_fixup_caller (prev, bfun, BFUN_UP_LINKS_TO_RET);

	  ftrace_debug (bfun, "new return - no caller");
	}
      else
	{
	  /* There is a call in PREV's back trace to which we should have
	     returned.  Let's remain at this level.  */
	  bfun->level = prev->level;

	  ftrace_debug (bfun, "new return - unknown caller");
	}
    }

  return bfun;
}
427
/* Add a new function segment for a function switch.
   PREV is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_switch (struct btrace_function *prev,
		   struct minimal_symbol *mfun,
		   struct symbol *fun)
{
  struct btrace_function *segment;

  /* This is an unexplained function switch.  The call stack will likely
     be wrong at this point.  */
  segment = ftrace_new_function (prev, mfun, fun);

  ftrace_debug (segment, "new switch");

  return segment;
}
447
/* Add a new function segment for a gap in the trace due to a decode error.
   PREV is the chronologically preceding function segment, or NULL.
   ERRCODE is the format-specific error code.
   Returns the gap segment (possibly PREV itself, reused).  */

static struct btrace_function *
ftrace_new_gap (struct btrace_function *prev, int errcode)
{
  struct btrace_function *bfun;

  /* We hijack PREV if it was empty (no error, no instructions) rather
     than allocating a fresh segment.  */
  if (prev != NULL && prev->errcode == 0
      && VEC_empty (btrace_insn_s, prev->insn))
    bfun = prev;
  else
    bfun = ftrace_new_function (prev, NULL, NULL);

  bfun->errcode = errcode;

  ftrace_debug (bfun, "new gap");

  return bfun;
}
470
/* Update BFUN with respect to the instruction at PC.  This may create new
   function segments.
   Return the chronologically latest function segment, never NULL.  */

static struct btrace_function *
ftrace_update_function (struct btrace_function *bfun, CORE_ADDR pc)
{
  struct bound_minimal_symbol bmfun;
  struct minimal_symbol *mfun;
  struct symbol *fun;
  struct btrace_insn *last;

  /* Try to determine the function we're in.  We use both types of symbols
     to avoid surprises when we sometimes get a full symbol and sometimes
     only a minimal symbol.  */
  fun = find_pc_function (pc);
  bmfun = lookup_minimal_symbol_by_pc (pc);
  mfun = bmfun.minsym;

  if (fun == NULL && mfun == NULL)
    DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));

  /* If we didn't have a function or if we had a gap before, we create one.  */
  if (bfun == NULL || bfun->errcode != 0)
    return ftrace_new_function (bfun, mfun, fun);

  /* Check the last instruction, if we have one.
     We do this check first, since it allows us to fill in the call stack
     links in addition to the normal flow links.  */
  last = NULL;
  if (!VEC_empty (btrace_insn_s, bfun->insn))
    last = VEC_last (btrace_insn_s, bfun->insn);

  if (last != NULL)
    {
      switch (last->iclass)
	{
	case BTRACE_INSN_RETURN:
	  return ftrace_new_return (bfun, mfun, fun);

	case BTRACE_INSN_CALL:
	  /* Ignore calls to the next instruction.  They are used for PIC.
	     Falls through to the function-switch check below.  */
	  if (last->pc + last->size == pc)
	    break;

	  return ftrace_new_call (bfun, mfun, fun);

	case BTRACE_INSN_JUMP:
	  {
	    CORE_ADDR start;

	    start = get_pc_function_start (pc);

	    /* If we can't determine the function for PC, we treat a jump at
	       the end of the block as tail call.  Otherwise we fall out of
	       the switch to the function-switch check below.  */
	    if (start == 0 || start == pc)
	      return ftrace_new_tailcall (bfun, mfun, fun);
	  }
	}
    }

  /* Check if we're switching functions for some other reason.  */
  if (ftrace_function_switched (bfun, mfun, fun))
    {
      DEBUG_FTRACE ("switching from %s in %s at %s",
		    ftrace_print_insn_addr (last),
		    ftrace_print_function_name (bfun),
		    ftrace_print_filename (bfun));

      return ftrace_new_switch (bfun, mfun, fun);
    }

  return bfun;
}
545
/* Update BFUN's source range with respect to the instruction at PC.
   Does nothing when no source line is known for PC or the line belongs
   to a file that should be skipped.  */

static void
ftrace_update_lines (struct btrace_function *bfun, CORE_ADDR pc)
{
  struct symtab_and_line sal;
  const char *fullname;

  sal = find_pc_line (pc, 0);
  if (sal.symtab == NULL || sal.line == 0)
    {
      DEBUG_FTRACE ("no lines at %s", core_addr_to_string_nz (pc));
      return;
    }

  /* Check if we switched files.  This could happen if, say, a macro that
     is defined in another file is expanded here.  */
  fullname = symtab_to_fullname (sal.symtab);
  if (ftrace_skip_file (bfun, fullname))
    {
      DEBUG_FTRACE ("ignoring file at %s, file=%s",
		    core_addr_to_string_nz (pc), fullname);
      return;
    }

  /* Update the line range.  */
  bfun->lbegin = min (bfun->lbegin, sal.line);
  bfun->lend = max (bfun->lend, sal.line);

  if (record_debug > 1)
    ftrace_debug (bfun, "update lines");
}
578
/* Append the instruction INSN to BFUN's instructions.  */

static void
ftrace_update_insns (struct btrace_function *bfun,
		     const struct btrace_insn *insn)
{
  VEC_safe_push (btrace_insn_s, bfun->insn, insn);

  if (record_debug > 1)
    ftrace_debug (bfun, "update insn");
}
590
/* Classify the instruction at PC as call, return, jump, or other.
   Any error during classification (e.g. failure to read or decode the
   instruction) leaves the classification at BTRACE_INSN_OTHER.  */

static enum btrace_insn_class
ftrace_classify_insn (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  volatile struct gdb_exception error;
  enum btrace_insn_class iclass;

  iclass = BTRACE_INSN_OTHER;
  TRY_CATCH (error, RETURN_MASK_ERROR)
    {
      if (gdbarch_insn_is_call (gdbarch, pc))
	iclass = BTRACE_INSN_CALL;
      else if (gdbarch_insn_is_ret (gdbarch, pc))
	iclass = BTRACE_INSN_RETURN;
      else if (gdbarch_insn_is_jump (gdbarch, pc))
	iclass = BTRACE_INSN_JUMP;
    }

  return iclass;
}
612
/* Compute the function branch trace from BTS trace for thread TP and
   append it to TP's existing branch trace.  BTRACE holds the raw block
   trace; blocks are stored most-recent-first, so we iterate backwards.  */

static void
btrace_compute_ftrace_bts (struct thread_info *tp,
			   const struct btrace_data_bts *btrace)
{
  struct btrace_thread_info *btinfo;
  struct btrace_function *begin, *end;
  struct gdbarch *gdbarch;
  unsigned int blk, ngaps;
  int level;

  gdbarch = target_gdbarch ();
  btinfo = &tp->btrace;
  begin = btinfo->begin;
  end = btinfo->end;
  ngaps = btinfo->ngaps;
  /* LEVEL tracks the minimal function level seen so far; when extending an
     existing trace, start from the previously computed offset.  */
  level = begin != NULL ? -btinfo->level : INT_MAX;
  blk = VEC_length (btrace_block_s, btrace->blocks);

  while (blk != 0)
    {
      btrace_block_s *block;
      CORE_ADDR pc;

      blk -= 1;

      block = VEC_index (btrace_block_s, btrace->blocks, blk);
      pc = block->begin;

      for (;;)
	{
	  volatile struct gdb_exception error;
	  struct btrace_insn insn;
	  int size;

	  /* We should hit the end of the block.  Warn if we went too far.  */
	  if (block->end < pc)
	    {
	      /* Indicate the gap in the trace - unless we're at the
		 beginning.  */
	      if (begin != NULL)
		{
		  warning (_("Recorded trace may be corrupted around %s."),
			   core_addr_to_string_nz (pc));

		  end = ftrace_new_gap (end, BDE_BTS_OVERFLOW);
		  ngaps += 1;
		}
	      break;
	    }

	  end = ftrace_update_function (end, pc);
	  if (begin == NULL)
	    begin = end;

	  /* Maintain the function level offset.
	     For all but the last block, we do it here.  */
	  if (blk != 0)
	    level = min (level, end->level);

	  /* SIZE stays 0 if disassembly fails; handled below.  */
	  size = 0;
	  TRY_CATCH (error, RETURN_MASK_ERROR)
	    size = gdb_insn_length (gdbarch, pc);

	  insn.pc = pc;
	  insn.size = size;
	  insn.iclass = ftrace_classify_insn (gdbarch, pc);

	  ftrace_update_insns (end, &insn);
	  ftrace_update_lines (end, pc);

	  /* We're done once we pushed the instruction at the end.  */
	  if (block->end == pc)
	    break;

	  /* We can't continue if we fail to compute the size.  */
	  if (size <= 0)
	    {
	      warning (_("Recorded trace may be incomplete around %s."),
		       core_addr_to_string_nz (pc));

	      /* Indicate the gap in the trace.  We just added INSN so we're
		 not at the beginning.  */
	      end = ftrace_new_gap (end, BDE_BTS_INSN_SIZE);
	      ngaps += 1;

	      break;
	    }

	  pc += size;

	  /* Maintain the function level offset.
	     For the last block, we do it here to not consider the last
	     instruction.
	     Since the last instruction corresponds to the current instruction
	     and is not really part of the execution history, it shouldn't
	     affect the level.  */
	  if (blk == 0)
	    level = min (level, end->level);
	}
    }

  btinfo->begin = begin;
  btinfo->end = end;
  btinfo->ngaps = ngaps;

  /* LEVEL is the minimal function level of all btrace function segments.
     Define the global level offset to -LEVEL so all function levels are
     normalized to start at zero.  */
  btinfo->level = -level;
}
725
734b0e4b
MM
726/* Compute the function branch trace from a block branch trace BTRACE for
727 a thread given by BTINFO. */
728
729static void
76235df1 730btrace_compute_ftrace (struct thread_info *tp, struct btrace_data *btrace)
734b0e4b
MM
731{
732 DEBUG ("compute ftrace");
733
734 switch (btrace->format)
735 {
736 case BTRACE_FORMAT_NONE:
737 return;
738
739 case BTRACE_FORMAT_BTS:
76235df1 740 btrace_compute_ftrace_bts (tp, &btrace->variant.bts);
734b0e4b
MM
741 return;
742 }
743
744 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
745}
746
/* Add an entry for the current PC of thread TP, by synthesizing a
   single-block BTS trace containing just that PC and feeding it through
   the normal trace computation.  */

static void
btrace_add_pc (struct thread_info *tp)
{
  struct btrace_data btrace;
  struct btrace_block *block;
  struct regcache *regcache;
  struct cleanup *cleanup;
  CORE_ADDR pc;

  regcache = get_thread_regcache (tp->ptid);
  pc = regcache_read_pc (regcache);

  btrace_data_init (&btrace);
  btrace.format = BTRACE_FORMAT_BTS;
  btrace.variant.bts.blocks = NULL;

  /* Ensure the temporary trace data is freed on all paths.  */
  cleanup = make_cleanup_btrace_data (&btrace);

  block = VEC_safe_push (btrace_block_s, btrace.variant.bts.blocks, NULL);
  block->begin = pc;
  block->end = pc;

  btrace_compute_ftrace (tp, &btrace);

  do_cleanups (cleanup);
}
775
/* See btrace.h.  */

void
btrace_enable (struct thread_info *tp, const struct btrace_config *conf)
{
  /* Already enabled - nothing to do.  */
  if (tp->btrace.target != NULL)
    return;

  if (!target_supports_btrace (conf->format))
    error (_("Target does not support branch tracing."));

  DEBUG ("enable thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));

  tp->btrace.target = target_enable_btrace (tp->ptid, conf);

  /* Add an entry for the current PC so we start tracing from where we
     enabled it.  */
  if (tp->btrace.target != NULL)
    btrace_add_pc (tp);
}
796
797/* See btrace.h. */
798
f4abbc16
MM
799const struct btrace_config *
800btrace_conf (const struct btrace_thread_info *btinfo)
801{
802 if (btinfo->target == NULL)
803 return NULL;
804
805 return target_btrace_conf (btinfo->target);
806}
807
808/* See btrace.h. */
809
02d27625
MM
810void
811btrace_disable (struct thread_info *tp)
812{
813 struct btrace_thread_info *btp = &tp->btrace;
814 int errcode = 0;
815
816 if (btp->target == NULL)
817 return;
818
819 DEBUG ("disable thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
820
821 target_disable_btrace (btp->target);
822 btp->target = NULL;
823
824 btrace_clear (tp);
825}
826
827/* See btrace.h. */
828
829void
830btrace_teardown (struct thread_info *tp)
831{
832 struct btrace_thread_info *btp = &tp->btrace;
833 int errcode = 0;
834
835 if (btp->target == NULL)
836 return;
837
838 DEBUG ("teardown thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
839
840 target_teardown_btrace (btp->target);
841 btp->target = NULL;
842
843 btrace_clear (tp);
844}
845
/* Stitch branch trace in BTS format.
   BTRACE is the new delta trace; TP is the traced thread whose existing
   trace is extended.  Return 0 on success, -1 to request a full re-read.  */

static int
btrace_stitch_bts (struct btrace_data_bts *btrace, struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct btrace_function *last_bfun;
  struct btrace_insn *last_insn;
  btrace_block_s *first_new_block;

  btinfo = &tp->btrace;
  last_bfun = btinfo->end;
  gdb_assert (last_bfun != NULL);
  gdb_assert (!VEC_empty (btrace_block_s, btrace->blocks));

  /* If the existing trace ends with a gap, we just glue the traces
     together.  We need to drop the last (i.e. chronologically first) block
     of the new trace, though, since we can't fill in the start address.  */
  if (VEC_empty (btrace_insn_s, last_bfun->insn))
    {
      VEC_pop (btrace_block_s, btrace->blocks);
      return 0;
    }

  /* Beware that block trace starts with the most recent block, so the
     chronologically first block in the new trace is the last block in
     the new trace's block vector.  */
  first_new_block = VEC_last (btrace_block_s, btrace->blocks);
  last_insn = VEC_last (btrace_insn_s, last_bfun->insn);

  /* If the current PC at the end of the block is the same as in our current
     trace, there are two explanations:
       1. we executed the instruction and some branch brought us back.
       2. we have not made any progress.
     In the first case, the delta trace vector should contain at least two
     entries.
     In the second case, the delta trace vector should contain exactly one
     entry for the partial block containing the current PC.  Remove it.  */
  if (first_new_block->end == last_insn->pc
      && VEC_length (btrace_block_s, btrace->blocks) == 1)
    {
      VEC_pop (btrace_block_s, btrace->blocks);
      return 0;
    }

  DEBUG ("stitching %s to %s", ftrace_print_insn_addr (last_insn),
	 core_addr_to_string_nz (first_new_block->end));

  /* Do a simple sanity check to make sure we don't accidentally end up
     with a bad block.  This should not occur in practice.  */
  if (first_new_block->end < last_insn->pc)
    {
      warning (_("Error while trying to read delta trace.  Falling back to "
		 "a full read."));
      return -1;
    }

  /* We adjust the last block to start at the end of our current trace.  */
  gdb_assert (first_new_block->begin == 0);
  first_new_block->begin = last_insn->pc;

  /* We simply pop the last insn so we can insert it again as part of
     the normal branch trace computation.
     Since instruction iterators are based on indices in the instructions
     vector, we don't leave any pointers dangling.  */
  DEBUG ("pruning insn at %s for stitching",
	 ftrace_print_insn_addr (last_insn));

  VEC_pop (btrace_insn_s, last_bfun->insn);

  /* The instructions vector may become empty temporarily if this has
     been the only instruction in this function segment.
     This violates the invariant but will be remedied shortly by
     btrace_compute_ftrace when we add the new trace.  */

  /* The only case where this would hurt is if the entire trace consisted
     of just that one instruction.  If we remove it, we might turn the now
     empty btrace function segment into a gap.  But we don't want gaps at
     the beginning.  To avoid this, we remove the entire old trace.  */
  if (last_bfun == btinfo->begin && VEC_empty (btrace_insn_s, last_bfun->insn))
    btrace_clear (tp);

  return 0;
}
930
734b0e4b
MM
931/* Adjust the block trace in order to stitch old and new trace together.
932 BTRACE is the new delta trace between the last and the current stop.
31fd9caa
MM
933 TP is the traced thread.
934 May modifx BTRACE as well as the existing trace in TP.
734b0e4b
MM
935 Return 0 on success, -1 otherwise. */
936
937static int
31fd9caa 938btrace_stitch_trace (struct btrace_data *btrace, struct thread_info *tp)
734b0e4b
MM
939{
940 /* If we don't have trace, there's nothing to do. */
941 if (btrace_data_empty (btrace))
942 return 0;
943
944 switch (btrace->format)
945 {
946 case BTRACE_FORMAT_NONE:
947 return 0;
948
949 case BTRACE_FORMAT_BTS:
31fd9caa 950 return btrace_stitch_bts (&btrace->variant.bts, tp);
734b0e4b
MM
951 }
952
953 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
954}
955
969c39fb
MM
956/* Clear the branch trace histories in BTINFO. */
957
958static void
959btrace_clear_history (struct btrace_thread_info *btinfo)
960{
961 xfree (btinfo->insn_history);
962 xfree (btinfo->call_history);
963 xfree (btinfo->replay);
964
965 btinfo->insn_history = NULL;
966 btinfo->call_history = NULL;
967 btinfo->replay = NULL;
968}
969
/* See btrace.h.  */

void
btrace_fetch (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct btrace_target_info *tinfo;
  struct btrace_data btrace;
  struct cleanup *cleanup;
  int errcode;

  DEBUG ("fetch thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));

  btinfo = &tp->btrace;
  tinfo = btinfo->target;
  if (tinfo == NULL)
    return;

  /* There's no way we could get new trace while replaying.
     On the other hand, delta trace would return a partial record with the
     current PC, which is the replay PC, not the last PC, as expected.  */
  if (btinfo->replay != NULL)
    return;

  btrace_data_init (&btrace);
  cleanup = make_cleanup_btrace_data (&btrace);

  /* Let's first try to extend the trace we already have.  */
  if (btinfo->end != NULL)
    {
      errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_DELTA);
      if (errcode == 0)
	{
	  /* Success.  Let's try to stitch the traces together.  */
	  errcode = btrace_stitch_trace (&btrace, tp);
	}
      else
	{
	  /* We failed to read delta trace.  Let's try to read new trace.  */
	  errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_NEW);

	  /* If we got any new trace, discard what we have.  */
	  if (errcode == 0 && !btrace_data_empty (&btrace))
	    btrace_clear (tp);
	}

      /* If we were not able to read the trace, we start over.  */
      if (errcode != 0)
	{
	  btrace_clear (tp);
	  errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
	}
    }
  else
    errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);

  /* If we were not able to read the branch trace, signal an error.  */
  if (errcode != 0)
    error (_("Failed to read branch trace."));

  /* Compute the trace, provided we have any.  */
  if (!btrace_data_empty (&btrace))
    {
      btrace_clear_history (btinfo);
      btrace_compute_ftrace (tp, &btrace);
    }

  do_cleanups (cleanup);
}
1039
1040/* See btrace.h. */
1041
1042void
1043btrace_clear (struct thread_info *tp)
1044{
1045 struct btrace_thread_info *btinfo;
23a7fe75 1046 struct btrace_function *it, *trash;
02d27625
MM
1047
1048 DEBUG ("clear thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
1049
0b722aec
MM
1050 /* Make sure btrace frames that may hold a pointer into the branch
1051 trace data are destroyed. */
1052 reinit_frame_cache ();
1053
02d27625
MM
1054 btinfo = &tp->btrace;
1055
23a7fe75
MM
1056 it = btinfo->begin;
1057 while (it != NULL)
1058 {
1059 trash = it;
1060 it = it->flow.next;
02d27625 1061
23a7fe75
MM
1062 xfree (trash);
1063 }
1064
1065 btinfo->begin = NULL;
1066 btinfo->end = NULL;
31fd9caa 1067 btinfo->ngaps = 0;
23a7fe75 1068
969c39fb 1069 btrace_clear_history (btinfo);
02d27625
MM
1070}
1071
/* See btrace.h.  */

void
btrace_free_objfile (struct objfile *objfile)
{
  struct thread_info *tp;

  DEBUG ("free objfile");

  /* Recorded function segments may reference OBJFILE's symbols; drop every
     live thread's branch trace before the objfile goes away.  */
  ALL_NON_EXITED_THREADS (tp)
    btrace_clear (tp);
}
c12a2917
MM
1084
1085#if defined (HAVE_LIBEXPAT)
1086
1087/* Check the btrace document version. */
1088
1089static void
1090check_xml_btrace_version (struct gdb_xml_parser *parser,
1091 const struct gdb_xml_element *element,
1092 void *user_data, VEC (gdb_xml_value_s) *attributes)
1093{
1094 const char *version = xml_find_attribute (attributes, "version")->value;
1095
1096 if (strcmp (version, "1.0") != 0)
1097 gdb_xml_error (parser, _("Unsupported btrace version: \"%s\""), version);
1098}
1099
/* Parse a btrace "block" xml record.  */

static void
parse_xml_btrace_block (struct gdb_xml_parser *parser,
			const struct gdb_xml_element *element,
			void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  struct btrace_data *btrace;
  struct btrace_block *block;
  ULONGEST *begin, *end;

  btrace = user_data;

  /* The first block record selects the BTS format; every later record must
     agree with it.  */
  switch (btrace->format)
    {
    case BTRACE_FORMAT_BTS:
      break;

    case BTRACE_FORMAT_NONE:
      btrace->format = BTRACE_FORMAT_BTS;
      btrace->variant.bts.blocks = NULL;
      break;

    default:
      gdb_xml_error (parser, _("Btrace format error."));
    }

  /* Both attributes are mandatory, so the parser guarantees they exist.  */
  begin = xml_find_attribute (attributes, "begin")->value;
  end = xml_find_attribute (attributes, "end")->value;

  /* Append the new block and fill it in place.  */
  block = VEC_safe_push (btrace_block_s, btrace->variant.bts.blocks, NULL);
  block->begin = *begin;
  block->end = *end;
}
1134
/* Attributes of a btrace "block" xml record.  */

static const struct gdb_xml_attribute block_attributes[] = {
  { "begin", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { "end", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

/* Attributes of the toplevel "btrace" xml element.  */

static const struct gdb_xml_attribute btrace_attributes[] = {
  { "version", GDB_XML_AF_NONE, NULL, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

/* Child elements of the toplevel "btrace" xml element.  */

static const struct gdb_xml_element btrace_children[] = {
  { "block", block_attributes, NULL,
    GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL, parse_xml_btrace_block, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

/* The toplevel element of a btrace xml document.  */

static const struct gdb_xml_element btrace_elements[] = {
  { "btrace", btrace_attributes, btrace_children, GDB_XML_EF_NONE,
    check_xml_btrace_version, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};
1157
1158#endif /* defined (HAVE_LIBEXPAT) */
1159
/* See btrace.h.  */

void
parse_xml_btrace (struct btrace_data *btrace, const char *buffer)
{
#if defined (HAVE_LIBEXPAT)

  /* Declared inside the guard so a build without expat does not emit
     -Wunused-variable warnings for them.  */
  struct cleanup *cleanup;
  int errcode;

  btrace->format = BTRACE_FORMAT_NONE;

  /* Free any partially-parsed trace data if parsing fails below.  */
  cleanup = make_cleanup_btrace_data (btrace);
  errcode = gdb_xml_parse_quick (_("btrace"), "btrace.dtd", btrace_elements,
				 buffer, btrace);
  if (errcode != 0)
    error (_("Error parsing branch trace."));

  /* Keep parse results.  */
  discard_cleanups (cleanup);

#else  /* !defined (HAVE_LIBEXPAT) */

  error (_("Cannot process branch trace. XML parsing is not supported."));

#endif  /* !defined (HAVE_LIBEXPAT) */
}
23a7fe75 1187
f4abbc16
MM
1188#if defined (HAVE_LIBEXPAT)
1189
/* Parse a btrace-conf "bts" xml record.  */

static void
parse_xml_btrace_conf_bts (struct gdb_xml_parser *parser,
			   const struct gdb_xml_element *element,
			   void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  struct btrace_config *conf;
  struct gdb_xml_value *size;

  conf = user_data;
  conf->format = BTRACE_FORMAT_BTS;
  conf->bts.size = 0;

  /* The "size" attribute is optional (GDB_XML_AF_OPTIONAL); zero means the
     target did not report a buffer size.  */
  size = xml_find_attribute (attributes, "size");
  if (size != NULL)
    conf->bts.size = (unsigned int) * (ULONGEST *) size->value;
}
1208
d33501a5
MM
/* Attributes of a btrace-conf "bts" xml record.  */

static const struct gdb_xml_attribute btrace_conf_bts_attributes[] = {
  { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

/* Child elements of the toplevel "btrace-conf" xml element.  */

static const struct gdb_xml_element btrace_conf_children[] = {
  { "bts", btrace_conf_bts_attributes, NULL, GDB_XML_EF_OPTIONAL,
    parse_xml_btrace_conf_bts, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

/* Attributes of the toplevel "btrace-conf" xml element.  */

static const struct gdb_xml_attribute btrace_conf_attributes[] = {
  { "version", GDB_XML_AF_NONE, NULL, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

/* The toplevel element of a btrace-conf xml document.  */

static const struct gdb_xml_element btrace_conf_elements[] = {
  { "btrace-conf", btrace_conf_attributes, btrace_conf_children,
    GDB_XML_EF_NONE, NULL, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};
1230
1231#endif /* defined (HAVE_LIBEXPAT) */
1232
/* See btrace.h.  */

void
parse_xml_btrace_conf (struct btrace_config *conf, const char *xml)
{
#if defined (HAVE_LIBEXPAT)

  /* Declared inside the guard so a build without expat does not emit an
     -Wunused-variable warning for it.  */
  int errcode;

  errcode = gdb_xml_parse_quick (_("btrace-conf"), "btrace-conf.dtd",
				 btrace_conf_elements, xml, conf);
  if (errcode != 0)
    error (_("Error parsing branch trace configuration."));

#else  /* !defined (HAVE_LIBEXPAT) */

  error (_("XML parsing is not supported."));

#endif  /* !defined (HAVE_LIBEXPAT) */
}
1253
23a7fe75
MM
1254/* See btrace.h. */
1255
1256const struct btrace_insn *
1257btrace_insn_get (const struct btrace_insn_iterator *it)
1258{
1259 const struct btrace_function *bfun;
1260 unsigned int index, end;
1261
1262 index = it->index;
1263 bfun = it->function;
1264
31fd9caa
MM
1265 /* Check if the iterator points to a gap in the trace. */
1266 if (bfun->errcode != 0)
1267 return NULL;
1268
23a7fe75
MM
1269 /* The index is within the bounds of this function's instruction vector. */
1270 end = VEC_length (btrace_insn_s, bfun->insn);
1271 gdb_assert (0 < end);
1272 gdb_assert (index < end);
1273
1274 return VEC_index (btrace_insn_s, bfun->insn, index);
1275}
1276
1277/* See btrace.h. */
1278
1279unsigned int
1280btrace_insn_number (const struct btrace_insn_iterator *it)
1281{
1282 const struct btrace_function *bfun;
1283
1284 bfun = it->function;
31fd9caa
MM
1285
1286 /* Return zero if the iterator points to a gap in the trace. */
1287 if (bfun->errcode != 0)
1288 return 0;
1289
23a7fe75
MM
1290 return bfun->insn_offset + it->index;
1291}
1292
1293/* See btrace.h. */
1294
1295void
1296btrace_insn_begin (struct btrace_insn_iterator *it,
1297 const struct btrace_thread_info *btinfo)
1298{
1299 const struct btrace_function *bfun;
1300
1301 bfun = btinfo->begin;
1302 if (bfun == NULL)
1303 error (_("No trace."));
1304
1305 it->function = bfun;
1306 it->index = 0;
1307}
1308
/* See btrace.h.  */

void
btrace_insn_end (struct btrace_insn_iterator *it,
		 const struct btrace_thread_info *btinfo)
{
  const struct btrace_function *bfun;
  unsigned int length;

  bfun = btinfo->end;
  if (bfun == NULL)
    error (_("No trace."));

  length = VEC_length (btrace_insn_s, bfun->insn);

  /* The last function may either be a gap or it contains the current
     instruction, which is one past the end of the execution trace; ignore
     it.  */
  if (length > 0)
    length -= 1;

  it->function = bfun;
  it->index = length;
}
1333
/* See btrace.h.  */

unsigned int
btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int index, steps;

  bfun = it->function;
  steps = 0;
  index = it->index;

  while (stride != 0)
    {
      unsigned int end, space, adv;

      end = VEC_length (btrace_insn_s, bfun->insn);

      /* An empty function segment represents a gap in the trace.  We count
	 it as one instruction.  */
      if (end == 0)
	{
	  const struct btrace_function *next;

	  next = bfun->flow.next;
	  if (next == NULL)
	    break;

	  stride -= 1;
	  steps += 1;

	  bfun = next;
	  index = 0;

	  continue;
	}

      gdb_assert (0 < end);
      gdb_assert (index < end);

      /* Compute the number of instructions remaining in this segment.  */
      space = end - index;

      /* Advance the iterator as far as possible within this segment.  */
      adv = min (space, stride);
      stride -= adv;
      index += adv;
      steps += adv;

      /* Move to the next function if we're at the end of this one.  */
      if (index == end)
	{
	  const struct btrace_function *next;

	  next = bfun->flow.next;
	  if (next == NULL)
	    {
	      /* We stepped past the last function.

		 Let's adjust the index to point to the last instruction in
		 the previous function.  */
	      index -= 1;
	      steps -= 1;
	      break;
	    }

	  /* We now point to the first instruction in the new function.  */
	  bfun = next;
	  index = 0;
	}

      /* We did make progress.  */
      gdb_assert (adv > 0);
    }

  /* Update the iterator.  */
  it->function = bfun;
  it->index = index;

  /* Return the number of instructions actually advanced; this may be less
     than STRIDE if the end of the trace was reached.  */
  return steps;
}
1415
/* See btrace.h.  */

unsigned int
btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int index, steps;

  bfun = it->function;
  steps = 0;
  index = it->index;

  while (stride != 0)
    {
      unsigned int adv;

      /* Move to the previous function if we're at the start of this one.  */
      if (index == 0)
	{
	  const struct btrace_function *prev;

	  prev = bfun->flow.prev;
	  if (prev == NULL)
	    break;

	  /* We point to one after the last instruction in the new function.  */
	  bfun = prev;
	  index = VEC_length (btrace_insn_s, bfun->insn);

	  /* An empty function segment represents a gap in the trace.  We count
	     it as one instruction.  */
	  if (index == 0)
	    {
	      stride -= 1;
	      steps += 1;

	      continue;
	    }
	}

      /* Advance the iterator as far as possible within this segment.  */
      adv = min (index, stride);

      stride -= adv;
      index -= adv;
      steps += adv;

      /* We did make progress.  */
      gdb_assert (adv > 0);
    }

  /* Update the iterator.  */
  it->function = bfun;
  it->index = index;

  /* Return the number of instructions actually stepped back; this may be
     less than STRIDE if the beginning of the trace was reached.  */
  return steps;
}
1473
/* See btrace.h.  Returns a negative, zero, or positive value if LHS is
   before, equal to, or after RHS respectively.  */

int
btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
		 const struct btrace_insn_iterator *rhs)
{
  unsigned int lnum, rnum;

  lnum = btrace_insn_number (lhs);
  rnum = btrace_insn_number (rhs);

  /* A gap has an instruction number of zero.  Things are getting more
     complicated if gaps are involved.

     We take the instruction number offset from the iterator's function.
     This is the number of the first instruction after the gap.

     This is OK as long as both lhs and rhs point to gaps.  If only one of
     them does, we need to adjust the number based on the other's regular
     instruction number.  Otherwise, a gap might compare equal to an
     instruction.  */

  if (lnum == 0 && rnum == 0)
    {
      lnum = lhs->function->insn_offset;
      rnum = rhs->function->insn_offset;
    }
  else if (lnum == 0)
    {
      lnum = lhs->function->insn_offset;

      if (lnum == rnum)
	lnum -= 1;
    }
  else if (rnum == 0)
    {
      rnum = rhs->function->insn_offset;

      if (rnum == lnum)
	rnum -= 1;
    }

  /* The unsigned difference is cast to int; this yields the conventional
     sign as long as both iterators belong to the same trace.  */
  return (int) (lnum - rnum);
}
1518
/* See btrace.h.  Returns non-zero and sets IT on success, zero if NUMBER
   does not denote a recorded instruction.  */

int
btrace_find_insn_by_number (struct btrace_insn_iterator *it,
			    const struct btrace_thread_info *btinfo,
			    unsigned int number)
{
  const struct btrace_function *bfun;
  unsigned int end, length;

  /* Walk backwards to the first non-gap segment whose offset does not
     exceed NUMBER.  */
  for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
    {
      /* Skip gaps. */
      if (bfun->errcode != 0)
	continue;

      if (bfun->insn_offset <= number)
	break;
    }

  if (bfun == NULL)
    return 0;

  length = VEC_length (btrace_insn_s, bfun->insn);
  gdb_assert (length > 0);

  /* Fail if NUMBER lies beyond the segment we found.  */
  end = bfun->insn_offset + length;
  if (end <= number)
    return 0;

  it->function = bfun;
  it->index = number - bfun->insn_offset;

  return 1;
}
1554
/* See btrace.h.  */

const struct btrace_function *
btrace_call_get (const struct btrace_call_iterator *it)
{
  /* NULL denotes the one-past-the-end iterator.  */
  return it->function;
}
1562
/* See btrace.h.  */

unsigned int
btrace_call_number (const struct btrace_call_iterator *it)
{
  const struct btrace_thread_info *btinfo;
  const struct btrace_function *bfun;
  unsigned int insns;

  btinfo = it->btinfo;
  bfun = it->function;
  if (bfun != NULL)
    return bfun->number;

  /* For the end iterator, i.e. bfun == NULL, we return one more than the
     number of the last function.  */
  /* NOTE(review): assumes BTINFO->end is non-NULL for a valid end
     iterator; btrace_call_end errors out before creating one otherwise.  */
  bfun = btinfo->end;
  insns = VEC_length (btrace_insn_s, bfun->insn);

  /* If the function contains only a single instruction (i.e. the current
     instruction), it will be skipped and its number is already the number
     we seek.  */
  if (insns == 1)
    return bfun->number;

  /* Otherwise, return one more than the number of the last function.  */
  return bfun->number + 1;
}
1591
1592/* See btrace.h. */
1593
1594void
1595btrace_call_begin (struct btrace_call_iterator *it,
1596 const struct btrace_thread_info *btinfo)
1597{
1598 const struct btrace_function *bfun;
1599
1600 bfun = btinfo->begin;
1601 if (bfun == NULL)
1602 error (_("No trace."));
1603
1604 it->btinfo = btinfo;
1605 it->function = bfun;
1606}
1607
1608/* See btrace.h. */
1609
1610void
1611btrace_call_end (struct btrace_call_iterator *it,
1612 const struct btrace_thread_info *btinfo)
1613{
1614 const struct btrace_function *bfun;
1615
1616 bfun = btinfo->end;
1617 if (bfun == NULL)
1618 error (_("No trace."));
1619
1620 it->btinfo = btinfo;
1621 it->function = NULL;
1622}
1623
/* See btrace.h.  */

unsigned int
btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int steps;

  bfun = it->function;
  steps = 0;
  while (bfun != NULL)
    {
      const struct btrace_function *next;
      unsigned int insns;

      next = bfun->flow.next;
      if (next == NULL)
	{
	  /* Ignore the last function if it only contains a single
	     (i.e. the current) instruction.  */
	  insns = VEC_length (btrace_insn_s, bfun->insn);
	  if (insns == 1)
	    steps -= 1;
	  /* NOTE(review): if STEPS is zero here it wraps to UINT_MAX, but
	     the STEPS += 1 below brings it back to zero before we return,
	     since NEXT is NULL and the loop then exits.  */
	}

      if (stride == steps)
	break;

      bfun = next;
      steps += 1;
    }

  it->function = bfun;
  /* Return the number of function segments actually advanced.  */
  return steps;
}
1659
/* See btrace.h.  */

unsigned int
btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride)
{
  const struct btrace_thread_info *btinfo;
  const struct btrace_function *bfun;
  unsigned int steps;

  bfun = it->function;
  steps = 0;

  /* Stepping back from the end iterator requires special care.  */
  if (bfun == NULL)
    {
      unsigned int insns;

      btinfo = it->btinfo;
      bfun = btinfo->end;
      if (bfun == NULL)
	return 0;

      /* Ignore the last function if it only contains a single
	 (i.e. the current) instruction.  */
      insns = VEC_length (btrace_insn_s, bfun->insn);
      if (insns == 1)
	bfun = bfun->flow.prev;

      if (bfun == NULL)
	return 0;

      steps += 1;
    }

  while (steps < stride)
    {
      const struct btrace_function *prev;

      prev = bfun->flow.prev;
      if (prev == NULL)
	break;

      bfun = prev;
      steps += 1;
    }

  it->function = bfun;
  /* Return the number of function segments actually stepped back.  */
  return steps;
}
1708
/* See btrace.h.  */

int
btrace_call_cmp (const struct btrace_call_iterator *lhs,
		 const struct btrace_call_iterator *rhs)
{
  /* Compare by call number; the unsigned difference cast to int yields the
     conventional negative/zero/positive result.  */
  return (int) (btrace_call_number (lhs) - btrace_call_number (rhs));
}
1722
1723/* See btrace.h. */
1724
1725int
1726btrace_find_call_by_number (struct btrace_call_iterator *it,
1727 const struct btrace_thread_info *btinfo,
1728 unsigned int number)
1729{
1730 const struct btrace_function *bfun;
1731
1732 for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
1733 {
1734 unsigned int bnum;
1735
1736 bnum = bfun->number;
1737 if (number == bnum)
1738 {
1739 it->btinfo = btinfo;
1740 it->function = bfun;
1741 return 1;
1742 }
1743
1744 /* Functions are ordered and numbered consecutively. We could bail out
1745 earlier. On the other hand, it is very unlikely that we search for
1746 a nonexistent function. */
1747 }
1748
1749 return 0;
1750}
1751
1752/* See btrace.h. */
1753
1754void
1755btrace_set_insn_history (struct btrace_thread_info *btinfo,
1756 const struct btrace_insn_iterator *begin,
1757 const struct btrace_insn_iterator *end)
1758{
1759 if (btinfo->insn_history == NULL)
1760 btinfo->insn_history = xzalloc (sizeof (*btinfo->insn_history));
1761
1762 btinfo->insn_history->begin = *begin;
1763 btinfo->insn_history->end = *end;
1764}
1765
1766/* See btrace.h. */
1767
1768void
1769btrace_set_call_history (struct btrace_thread_info *btinfo,
1770 const struct btrace_call_iterator *begin,
1771 const struct btrace_call_iterator *end)
1772{
1773 gdb_assert (begin->btinfo == end->btinfo);
1774
1775 if (btinfo->call_history == NULL)
1776 btinfo->call_history = xzalloc (sizeof (*btinfo->call_history));
1777
1778 btinfo->call_history->begin = *begin;
1779 btinfo->call_history->end = *end;
1780}
07bbe694
MM
1781
/* See btrace.h.  */

int
btrace_is_replaying (struct thread_info *tp)
{
  /* The thread is replaying iff it has an active replay iterator.  */
  return tp->btrace.replay != NULL;
}
6e07b1d2
MM
1789
/* See btrace.h.  */

int
btrace_is_empty (struct thread_info *tp)
{
  struct btrace_insn_iterator begin, end;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  /* No recorded function segments at all.  */
  if (btinfo->begin == NULL)
    return 1;

  /* The trace is also considered empty when the begin and end iterators
     coincide, i.e. the only recorded instruction is the current one.  */
  btrace_insn_begin (&begin, btinfo);
  btrace_insn_end (&end, btinfo);

  return btrace_insn_cmp (&begin, &end) == 0;
}
734b0e4b
MM
1808
/* Cleanup callback forwarding to btrace_data_fini.  */

static void
do_btrace_data_cleanup (void *arg)
{
  struct btrace_data *data = arg;

  btrace_data_fini (data);
}
1816
/* See btrace.h.  */

struct cleanup *
make_cleanup_btrace_data (struct btrace_data *data)
{
  /* The cleanup releases DATA's trace buffer (via btrace_data_fini); it
     does not free DATA itself.  */
  return make_cleanup (do_btrace_data_cleanup, data);
}
This page took 0.292224 seconds and 4 git commands to generate.