S/390: Fix indentation
[deliverable/binutils-gdb.git] / gdb / btrace.c
CommitLineData
02d27625
MM
1/* Branch trace support for GDB, the GNU debugger.
2
61baf725 3 Copyright (C) 2013-2017 Free Software Foundation, Inc.
02d27625
MM
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
d41f6d8e 22#include "defs.h"
02d27625
MM
23#include "btrace.h"
24#include "gdbthread.h"
02d27625
MM
25#include "inferior.h"
26#include "target.h"
27#include "record.h"
28#include "symtab.h"
29#include "disasm.h"
30#include "source.h"
31#include "filenames.h"
c12a2917 32#include "xml-support.h"
6e07b1d2 33#include "regcache.h"
b20a6524 34#include "rsp-low.h"
b0627500
MM
35#include "gdbcmd.h"
36#include "cli/cli-utils.h"
b20a6524
MM
37
38#include <inttypes.h>
b0627500 39#include <ctype.h>
325fac50 40#include <algorithm>
b0627500
MM
41
42/* Command lists for btrace maintenance commands. */
43static struct cmd_list_element *maint_btrace_cmdlist;
44static struct cmd_list_element *maint_btrace_set_cmdlist;
45static struct cmd_list_element *maint_btrace_show_cmdlist;
46static struct cmd_list_element *maint_btrace_pt_set_cmdlist;
47static struct cmd_list_element *maint_btrace_pt_show_cmdlist;
48
49/* Control whether to skip PAD packets when computing the packet history. */
50static int maint_btrace_pt_skip_pad = 1;
b20a6524 51
d87fdac3
MM
52/* A vector of function segments. */
53typedef struct btrace_function * bfun_s;
54DEF_VEC_P (bfun_s);
55
b20a6524 56static void btrace_add_pc (struct thread_info *tp);
02d27625
MM
57
58/* Print a record debug message. Use do ... while (0) to avoid ambiguities
59 when used in if statements. */
60
61#define DEBUG(msg, args...) \
62 do \
63 { \
64 if (record_debug != 0) \
65 fprintf_unfiltered (gdb_stdlog, \
66 "[btrace] " msg "\n", ##args); \
67 } \
68 while (0)
69
70#define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args)
71
02d27625
MM
72/* Return the function name of a recorded function segment for printing.
73 This function never returns NULL. */
74
75static const char *
23a7fe75 76ftrace_print_function_name (const struct btrace_function *bfun)
02d27625
MM
77{
78 struct minimal_symbol *msym;
79 struct symbol *sym;
80
81 msym = bfun->msym;
82 sym = bfun->sym;
83
84 if (sym != NULL)
85 return SYMBOL_PRINT_NAME (sym);
86
87 if (msym != NULL)
efd66ac6 88 return MSYMBOL_PRINT_NAME (msym);
02d27625
MM
89
90 return "<unknown>";
91}
92
93/* Return the file name of a recorded function segment for printing.
94 This function never returns NULL. */
95
96static const char *
23a7fe75 97ftrace_print_filename (const struct btrace_function *bfun)
02d27625
MM
98{
99 struct symbol *sym;
100 const char *filename;
101
102 sym = bfun->sym;
103
104 if (sym != NULL)
08be3fe3 105 filename = symtab_to_filename_for_display (symbol_symtab (sym));
02d27625
MM
106 else
107 filename = "<unknown>";
108
109 return filename;
110}
111
23a7fe75
MM
112/* Return a string representation of the address of an instruction.
113 This function never returns NULL. */
02d27625 114
23a7fe75
MM
115static const char *
116ftrace_print_insn_addr (const struct btrace_insn *insn)
02d27625 117{
23a7fe75
MM
118 if (insn == NULL)
119 return "<nil>";
120
121 return core_addr_to_string_nz (insn->pc);
02d27625
MM
122}
123
23a7fe75 124/* Print an ftrace debug status message. */
02d27625
MM
125
126static void
23a7fe75 127ftrace_debug (const struct btrace_function *bfun, const char *prefix)
02d27625 128{
23a7fe75
MM
129 const char *fun, *file;
130 unsigned int ibegin, iend;
ce0dfbea 131 int level;
23a7fe75
MM
132
133 fun = ftrace_print_function_name (bfun);
134 file = ftrace_print_filename (bfun);
135 level = bfun->level;
136
23a7fe75
MM
137 ibegin = bfun->insn_offset;
138 iend = ibegin + VEC_length (btrace_insn_s, bfun->insn);
139
ce0dfbea
MM
140 DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, insn = [%u; %u)",
141 prefix, fun, file, level, ibegin, iend);
02d27625
MM
142}
143
69090cee
TW
144/* Return the number of instructions in a given function call segment. */
145
146static unsigned int
147ftrace_call_num_insn (const struct btrace_function* bfun)
148{
149 if (bfun == NULL)
150 return 0;
151
152 /* A gap is always counted as one instruction. */
153 if (bfun->errcode != 0)
154 return 1;
155
156 return VEC_length (btrace_insn_s, bfun->insn);
157}
158
23a7fe75
MM
159/* Return non-zero if BFUN does not match MFUN and FUN,
160 return zero otherwise. */
02d27625
MM
161
162static int
23a7fe75
MM
163ftrace_function_switched (const struct btrace_function *bfun,
164 const struct minimal_symbol *mfun,
165 const struct symbol *fun)
02d27625
MM
166{
167 struct minimal_symbol *msym;
168 struct symbol *sym;
169
02d27625
MM
170 msym = bfun->msym;
171 sym = bfun->sym;
172
173 /* If the minimal symbol changed, we certainly switched functions. */
174 if (mfun != NULL && msym != NULL
efd66ac6 175 && strcmp (MSYMBOL_LINKAGE_NAME (mfun), MSYMBOL_LINKAGE_NAME (msym)) != 0)
02d27625
MM
176 return 1;
177
178 /* If the symbol changed, we certainly switched functions. */
179 if (fun != NULL && sym != NULL)
180 {
181 const char *bfname, *fname;
182
183 /* Check the function name. */
184 if (strcmp (SYMBOL_LINKAGE_NAME (fun), SYMBOL_LINKAGE_NAME (sym)) != 0)
185 return 1;
186
187 /* Check the location of those functions, as well. */
08be3fe3
DE
188 bfname = symtab_to_fullname (symbol_symtab (sym));
189 fname = symtab_to_fullname (symbol_symtab (fun));
02d27625
MM
190 if (filename_cmp (fname, bfname) != 0)
191 return 1;
192 }
193
23a7fe75
MM
194 /* If we lost symbol information, we switched functions. */
195 if (!(msym == NULL && sym == NULL) && mfun == NULL && fun == NULL)
196 return 1;
197
198 /* If we gained symbol information, we switched functions. */
199 if (msym == NULL && sym == NULL && !(mfun == NULL && fun == NULL))
200 return 1;
201
02d27625
MM
202 return 0;
203}
204
23a7fe75
MM
205/* Allocate and initialize a new branch trace function segment.
206 PREV is the chronologically preceding function segment.
207 MFUN and FUN are the symbol information we have for this function. */
208
209static struct btrace_function *
210ftrace_new_function (struct btrace_function *prev,
211 struct minimal_symbol *mfun,
212 struct symbol *fun)
213{
214 struct btrace_function *bfun;
215
8d749320 216 bfun = XCNEW (struct btrace_function);
23a7fe75
MM
217
218 bfun->msym = mfun;
219 bfun->sym = fun;
220 bfun->flow.prev = prev;
221
5de9129b
MM
222 if (prev == NULL)
223 {
224 /* Start counting at one. */
225 bfun->number = 1;
226 bfun->insn_offset = 1;
227 }
228 else
23a7fe75
MM
229 {
230 gdb_assert (prev->flow.next == NULL);
231 prev->flow.next = bfun;
02d27625 232
23a7fe75 233 bfun->number = prev->number + 1;
69090cee 234 bfun->insn_offset = prev->insn_offset + ftrace_call_num_insn (prev);
31fd9caa 235 bfun->level = prev->level;
23a7fe75
MM
236 }
237
238 return bfun;
02d27625
MM
239}
240
23a7fe75 241/* Update the UP field of a function segment. */
02d27625 242
23a7fe75
MM
243static void
244ftrace_update_caller (struct btrace_function *bfun,
245 struct btrace_function *caller,
246 enum btrace_function_flag flags)
02d27625 247{
23a7fe75
MM
248 if (bfun->up != NULL)
249 ftrace_debug (bfun, "updating caller");
02d27625 250
23a7fe75
MM
251 bfun->up = caller;
252 bfun->flags = flags;
253
254 ftrace_debug (bfun, "set caller");
d87fdac3 255 ftrace_debug (caller, "..to");
23a7fe75
MM
256}
257
258/* Fix up the caller for all segments of a function. */
259
260static void
261ftrace_fixup_caller (struct btrace_function *bfun,
262 struct btrace_function *caller,
263 enum btrace_function_flag flags)
264{
265 struct btrace_function *prev, *next;
266
267 ftrace_update_caller (bfun, caller, flags);
268
269 /* Update all function segments belonging to the same function. */
270 for (prev = bfun->segment.prev; prev != NULL; prev = prev->segment.prev)
271 ftrace_update_caller (prev, caller, flags);
272
273 for (next = bfun->segment.next; next != NULL; next = next->segment.next)
274 ftrace_update_caller (next, caller, flags);
275}
276
277/* Add a new function segment for a call.
278 CALLER is the chronologically preceding function segment.
279 MFUN and FUN are the symbol information we have for this function. */
280
281static struct btrace_function *
282ftrace_new_call (struct btrace_function *caller,
283 struct minimal_symbol *mfun,
284 struct symbol *fun)
285{
286 struct btrace_function *bfun;
287
288 bfun = ftrace_new_function (caller, mfun, fun);
289 bfun->up = caller;
31fd9caa 290 bfun->level += 1;
23a7fe75
MM
291
292 ftrace_debug (bfun, "new call");
293
294 return bfun;
295}
296
297/* Add a new function segment for a tail call.
298 CALLER is the chronologically preceding function segment.
299 MFUN and FUN are the symbol information we have for this function. */
300
301static struct btrace_function *
302ftrace_new_tailcall (struct btrace_function *caller,
303 struct minimal_symbol *mfun,
304 struct symbol *fun)
305{
306 struct btrace_function *bfun;
02d27625 307
23a7fe75
MM
308 bfun = ftrace_new_function (caller, mfun, fun);
309 bfun->up = caller;
31fd9caa 310 bfun->level += 1;
23a7fe75 311 bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;
02d27625 312
23a7fe75
MM
313 ftrace_debug (bfun, "new tail call");
314
315 return bfun;
316}
317
d87fdac3
MM
318/* Return the caller of BFUN or NULL if there is none. This function skips
319 tail calls in the call chain. */
320static struct btrace_function *
321ftrace_get_caller (struct btrace_function *bfun)
322{
323 for (; bfun != NULL; bfun = bfun->up)
324 if ((bfun->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
325 return bfun->up;
326
327 return NULL;
328}
329
23a7fe75
MM
330/* Find the innermost caller in the back trace of BFUN with MFUN/FUN
331 symbol information. */
332
333static struct btrace_function *
334ftrace_find_caller (struct btrace_function *bfun,
335 struct minimal_symbol *mfun,
336 struct symbol *fun)
337{
338 for (; bfun != NULL; bfun = bfun->up)
339 {
340 /* Skip functions with incompatible symbol information. */
341 if (ftrace_function_switched (bfun, mfun, fun))
342 continue;
343
344 /* This is the function segment we're looking for. */
345 break;
346 }
347
348 return bfun;
349}
350
351/* Find the innermost caller in the back trace of BFUN, skipping all
352 function segments that do not end with a call instruction (e.g.
353 tail calls ending with a jump). */
354
355static struct btrace_function *
7d5c24b3 356ftrace_find_call (struct btrace_function *bfun)
23a7fe75
MM
357{
358 for (; bfun != NULL; bfun = bfun->up)
02d27625 359 {
23a7fe75 360 struct btrace_insn *last;
02d27625 361
31fd9caa
MM
362 /* Skip gaps. */
363 if (bfun->errcode != 0)
364 continue;
23a7fe75
MM
365
366 last = VEC_last (btrace_insn_s, bfun->insn);
02d27625 367
7d5c24b3 368 if (last->iclass == BTRACE_INSN_CALL)
23a7fe75
MM
369 break;
370 }
371
372 return bfun;
373}
374
375/* Add a continuation segment for a function into which we return.
376 PREV is the chronologically preceding function segment.
377 MFUN and FUN are the symbol information we have for this function. */
378
379static struct btrace_function *
7d5c24b3 380ftrace_new_return (struct btrace_function *prev,
23a7fe75
MM
381 struct minimal_symbol *mfun,
382 struct symbol *fun)
383{
384 struct btrace_function *bfun, *caller;
385
386 bfun = ftrace_new_function (prev, mfun, fun);
387
388 /* It is important to start at PREV's caller. Otherwise, we might find
389 PREV itself, if PREV is a recursive function. */
390 caller = ftrace_find_caller (prev->up, mfun, fun);
391 if (caller != NULL)
392 {
393 /* The caller of PREV is the preceding btrace function segment in this
394 function instance. */
395 gdb_assert (caller->segment.next == NULL);
396
397 caller->segment.next = bfun;
398 bfun->segment.prev = caller;
399
400 /* Maintain the function level. */
401 bfun->level = caller->level;
402
403 /* Maintain the call stack. */
404 bfun->up = caller->up;
405 bfun->flags = caller->flags;
406
407 ftrace_debug (bfun, "new return");
408 }
409 else
410 {
411 /* We did not find a caller. This could mean that something went
412 wrong or that the call is simply not included in the trace. */
02d27625 413
23a7fe75 414 /* Let's search for some actual call. */
7d5c24b3 415 caller = ftrace_find_call (prev->up);
23a7fe75 416 if (caller == NULL)
02d27625 417 {
23a7fe75
MM
418 /* There is no call in PREV's back trace. We assume that the
419 branch trace did not include it. */
420
259ba1e8
MM
421 /* Let's find the topmost function and add a new caller for it.
422 This should handle a series of initial tail calls. */
23a7fe75
MM
423 while (prev->up != NULL)
424 prev = prev->up;
02d27625 425
259ba1e8 426 bfun->level = prev->level - 1;
23a7fe75
MM
427
428 /* Fix up the call stack for PREV. */
429 ftrace_fixup_caller (prev, bfun, BFUN_UP_LINKS_TO_RET);
430
431 ftrace_debug (bfun, "new return - no caller");
432 }
433 else
02d27625 434 {
23a7fe75 435 /* There is a call in PREV's back trace to which we should have
259ba1e8
MM
436 returned but didn't. Let's start a new, separate back trace
437 from PREV's level. */
438 bfun->level = prev->level - 1;
439
440 /* We fix up the back trace for PREV but leave other function segments
441 on the same level as they are.
442 This should handle things like schedule () correctly where we're
443 switching contexts. */
444 prev->up = bfun;
445 prev->flags = BFUN_UP_LINKS_TO_RET;
02d27625 446
23a7fe75 447 ftrace_debug (bfun, "new return - unknown caller");
02d27625 448 }
23a7fe75
MM
449 }
450
451 return bfun;
452}
453
454/* Add a new function segment for a function switch.
455 PREV is the chronologically preceding function segment.
456 MFUN and FUN are the symbol information we have for this function. */
457
458static struct btrace_function *
459ftrace_new_switch (struct btrace_function *prev,
460 struct minimal_symbol *mfun,
461 struct symbol *fun)
462{
463 struct btrace_function *bfun;
464
4c2c7ac6
MM
465 /* This is an unexplained function switch. We can't really be sure about the
466 call stack, yet the best I can think of right now is to preserve it. */
23a7fe75 467 bfun = ftrace_new_function (prev, mfun, fun);
4c2c7ac6
MM
468 bfun->up = prev->up;
469 bfun->flags = prev->flags;
02d27625 470
23a7fe75
MM
471 ftrace_debug (bfun, "new switch");
472
473 return bfun;
474}
475
31fd9caa
MM
476/* Add a new function segment for a gap in the trace due to a decode error.
477 PREV is the chronologically preceding function segment.
478 ERRCODE is the format-specific error code. */
479
480static struct btrace_function *
481ftrace_new_gap (struct btrace_function *prev, int errcode)
482{
483 struct btrace_function *bfun;
484
485 /* We hijack prev if it was empty. */
486 if (prev != NULL && prev->errcode == 0
487 && VEC_empty (btrace_insn_s, prev->insn))
488 bfun = prev;
489 else
490 bfun = ftrace_new_function (prev, NULL, NULL);
491
492 bfun->errcode = errcode;
493
494 ftrace_debug (bfun, "new gap");
495
496 return bfun;
497}
498
23a7fe75
MM
499/* Update BFUN with respect to the instruction at PC. This may create new
500 function segments.
501 Return the chronologically latest function segment, never NULL. */
502
503static struct btrace_function *
7d5c24b3 504ftrace_update_function (struct btrace_function *bfun, CORE_ADDR pc)
23a7fe75
MM
505{
506 struct bound_minimal_symbol bmfun;
507 struct minimal_symbol *mfun;
508 struct symbol *fun;
509 struct btrace_insn *last;
510
511 /* Try to determine the function we're in. We use both types of symbols
512 to avoid surprises when we sometimes get a full symbol and sometimes
513 only a minimal symbol. */
514 fun = find_pc_function (pc);
515 bmfun = lookup_minimal_symbol_by_pc (pc);
516 mfun = bmfun.minsym;
517
518 if (fun == NULL && mfun == NULL)
519 DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));
520
31fd9caa
MM
521 /* If we didn't have a function or if we had a gap before, we create one. */
522 if (bfun == NULL || bfun->errcode != 0)
23a7fe75
MM
523 return ftrace_new_function (bfun, mfun, fun);
524
525 /* Check the last instruction, if we have one.
526 We do this check first, since it allows us to fill in the call stack
527 links in addition to the normal flow links. */
528 last = NULL;
529 if (!VEC_empty (btrace_insn_s, bfun->insn))
530 last = VEC_last (btrace_insn_s, bfun->insn);
531
532 if (last != NULL)
533 {
7d5c24b3
MM
534 switch (last->iclass)
535 {
536 case BTRACE_INSN_RETURN:
986b6601
MM
537 {
538 const char *fname;
539
540 /* On some systems, _dl_runtime_resolve returns to the resolved
541 function instead of jumping to it. From our perspective,
542 however, this is a tailcall.
543 If we treated it as return, we wouldn't be able to find the
544 resolved function in our stack back trace. Hence, we would
545 lose the current stack back trace and start anew with an empty
546 back trace. When the resolved function returns, we would then
547 create a stack back trace with the same function names but
548 different frame id's. This will confuse stepping. */
549 fname = ftrace_print_function_name (bfun);
550 if (strcmp (fname, "_dl_runtime_resolve") == 0)
551 return ftrace_new_tailcall (bfun, mfun, fun);
552
553 return ftrace_new_return (bfun, mfun, fun);
554 }
23a7fe75 555
7d5c24b3
MM
556 case BTRACE_INSN_CALL:
557 /* Ignore calls to the next instruction. They are used for PIC. */
558 if (last->pc + last->size == pc)
559 break;
23a7fe75 560
7d5c24b3 561 return ftrace_new_call (bfun, mfun, fun);
23a7fe75 562
7d5c24b3
MM
563 case BTRACE_INSN_JUMP:
564 {
565 CORE_ADDR start;
23a7fe75 566
7d5c24b3 567 start = get_pc_function_start (pc);
23a7fe75 568
2dfdb47a
MM
569 /* A jump to the start of a function is (typically) a tail call. */
570 if (start == pc)
571 return ftrace_new_tailcall (bfun, mfun, fun);
572
7d5c24b3 573 /* If we can't determine the function for PC, we treat a jump at
2dfdb47a
MM
574 the end of the block as tail call if we're switching functions
575 and as an intra-function branch if we don't. */
576 if (start == 0 && ftrace_function_switched (bfun, mfun, fun))
7d5c24b3 577 return ftrace_new_tailcall (bfun, mfun, fun);
2dfdb47a
MM
578
579 break;
7d5c24b3 580 }
02d27625 581 }
23a7fe75
MM
582 }
583
584 /* Check if we're switching functions for some other reason. */
585 if (ftrace_function_switched (bfun, mfun, fun))
586 {
587 DEBUG_FTRACE ("switching from %s in %s at %s",
588 ftrace_print_insn_addr (last),
589 ftrace_print_function_name (bfun),
590 ftrace_print_filename (bfun));
02d27625 591
23a7fe75
MM
592 return ftrace_new_switch (bfun, mfun, fun);
593 }
594
595 return bfun;
596}
597
23a7fe75
MM
598/* Add the instruction at PC to BFUN's instructions. */
599
600static void
7d5c24b3
MM
601ftrace_update_insns (struct btrace_function *bfun,
602 const struct btrace_insn *insn)
23a7fe75 603{
7d5c24b3 604 VEC_safe_push (btrace_insn_s, bfun->insn, insn);
23a7fe75
MM
605
606 if (record_debug > 1)
607 ftrace_debug (bfun, "update insn");
608}
609
7d5c24b3
MM
610/* Classify the instruction at PC. */
611
612static enum btrace_insn_class
613ftrace_classify_insn (struct gdbarch *gdbarch, CORE_ADDR pc)
614{
7d5c24b3
MM
615 enum btrace_insn_class iclass;
616
617 iclass = BTRACE_INSN_OTHER;
492d29ea 618 TRY
7d5c24b3
MM
619 {
620 if (gdbarch_insn_is_call (gdbarch, pc))
621 iclass = BTRACE_INSN_CALL;
622 else if (gdbarch_insn_is_ret (gdbarch, pc))
623 iclass = BTRACE_INSN_RETURN;
624 else if (gdbarch_insn_is_jump (gdbarch, pc))
625 iclass = BTRACE_INSN_JUMP;
626 }
492d29ea
PA
627 CATCH (error, RETURN_MASK_ERROR)
628 {
629 }
630 END_CATCH
7d5c24b3
MM
631
632 return iclass;
633}
634
d87fdac3
MM
635/* Try to match the back trace at LHS to the back trace at RHS. Returns the
636 number of matching function segments or zero if the back traces do not
637 match. */
638
639static int
640ftrace_match_backtrace (struct btrace_function *lhs,
641 struct btrace_function *rhs)
642{
643 int matches;
644
645 for (matches = 0; lhs != NULL && rhs != NULL; ++matches)
646 {
647 if (ftrace_function_switched (lhs, rhs->msym, rhs->sym))
648 return 0;
649
650 lhs = ftrace_get_caller (lhs);
651 rhs = ftrace_get_caller (rhs);
652 }
653
654 return matches;
655}
656
657/* Add ADJUSTMENT to the level of BFUN and succeeding function segments. */
658
659static void
660ftrace_fixup_level (struct btrace_function *bfun, int adjustment)
661{
662 if (adjustment == 0)
663 return;
664
665 DEBUG_FTRACE ("fixup level (%+d)", adjustment);
666 ftrace_debug (bfun, "..bfun");
667
668 for (; bfun != NULL; bfun = bfun->flow.next)
669 bfun->level += adjustment;
670}
671
672/* Recompute the global level offset. Traverse the function trace and compute
673 the global level offset as the negative of the minimal function level. */
674
675static void
676ftrace_compute_global_level_offset (struct btrace_thread_info *btinfo)
677{
678 struct btrace_function *bfun, *end;
679 int level;
680
681 if (btinfo == NULL)
682 return;
683
684 bfun = btinfo->begin;
685 if (bfun == NULL)
686 return;
687
688 /* The last function segment contains the current instruction, which is not
689 really part of the trace. If it contains just this one instruction, we
690 stop when we reach it; otherwise, we let the below loop run to the end. */
691 end = btinfo->end;
692 if (VEC_length (btrace_insn_s, end->insn) > 1)
693 end = NULL;
694
695 level = INT_MAX;
696 for (; bfun != end; bfun = bfun->flow.next)
697 level = std::min (level, bfun->level);
698
699 DEBUG_FTRACE ("setting global level offset: %d", -level);
700 btinfo->level = -level;
701}
702
703/* Connect the function segments PREV and NEXT in a bottom-to-top walk as in
704 ftrace_connect_backtrace. */
705
706static void
707ftrace_connect_bfun (struct btrace_function *prev,
708 struct btrace_function *next)
709{
710 DEBUG_FTRACE ("connecting...");
711 ftrace_debug (prev, "..prev");
712 ftrace_debug (next, "..next");
713
714 /* The function segments are not yet connected. */
715 gdb_assert (prev->segment.next == NULL);
716 gdb_assert (next->segment.prev == NULL);
717
718 prev->segment.next = next;
719 next->segment.prev = prev;
720
721 /* We may have moved NEXT to a different function level. */
722 ftrace_fixup_level (next, prev->level - next->level);
723
724 /* If we run out of back trace for one, let's use the other's. */
725 if (prev->up == NULL)
726 {
727 if (next->up != NULL)
728 {
729 DEBUG_FTRACE ("using next's callers");
730 ftrace_fixup_caller (prev, next->up, next->flags);
731 }
732 }
733 else if (next->up == NULL)
734 {
735 if (prev->up != NULL)
736 {
737 DEBUG_FTRACE ("using prev's callers");
738 ftrace_fixup_caller (next, prev->up, prev->flags);
739 }
740 }
741 else
742 {
743 /* PREV may have a tailcall caller, NEXT can't. If it does, fixup the up
744 link to add the tail callers to NEXT's back trace.
745
746 This removes NEXT->UP from NEXT's back trace. It will be added back
747 when connecting NEXT and PREV's callers - provided they exist.
748
749 If PREV's back trace consists of a series of tail calls without an
750 actual call, there will be no further connection and NEXT's caller will
751 be removed for good. To catch this case, we handle it here and connect
752 the top of PREV's back trace to NEXT's caller. */
753 if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
754 {
755 struct btrace_function *caller;
756 btrace_function_flags flags;
757
758 /* We checked NEXT->UP above so CALLER can't be NULL. */
759 caller = next->up;
760 flags = next->flags;
761
762 DEBUG_FTRACE ("adding prev's tail calls to next");
763
764 ftrace_fixup_caller (next, prev->up, prev->flags);
765
766 for (prev = prev->up; prev != NULL; prev = prev->up)
767 {
768 /* At the end of PREV's back trace, continue with CALLER. */
769 if (prev->up == NULL)
770 {
771 DEBUG_FTRACE ("fixing up link for tailcall chain");
772 ftrace_debug (prev, "..top");
773 ftrace_debug (caller, "..up");
774
775 ftrace_fixup_caller (prev, caller, flags);
776
777 /* If we skipped any tail calls, this may move CALLER to a
778 different function level.
779
780 Note that changing CALLER's level is only OK because we
781 know that this is the last iteration of the bottom-to-top
782 walk in ftrace_connect_backtrace.
783
784 Otherwise we will fix up CALLER's level when we connect it
785 to PREV's caller in the next iteration. */
786 ftrace_fixup_level (caller, prev->level - caller->level - 1);
787 break;
788 }
789
790 /* There's nothing to do if we find a real call. */
791 if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
792 {
793 DEBUG_FTRACE ("will fix up link in next iteration");
794 break;
795 }
796 }
797 }
798 }
799}
800
801/* Connect function segments on the same level in the back trace at LHS and RHS.
802 The back traces at LHS and RHS are expected to match according to
803 ftrace_match_backtrace. */
804
805static void
806ftrace_connect_backtrace (struct btrace_function *lhs,
807 struct btrace_function *rhs)
808{
809 while (lhs != NULL && rhs != NULL)
810 {
811 struct btrace_function *prev, *next;
812
813 gdb_assert (!ftrace_function_switched (lhs, rhs->msym, rhs->sym));
814
815 /* Connecting LHS and RHS may change the up link. */
816 prev = lhs;
817 next = rhs;
818
819 lhs = ftrace_get_caller (lhs);
820 rhs = ftrace_get_caller (rhs);
821
822 ftrace_connect_bfun (prev, next);
823 }
824}
825
826/* Bridge the gap between two function segments left and right of a gap if their
827 respective back traces match in at least MIN_MATCHES functions.
828
829 Returns non-zero if the gap could be bridged, zero otherwise. */
830
831static int
832ftrace_bridge_gap (struct btrace_function *lhs, struct btrace_function *rhs,
833 int min_matches)
834{
835 struct btrace_function *best_l, *best_r, *cand_l, *cand_r;
836 int best_matches;
837
838 DEBUG_FTRACE ("checking gap at insn %u (req matches: %d)",
839 rhs->insn_offset - 1, min_matches);
840
841 best_matches = 0;
842 best_l = NULL;
843 best_r = NULL;
844
845 /* We search the back traces of LHS and RHS for valid connections and connect
846 the two functon segments that give the longest combined back trace. */
847
848 for (cand_l = lhs; cand_l != NULL; cand_l = ftrace_get_caller (cand_l))
849 for (cand_r = rhs; cand_r != NULL; cand_r = ftrace_get_caller (cand_r))
850 {
851 int matches;
852
853 matches = ftrace_match_backtrace (cand_l, cand_r);
854 if (best_matches < matches)
855 {
856 best_matches = matches;
857 best_l = cand_l;
858 best_r = cand_r;
859 }
860 }
861
862 /* We need at least MIN_MATCHES matches. */
863 gdb_assert (min_matches > 0);
864 if (best_matches < min_matches)
865 return 0;
866
867 DEBUG_FTRACE ("..matches: %d", best_matches);
868
869 /* We will fix up the level of BEST_R and succeeding function segments such
870 that BEST_R's level matches BEST_L's when we connect BEST_L to BEST_R.
871
872 This will ignore the level of RHS and following if BEST_R != RHS. I.e. if
873 BEST_R is a successor of RHS in the back trace of RHS (phases 1 and 3).
874
875 To catch this, we already fix up the level here where we can start at RHS
876 instead of at BEST_R. We will ignore the level fixup when connecting
877 BEST_L to BEST_R as they will already be on the same level. */
878 ftrace_fixup_level (rhs, best_l->level - best_r->level);
879
880 ftrace_connect_backtrace (best_l, best_r);
881
882 return best_matches;
883}
884
885/* Try to bridge gaps due to overflow or decode errors by connecting the
886 function segments that are separated by the gap. */
887
888static void
889btrace_bridge_gaps (struct thread_info *tp, VEC (bfun_s) **gaps)
890{
891 VEC (bfun_s) *remaining;
892 struct cleanup *old_chain;
893 int min_matches;
894
895 DEBUG ("bridge gaps");
896
897 remaining = NULL;
898 old_chain = make_cleanup (VEC_cleanup (bfun_s), &remaining);
899
900 /* We require a minimum amount of matches for bridging a gap. The number of
901 required matches will be lowered with each iteration.
902
903 The more matches the higher our confidence that the bridging is correct.
904 For big gaps or small traces, however, it may not be feasible to require a
905 high number of matches. */
906 for (min_matches = 5; min_matches > 0; --min_matches)
907 {
908 /* Let's try to bridge as many gaps as we can. In some cases, we need to
909 skip a gap and revisit it again after we closed later gaps. */
910 while (!VEC_empty (bfun_s, *gaps))
911 {
912 struct btrace_function *gap;
913 unsigned int idx;
914
915 for (idx = 0; VEC_iterate (bfun_s, *gaps, idx, gap); ++idx)
916 {
917 struct btrace_function *lhs, *rhs;
918 int bridged;
919
920 /* We may have a sequence of gaps if we run from one error into
921 the next as we try to re-sync onto the trace stream. Ignore
922 all but the leftmost gap in such a sequence.
923
924 Also ignore gaps at the beginning of the trace. */
925 lhs = gap->flow.prev;
926 if (lhs == NULL || lhs->errcode != 0)
927 continue;
928
929 /* Skip gaps to the right. */
930 for (rhs = gap->flow.next; rhs != NULL; rhs = rhs->flow.next)
931 if (rhs->errcode == 0)
932 break;
933
934 /* Ignore gaps at the end of the trace. */
935 if (rhs == NULL)
936 continue;
937
938 bridged = ftrace_bridge_gap (lhs, rhs, min_matches);
939
940 /* Keep track of gaps we were not able to bridge and try again.
941 If we just pushed them to the end of GAPS we would risk an
942 infinite loop in case we simply cannot bridge a gap. */
943 if (bridged == 0)
944 VEC_safe_push (bfun_s, remaining, gap);
945 }
946
947 /* Let's see if we made any progress. */
948 if (VEC_length (bfun_s, remaining) == VEC_length (bfun_s, *gaps))
949 break;
950
951 VEC_free (bfun_s, *gaps);
952
953 *gaps = remaining;
954 remaining = NULL;
955 }
956
957 /* We get here if either GAPS is empty or if GAPS equals REMAINING. */
958 if (VEC_empty (bfun_s, *gaps))
959 break;
960
961 VEC_free (bfun_s, remaining);
962 }
963
964 do_cleanups (old_chain);
965
966 /* We may omit this in some cases. Not sure it is worth the extra
967 complication, though. */
968 ftrace_compute_global_level_offset (&tp->btrace);
969}
970
734b0e4b 971/* Compute the function branch trace from BTS trace. */
23a7fe75
MM
972
973static void
76235df1 974btrace_compute_ftrace_bts (struct thread_info *tp,
d87fdac3
MM
975 const struct btrace_data_bts *btrace,
976 VEC (bfun_s) **gaps)
23a7fe75 977{
76235df1 978 struct btrace_thread_info *btinfo;
23a7fe75
MM
979 struct btrace_function *begin, *end;
980 struct gdbarch *gdbarch;
d87fdac3 981 unsigned int blk;
23a7fe75
MM
982 int level;
983
23a7fe75 984 gdbarch = target_gdbarch ();
76235df1 985 btinfo = &tp->btrace;
969c39fb
MM
986 begin = btinfo->begin;
987 end = btinfo->end;
988 level = begin != NULL ? -btinfo->level : INT_MAX;
734b0e4b 989 blk = VEC_length (btrace_block_s, btrace->blocks);
23a7fe75
MM
990
991 while (blk != 0)
992 {
993 btrace_block_s *block;
994 CORE_ADDR pc;
995
996 blk -= 1;
997
734b0e4b 998 block = VEC_index (btrace_block_s, btrace->blocks, blk);
23a7fe75
MM
999 pc = block->begin;
1000
1001 for (;;)
1002 {
7d5c24b3 1003 struct btrace_insn insn;
23a7fe75
MM
1004 int size;
1005
1006 /* We should hit the end of the block. Warn if we went too far. */
1007 if (block->end < pc)
1008 {
b61ce85c
MM
1009 /* Indicate the gap in the trace. */
1010 end = ftrace_new_gap (end, BDE_BTS_OVERFLOW);
1011 if (begin == NULL)
1012 begin = end;
1013
d87fdac3 1014 VEC_safe_push (bfun_s, *gaps, end);
b61ce85c
MM
1015
1016 warning (_("Recorded trace may be corrupted at instruction "
1017 "%u (pc = %s)."), end->insn_offset - 1,
1018 core_addr_to_string_nz (pc));
63ab433e 1019
23a7fe75
MM
1020 break;
1021 }
1022
7d5c24b3 1023 end = ftrace_update_function (end, pc);
23a7fe75
MM
1024 if (begin == NULL)
1025 begin = end;
1026
8710b709
MM
1027 /* Maintain the function level offset.
1028 For all but the last block, we do it here. */
1029 if (blk != 0)
325fac50 1030 level = std::min (level, end->level);
23a7fe75 1031
7d5c24b3 1032 size = 0;
492d29ea
PA
1033 TRY
1034 {
1035 size = gdb_insn_length (gdbarch, pc);
1036 }
1037 CATCH (error, RETURN_MASK_ERROR)
1038 {
1039 }
1040 END_CATCH
7d5c24b3
MM
1041
1042 insn.pc = pc;
1043 insn.size = size;
1044 insn.iclass = ftrace_classify_insn (gdbarch, pc);
da8c46d2 1045 insn.flags = 0;
7d5c24b3
MM
1046
1047 ftrace_update_insns (end, &insn);
23a7fe75
MM
1048
1049 /* We're done once we pushed the instruction at the end. */
1050 if (block->end == pc)
1051 break;
1052
7d5c24b3 1053 /* We can't continue if we fail to compute the size. */
23a7fe75
MM
1054 if (size <= 0)
1055 {
31fd9caa
MM
1056 /* Indicate the gap in the trace. We just added INSN so we're
1057 not at the beginning. */
1058 end = ftrace_new_gap (end, BDE_BTS_INSN_SIZE);
d87fdac3
MM
1059
1060 VEC_safe_push (bfun_s, *gaps, end);
31fd9caa 1061
63ab433e
MM
1062 warning (_("Recorded trace may be incomplete at instruction %u "
1063 "(pc = %s)."), end->insn_offset - 1,
1064 core_addr_to_string_nz (pc));
1065
23a7fe75
MM
1066 break;
1067 }
1068
1069 pc += size;
8710b709
MM
1070
1071 /* Maintain the function level offset.
1072 For the last block, we do it here to not consider the last
1073 instruction.
1074 Since the last instruction corresponds to the current instruction
1075 and is not really part of the execution history, it shouldn't
1076 affect the level. */
1077 if (blk == 0)
325fac50 1078 level = std::min (level, end->level);
23a7fe75 1079 }
02d27625
MM
1080 }
1081
23a7fe75
MM
1082 btinfo->begin = begin;
1083 btinfo->end = end;
1084
1085 /* LEVEL is the minimal function level of all btrace function segments.
1086 Define the global level offset to -LEVEL so all function levels are
1087 normalized to start at zero. */
1088 btinfo->level = -level;
02d27625
MM
1089}
1090
b20a6524
MM
1091#if defined (HAVE_LIBIPT)
1092
1093static enum btrace_insn_class
1094pt_reclassify_insn (enum pt_insn_class iclass)
1095{
1096 switch (iclass)
1097 {
1098 case ptic_call:
1099 return BTRACE_INSN_CALL;
1100
1101 case ptic_return:
1102 return BTRACE_INSN_RETURN;
1103
1104 case ptic_jump:
1105 return BTRACE_INSN_JUMP;
1106
1107 default:
1108 return BTRACE_INSN_OTHER;
1109 }
1110}
1111
da8c46d2
MM
1112/* Return the btrace instruction flags for INSN. */
1113
d7abe101 1114static btrace_insn_flags
b5c36682 1115pt_btrace_insn_flags (const struct pt_insn &insn)
da8c46d2 1116{
d7abe101 1117 btrace_insn_flags flags = 0;
da8c46d2 1118
b5c36682 1119 if (insn.speculative)
da8c46d2
MM
1120 flags |= BTRACE_INSN_FLAG_SPECULATIVE;
1121
1122 return flags;
1123}
1124
b5c36682
PA
1125/* Return the btrace instruction for INSN. */
1126
1127static btrace_insn
1128pt_btrace_insn (const struct pt_insn &insn)
1129{
1130 return {(CORE_ADDR) insn.ip, (gdb_byte) insn.size,
1131 pt_reclassify_insn (insn.iclass),
1132 pt_btrace_insn_flags (insn)};
1133}
1134
1135
b20a6524
MM
1136/* Add function branch trace using DECODER. */
1137
1138static void
1139ftrace_add_pt (struct pt_insn_decoder *decoder,
1140 struct btrace_function **pbegin,
1141 struct btrace_function **pend, int *plevel,
d87fdac3 1142 VEC (bfun_s) **gaps)
b20a6524
MM
1143{
1144 struct btrace_function *begin, *end, *upd;
1145 uint64_t offset;
63ab433e 1146 int errcode;
b20a6524
MM
1147
1148 begin = *pbegin;
1149 end = *pend;
b20a6524
MM
1150 for (;;)
1151 {
b20a6524
MM
1152 struct pt_insn insn;
1153
1154 errcode = pt_insn_sync_forward (decoder);
1155 if (errcode < 0)
1156 {
1157 if (errcode != -pte_eos)
bc504a31 1158 warning (_("Failed to synchronize onto the Intel Processor "
b20a6524
MM
1159 "Trace stream: %s."), pt_errstr (pt_errcode (errcode)));
1160 break;
1161 }
1162
b20a6524
MM
1163 for (;;)
1164 {
1165 errcode = pt_insn_next (decoder, &insn, sizeof(insn));
1166 if (errcode < 0)
1167 break;
1168
1169 /* Look for gaps in the trace - unless we're at the beginning. */
1170 if (begin != NULL)
1171 {
1172 /* Tracing is disabled and re-enabled each time we enter the
1173 kernel. Most times, we continue from the same instruction we
1174 stopped before. This is indicated via the RESUMED instruction
1175 flag. The ENABLED instruction flag means that we continued
1176 from some other instruction. Indicate this as a trace gap. */
1177 if (insn.enabled)
63ab433e
MM
1178 {
1179 *pend = end = ftrace_new_gap (end, BDE_PT_DISABLED);
d87fdac3
MM
1180
1181 VEC_safe_push (bfun_s, *gaps, end);
63ab433e
MM
1182
1183 pt_insn_get_offset (decoder, &offset);
1184
1185 warning (_("Non-contiguous trace at instruction %u (offset "
1186 "= 0x%" PRIx64 ", pc = 0x%" PRIx64 ")."),
1187 end->insn_offset - 1, offset, insn.ip);
1188 }
b61ce85c 1189 }
b20a6524 1190
b61ce85c
MM
1191 /* Indicate trace overflows. */
1192 if (insn.resynced)
1193 {
1194 *pend = end = ftrace_new_gap (end, BDE_PT_OVERFLOW);
1195 if (begin == NULL)
1196 *pbegin = begin = end;
63ab433e 1197
d87fdac3 1198 VEC_safe_push (bfun_s, *gaps, end);
63ab433e 1199
b61ce85c
MM
1200 pt_insn_get_offset (decoder, &offset);
1201
1202 warning (_("Overflow at instruction %u (offset = 0x%" PRIx64
1203 ", pc = 0x%" PRIx64 ")."), end->insn_offset - 1,
1204 offset, insn.ip);
b20a6524
MM
1205 }
1206
1207 upd = ftrace_update_function (end, insn.ip);
1208 if (upd != end)
1209 {
1210 *pend = end = upd;
1211
1212 if (begin == NULL)
1213 *pbegin = begin = upd;
1214 }
1215
1216 /* Maintain the function level offset. */
325fac50 1217 *plevel = std::min (*plevel, end->level);
b20a6524 1218
b5c36682 1219 btrace_insn btinsn = pt_btrace_insn (insn);
b20a6524
MM
1220 ftrace_update_insns (end, &btinsn);
1221 }
1222
1223 if (errcode == -pte_eos)
1224 break;
1225
b20a6524
MM
1226 /* Indicate the gap in the trace. */
1227 *pend = end = ftrace_new_gap (end, errcode);
b61ce85c
MM
1228 if (begin == NULL)
1229 *pbegin = begin = end;
d87fdac3
MM
1230
1231 VEC_safe_push (bfun_s, *gaps, end);
b20a6524 1232
63ab433e
MM
1233 pt_insn_get_offset (decoder, &offset);
1234
1235 warning (_("Decode error (%d) at instruction %u (offset = 0x%" PRIx64
1236 ", pc = 0x%" PRIx64 "): %s."), errcode, end->insn_offset - 1,
1237 offset, insn.ip, pt_errstr (pt_errcode (errcode)));
1238 }
b20a6524
MM
1239}
1240
1241/* A callback function to allow the trace decoder to read the inferior's
1242 memory. */
1243
1244static int
1245btrace_pt_readmem_callback (gdb_byte *buffer, size_t size,
80a2b330 1246 const struct pt_asid *asid, uint64_t pc,
b20a6524
MM
1247 void *context)
1248{
43368e1d 1249 int result, errcode;
b20a6524 1250
43368e1d 1251 result = (int) size;
b20a6524
MM
1252 TRY
1253 {
80a2b330 1254 errcode = target_read_code ((CORE_ADDR) pc, buffer, size);
b20a6524 1255 if (errcode != 0)
43368e1d 1256 result = -pte_nomap;
b20a6524
MM
1257 }
1258 CATCH (error, RETURN_MASK_ERROR)
1259 {
43368e1d 1260 result = -pte_nomap;
b20a6524
MM
1261 }
1262 END_CATCH
1263
43368e1d 1264 return result;
b20a6524
MM
1265}
1266
1267/* Translate the vendor from one enum to another. */
1268
1269static enum pt_cpu_vendor
1270pt_translate_cpu_vendor (enum btrace_cpu_vendor vendor)
1271{
1272 switch (vendor)
1273 {
1274 default:
1275 return pcv_unknown;
1276
1277 case CV_INTEL:
1278 return pcv_intel;
1279 }
1280}
1281
1282/* Finalize the function branch trace after decode. */
1283
1284static void btrace_finalize_ftrace_pt (struct pt_insn_decoder *decoder,
1285 struct thread_info *tp, int level)
1286{
1287 pt_insn_free_decoder (decoder);
1288
1289 /* LEVEL is the minimal function level of all btrace function segments.
1290 Define the global level offset to -LEVEL so all function levels are
1291 normalized to start at zero. */
1292 tp->btrace.level = -level;
1293
1294 /* Add a single last instruction entry for the current PC.
1295 This allows us to compute the backtrace at the current PC using both
1296 standard unwind and btrace unwind.
1297 This extra entry is ignored by all record commands. */
1298 btrace_add_pc (tp);
1299}
1300
bc504a31
PA
1301/* Compute the function branch trace from Intel Processor Trace
1302 format. */
b20a6524
MM
1303
1304static void
1305btrace_compute_ftrace_pt (struct thread_info *tp,
d87fdac3
MM
1306 const struct btrace_data_pt *btrace,
1307 VEC (bfun_s) **gaps)
b20a6524
MM
1308{
1309 struct btrace_thread_info *btinfo;
1310 struct pt_insn_decoder *decoder;
1311 struct pt_config config;
1312 int level, errcode;
1313
1314 if (btrace->size == 0)
1315 return;
1316
1317 btinfo = &tp->btrace;
1318 level = btinfo->begin != NULL ? -btinfo->level : INT_MAX;
1319
1320 pt_config_init(&config);
1321 config.begin = btrace->data;
1322 config.end = btrace->data + btrace->size;
1323
1324 config.cpu.vendor = pt_translate_cpu_vendor (btrace->config.cpu.vendor);
1325 config.cpu.family = btrace->config.cpu.family;
1326 config.cpu.model = btrace->config.cpu.model;
1327 config.cpu.stepping = btrace->config.cpu.stepping;
1328
1329 errcode = pt_cpu_errata (&config.errata, &config.cpu);
1330 if (errcode < 0)
bc504a31 1331 error (_("Failed to configure the Intel Processor Trace decoder: %s."),
b20a6524
MM
1332 pt_errstr (pt_errcode (errcode)));
1333
1334 decoder = pt_insn_alloc_decoder (&config);
1335 if (decoder == NULL)
bc504a31 1336 error (_("Failed to allocate the Intel Processor Trace decoder."));
b20a6524
MM
1337
1338 TRY
1339 {
1340 struct pt_image *image;
1341
1342 image = pt_insn_get_image(decoder);
1343 if (image == NULL)
bc504a31 1344 error (_("Failed to configure the Intel Processor Trace decoder."));
b20a6524
MM
1345
1346 errcode = pt_image_set_callback(image, btrace_pt_readmem_callback, NULL);
1347 if (errcode < 0)
bc504a31 1348 error (_("Failed to configure the Intel Processor Trace decoder: "
b20a6524
MM
1349 "%s."), pt_errstr (pt_errcode (errcode)));
1350
d87fdac3 1351 ftrace_add_pt (decoder, &btinfo->begin, &btinfo->end, &level, gaps);
b20a6524
MM
1352 }
1353 CATCH (error, RETURN_MASK_ALL)
1354 {
1355 /* Indicate a gap in the trace if we quit trace processing. */
1356 if (error.reason == RETURN_QUIT && btinfo->end != NULL)
1357 {
1358 btinfo->end = ftrace_new_gap (btinfo->end, BDE_PT_USER_QUIT);
d87fdac3
MM
1359
1360 VEC_safe_push (bfun_s, *gaps, btinfo->end);
b20a6524
MM
1361 }
1362
1363 btrace_finalize_ftrace_pt (decoder, tp, level);
1364
1365 throw_exception (error);
1366 }
1367 END_CATCH
1368
1369 btrace_finalize_ftrace_pt (decoder, tp, level);
1370}
1371
1372#else /* defined (HAVE_LIBIPT) */
1373
1374static void
1375btrace_compute_ftrace_pt (struct thread_info *tp,
d87fdac3
MM
1376 const struct btrace_data_pt *btrace,
1377 VEC (bfun_s) **gaps)
b20a6524
MM
1378{
1379 internal_error (__FILE__, __LINE__, _("Unexpected branch trace format."));
1380}
1381
1382#endif /* defined (HAVE_LIBIPT) */
1383
734b0e4b
MM
1384/* Compute the function branch trace from a block branch trace BTRACE for
1385 a thread given by BTINFO. */
1386
1387static void
d87fdac3
MM
1388btrace_compute_ftrace_1 (struct thread_info *tp, struct btrace_data *btrace,
1389 VEC (bfun_s) **gaps)
734b0e4b
MM
1390{
1391 DEBUG ("compute ftrace");
1392
1393 switch (btrace->format)
1394 {
1395 case BTRACE_FORMAT_NONE:
1396 return;
1397
1398 case BTRACE_FORMAT_BTS:
d87fdac3 1399 btrace_compute_ftrace_bts (tp, &btrace->variant.bts, gaps);
734b0e4b 1400 return;
b20a6524
MM
1401
1402 case BTRACE_FORMAT_PT:
d87fdac3 1403 btrace_compute_ftrace_pt (tp, &btrace->variant.pt, gaps);
b20a6524 1404 return;
734b0e4b
MM
1405 }
1406
1407 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
1408}
1409
d87fdac3
MM
1410static void
1411btrace_finalize_ftrace (struct thread_info *tp, VEC (bfun_s) **gaps)
1412{
1413 if (!VEC_empty (bfun_s, *gaps))
1414 {
1415 tp->btrace.ngaps += VEC_length (bfun_s, *gaps);
1416 btrace_bridge_gaps (tp, gaps);
1417 }
1418}
1419
1420static void
1421btrace_compute_ftrace (struct thread_info *tp, struct btrace_data *btrace)
1422{
1423 VEC (bfun_s) *gaps;
1424 struct cleanup *old_chain;
1425
1426 gaps = NULL;
1427 old_chain = make_cleanup (VEC_cleanup (bfun_s), &gaps);
1428
1429 TRY
1430 {
1431 btrace_compute_ftrace_1 (tp, btrace, &gaps);
1432 }
1433 CATCH (error, RETURN_MASK_ALL)
1434 {
1435 btrace_finalize_ftrace (tp, &gaps);
1436
1437 throw_exception (error);
1438 }
1439 END_CATCH
1440
1441 btrace_finalize_ftrace (tp, &gaps);
1442
1443 do_cleanups (old_chain);
1444}
1445
6e07b1d2
MM
1446/* Add an entry for the current PC. */
1447
1448static void
1449btrace_add_pc (struct thread_info *tp)
1450{
734b0e4b 1451 struct btrace_data btrace;
6e07b1d2
MM
1452 struct btrace_block *block;
1453 struct regcache *regcache;
1454 struct cleanup *cleanup;
1455 CORE_ADDR pc;
1456
1457 regcache = get_thread_regcache (tp->ptid);
1458 pc = regcache_read_pc (regcache);
1459
734b0e4b
MM
1460 btrace_data_init (&btrace);
1461 btrace.format = BTRACE_FORMAT_BTS;
1462 btrace.variant.bts.blocks = NULL;
6e07b1d2 1463
734b0e4b
MM
1464 cleanup = make_cleanup_btrace_data (&btrace);
1465
1466 block = VEC_safe_push (btrace_block_s, btrace.variant.bts.blocks, NULL);
6e07b1d2
MM
1467 block->begin = pc;
1468 block->end = pc;
1469
76235df1 1470 btrace_compute_ftrace (tp, &btrace);
6e07b1d2
MM
1471
1472 do_cleanups (cleanup);
1473}
1474
02d27625
MM
1475/* See btrace.h. */
1476
1477void
f4abbc16 1478btrace_enable (struct thread_info *tp, const struct btrace_config *conf)
02d27625
MM
1479{
1480 if (tp->btrace.target != NULL)
1481 return;
1482
46a3515b
MM
1483#if !defined (HAVE_LIBIPT)
1484 if (conf->format == BTRACE_FORMAT_PT)
bc504a31 1485 error (_("GDB does not support Intel Processor Trace."));
46a3515b
MM
1486#endif /* !defined (HAVE_LIBIPT) */
1487
f4abbc16 1488 if (!target_supports_btrace (conf->format))
02d27625
MM
1489 error (_("Target does not support branch tracing."));
1490
43792cf0
PA
1491 DEBUG ("enable thread %s (%s)", print_thread_id (tp),
1492 target_pid_to_str (tp->ptid));
02d27625 1493
f4abbc16 1494 tp->btrace.target = target_enable_btrace (tp->ptid, conf);
6e07b1d2 1495
cd4007e4
MM
1496 /* We're done if we failed to enable tracing. */
1497 if (tp->btrace.target == NULL)
1498 return;
1499
1500 /* We need to undo the enable in case of errors. */
1501 TRY
1502 {
1503 /* Add an entry for the current PC so we start tracing from where we
1504 enabled it.
1505
1506 If we can't access TP's registers, TP is most likely running. In this
1507 case, we can't really say where tracing was enabled so it should be
1508 safe to simply skip this step.
1509
1510 This is not relevant for BTRACE_FORMAT_PT since the trace will already
1511 start at the PC at which tracing was enabled. */
1512 if (conf->format != BTRACE_FORMAT_PT
1513 && can_access_registers_ptid (tp->ptid))
1514 btrace_add_pc (tp);
1515 }
1516 CATCH (exception, RETURN_MASK_ALL)
1517 {
1518 btrace_disable (tp);
1519
1520 throw_exception (exception);
1521 }
1522 END_CATCH
02d27625
MM
1523}
1524
1525/* See btrace.h. */
1526
f4abbc16
MM
1527const struct btrace_config *
1528btrace_conf (const struct btrace_thread_info *btinfo)
1529{
1530 if (btinfo->target == NULL)
1531 return NULL;
1532
1533 return target_btrace_conf (btinfo->target);
1534}
1535
1536/* See btrace.h. */
1537
02d27625
MM
1538void
1539btrace_disable (struct thread_info *tp)
1540{
1541 struct btrace_thread_info *btp = &tp->btrace;
1542 int errcode = 0;
1543
1544 if (btp->target == NULL)
1545 return;
1546
43792cf0
PA
1547 DEBUG ("disable thread %s (%s)", print_thread_id (tp),
1548 target_pid_to_str (tp->ptid));
02d27625
MM
1549
1550 target_disable_btrace (btp->target);
1551 btp->target = NULL;
1552
1553 btrace_clear (tp);
1554}
1555
1556/* See btrace.h. */
1557
1558void
1559btrace_teardown (struct thread_info *tp)
1560{
1561 struct btrace_thread_info *btp = &tp->btrace;
1562 int errcode = 0;
1563
1564 if (btp->target == NULL)
1565 return;
1566
43792cf0
PA
1567 DEBUG ("teardown thread %s (%s)", print_thread_id (tp),
1568 target_pid_to_str (tp->ptid));
02d27625
MM
1569
1570 target_teardown_btrace (btp->target);
1571 btp->target = NULL;
1572
1573 btrace_clear (tp);
1574}
1575
734b0e4b 1576/* Stitch branch trace in BTS format. */
969c39fb
MM
1577
1578static int
31fd9caa 1579btrace_stitch_bts (struct btrace_data_bts *btrace, struct thread_info *tp)
969c39fb 1580{
31fd9caa 1581 struct btrace_thread_info *btinfo;
969c39fb
MM
1582 struct btrace_function *last_bfun;
1583 struct btrace_insn *last_insn;
1584 btrace_block_s *first_new_block;
1585
31fd9caa 1586 btinfo = &tp->btrace;
969c39fb
MM
1587 last_bfun = btinfo->end;
1588 gdb_assert (last_bfun != NULL);
31fd9caa
MM
1589 gdb_assert (!VEC_empty (btrace_block_s, btrace->blocks));
1590
1591 /* If the existing trace ends with a gap, we just glue the traces
1592 together. We need to drop the last (i.e. chronologically first) block
1593 of the new trace, though, since we can't fill in the start address.*/
1594 if (VEC_empty (btrace_insn_s, last_bfun->insn))
1595 {
1596 VEC_pop (btrace_block_s, btrace->blocks);
1597 return 0;
1598 }
969c39fb
MM
1599
1600 /* Beware that block trace starts with the most recent block, so the
1601 chronologically first block in the new trace is the last block in
1602 the new trace's block vector. */
734b0e4b 1603 first_new_block = VEC_last (btrace_block_s, btrace->blocks);
969c39fb
MM
1604 last_insn = VEC_last (btrace_insn_s, last_bfun->insn);
1605
1606 /* If the current PC at the end of the block is the same as in our current
1607 trace, there are two explanations:
1608 1. we executed the instruction and some branch brought us back.
1609 2. we have not made any progress.
1610 In the first case, the delta trace vector should contain at least two
1611 entries.
1612 In the second case, the delta trace vector should contain exactly one
1613 entry for the partial block containing the current PC. Remove it. */
1614 if (first_new_block->end == last_insn->pc
734b0e4b 1615 && VEC_length (btrace_block_s, btrace->blocks) == 1)
969c39fb 1616 {
734b0e4b 1617 VEC_pop (btrace_block_s, btrace->blocks);
969c39fb
MM
1618 return 0;
1619 }
1620
1621 DEBUG ("stitching %s to %s", ftrace_print_insn_addr (last_insn),
1622 core_addr_to_string_nz (first_new_block->end));
1623
1624 /* Do a simple sanity check to make sure we don't accidentally end up
1625 with a bad block. This should not occur in practice. */
1626 if (first_new_block->end < last_insn->pc)
1627 {
1628 warning (_("Error while trying to read delta trace. Falling back to "
1629 "a full read."));
1630 return -1;
1631 }
1632
1633 /* We adjust the last block to start at the end of our current trace. */
1634 gdb_assert (first_new_block->begin == 0);
1635 first_new_block->begin = last_insn->pc;
1636
1637 /* We simply pop the last insn so we can insert it again as part of
1638 the normal branch trace computation.
1639 Since instruction iterators are based on indices in the instructions
1640 vector, we don't leave any pointers dangling. */
1641 DEBUG ("pruning insn at %s for stitching",
1642 ftrace_print_insn_addr (last_insn));
1643
1644 VEC_pop (btrace_insn_s, last_bfun->insn);
1645
1646 /* The instructions vector may become empty temporarily if this has
1647 been the only instruction in this function segment.
1648 This violates the invariant but will be remedied shortly by
1649 btrace_compute_ftrace when we add the new trace. */
31fd9caa
MM
1650
1651 /* The only case where this would hurt is if the entire trace consisted
1652 of just that one instruction. If we remove it, we might turn the now
1653 empty btrace function segment into a gap. But we don't want gaps at
1654 the beginning. To avoid this, we remove the entire old trace. */
1655 if (last_bfun == btinfo->begin && VEC_empty (btrace_insn_s, last_bfun->insn))
1656 btrace_clear (tp);
1657
969c39fb
MM
1658 return 0;
1659}
1660
734b0e4b
MM
1661/* Adjust the block trace in order to stitch old and new trace together.
1662 BTRACE is the new delta trace between the last and the current stop.
31fd9caa
MM
1663 TP is the traced thread.
1664 May modifx BTRACE as well as the existing trace in TP.
734b0e4b
MM
1665 Return 0 on success, -1 otherwise. */
1666
1667static int
31fd9caa 1668btrace_stitch_trace (struct btrace_data *btrace, struct thread_info *tp)
734b0e4b
MM
1669{
1670 /* If we don't have trace, there's nothing to do. */
1671 if (btrace_data_empty (btrace))
1672 return 0;
1673
1674 switch (btrace->format)
1675 {
1676 case BTRACE_FORMAT_NONE:
1677 return 0;
1678
1679 case BTRACE_FORMAT_BTS:
31fd9caa 1680 return btrace_stitch_bts (&btrace->variant.bts, tp);
b20a6524
MM
1681
1682 case BTRACE_FORMAT_PT:
1683 /* Delta reads are not supported. */
1684 return -1;
734b0e4b
MM
1685 }
1686
1687 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
1688}
1689
969c39fb
MM
1690/* Clear the branch trace histories in BTINFO. */
1691
1692static void
1693btrace_clear_history (struct btrace_thread_info *btinfo)
1694{
1695 xfree (btinfo->insn_history);
1696 xfree (btinfo->call_history);
1697 xfree (btinfo->replay);
1698
1699 btinfo->insn_history = NULL;
1700 btinfo->call_history = NULL;
1701 btinfo->replay = NULL;
1702}
1703
b0627500
MM
1704/* Clear the branch trace maintenance histories in BTINFO. */
1705
1706static void
1707btrace_maint_clear (struct btrace_thread_info *btinfo)
1708{
1709 switch (btinfo->data.format)
1710 {
1711 default:
1712 break;
1713
1714 case BTRACE_FORMAT_BTS:
1715 btinfo->maint.variant.bts.packet_history.begin = 0;
1716 btinfo->maint.variant.bts.packet_history.end = 0;
1717 break;
1718
1719#if defined (HAVE_LIBIPT)
1720 case BTRACE_FORMAT_PT:
1721 xfree (btinfo->maint.variant.pt.packets);
1722
1723 btinfo->maint.variant.pt.packets = NULL;
1724 btinfo->maint.variant.pt.packet_history.begin = 0;
1725 btinfo->maint.variant.pt.packet_history.end = 0;
1726 break;
1727#endif /* defined (HAVE_LIBIPT) */
1728 }
1729}
1730
02d27625
MM
1731/* See btrace.h. */
1732
508352a9
TW
1733const char *
1734btrace_decode_error (enum btrace_format format, int errcode)
1735{
1736 switch (format)
1737 {
1738 case BTRACE_FORMAT_BTS:
1739 switch (errcode)
1740 {
1741 case BDE_BTS_OVERFLOW:
1742 return _("instruction overflow");
1743
1744 case BDE_BTS_INSN_SIZE:
1745 return _("unknown instruction");
1746
1747 default:
1748 break;
1749 }
1750 break;
1751
1752#if defined (HAVE_LIBIPT)
1753 case BTRACE_FORMAT_PT:
1754 switch (errcode)
1755 {
1756 case BDE_PT_USER_QUIT:
1757 return _("trace decode cancelled");
1758
1759 case BDE_PT_DISABLED:
1760 return _("disabled");
1761
1762 case BDE_PT_OVERFLOW:
1763 return _("overflow");
1764
1765 default:
1766 if (errcode < 0)
1767 return pt_errstr (pt_errcode (errcode));
1768 break;
1769 }
1770 break;
1771#endif /* defined (HAVE_LIBIPT) */
1772
1773 default:
1774 break;
1775 }
1776
1777 return _("unknown");
1778}
1779
1780/* See btrace.h. */
1781
02d27625
MM
1782void
1783btrace_fetch (struct thread_info *tp)
1784{
1785 struct btrace_thread_info *btinfo;
969c39fb 1786 struct btrace_target_info *tinfo;
734b0e4b 1787 struct btrace_data btrace;
23a7fe75 1788 struct cleanup *cleanup;
969c39fb 1789 int errcode;
02d27625 1790
43792cf0
PA
1791 DEBUG ("fetch thread %s (%s)", print_thread_id (tp),
1792 target_pid_to_str (tp->ptid));
02d27625
MM
1793
1794 btinfo = &tp->btrace;
969c39fb
MM
1795 tinfo = btinfo->target;
1796 if (tinfo == NULL)
1797 return;
1798
1799 /* There's no way we could get new trace while replaying.
1800 On the other hand, delta trace would return a partial record with the
1801 current PC, which is the replay PC, not the last PC, as expected. */
1802 if (btinfo->replay != NULL)
02d27625
MM
1803 return;
1804
ae20e79a
TW
1805 /* With CLI usage, TP->PTID always equals INFERIOR_PTID here. Now that we
1806 can store a gdb.Record object in Python referring to a different thread
1807 than the current one, temporarily set INFERIOR_PTID. */
1808 cleanup = save_inferior_ptid ();
1809 inferior_ptid = tp->ptid;
1810
cd4007e4
MM
1811 /* We should not be called on running or exited threads. */
1812 gdb_assert (can_access_registers_ptid (tp->ptid));
1813
734b0e4b 1814 btrace_data_init (&btrace);
ae20e79a 1815 make_cleanup_btrace_data (&btrace);
02d27625 1816
969c39fb
MM
1817 /* Let's first try to extend the trace we already have. */
1818 if (btinfo->end != NULL)
1819 {
1820 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_DELTA);
1821 if (errcode == 0)
1822 {
1823 /* Success. Let's try to stitch the traces together. */
31fd9caa 1824 errcode = btrace_stitch_trace (&btrace, tp);
969c39fb
MM
1825 }
1826 else
1827 {
1828 /* We failed to read delta trace. Let's try to read new trace. */
1829 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_NEW);
1830
1831 /* If we got any new trace, discard what we have. */
734b0e4b 1832 if (errcode == 0 && !btrace_data_empty (&btrace))
969c39fb
MM
1833 btrace_clear (tp);
1834 }
1835
1836 /* If we were not able to read the trace, we start over. */
1837 if (errcode != 0)
1838 {
1839 btrace_clear (tp);
1840 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
1841 }
1842 }
1843 else
1844 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
1845
1846 /* If we were not able to read the branch trace, signal an error. */
1847 if (errcode != 0)
1848 error (_("Failed to read branch trace."));
1849
1850 /* Compute the trace, provided we have any. */
1851 if (!btrace_data_empty (&btrace))
1852 {
1853 struct btrace_function *bfun;
1854
1855 /* Store the raw trace data. The stored data will be cleared in
1856 btrace_clear, so we always append the new trace. */
1857 btrace_data_append (&btinfo->data, &btrace);
1858 btrace_maint_clear (btinfo);
1859
1860 VEC_truncate (btrace_fun_p, btinfo->functions, 0);
1861 btrace_clear_history (btinfo);
1862 btrace_compute_ftrace (tp, &btrace);
1863
1864 for (bfun = btinfo->begin; bfun != NULL; bfun = bfun->flow.next)
1865 VEC_safe_push (btrace_fun_p, btinfo->functions, bfun);
1866 }
1867
1868 do_cleanups (cleanup);
1869}
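/* Note on the read strategy above (summary added for exposition; not
   part of the original sources): btrace_fetch tries the cheapest read
   first and falls back step by step.

     1. BTRACE_READ_DELTA - extend the trace we already have; on
        success, the new trace is stitched onto the existing one.
     2. BTRACE_READ_NEW - if the delta read failed, read any new trace;
        if we got some, the old trace is discarded.
     3. BTRACE_READ_ALL - if that failed as well, clear everything and
        re-read the complete trace.

   Only if all applicable reads fail do we signal an error.  */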
1870
1871/* See btrace.h. */
1872
1873void
1874btrace_clear (struct thread_info *tp)
1875{
1876 struct btrace_thread_info *btinfo;
1877 struct btrace_function *it, *trash;
1878
1879 DEBUG ("clear thread %s (%s)", print_thread_id (tp),
1880 target_pid_to_str (tp->ptid));
1881
1882 /* Make sure btrace frames that may hold a pointer into the branch
1883 trace data are destroyed. */
1884 reinit_frame_cache ();
1885
1886 btinfo = &tp->btrace;
1887
1888 VEC_free (btrace_fun_p, btinfo->functions);
1889
1890 it = btinfo->begin;
1891 while (it != NULL)
1892 {
1893 trash = it;
1894 it = it->flow.next;
1895
1896 VEC_free (btrace_insn_s, trash->insn);
1897 xfree (trash);
1898 }
1899
1900 btinfo->begin = NULL;
1901 btinfo->end = NULL;
1902 btinfo->ngaps = 0;
1903
1904 /* Must clear the maint data before - it depends on BTINFO->DATA. */
1905 btrace_maint_clear (btinfo);
1906 btrace_data_clear (&btinfo->data);
1907 btrace_clear_history (btinfo);
1908}
1909
1910/* See btrace.h. */
1911
1912void
1913btrace_free_objfile (struct objfile *objfile)
1914{
1915 struct thread_info *tp;
1916
1917 DEBUG ("free objfile");
1918
1919 ALL_NON_EXITED_THREADS (tp)
1920 btrace_clear (tp);
1921}
1922
1923#if defined (HAVE_LIBEXPAT)
1924
1925/* Check the btrace document version. */
1926
1927static void
1928check_xml_btrace_version (struct gdb_xml_parser *parser,
1929 const struct gdb_xml_element *element,
1930 void *user_data, VEC (gdb_xml_value_s) *attributes)
1931{
1932 const char *version
1933 = (const char *) xml_find_attribute (attributes, "version")->value;
1934
1935 if (strcmp (version, "1.0") != 0)
1936 gdb_xml_error (parser, _("Unsupported btrace version: \"%s\""), version);
1937}
1938
1939/* Parse a btrace "block" xml record. */
1940
1941static void
1942parse_xml_btrace_block (struct gdb_xml_parser *parser,
1943 const struct gdb_xml_element *element,
1944 void *user_data, VEC (gdb_xml_value_s) *attributes)
1945{
1946 struct btrace_data *btrace;
1947 struct btrace_block *block;
1948 ULONGEST *begin, *end;
1949
1950 btrace = (struct btrace_data *) user_data;
1951
1952 switch (btrace->format)
1953 {
1954 case BTRACE_FORMAT_BTS:
1955 break;
1956
1957 case BTRACE_FORMAT_NONE:
1958 btrace->format = BTRACE_FORMAT_BTS;
1959 btrace->variant.bts.blocks = NULL;
1960 break;
1961
1962 default:
1963 gdb_xml_error (parser, _("Btrace format error."));
1964 }
1965
1966 begin = (ULONGEST *) xml_find_attribute (attributes, "begin")->value;
1967 end = (ULONGEST *) xml_find_attribute (attributes, "end")->value;
c12a2917 1968
1969 block = VEC_safe_push (btrace_block_s, btrace->variant.bts.blocks, NULL);
1970 block->begin = *begin;
1971 block->end = *end;
1972}
1973
1974/* Parse a "raw" xml record. */
1975
1976static void
1977parse_xml_raw (struct gdb_xml_parser *parser, const char *body_text,
1978 gdb_byte **pdata, size_t *psize)
1979{
1980 struct cleanup *cleanup;
1981 gdb_byte *data, *bin;
1982 size_t len, size;
1983
1984 len = strlen (body_text);
1985 if (len % 2 != 0)
1986 gdb_xml_error (parser, _("Bad raw data size."));
1987
1988 size = len / 2;
1989
1990 bin = data = (gdb_byte *) xmalloc (size);
1991 cleanup = make_cleanup (xfree, data);
1992
1993 /* We use hex encoding - see common/rsp-low.h. */
1994 while (len > 0)
1995 {
1996 char hi, lo;
1997
1998 hi = *body_text++;
1999 lo = *body_text++;
2000
2001 if (hi == 0 || lo == 0)
2002 gdb_xml_error (parser, _("Bad hex encoding."));
2003
2004 *bin++ = fromhex (hi) * 16 + fromhex (lo);
2005 len -= 2;
2006 }
2007
2008 discard_cleanups (cleanup);
2009
2010 *pdata = data;
2011 *psize = size;
2012}
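/* Worked example for the decoder above (added for exposition): the
   encoding stores one byte as two hex characters, so a body text of
   "0394" decodes to the two bytes 0x03 and 0x94.  An odd-length body
   can never describe a whole number of bytes and is rejected with
   "Bad raw data size.".  */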
2013
2014/* Parse a btrace pt-config "cpu" xml record. */
2015
2016static void
2017parse_xml_btrace_pt_config_cpu (struct gdb_xml_parser *parser,
2018 const struct gdb_xml_element *element,
2019 void *user_data,
2020 VEC (gdb_xml_value_s) *attributes)
2021{
2022 struct btrace_data *btrace;
2023 const char *vendor;
2024 ULONGEST *family, *model, *stepping;
2025
2026 vendor = (const char *) xml_find_attribute (attributes, "vendor")->value;
2027 family = (ULONGEST *) xml_find_attribute (attributes, "family")->value;
2028 model = (ULONGEST *) xml_find_attribute (attributes, "model")->value;
2029 stepping = (ULONGEST *) xml_find_attribute (attributes, "stepping")->value;
2030
2031 btrace = (struct btrace_data *) user_data;
2032
2033 if (strcmp (vendor, "GenuineIntel") == 0)
2034 btrace->variant.pt.config.cpu.vendor = CV_INTEL;
2035
2036 btrace->variant.pt.config.cpu.family = *family;
2037 btrace->variant.pt.config.cpu.model = *model;
2038 btrace->variant.pt.config.cpu.stepping = *stepping;
2039}
2040
2041/* Parse a btrace pt "raw" xml record. */
2042
2043static void
2044parse_xml_btrace_pt_raw (struct gdb_xml_parser *parser,
2045 const struct gdb_xml_element *element,
2046 void *user_data, const char *body_text)
2047{
2048 struct btrace_data *btrace;
2049
2050 btrace = (struct btrace_data *) user_data;
2051 parse_xml_raw (parser, body_text, &btrace->variant.pt.data,
2052 &btrace->variant.pt.size);
2053}
2054
2055/* Parse a btrace "pt" xml record. */
2056
2057static void
2058parse_xml_btrace_pt (struct gdb_xml_parser *parser,
2059 const struct gdb_xml_element *element,
2060 void *user_data, VEC (gdb_xml_value_s) *attributes)
2061{
2062 struct btrace_data *btrace;
2063
2064 btrace = (struct btrace_data *) user_data;
2065 btrace->format = BTRACE_FORMAT_PT;
2066 btrace->variant.pt.config.cpu.vendor = CV_UNKNOWN;
2067 btrace->variant.pt.data = NULL;
2068 btrace->variant.pt.size = 0;
2069}
2070
2071static const struct gdb_xml_attribute block_attributes[] = {
2072 { "begin", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
2073 { "end", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
2074 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2075};
2076
2077static const struct gdb_xml_attribute btrace_pt_config_cpu_attributes[] = {
2078 { "vendor", GDB_XML_AF_NONE, NULL, NULL },
2079 { "family", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
2080 { "model", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
2081 { "stepping", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
2082 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2083};
2084
2085static const struct gdb_xml_element btrace_pt_config_children[] = {
2086 { "cpu", btrace_pt_config_cpu_attributes, NULL, GDB_XML_EF_OPTIONAL,
2087 parse_xml_btrace_pt_config_cpu, NULL },
2088 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2089};
2090
2091static const struct gdb_xml_element btrace_pt_children[] = {
2092 { "pt-config", NULL, btrace_pt_config_children, GDB_XML_EF_OPTIONAL, NULL,
2093 NULL },
2094 { "raw", NULL, NULL, GDB_XML_EF_OPTIONAL, NULL, parse_xml_btrace_pt_raw },
2095 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2096};
2097
2098static const struct gdb_xml_attribute btrace_attributes[] = {
2099 { "version", GDB_XML_AF_NONE, NULL, NULL },
2100 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2101};
2102
2103static const struct gdb_xml_element btrace_children[] = {
2104 { "block", block_attributes, NULL,
2105 GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL, parse_xml_btrace_block, NULL },
2106 { "pt", NULL, btrace_pt_children, GDB_XML_EF_OPTIONAL, parse_xml_btrace_pt,
2107 NULL },
2108 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2109};
2110
2111static const struct gdb_xml_element btrace_elements[] = {
2112 { "btrace", btrace_attributes, btrace_children, GDB_XML_EF_NONE,
2113 check_xml_btrace_version, NULL },
2114 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2115};
2116
2117#endif /* defined (HAVE_LIBEXPAT) */
2118
2119/* See btrace.h. */
2120
2121void
2122parse_xml_btrace (struct btrace_data *btrace, const char *buffer)
2123{
2124 struct cleanup *cleanup;
2125 int errcode;
2126
2127#if defined (HAVE_LIBEXPAT)
2128
2129 btrace->format = BTRACE_FORMAT_NONE;
2130
2131 cleanup = make_cleanup_btrace_data (btrace);
2132 errcode = gdb_xml_parse_quick (_("btrace"), "btrace.dtd", btrace_elements,
2133 buffer, btrace);
2134 if (errcode != 0)
2135 error (_("Error parsing branch trace."));
2136
2137 /* Keep parse results. */
2138 discard_cleanups (cleanup);
2139
2140#else /* !defined (HAVE_LIBEXPAT) */
2141
2142 error (_("Cannot process branch trace. XML parsing is not supported."));
2143
2144#endif /* !defined (HAVE_LIBEXPAT) */
2145}
2146
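/* Example (hypothetical document, added for exposition): a BTS btrace
   document accepted by the parser above.  The addresses are made up.

     <btrace version="1.0">
       <block begin="0x400500" end="0x400520"/>
       <block begin="0x400530" end="0x400550"/>
     </btrace>

   Each block appends one btrace_block to BTRACE->VARIANT.BTS.BLOCKS.  */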
2147#if defined (HAVE_LIBEXPAT)
2148
2149/* Parse a btrace-conf "bts" xml record. */
2150
2151static void
2152parse_xml_btrace_conf_bts (struct gdb_xml_parser *parser,
2153 const struct gdb_xml_element *element,
2154 void *user_data, VEC (gdb_xml_value_s) *attributes)
2155{
2156 struct btrace_config *conf;
2157 struct gdb_xml_value *size;
2158
2159 conf = (struct btrace_config *) user_data;
2160 conf->format = BTRACE_FORMAT_BTS;
2161 conf->bts.size = 0;
2162
2163 size = xml_find_attribute (attributes, "size");
2164 if (size != NULL)
2165 conf->bts.size = (unsigned int) *(ULONGEST *) size->value;
2166}
2167
2168/* Parse a btrace-conf "pt" xml record. */
2169
2170static void
2171parse_xml_btrace_conf_pt (struct gdb_xml_parser *parser,
2172 const struct gdb_xml_element *element,
2173 void *user_data, VEC (gdb_xml_value_s) *attributes)
2174{
2175 struct btrace_config *conf;
2176 struct gdb_xml_value *size;
2177
2178 conf = (struct btrace_config *) user_data;
2179 conf->format = BTRACE_FORMAT_PT;
2180 conf->pt.size = 0;
2181
2182 size = xml_find_attribute (attributes, "size");
2183 if (size != NULL)
2184 conf->pt.size = (unsigned int) *(ULONGEST *) size->value;
2185}
2186
2187static const struct gdb_xml_attribute btrace_conf_pt_attributes[] = {
2188 { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
2189 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2190};
2191
2192static const struct gdb_xml_attribute btrace_conf_bts_attributes[] = {
2193 { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
2194 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2195};
2196
2197static const struct gdb_xml_element btrace_conf_children[] = {
2198 { "bts", btrace_conf_bts_attributes, NULL, GDB_XML_EF_OPTIONAL,
2199 parse_xml_btrace_conf_bts, NULL },
2200 { "pt", btrace_conf_pt_attributes, NULL, GDB_XML_EF_OPTIONAL,
2201 parse_xml_btrace_conf_pt, NULL },
2202 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2203};
2204
2205static const struct gdb_xml_attribute btrace_conf_attributes[] = {
2206 { "version", GDB_XML_AF_NONE, NULL, NULL },
2207 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2208};
2209
2210static const struct gdb_xml_element btrace_conf_elements[] = {
2211 { "btrace-conf", btrace_conf_attributes, btrace_conf_children,
2212 GDB_XML_EF_NONE, NULL, NULL },
2213 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2214};
2215
2216#endif /* defined (HAVE_LIBEXPAT) */
2217
2218/* See btrace.h. */
2219
2220void
2221parse_xml_btrace_conf (struct btrace_config *conf, const char *xml)
2222{
2223 int errcode;
2224
2225#if defined (HAVE_LIBEXPAT)
2226
2227 errcode = gdb_xml_parse_quick (_("btrace-conf"), "btrace-conf.dtd",
2228 btrace_conf_elements, xml, conf);
2229 if (errcode != 0)
2230 error (_("Error parsing branch trace configuration."));
2231
2232#else /* !defined (HAVE_LIBEXPAT) */
2233
2234 error (_("XML parsing is not supported."));
2235
2236#endif /* !defined (HAVE_LIBEXPAT) */
2237}
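/* Example (hypothetical document, added for exposition): a btrace-conf
   document accepted by the parser above.  The size value is made up.

     <btrace-conf version="1.0">
       <pt size="16384"/>
     </btrace-conf>

   This sets CONF->FORMAT to BTRACE_FORMAT_PT and CONF->PT.SIZE to
   16384; a <bts> child would select BTRACE_FORMAT_BTS instead.  */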
2238
2239/* See btrace.h. */
2240
2241const struct btrace_insn *
2242btrace_insn_get (const struct btrace_insn_iterator *it)
2243{
2244 const struct btrace_function *bfun;
2245 unsigned int index, end;
2246
2247 index = it->index;
2248 bfun = it->function;
2249
2250 /* Check if the iterator points to a gap in the trace. */
2251 if (bfun->errcode != 0)
2252 return NULL;
2253
2254 /* The index is within the bounds of this function's instruction vector. */
2255 end = VEC_length (btrace_insn_s, bfun->insn);
2256 gdb_assert (0 < end);
2257 gdb_assert (index < end);
2258
2259 return VEC_index (btrace_insn_s, bfun->insn, index);
2260}
2261
2262/* See btrace.h. */
2263
2264int
2265btrace_insn_get_error (const struct btrace_insn_iterator *it)
2266{
2267 return it->function->errcode;
2268}
2269
2270/* See btrace.h. */
2271
2272unsigned int
2273btrace_insn_number (const struct btrace_insn_iterator *it)
2274{
2275 return it->function->insn_offset + it->index;
2276}
2277
2278/* See btrace.h. */
2279
2280void
2281btrace_insn_begin (struct btrace_insn_iterator *it,
2282 const struct btrace_thread_info *btinfo)
2283{
2284 const struct btrace_function *bfun;
2285
2286 bfun = btinfo->begin;
2287 if (bfun == NULL)
2288 error (_("No trace."));
2289
2290 it->function = bfun;
2291 it->index = 0;
2292}
2293
2294/* See btrace.h. */
2295
2296void
2297btrace_insn_end (struct btrace_insn_iterator *it,
2298 const struct btrace_thread_info *btinfo)
2299{
2300 const struct btrace_function *bfun;
2301 unsigned int length;
2302
2303 bfun = btinfo->end;
2304 if (bfun == NULL)
2305 error (_("No trace."));
2306
2307 length = VEC_length (btrace_insn_s, bfun->insn);
2308
2309 /* The last function may either be a gap or it contains the current
2310 instruction, which is one past the end of the execution trace; ignore
2311 it. */
2312 if (length > 0)
2313 length -= 1;
2314
2315 it->function = bfun;
2316 it->index = length;
2317}
2318
2319/* See btrace.h. */
2320
2321unsigned int
2322btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
2323{
2324 const struct btrace_function *bfun;
2325 unsigned int index, steps;
2326
2327 bfun = it->function;
2328 steps = 0;
2329 index = it->index;
2330
2331 while (stride != 0)
2332 {
2333 unsigned int end, space, adv;
2334
2335 end = VEC_length (btrace_insn_s, bfun->insn);
2336
2337 /* An empty function segment represents a gap in the trace. We count
2338 it as one instruction. */
2339 if (end == 0)
2340 {
2341 const struct btrace_function *next;
2342
2343 next = bfun->flow.next;
2344 if (next == NULL)
2345 break;
2346
2347 stride -= 1;
2348 steps += 1;
2349
2350 bfun = next;
2351 index = 0;
2352
2353 continue;
2354 }
2355
2356 gdb_assert (0 < end);
2357 gdb_assert (index < end);
2358
2359 /* Compute the number of instructions remaining in this segment. */
2360 space = end - index;
2361
2362 /* Advance the iterator as far as possible within this segment. */
2363 adv = std::min (space, stride);
2364 stride -= adv;
2365 index += adv;
2366 steps += adv;
2367
2368 /* Move to the next function if we're at the end of this one. */
2369 if (index == end)
2370 {
2371 const struct btrace_function *next;
2372
2373 next = bfun->flow.next;
2374 if (next == NULL)
2375 {
2376 /* We stepped past the last function.
2377
2378 Let's adjust the index to point to the last instruction in
2379 the previous function. */
2380 index -= 1;
2381 steps -= 1;
2382 break;
2383 }
2384
2385 /* We now point to the first instruction in the new function. */
2386 bfun = next;
2387 index = 0;
2388 }
2389
2390 /* We did make progress. */
2391 gdb_assert (adv > 0);
2392 }
2393
2394 /* Update the iterator. */
2395 it->function = bfun;
2396 it->index = index;
2397
2398 return steps;
2399}
2400
2401/* See btrace.h. */
2402
2403unsigned int
2404btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
2405{
2406 const struct btrace_function *bfun;
2407 unsigned int index, steps;
2408
2409 bfun = it->function;
2410 steps = 0;
2411 index = it->index;
2412
2413 while (stride != 0)
2414 {
2415 unsigned int adv;
2416
2417 /* Move to the previous function if we're at the start of this one. */
2418 if (index == 0)
2419 {
2420 const struct btrace_function *prev;
2421
2422 prev = bfun->flow.prev;
2423 if (prev == NULL)
2424 break;
2425
2426 /* We point to one after the last instruction in the new function. */
2427 bfun = prev;
2428 index = VEC_length (btrace_insn_s, bfun->insn);
2429
2430 /* An empty function segment represents a gap in the trace. We count
2431 it as one instruction. */
2432 if (index == 0)
2433 {
2434 stride -= 1;
2435 steps += 1;
2436
2437 continue;
2438 }
2439 }
2440
2441 /* Advance the iterator as far as possible within this segment. */
2442 adv = std::min (index, stride);
2443
2444 stride -= adv;
2445 index -= adv;
2446 steps += adv;
2447
2448 /* We did make progress. */
2449 gdb_assert (adv > 0);
2450 }
2451
2452 /* Update the iterator. */
2453 it->function = bfun;
2454 it->index = index;
2455
2456 return steps;
2457}
2458
2459/* See btrace.h. */
2460
2461int
2462btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
2463 const struct btrace_insn_iterator *rhs)
2464{
2465 unsigned int lnum, rnum;
2466
2467 lnum = btrace_insn_number (lhs);
2468 rnum = btrace_insn_number (rhs);
2469
2470 return (int) (lnum - rnum);
2471}
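/* Usage sketch (added for exposition; not part of the original
   sources): walking the complete instruction history with the
   iterators above, where 'process' stands for whatever the caller does
   with each PC.  Gaps in the trace make btrace_insn_get return NULL,
   so callers have to check for that.

     struct btrace_insn_iterator it, end;

     btrace_insn_begin (&it, btinfo);
     btrace_insn_end (&end, btinfo);

     while (btrace_insn_cmp (&it, &end) < 0)
       {
         const struct btrace_insn *insn = btrace_insn_get (&it);

         if (insn != NULL)
           process (insn->pc);

         if (btrace_insn_next (&it, 1) == 0)
           break;
       }  */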
2472
2473/* See btrace.h. */
2474
2475int
2476btrace_find_insn_by_number (struct btrace_insn_iterator *it,
2477 const struct btrace_thread_info *btinfo,
2478 unsigned int number)
2479{
2480 const struct btrace_function *bfun;
2481 unsigned int upper, lower;
2482
2483 if (VEC_empty (btrace_fun_p, btinfo->functions))
2484 return 0;
2485
2486 lower = 0;
2487 bfun = VEC_index (btrace_fun_p, btinfo->functions, lower);
2488 if (number < bfun->insn_offset)
2489 return 0;
2490
2491 upper = VEC_length (btrace_fun_p, btinfo->functions) - 1;
2492 bfun = VEC_index (btrace_fun_p, btinfo->functions, upper);
2493 if (number >= bfun->insn_offset + ftrace_call_num_insn (bfun))
2494 return 0;
2495
2496 /* We assume that there are no holes in the numbering. */
2497 for (;;)
2498 {
2499 const unsigned int average = lower + (upper - lower) / 2;
2500
2501 bfun = VEC_index (btrace_fun_p, btinfo->functions, average);
2502
2503 if (number < bfun->insn_offset)
2504 {
2505 upper = average - 1;
2506 continue;
2507 }
2508
2509 if (number >= bfun->insn_offset + ftrace_call_num_insn (bfun))
2510 {
2511 lower = average + 1;
2512 continue;
2513 }
2514
2515 break;
2516 }
2517
2518 it->function = bfun;
2519 it->index = number - bfun->insn_offset;
2520 return 1;
2521}
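/* Usage sketch (added for exposition): positioning an iterator on a
   given instruction number with the binary search above.

     struct btrace_insn_iterator it;

     if (btrace_find_insn_by_number (&it, btinfo, number))
       insn = btrace_insn_get (&it);

   The search relies on BTINFO->FUNCTIONS being ordered by INSN_OFFSET
   and on the instruction numbering having no holes.  */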
2522
2523/* See btrace.h. */
2524
2525const struct btrace_function *
2526btrace_call_get (const struct btrace_call_iterator *it)
2527{
2528 return it->function;
2529}
2530
2531/* See btrace.h. */
2532
2533unsigned int
2534btrace_call_number (const struct btrace_call_iterator *it)
2535{
2536 const struct btrace_thread_info *btinfo;
2537 const struct btrace_function *bfun;
2538 unsigned int insns;
2539
2540 btinfo = it->btinfo;
2541 bfun = it->function;
2542 if (bfun != NULL)
2543 return bfun->number;
2544
2545 /* For the end iterator, i.e. bfun == NULL, we return one more than the
2546 number of the last function. */
2547 bfun = btinfo->end;
2548 insns = VEC_length (btrace_insn_s, bfun->insn);
2549
2550 /* If the function contains only a single instruction (i.e. the current
2551 instruction), it will be skipped and its number is already the number
2552 we seek. */
2553 if (insns == 1)
2554 return bfun->number;
2555
2556 /* Otherwise, return one more than the number of the last function. */
2557 return bfun->number + 1;
2558}
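/* Worked example for the end iterator case above (added for
   exposition): assume the trace ends in function segment number 5.  If
   that segment contains only the current instruction, it is skipped
   and the end iterator reports 5; otherwise the end iterator reports
   6, one more than the last function's number.  */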
2559
2560/* See btrace.h. */
2561
2562void
2563btrace_call_begin (struct btrace_call_iterator *it,
2564 const struct btrace_thread_info *btinfo)
2565{
2566 const struct btrace_function *bfun;
2567
2568 bfun = btinfo->begin;
2569 if (bfun == NULL)
2570 error (_("No trace."));
2571
2572 it->btinfo = btinfo;
2573 it->function = bfun;
2574}
2575
2576/* See btrace.h. */
2577
2578void
2579btrace_call_end (struct btrace_call_iterator *it,
2580 const struct btrace_thread_info *btinfo)
2581{
2582 const struct btrace_function *bfun;
2583
2584 bfun = btinfo->end;
2585 if (bfun == NULL)
2586 error (_("No trace."));
2587
2588 it->btinfo = btinfo;
2589 it->function = NULL;
2590}
2591
2592/* See btrace.h. */
2593
2594unsigned int
2595btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
2596{
2597 const struct btrace_function *bfun;
2598 unsigned int steps;
2599
2600 bfun = it->function;
2601 steps = 0;
2602 while (bfun != NULL)
2603 {
2604 const struct btrace_function *next;
2605 unsigned int insns;
2606
2607 next = bfun->flow.next;
2608 if (next == NULL)
2609 {
2610 /* Ignore the last function if it only contains a single
2611 (i.e. the current) instruction. */
2612 insns = VEC_length (btrace_insn_s, bfun->insn);
2613 if (insns == 1)
2614 steps -= 1;
2615 }
2616
2617 if (stride == steps)
2618 break;
2619
2620 bfun = next;
2621 steps += 1;
2622 }
2623
2624 it->function = bfun;
2625 return steps;
2626}
2627
2628/* See btrace.h. */
2629
2630unsigned int
2631btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride)
2632{
2633 const struct btrace_thread_info *btinfo;
2634 const struct btrace_function *bfun;
2635 unsigned int steps;
2636
2637 bfun = it->function;
2638 steps = 0;
2639
2640 if (bfun == NULL)
2641 {
2642 unsigned int insns;
2643
2644 btinfo = it->btinfo;
2645 bfun = btinfo->end;
2646 if (bfun == NULL)
2647 return 0;
2648
2649 /* Ignore the last function if it only contains a single
2650 (i.e. the current) instruction. */
2651 insns = VEC_length (btrace_insn_s, bfun->insn);
2652 if (insns == 1)
2653 bfun = bfun->flow.prev;
2654
2655 if (bfun == NULL)
2656 return 0;
2657
2658 steps += 1;
2659 }
2660
2661 while (steps < stride)
2662 {
2663 const struct btrace_function *prev;
2664
2665 prev = bfun->flow.prev;
2666 if (prev == NULL)
2667 break;
2668
2669 bfun = prev;
2670 steps += 1;
2671 }
2672
2673 it->function = bfun;
2674 return steps;
2675}
2676
2677/* See btrace.h. */
2678
2679int
2680btrace_call_cmp (const struct btrace_call_iterator *lhs,
2681 const struct btrace_call_iterator *rhs)
2682{
2683 unsigned int lnum, rnum;
2684
2685 lnum = btrace_call_number (lhs);
2686 rnum = btrace_call_number (rhs);
2687
2688 return (int) (lnum - rnum);
2689}
2690
2691/* See btrace.h. */
2692
2693int
2694btrace_find_call_by_number (struct btrace_call_iterator *it,
2695 const struct btrace_thread_info *btinfo,
2696 unsigned int number)
2697{
2698 const struct btrace_function *bfun;
2699
2700 for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
2701 {
2702 unsigned int bnum;
2703
2704 bnum = bfun->number;
2705 if (number == bnum)
2706 {
2707 it->btinfo = btinfo;
2708 it->function = bfun;
2709 return 1;
2710 }
2711
2712 /* Functions are ordered and numbered consecutively. We could bail out
2713 earlier. On the other hand, it is very unlikely that we search for
2714 a nonexistent function. */
2715 }
2716
2717 return 0;
2718}
2719
2720/* See btrace.h. */
2721
2722void
2723btrace_set_insn_history (struct btrace_thread_info *btinfo,
2724 const struct btrace_insn_iterator *begin,
2725 const struct btrace_insn_iterator *end)
2726{
2727 if (btinfo->insn_history == NULL)
2728 btinfo->insn_history = XCNEW (struct btrace_insn_history);
2729
2730 btinfo->insn_history->begin = *begin;
2731 btinfo->insn_history->end = *end;
2732}
2733
2734/* See btrace.h. */
2735
2736void
2737btrace_set_call_history (struct btrace_thread_info *btinfo,
2738 const struct btrace_call_iterator *begin,
2739 const struct btrace_call_iterator *end)
2740{
2741 gdb_assert (begin->btinfo == end->btinfo);
2742
2743 if (btinfo->call_history == NULL)
2744 btinfo->call_history = XCNEW (struct btrace_call_history);
2745
2746 btinfo->call_history->begin = *begin;
2747 btinfo->call_history->end = *end;
2748}
2749
2750/* See btrace.h. */
2751
2752int
2753btrace_is_replaying (struct thread_info *tp)
2754{
2755 return tp->btrace.replay != NULL;
2756}
2757
2758/* See btrace.h. */
2759
2760int
2761btrace_is_empty (struct thread_info *tp)
2762{
2763 struct btrace_insn_iterator begin, end;
2764 struct btrace_thread_info *btinfo;
2765
2766 btinfo = &tp->btrace;
2767
2768 if (btinfo->begin == NULL)
2769 return 1;
2770
2771 btrace_insn_begin (&begin, btinfo);
2772 btrace_insn_end (&end, btinfo);
2773
2774 return btrace_insn_cmp (&begin, &end) == 0;
2775}
2776
2777/* Forward the cleanup request. */
2778
2779static void
2780do_btrace_data_cleanup (void *arg)
2781{
2782 btrace_data_fini ((struct btrace_data *) arg);
2783}
2784
2785/* See btrace.h. */
2786
2787struct cleanup *
2788make_cleanup_btrace_data (struct btrace_data *data)
2789{
2790 return make_cleanup (do_btrace_data_cleanup, data);
2791}
2792
2793#if defined (HAVE_LIBIPT)
2794
2795/* Print a single packet. */
2796
2797static void
2798pt_print_packet (const struct pt_packet *packet)
2799{
2800 switch (packet->type)
2801 {
2802 default:
2803 printf_unfiltered (("[??: %x]"), packet->type);
2804 break;
2805
2806 case ppt_psb:
2807 printf_unfiltered (("psb"));
2808 break;
2809
2810 case ppt_psbend:
2811 printf_unfiltered (("psbend"));
2812 break;
2813
2814 case ppt_pad:
2815 printf_unfiltered (("pad"));
2816 break;
2817
2818 case ppt_tip:
2819 printf_unfiltered (("tip %u: 0x%" PRIx64 ""),
2820 packet->payload.ip.ipc,
2821 packet->payload.ip.ip);
2822 break;
2823
2824 case ppt_tip_pge:
2825 printf_unfiltered (("tip.pge %u: 0x%" PRIx64 ""),
2826 packet->payload.ip.ipc,
2827 packet->payload.ip.ip);
2828 break;
2829
2830 case ppt_tip_pgd:
2831 printf_unfiltered (("tip.pgd %u: 0x%" PRIx64 ""),
2832 packet->payload.ip.ipc,
2833 packet->payload.ip.ip);
2834 break;
2835
2836 case ppt_fup:
2837 printf_unfiltered (("fup %u: 0x%" PRIx64 ""),
2838 packet->payload.ip.ipc,
2839 packet->payload.ip.ip);
2840 break;
2841
2842 case ppt_tnt_8:
2843 printf_unfiltered (("tnt-8 %u: 0x%" PRIx64 ""),
2844 packet->payload.tnt.bit_size,
2845 packet->payload.tnt.payload);
2846 break;
2847
2848 case ppt_tnt_64:
2849 printf_unfiltered (("tnt-64 %u: 0x%" PRIx64 ""),
2850 packet->payload.tnt.bit_size,
2851 packet->payload.tnt.payload);
2852 break;
2853
2854 case ppt_pip:
2855 printf_unfiltered (("pip %" PRIx64 "%s"), packet->payload.pip.cr3,
2856 packet->payload.pip.nr ? (" nr") : (""));
2857 break;
2858
2859 case ppt_tsc:
2860 printf_unfiltered (("tsc %" PRIx64 ""), packet->payload.tsc.tsc);
2861 break;
2862
2863 case ppt_cbr:
2864 printf_unfiltered (("cbr %u"), packet->payload.cbr.ratio);
2865 break;
2866
2867 case ppt_mode:
2868 switch (packet->payload.mode.leaf)
2869 {
2870 default:
2871 printf_unfiltered (("mode %u"), packet->payload.mode.leaf);
2872 break;
2873
2874 case pt_mol_exec:
2875 printf_unfiltered (("mode.exec%s%s"),
2876 packet->payload.mode.bits.exec.csl
2877 ? (" cs.l") : (""),
2878 packet->payload.mode.bits.exec.csd
2879 ? (" cs.d") : (""));
2880 break;
2881
2882 case pt_mol_tsx:
2883 printf_unfiltered (("mode.tsx%s%s"),
2884 packet->payload.mode.bits.tsx.intx
2885 ? (" intx") : (""),
2886 packet->payload.mode.bits.tsx.abrt
2887 ? (" abrt") : (""));
2888 break;
2889 }
2890 break;
2891
2892 case ppt_ovf:
2893 printf_unfiltered (("ovf"));
2894 break;
2895
2896 case ppt_stop:
2897 printf_unfiltered (("stop"));
2898 break;
2899
2900 case ppt_vmcs:
2901 printf_unfiltered (("vmcs %" PRIx64 ""), packet->payload.vmcs.base);
2902 break;
2903
2904 case ppt_tma:
2905 printf_unfiltered (("tma %x %x"), packet->payload.tma.ctc,
2906 packet->payload.tma.fc);
2907 break;
2908
2909 case ppt_mtc:
2910 printf_unfiltered (("mtc %x"), packet->payload.mtc.ctc);
2911 break;
2912
2913 case ppt_cyc:
2914 printf_unfiltered (("cyc %" PRIx64 ""), packet->payload.cyc.value);
2915 break;
2916
2917 case ppt_mnt:
2918 printf_unfiltered (("mnt %" PRIx64 ""), packet->payload.mnt.payload);
2919 break;
2920 }
2921}
2922
2923/* Decode packets into MAINT using DECODER. */
2924
2925static void
2926btrace_maint_decode_pt (struct btrace_maint_info *maint,
2927 struct pt_packet_decoder *decoder)
2928{
2929 int errcode;
2930
2931 for (;;)
2932 {
2933 struct btrace_pt_packet packet;
2934
2935 errcode = pt_pkt_sync_forward (decoder);
2936 if (errcode < 0)
2937 break;
2938
2939 for (;;)
2940 {
2941 pt_pkt_get_offset (decoder, &packet.offset);
2942
2943 errcode = pt_pkt_next (decoder, &packet.packet,
2944 sizeof(packet.packet));
2945 if (errcode < 0)
2946 break;
2947
2948 if (maint_btrace_pt_skip_pad == 0 || packet.packet.type != ppt_pad)
2949 {
2950 packet.errcode = pt_errcode (errcode);
2951 VEC_safe_push (btrace_pt_packet_s, maint->variant.pt.packets,
2952 &packet);
2953 }
2954 }
2955
2956 if (errcode == -pte_eos)
2957 break;
2958
2959 packet.errcode = pt_errcode (errcode);
2960 VEC_safe_push (btrace_pt_packet_s, maint->variant.pt.packets,
2961 &packet);
2962
2963 warning (_("Error at trace offset 0x%" PRIx64 ": %s."),
2964 packet.offset, pt_errstr (packet.errcode));
2965 }
2966
2967 if (errcode != -pte_eos)
2968 warning (_("Failed to synchronize onto the Intel Processor Trace "
2969 "stream: %s."), pt_errstr (pt_errcode (errcode)));
2970}
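/* Note on the loop above (added for exposition): pt_pkt_sync_forward
   skips ahead to the next synchronization point in the trace and
   pt_pkt_next then yields packets until it returns a negative error.
   -pte_eos at either level means the end of the trace was reached
   cleanly; any other error is recorded as a packet with a non-zero
   errcode and additionally reported as a warning.  */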
2971
2972/* Update the packet history in BTINFO. */
2973
2974static void
2975btrace_maint_update_pt_packets (struct btrace_thread_info *btinfo)
2976{
2978 struct pt_packet_decoder *decoder;
2979 struct btrace_data_pt *pt;
2980 struct pt_config config;
2981 int errcode;
2982
2983 pt = &btinfo->data.variant.pt;
2984
2985 /* Nothing to do if there is no trace. */
2986 if (pt->size == 0)
2987 return;
2988
2989 memset (&config, 0, sizeof(config));
2990
2991 config.size = sizeof (config);
2992 config.begin = pt->data;
2993 config.end = pt->data + pt->size;
2994
2995 config.cpu.vendor = pt_translate_cpu_vendor (pt->config.cpu.vendor);
2996 config.cpu.family = pt->config.cpu.family;
2997 config.cpu.model = pt->config.cpu.model;
2998 config.cpu.stepping = pt->config.cpu.stepping;
2999
3000 errcode = pt_cpu_errata (&config.errata, &config.cpu);
3001 if (errcode < 0)
3002 error (_("Failed to configure the Intel Processor Trace decoder: %s."),
3003 pt_errstr (pt_errcode (errcode)));
3004
3005 decoder = pt_pkt_alloc_decoder (&config);
3006 if (decoder == NULL)
3007 error (_("Failed to allocate the Intel Processor Trace decoder."));
3008
3009 TRY
3010 {
3011 btrace_maint_decode_pt (&btinfo->maint, decoder);
3012 }
3013 CATCH (except, RETURN_MASK_ALL)
3014 {
3015 pt_pkt_free_decoder (decoder);
3016
3017 if (except.reason < 0)
3018 throw_exception (except);
3019 }
3020 END_CATCH
3021
3022 pt_pkt_free_decoder (decoder);
3023}
3024
3025#endif /* defined (HAVE_LIBIPT) */
3026
3027/* Update the packet maintenance information for BTINFO and store the
3028 low and high bounds into BEGIN and END, respectively.
3029 Store the current iterator state into FROM and TO. */
3030
3031static void
3032btrace_maint_update_packets (struct btrace_thread_info *btinfo,
3033 unsigned int *begin, unsigned int *end,
3034 unsigned int *from, unsigned int *to)
3035{
3036 switch (btinfo->data.format)
3037 {
3038 default:
3039 *begin = 0;
3040 *end = 0;
3041 *from = 0;
3042 *to = 0;
3043 break;
3044
3045 case BTRACE_FORMAT_BTS:
3046 /* Nothing to do - we operate directly on BTINFO->DATA. */
3047 *begin = 0;
3048 *end = VEC_length (btrace_block_s, btinfo->data.variant.bts.blocks);
3049 *from = btinfo->maint.variant.bts.packet_history.begin;
3050 *to = btinfo->maint.variant.bts.packet_history.end;
3051 break;
3052
3053#if defined (HAVE_LIBIPT)
3054 case BTRACE_FORMAT_PT:
3055 if (VEC_empty (btrace_pt_packet_s, btinfo->maint.variant.pt.packets))
3056 btrace_maint_update_pt_packets (btinfo);
3057
3058 *begin = 0;
3059 *end = VEC_length (btrace_pt_packet_s, btinfo->maint.variant.pt.packets);
3060 *from = btinfo->maint.variant.pt.packet_history.begin;
3061 *to = btinfo->maint.variant.pt.packet_history.end;
3062 break;
3063#endif /* defined (HAVE_LIBIPT) */
3064 }
3065}
3066
3067/* Print packets in BTINFO from BEGIN (inclusive) until END (exclusive) and
3068 update the current iterator position. */
3069
3070static void
3071btrace_maint_print_packets (struct btrace_thread_info *btinfo,
3072 unsigned int begin, unsigned int end)
3073{
3074 switch (btinfo->data.format)
3075 {
3076 default:
3077 break;
3078
3079 case BTRACE_FORMAT_BTS:
3080 {
3081 VEC (btrace_block_s) *blocks;
3082 unsigned int blk;
3083
3084 blocks = btinfo->data.variant.bts.blocks;
3085 for (blk = begin; blk < end; ++blk)
3086 {
3087 const btrace_block_s *block;
3088
3089 block = VEC_index (btrace_block_s, blocks, blk);
3090
3091 printf_unfiltered ("%u\tbegin: %s, end: %s\n", blk,
3092 core_addr_to_string_nz (block->begin),
3093 core_addr_to_string_nz (block->end));
3094 }
3095
3096 btinfo->maint.variant.bts.packet_history.begin = begin;
3097 btinfo->maint.variant.bts.packet_history.end = end;
3098 }
3099 break;
3100
3101#if defined (HAVE_LIBIPT)
3102 case BTRACE_FORMAT_PT:
3103 {
3104 VEC (btrace_pt_packet_s) *packets;
3105 unsigned int pkt;
3106
3107 packets = btinfo->maint.variant.pt.packets;
3108 for (pkt = begin; pkt < end; ++pkt)
3109 {
3110 const struct btrace_pt_packet *packet;
3111
3112 packet = VEC_index (btrace_pt_packet_s, packets, pkt);
3113
3114 printf_unfiltered ("%u\t", pkt);
3115 printf_unfiltered ("0x%" PRIx64 "\t", packet->offset);
3116
3117 if (packet->errcode == pte_ok)
3118 pt_print_packet (&packet->packet);
3119 else
3120 printf_unfiltered ("[error: %s]", pt_errstr (packet->errcode));
3121
3122 printf_unfiltered ("\n");
3123 }
3124
3125 btinfo->maint.variant.pt.packet_history.begin = begin;
3126 btinfo->maint.variant.pt.packet_history.end = end;
3127 }
3128 break;
3129#endif /* defined (HAVE_LIBIPT) */
3130 }
3131}
3132
3133/* Read a number from an argument string. */
3134
3135static unsigned int
3136get_uint (char **arg)
3137{
3138 char *begin, *end, *pos;
3139 unsigned long number;
3140
3141 begin = *arg;
3142 pos = skip_spaces (begin);
3143
3144 if (!isdigit (*pos))
3145 error (_("Expected positive number, got: %s."), pos);
3146
3147 number = strtoul (pos, &end, 10);
3148 if (number > UINT_MAX)
3149 error (_("Number too big."));
3150
3151 *arg += (end - begin);
3152
3153 return (unsigned int) number;
3154}
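/* Example (added for exposition): with *ARG pointing at "12,+3",
   get_uint returns 12 and advances *ARG to point at ",+3"; the caller
   decides how to interpret the remainder.  */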
3155
3156/* Read a context size from an argument string. */
3157
3158static int
3159get_context_size (char **arg)
3160{
3161 char *pos;
3163
3164 pos = skip_spaces (*arg);
3165
3166 if (!isdigit (*pos))
3167 error (_("Expected positive number, got: %s."), pos);
3168
3169 return strtol (pos, arg, 10);
3170}
3171
3172/* Complain about junk at the end of an argument string. */
3173
3174static void
3175no_chunk (char *arg)
3176{
3177 if (*arg != 0)
3178 error (_("Junk after argument: %s."), arg);
3179}
3180
3181/* The "maintenance btrace packet-history" command. */
3182
3183static void
3184maint_btrace_packet_history_cmd (char *arg, int from_tty)
3185{
3186 struct btrace_thread_info *btinfo;
3187 struct thread_info *tp;
3188 unsigned int size, begin, end, from, to;
3189
3190 tp = find_thread_ptid (inferior_ptid);
3191 if (tp == NULL)
3192 error (_("No thread."));
3193
3194 size = 10;
3195 btinfo = &tp->btrace;
3196
3197 btrace_maint_update_packets (btinfo, &begin, &end, &from, &to);
3198 if (begin == end)
3199 {
3200 printf_unfiltered (_("No trace.\n"));
3201 return;
3202 }
3203
3204 if (arg == NULL || *arg == 0 || strcmp (arg, "+") == 0)
3205 {
3206 from = to;
3207
3208 if (end - from < size)
3209 size = end - from;
3210 to = from + size;
3211 }
3212 else if (strcmp (arg, "-") == 0)
3213 {
3214 to = from;
3215
3216 if (to - begin < size)
3217 size = to - begin;
3218 from = to - size;
3219 }
3220 else
3221 {
3222 from = get_uint (&arg);
3223 if (end <= from)
3224 error (_("'%u' is out of range."), from);
3225
3226 arg = skip_spaces (arg);
3227 if (*arg == ',')
3228 {
3229 arg = skip_spaces (++arg);
3230
3231 if (*arg == '+')
3232 {
3233 arg += 1;
3234 size = get_context_size (&arg);
3235
3236 no_chunk (arg);
3237
3238 if (end - from < size)
3239 size = end - from;
3240 to = from + size;
3241 }
3242 else if (*arg == '-')
3243 {
3244 arg += 1;
3245 size = get_context_size (&arg);
3246
3247 no_chunk (arg);
3248
3249 /* Include the packet given as first argument. */
3250 from += 1;
3251 to = from;
3252
3253 if (to - begin < size)
3254 size = to - begin;
3255 from = to - size;
3256 }
3257 else
3258 {
3259 to = get_uint (&arg);
3260
3261 /* Include the packet at the second argument and silently
3262 truncate the range. */
3263 if (to < end)
3264 to += 1;
3265 else
3266 to = end;
3267
3268 no_chunk (arg);
3269 }
3270 }
3271 else
3272 {
3273 no_chunk (arg);
3274
3275 if (end - from < size)
3276 size = end - from;
3277 to = from + size;
3278 }
3279
3280 dont_repeat ();
3281 }
3282
3283 btrace_maint_print_packets (btinfo, from, to);
3284}
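/* Examples for the argument handling above (added for exposition),
   assuming a history of 100 packets and a previous print of packets
   20-29:

     maint btrace packet-history          prints 30-39
     maint btrace packet-history -        prints 10-19
     maint btrace packet-history 50       prints 50-59
     maint btrace packet-history 50,60    prints 50-60
     maint btrace packet-history 50,+5    prints 50-54
     maint btrace packet-history 50,-5    prints 46-50  */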
3285
3286/* The "maintenance btrace clear-packet-history" command. */
3287
3288static void
3289maint_btrace_clear_packet_history_cmd (char *args, int from_tty)
3290{
3291 struct btrace_thread_info *btinfo;
3292 struct thread_info *tp;
3293
3294 if (args != NULL && *args != 0)
3295 error (_("Invalid argument."));
3296
3297 tp = find_thread_ptid (inferior_ptid);
3298 if (tp == NULL)
3299 error (_("No thread."));
3300
3301 btinfo = &tp->btrace;
3302
3303 /* Must clear the maint data before - it depends on BTINFO->DATA. */
3304 btrace_maint_clear (btinfo);
3305 btrace_data_clear (&btinfo->data);
3306}
3307
3308/* The "maintenance btrace clear" command. */
3309
3310static void
3311maint_btrace_clear_cmd (char *args, int from_tty)
3312{
3313 struct btrace_thread_info *btinfo;
3314 struct thread_info *tp;
3315
3316 if (args != NULL && *args != 0)
3317 error (_("Invalid argument."));
3318
3319 tp = find_thread_ptid (inferior_ptid);
3320 if (tp == NULL)
3321 error (_("No thread."));
3322
3323 btrace_clear (tp);
3324}
3325
3326/* The "maintenance btrace" command. */
3327
3328static void
3329maint_btrace_cmd (char *args, int from_tty)
3330{
3331 help_list (maint_btrace_cmdlist, "maintenance btrace ", all_commands,
3332 gdb_stdout);
3333}
3334
3335/* The "maintenance set btrace" command. */
3336
3337static void
3338maint_btrace_set_cmd (char *args, int from_tty)
3339{
3340 help_list (maint_btrace_set_cmdlist, "maintenance set btrace ", all_commands,
3341 gdb_stdout);
3342}
3343
3344/* The "maintenance show btrace" command. */
3345
3346static void
3347maint_btrace_show_cmd (char *args, int from_tty)
3348{
3349 help_list (maint_btrace_show_cmdlist, "maintenance show btrace ",
3350 all_commands, gdb_stdout);
3351}
3352
3353/* The "maintenance set btrace pt" command. */
3354
3355static void
3356maint_btrace_pt_set_cmd (char *args, int from_tty)
3357{
3358 help_list (maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ",
3359 all_commands, gdb_stdout);
3360}
3361
3362/* The "maintenance show btrace pt" command. */
3363
3364static void
3365maint_btrace_pt_show_cmd (char *args, int from_tty)
3366{
3367 help_list (maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ",
3368 all_commands, gdb_stdout);
3369}
3370
3371/* The "maintenance info btrace" command. */
3372
3373static void
3374maint_info_btrace_cmd (char *args, int from_tty)
3375{
3376 struct btrace_thread_info *btinfo;
3377 struct thread_info *tp;
3378 const struct btrace_config *conf;
3379
3380 if (args != NULL && *args != 0)
3381 error (_("Invalid argument."));
3382
3383 tp = find_thread_ptid (inferior_ptid);
3384 if (tp == NULL)
3385 error (_("No thread."));
3386
3387 btinfo = &tp->btrace;
3388
3389 conf = btrace_conf (btinfo);
3390 if (conf == NULL)
3391 error (_("No btrace configuration."));
3392
3393 printf_unfiltered (_("Format: %s.\n"),
3394 btrace_format_string (conf->format));
3395
3396 switch (conf->format)
3397 {
3398 default:
3399 break;
3400
3401 case BTRACE_FORMAT_BTS:
3402 printf_unfiltered (_("Number of packets: %u.\n"),
3403 VEC_length (btrace_block_s,
3404 btinfo->data.variant.bts.blocks));
3405 break;
3406
3407#if defined (HAVE_LIBIPT)
3408 case BTRACE_FORMAT_PT:
3409 {
3410 struct pt_version version;
3411
3412 version = pt_library_version ();
3413 printf_unfiltered (_("Version: %u.%u.%u%s.\n"), version.major,
3414 version.minor, version.build,
3415 version.ext != NULL ? version.ext : "");
3416
3417 btrace_maint_update_pt_packets (btinfo);
3418 printf_unfiltered (_("Number of packets: %u.\n"),
3419 VEC_length (btrace_pt_packet_s,
3420 btinfo->maint.variant.pt.packets));
3421 }
3422 break;
3423#endif /* defined (HAVE_LIBIPT) */
3424 }
3425}
3426
3427/* The "maint show btrace pt skip-pad" show value function. */
3428
3429static void
3430show_maint_btrace_pt_skip_pad (struct ui_file *file, int from_tty,
3431 struct cmd_list_element *c,
3432 const char *value)
3433{
3434 fprintf_filtered (file, _("Skip PAD packets is %s.\n"), value);
3435}
3436
3437
3438/* Initialize btrace maintenance commands. */
3439
3440void _initialize_btrace (void);
3441void
3442_initialize_btrace (void)
3443{
3444 add_cmd ("btrace", class_maintenance, maint_info_btrace_cmd,
3445 _("Info about branch tracing data."), &maintenanceinfolist);
3446
3447 add_prefix_cmd ("btrace", class_maintenance, maint_btrace_cmd,
3448 _("Branch tracing maintenance commands."),
3449 &maint_btrace_cmdlist, "maintenance btrace ",
3450 0, &maintenancelist);
3451
3452 add_prefix_cmd ("btrace", class_maintenance, maint_btrace_set_cmd, _("\
3453Set branch tracing specific variables."),
3454 &maint_btrace_set_cmdlist, "maintenance set btrace ",
3455 0, &maintenance_set_cmdlist);
3456
3457 add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_set_cmd, _("\
3458Set Intel Processor Trace specific variables."),
3459 &maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ",
3460 0, &maint_btrace_set_cmdlist);
3461
3462 add_prefix_cmd ("btrace", class_maintenance, maint_btrace_show_cmd, _("\
3463Show branch tracing specific variables."),
3464 &maint_btrace_show_cmdlist, "maintenance show btrace ",
3465 0, &maintenance_show_cmdlist);
3466
3467 add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_show_cmd, _("\
3468Show Intel Processor Trace specific variables."),
3469 &maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ",
3470 0, &maint_btrace_show_cmdlist);
3471
3472 add_setshow_boolean_cmd ("skip-pad", class_maintenance,
3473 &maint_btrace_pt_skip_pad, _("\
3474Set whether PAD packets should be skipped in the btrace packet history."), _("\
3475Show whether PAD packets should be skipped in the btrace packet history."),_("\
3476When enabled, PAD packets are ignored in the btrace packet history."),
3477 NULL, show_maint_btrace_pt_skip_pad,
3478 &maint_btrace_pt_set_cmdlist,
3479 &maint_btrace_pt_show_cmdlist);
3480
3481 add_cmd ("packet-history", class_maintenance, maint_btrace_packet_history_cmd,
3482 _("Print the raw branch tracing data.\n\
3483With no argument, print ten more packets after the previous ten-line print.\n\
3484With '-' as argument print ten packets before a previous ten-line print.\n\
3485One argument specifies the starting packet of a ten-line print.\n\
3486Two arguments with comma between specify starting and ending packets to \
3487print.\n\
3488Preceded with '+'/'-' the second argument specifies the distance from the \
3489first.\n"),
3490 &maint_btrace_cmdlist);
3491
3492 add_cmd ("clear-packet-history", class_maintenance,
3493 maint_btrace_clear_packet_history_cmd,
3494 _("Clears the branch tracing packet history.\n\
3495Discards the raw branch tracing data but not the execution history data.\n\
3496"),
3497 &maint_btrace_cmdlist);
3498
3499 add_cmd ("clear", class_maintenance, maint_btrace_clear_cmd,
3500 _("Clears the branch tracing data.\n\
3501Discards the raw branch tracing data and the execution history data.\n\
3502The next 'record' command will fetch the branch tracing data anew.\n\
3503"),
3504 &maint_btrace_cmdlist);
3505
3506}