Regenerate Makefile.in/aclocal.m4 automake 1.11.6
[deliverable/binutils-gdb.git] / gdb / btrace.c
CommitLineData
02d27625
MM
1/* Branch trace support for GDB, the GNU debugger.
2
618f726f 3 Copyright (C) 2013-2016 Free Software Foundation, Inc.
02d27625
MM
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
d41f6d8e 22#include "defs.h"
02d27625
MM
23#include "btrace.h"
24#include "gdbthread.h"
02d27625
MM
25#include "inferior.h"
26#include "target.h"
27#include "record.h"
28#include "symtab.h"
29#include "disasm.h"
30#include "source.h"
31#include "filenames.h"
c12a2917 32#include "xml-support.h"
6e07b1d2 33#include "regcache.h"
b20a6524 34#include "rsp-low.h"
b0627500
MM
35#include "gdbcmd.h"
36#include "cli/cli-utils.h"
b20a6524
MM
37
38#include <inttypes.h>
b0627500
MM
39#include <ctype.h>
40
/* Command lists for btrace maintenance commands.  */
static struct cmd_list_element *maint_btrace_cmdlist;
static struct cmd_list_element *maint_btrace_set_cmdlist;
static struct cmd_list_element *maint_btrace_show_cmdlist;
static struct cmd_list_element *maint_btrace_pt_set_cmdlist;
static struct cmd_list_element *maint_btrace_pt_show_cmdlist;

/* Control whether to skip PAD packets when computing the packet history.  */
static int maint_btrace_pt_skip_pad = 1;

/* Forward declaration; defined below.  */
static void btrace_add_pc (struct thread_info *tp);
02d27625
MM
52
/* Print a record debug message.  Use do ... while (0) to avoid ambiguities
   when used in if statements.  */

#define DEBUG(msg, args...)                                             \
  do                                                                    \
    {                                                                   \
      if (record_debug != 0)                                            \
        fprintf_unfiltered (gdb_stdlog,                                 \
                            "[btrace] " msg "\n", ##args);              \
    }                                                                   \
  while (0)

/* Like DEBUG, but prefixed with "[ftrace]" for function-trace messages.  */
#define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args)
66
02d27625
MM
67/* Return the function name of a recorded function segment for printing.
68 This function never returns NULL. */
69
70static const char *
23a7fe75 71ftrace_print_function_name (const struct btrace_function *bfun)
02d27625
MM
72{
73 struct minimal_symbol *msym;
74 struct symbol *sym;
75
76 msym = bfun->msym;
77 sym = bfun->sym;
78
79 if (sym != NULL)
80 return SYMBOL_PRINT_NAME (sym);
81
82 if (msym != NULL)
efd66ac6 83 return MSYMBOL_PRINT_NAME (msym);
02d27625
MM
84
85 return "<unknown>";
86}
87
88/* Return the file name of a recorded function segment for printing.
89 This function never returns NULL. */
90
91static const char *
23a7fe75 92ftrace_print_filename (const struct btrace_function *bfun)
02d27625
MM
93{
94 struct symbol *sym;
95 const char *filename;
96
97 sym = bfun->sym;
98
99 if (sym != NULL)
08be3fe3 100 filename = symtab_to_filename_for_display (symbol_symtab (sym));
02d27625
MM
101 else
102 filename = "<unknown>";
103
104 return filename;
105}
106
23a7fe75
MM
107/* Return a string representation of the address of an instruction.
108 This function never returns NULL. */
02d27625 109
23a7fe75
MM
110static const char *
111ftrace_print_insn_addr (const struct btrace_insn *insn)
02d27625 112{
23a7fe75
MM
113 if (insn == NULL)
114 return "<nil>";
115
116 return core_addr_to_string_nz (insn->pc);
02d27625
MM
117}
118
23a7fe75 119/* Print an ftrace debug status message. */
02d27625
MM
120
121static void
23a7fe75 122ftrace_debug (const struct btrace_function *bfun, const char *prefix)
02d27625 123{
23a7fe75
MM
124 const char *fun, *file;
125 unsigned int ibegin, iend;
ce0dfbea 126 int level;
23a7fe75
MM
127
128 fun = ftrace_print_function_name (bfun);
129 file = ftrace_print_filename (bfun);
130 level = bfun->level;
131
23a7fe75
MM
132 ibegin = bfun->insn_offset;
133 iend = ibegin + VEC_length (btrace_insn_s, bfun->insn);
134
ce0dfbea
MM
135 DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, insn = [%u; %u)",
136 prefix, fun, file, level, ibegin, iend);
02d27625
MM
137}
138
23a7fe75
MM
139/* Return non-zero if BFUN does not match MFUN and FUN,
140 return zero otherwise. */
02d27625
MM
141
142static int
23a7fe75
MM
143ftrace_function_switched (const struct btrace_function *bfun,
144 const struct minimal_symbol *mfun,
145 const struct symbol *fun)
02d27625
MM
146{
147 struct minimal_symbol *msym;
148 struct symbol *sym;
149
02d27625
MM
150 msym = bfun->msym;
151 sym = bfun->sym;
152
153 /* If the minimal symbol changed, we certainly switched functions. */
154 if (mfun != NULL && msym != NULL
efd66ac6 155 && strcmp (MSYMBOL_LINKAGE_NAME (mfun), MSYMBOL_LINKAGE_NAME (msym)) != 0)
02d27625
MM
156 return 1;
157
158 /* If the symbol changed, we certainly switched functions. */
159 if (fun != NULL && sym != NULL)
160 {
161 const char *bfname, *fname;
162
163 /* Check the function name. */
164 if (strcmp (SYMBOL_LINKAGE_NAME (fun), SYMBOL_LINKAGE_NAME (sym)) != 0)
165 return 1;
166
167 /* Check the location of those functions, as well. */
08be3fe3
DE
168 bfname = symtab_to_fullname (symbol_symtab (sym));
169 fname = symtab_to_fullname (symbol_symtab (fun));
02d27625
MM
170 if (filename_cmp (fname, bfname) != 0)
171 return 1;
172 }
173
23a7fe75
MM
174 /* If we lost symbol information, we switched functions. */
175 if (!(msym == NULL && sym == NULL) && mfun == NULL && fun == NULL)
176 return 1;
177
178 /* If we gained symbol information, we switched functions. */
179 if (msym == NULL && sym == NULL && !(mfun == NULL && fun == NULL))
180 return 1;
181
02d27625
MM
182 return 0;
183}
184
23a7fe75
MM
185/* Allocate and initialize a new branch trace function segment.
186 PREV is the chronologically preceding function segment.
187 MFUN and FUN are the symbol information we have for this function. */
188
189static struct btrace_function *
190ftrace_new_function (struct btrace_function *prev,
191 struct minimal_symbol *mfun,
192 struct symbol *fun)
193{
194 struct btrace_function *bfun;
195
8d749320 196 bfun = XCNEW (struct btrace_function);
23a7fe75
MM
197
198 bfun->msym = mfun;
199 bfun->sym = fun;
200 bfun->flow.prev = prev;
201
5de9129b
MM
202 if (prev == NULL)
203 {
204 /* Start counting at one. */
205 bfun->number = 1;
206 bfun->insn_offset = 1;
207 }
208 else
23a7fe75
MM
209 {
210 gdb_assert (prev->flow.next == NULL);
211 prev->flow.next = bfun;
02d27625 212
23a7fe75
MM
213 bfun->number = prev->number + 1;
214 bfun->insn_offset = (prev->insn_offset
215 + VEC_length (btrace_insn_s, prev->insn));
31fd9caa 216 bfun->level = prev->level;
23a7fe75
MM
217 }
218
219 return bfun;
02d27625
MM
220}
221
23a7fe75 222/* Update the UP field of a function segment. */
02d27625 223
23a7fe75
MM
224static void
225ftrace_update_caller (struct btrace_function *bfun,
226 struct btrace_function *caller,
227 enum btrace_function_flag flags)
02d27625 228{
23a7fe75
MM
229 if (bfun->up != NULL)
230 ftrace_debug (bfun, "updating caller");
02d27625 231
23a7fe75
MM
232 bfun->up = caller;
233 bfun->flags = flags;
234
235 ftrace_debug (bfun, "set caller");
236}
237
238/* Fix up the caller for all segments of a function. */
239
240static void
241ftrace_fixup_caller (struct btrace_function *bfun,
242 struct btrace_function *caller,
243 enum btrace_function_flag flags)
244{
245 struct btrace_function *prev, *next;
246
247 ftrace_update_caller (bfun, caller, flags);
248
249 /* Update all function segments belonging to the same function. */
250 for (prev = bfun->segment.prev; prev != NULL; prev = prev->segment.prev)
251 ftrace_update_caller (prev, caller, flags);
252
253 for (next = bfun->segment.next; next != NULL; next = next->segment.next)
254 ftrace_update_caller (next, caller, flags);
255}
256
257/* Add a new function segment for a call.
258 CALLER is the chronologically preceding function segment.
259 MFUN and FUN are the symbol information we have for this function. */
260
261static struct btrace_function *
262ftrace_new_call (struct btrace_function *caller,
263 struct minimal_symbol *mfun,
264 struct symbol *fun)
265{
266 struct btrace_function *bfun;
267
268 bfun = ftrace_new_function (caller, mfun, fun);
269 bfun->up = caller;
31fd9caa 270 bfun->level += 1;
23a7fe75
MM
271
272 ftrace_debug (bfun, "new call");
273
274 return bfun;
275}
276
277/* Add a new function segment for a tail call.
278 CALLER is the chronologically preceding function segment.
279 MFUN and FUN are the symbol information we have for this function. */
280
281static struct btrace_function *
282ftrace_new_tailcall (struct btrace_function *caller,
283 struct minimal_symbol *mfun,
284 struct symbol *fun)
285{
286 struct btrace_function *bfun;
02d27625 287
23a7fe75
MM
288 bfun = ftrace_new_function (caller, mfun, fun);
289 bfun->up = caller;
31fd9caa 290 bfun->level += 1;
23a7fe75 291 bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;
02d27625 292
23a7fe75
MM
293 ftrace_debug (bfun, "new tail call");
294
295 return bfun;
296}
297
298/* Find the innermost caller in the back trace of BFUN with MFUN/FUN
299 symbol information. */
300
301static struct btrace_function *
302ftrace_find_caller (struct btrace_function *bfun,
303 struct minimal_symbol *mfun,
304 struct symbol *fun)
305{
306 for (; bfun != NULL; bfun = bfun->up)
307 {
308 /* Skip functions with incompatible symbol information. */
309 if (ftrace_function_switched (bfun, mfun, fun))
310 continue;
311
312 /* This is the function segment we're looking for. */
313 break;
314 }
315
316 return bfun;
317}
318
319/* Find the innermost caller in the back trace of BFUN, skipping all
320 function segments that do not end with a call instruction (e.g.
321 tail calls ending with a jump). */
322
323static struct btrace_function *
7d5c24b3 324ftrace_find_call (struct btrace_function *bfun)
23a7fe75
MM
325{
326 for (; bfun != NULL; bfun = bfun->up)
02d27625 327 {
23a7fe75 328 struct btrace_insn *last;
02d27625 329
31fd9caa
MM
330 /* Skip gaps. */
331 if (bfun->errcode != 0)
332 continue;
23a7fe75
MM
333
334 last = VEC_last (btrace_insn_s, bfun->insn);
02d27625 335
7d5c24b3 336 if (last->iclass == BTRACE_INSN_CALL)
23a7fe75
MM
337 break;
338 }
339
340 return bfun;
341}
342
343/* Add a continuation segment for a function into which we return.
344 PREV is the chronologically preceding function segment.
345 MFUN and FUN are the symbol information we have for this function. */
346
347static struct btrace_function *
7d5c24b3 348ftrace_new_return (struct btrace_function *prev,
23a7fe75
MM
349 struct minimal_symbol *mfun,
350 struct symbol *fun)
351{
352 struct btrace_function *bfun, *caller;
353
354 bfun = ftrace_new_function (prev, mfun, fun);
355
356 /* It is important to start at PREV's caller. Otherwise, we might find
357 PREV itself, if PREV is a recursive function. */
358 caller = ftrace_find_caller (prev->up, mfun, fun);
359 if (caller != NULL)
360 {
361 /* The caller of PREV is the preceding btrace function segment in this
362 function instance. */
363 gdb_assert (caller->segment.next == NULL);
364
365 caller->segment.next = bfun;
366 bfun->segment.prev = caller;
367
368 /* Maintain the function level. */
369 bfun->level = caller->level;
370
371 /* Maintain the call stack. */
372 bfun->up = caller->up;
373 bfun->flags = caller->flags;
374
375 ftrace_debug (bfun, "new return");
376 }
377 else
378 {
379 /* We did not find a caller. This could mean that something went
380 wrong or that the call is simply not included in the trace. */
02d27625 381
23a7fe75 382 /* Let's search for some actual call. */
7d5c24b3 383 caller = ftrace_find_call (prev->up);
23a7fe75 384 if (caller == NULL)
02d27625 385 {
23a7fe75
MM
386 /* There is no call in PREV's back trace. We assume that the
387 branch trace did not include it. */
388
389 /* Let's find the topmost call function - this skips tail calls. */
390 while (prev->up != NULL)
391 prev = prev->up;
02d27625 392
23a7fe75
MM
393 /* We maintain levels for a series of returns for which we have
394 not seen the calls.
395 We start at the preceding function's level in case this has
396 already been a return for which we have not seen the call.
397 We start at level 0 otherwise, to handle tail calls correctly. */
398 bfun->level = min (0, prev->level) - 1;
399
400 /* Fix up the call stack for PREV. */
401 ftrace_fixup_caller (prev, bfun, BFUN_UP_LINKS_TO_RET);
402
403 ftrace_debug (bfun, "new return - no caller");
404 }
405 else
02d27625 406 {
23a7fe75
MM
407 /* There is a call in PREV's back trace to which we should have
408 returned. Let's remain at this level. */
409 bfun->level = prev->level;
02d27625 410
23a7fe75 411 ftrace_debug (bfun, "new return - unknown caller");
02d27625 412 }
23a7fe75
MM
413 }
414
415 return bfun;
416}
417
/* Add a new function segment for a function switch.
   PREV is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_switch (struct btrace_function *prev,
                   struct minimal_symbol *mfun,
                   struct symbol *fun)
{
  /* This is an unexplained function switch.  The call stack will likely
     be wrong at this point.  */
  struct btrace_function *bfun = ftrace_new_function (prev, mfun, fun);

  ftrace_debug (bfun, "new switch");

  return bfun;
}
437
31fd9caa
MM
438/* Add a new function segment for a gap in the trace due to a decode error.
439 PREV is the chronologically preceding function segment.
440 ERRCODE is the format-specific error code. */
441
442static struct btrace_function *
443ftrace_new_gap (struct btrace_function *prev, int errcode)
444{
445 struct btrace_function *bfun;
446
447 /* We hijack prev if it was empty. */
448 if (prev != NULL && prev->errcode == 0
449 && VEC_empty (btrace_insn_s, prev->insn))
450 bfun = prev;
451 else
452 bfun = ftrace_new_function (prev, NULL, NULL);
453
454 bfun->errcode = errcode;
455
456 ftrace_debug (bfun, "new gap");
457
458 return bfun;
459}
460
23a7fe75
MM
461/* Update BFUN with respect to the instruction at PC. This may create new
462 function segments.
463 Return the chronologically latest function segment, never NULL. */
464
465static struct btrace_function *
7d5c24b3 466ftrace_update_function (struct btrace_function *bfun, CORE_ADDR pc)
23a7fe75
MM
467{
468 struct bound_minimal_symbol bmfun;
469 struct minimal_symbol *mfun;
470 struct symbol *fun;
471 struct btrace_insn *last;
472
473 /* Try to determine the function we're in. We use both types of symbols
474 to avoid surprises when we sometimes get a full symbol and sometimes
475 only a minimal symbol. */
476 fun = find_pc_function (pc);
477 bmfun = lookup_minimal_symbol_by_pc (pc);
478 mfun = bmfun.minsym;
479
480 if (fun == NULL && mfun == NULL)
481 DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));
482
31fd9caa
MM
483 /* If we didn't have a function or if we had a gap before, we create one. */
484 if (bfun == NULL || bfun->errcode != 0)
23a7fe75
MM
485 return ftrace_new_function (bfun, mfun, fun);
486
487 /* Check the last instruction, if we have one.
488 We do this check first, since it allows us to fill in the call stack
489 links in addition to the normal flow links. */
490 last = NULL;
491 if (!VEC_empty (btrace_insn_s, bfun->insn))
492 last = VEC_last (btrace_insn_s, bfun->insn);
493
494 if (last != NULL)
495 {
7d5c24b3
MM
496 switch (last->iclass)
497 {
498 case BTRACE_INSN_RETURN:
986b6601
MM
499 {
500 const char *fname;
501
502 /* On some systems, _dl_runtime_resolve returns to the resolved
503 function instead of jumping to it. From our perspective,
504 however, this is a tailcall.
505 If we treated it as return, we wouldn't be able to find the
506 resolved function in our stack back trace. Hence, we would
507 lose the current stack back trace and start anew with an empty
508 back trace. When the resolved function returns, we would then
509 create a stack back trace with the same function names but
510 different frame id's. This will confuse stepping. */
511 fname = ftrace_print_function_name (bfun);
512 if (strcmp (fname, "_dl_runtime_resolve") == 0)
513 return ftrace_new_tailcall (bfun, mfun, fun);
514
515 return ftrace_new_return (bfun, mfun, fun);
516 }
23a7fe75 517
7d5c24b3
MM
518 case BTRACE_INSN_CALL:
519 /* Ignore calls to the next instruction. They are used for PIC. */
520 if (last->pc + last->size == pc)
521 break;
23a7fe75 522
7d5c24b3 523 return ftrace_new_call (bfun, mfun, fun);
23a7fe75 524
7d5c24b3
MM
525 case BTRACE_INSN_JUMP:
526 {
527 CORE_ADDR start;
23a7fe75 528
7d5c24b3 529 start = get_pc_function_start (pc);
23a7fe75 530
7d5c24b3
MM
531 /* If we can't determine the function for PC, we treat a jump at
532 the end of the block as tail call. */
533 if (start == 0 || start == pc)
534 return ftrace_new_tailcall (bfun, mfun, fun);
535 }
02d27625 536 }
23a7fe75
MM
537 }
538
539 /* Check if we're switching functions for some other reason. */
540 if (ftrace_function_switched (bfun, mfun, fun))
541 {
542 DEBUG_FTRACE ("switching from %s in %s at %s",
543 ftrace_print_insn_addr (last),
544 ftrace_print_function_name (bfun),
545 ftrace_print_filename (bfun));
02d27625 546
23a7fe75
MM
547 return ftrace_new_switch (bfun, mfun, fun);
548 }
549
550 return bfun;
551}
552
23a7fe75
MM
553/* Add the instruction at PC to BFUN's instructions. */
554
555static void
7d5c24b3
MM
556ftrace_update_insns (struct btrace_function *bfun,
557 const struct btrace_insn *insn)
23a7fe75 558{
7d5c24b3 559 VEC_safe_push (btrace_insn_s, bfun->insn, insn);
23a7fe75
MM
560
561 if (record_debug > 1)
562 ftrace_debug (bfun, "update insn");
563}
564
7d5c24b3
MM
565/* Classify the instruction at PC. */
566
567static enum btrace_insn_class
568ftrace_classify_insn (struct gdbarch *gdbarch, CORE_ADDR pc)
569{
7d5c24b3
MM
570 enum btrace_insn_class iclass;
571
572 iclass = BTRACE_INSN_OTHER;
492d29ea 573 TRY
7d5c24b3
MM
574 {
575 if (gdbarch_insn_is_call (gdbarch, pc))
576 iclass = BTRACE_INSN_CALL;
577 else if (gdbarch_insn_is_ret (gdbarch, pc))
578 iclass = BTRACE_INSN_RETURN;
579 else if (gdbarch_insn_is_jump (gdbarch, pc))
580 iclass = BTRACE_INSN_JUMP;
581 }
492d29ea
PA
582 CATCH (error, RETURN_MASK_ERROR)
583 {
584 }
585 END_CATCH
7d5c24b3
MM
586
587 return iclass;
588}
589
734b0e4b 590/* Compute the function branch trace from BTS trace. */
23a7fe75
MM
591
592static void
76235df1 593btrace_compute_ftrace_bts (struct thread_info *tp,
734b0e4b 594 const struct btrace_data_bts *btrace)
23a7fe75 595{
76235df1 596 struct btrace_thread_info *btinfo;
23a7fe75
MM
597 struct btrace_function *begin, *end;
598 struct gdbarch *gdbarch;
31fd9caa 599 unsigned int blk, ngaps;
23a7fe75
MM
600 int level;
601
23a7fe75 602 gdbarch = target_gdbarch ();
76235df1 603 btinfo = &tp->btrace;
969c39fb
MM
604 begin = btinfo->begin;
605 end = btinfo->end;
31fd9caa 606 ngaps = btinfo->ngaps;
969c39fb 607 level = begin != NULL ? -btinfo->level : INT_MAX;
734b0e4b 608 blk = VEC_length (btrace_block_s, btrace->blocks);
23a7fe75
MM
609
610 while (blk != 0)
611 {
612 btrace_block_s *block;
613 CORE_ADDR pc;
614
615 blk -= 1;
616
734b0e4b 617 block = VEC_index (btrace_block_s, btrace->blocks, blk);
23a7fe75
MM
618 pc = block->begin;
619
620 for (;;)
621 {
7d5c24b3 622 struct btrace_insn insn;
23a7fe75
MM
623 int size;
624
625 /* We should hit the end of the block. Warn if we went too far. */
626 if (block->end < pc)
627 {
31fd9caa
MM
628 /* Indicate the gap in the trace - unless we're at the
629 beginning. */
630 if (begin != NULL)
631 {
632 warning (_("Recorded trace may be corrupted around %s."),
633 core_addr_to_string_nz (pc));
634
635 end = ftrace_new_gap (end, BDE_BTS_OVERFLOW);
636 ngaps += 1;
637 }
23a7fe75
MM
638 break;
639 }
640
7d5c24b3 641 end = ftrace_update_function (end, pc);
23a7fe75
MM
642 if (begin == NULL)
643 begin = end;
644
8710b709
MM
645 /* Maintain the function level offset.
646 For all but the last block, we do it here. */
647 if (blk != 0)
648 level = min (level, end->level);
23a7fe75 649
7d5c24b3 650 size = 0;
492d29ea
PA
651 TRY
652 {
653 size = gdb_insn_length (gdbarch, pc);
654 }
655 CATCH (error, RETURN_MASK_ERROR)
656 {
657 }
658 END_CATCH
7d5c24b3
MM
659
660 insn.pc = pc;
661 insn.size = size;
662 insn.iclass = ftrace_classify_insn (gdbarch, pc);
da8c46d2 663 insn.flags = 0;
7d5c24b3
MM
664
665 ftrace_update_insns (end, &insn);
23a7fe75
MM
666
667 /* We're done once we pushed the instruction at the end. */
668 if (block->end == pc)
669 break;
670
7d5c24b3 671 /* We can't continue if we fail to compute the size. */
23a7fe75
MM
672 if (size <= 0)
673 {
674 warning (_("Recorded trace may be incomplete around %s."),
675 core_addr_to_string_nz (pc));
31fd9caa
MM
676
677 /* Indicate the gap in the trace. We just added INSN so we're
678 not at the beginning. */
679 end = ftrace_new_gap (end, BDE_BTS_INSN_SIZE);
680 ngaps += 1;
681
23a7fe75
MM
682 break;
683 }
684
685 pc += size;
8710b709
MM
686
687 /* Maintain the function level offset.
688 For the last block, we do it here to not consider the last
689 instruction.
690 Since the last instruction corresponds to the current instruction
691 and is not really part of the execution history, it shouldn't
692 affect the level. */
693 if (blk == 0)
694 level = min (level, end->level);
23a7fe75 695 }
02d27625
MM
696 }
697
23a7fe75
MM
698 btinfo->begin = begin;
699 btinfo->end = end;
31fd9caa 700 btinfo->ngaps = ngaps;
23a7fe75
MM
701
702 /* LEVEL is the minimal function level of all btrace function segments.
703 Define the global level offset to -LEVEL so all function levels are
704 normalized to start at zero. */
705 btinfo->level = -level;
02d27625
MM
706}
707
b20a6524
MM
708#if defined (HAVE_LIBIPT)
709
710static enum btrace_insn_class
711pt_reclassify_insn (enum pt_insn_class iclass)
712{
713 switch (iclass)
714 {
715 case ptic_call:
716 return BTRACE_INSN_CALL;
717
718 case ptic_return:
719 return BTRACE_INSN_RETURN;
720
721 case ptic_jump:
722 return BTRACE_INSN_JUMP;
723
724 default:
725 return BTRACE_INSN_OTHER;
726 }
727}
728
da8c46d2
MM
729/* Return the btrace instruction flags for INSN. */
730
731static enum btrace_insn_flag
732pt_btrace_insn_flags (const struct pt_insn *insn)
733{
734 enum btrace_insn_flag flags = 0;
735
736 if (insn->speculative)
737 flags |= BTRACE_INSN_FLAG_SPECULATIVE;
738
739 return flags;
740}
741
b20a6524
MM
742/* Add function branch trace using DECODER. */
743
744static void
745ftrace_add_pt (struct pt_insn_decoder *decoder,
746 struct btrace_function **pbegin,
747 struct btrace_function **pend, int *plevel,
748 unsigned int *ngaps)
749{
750 struct btrace_function *begin, *end, *upd;
751 uint64_t offset;
752 int errcode, nerrors;
753
754 begin = *pbegin;
755 end = *pend;
756 nerrors = 0;
757 for (;;)
758 {
759 struct btrace_insn btinsn;
760 struct pt_insn insn;
761
762 errcode = pt_insn_sync_forward (decoder);
763 if (errcode < 0)
764 {
765 if (errcode != -pte_eos)
bc504a31 766 warning (_("Failed to synchronize onto the Intel Processor "
b20a6524
MM
767 "Trace stream: %s."), pt_errstr (pt_errcode (errcode)));
768 break;
769 }
770
771 memset (&btinsn, 0, sizeof (btinsn));
772 for (;;)
773 {
774 errcode = pt_insn_next (decoder, &insn, sizeof(insn));
775 if (errcode < 0)
776 break;
777
778 /* Look for gaps in the trace - unless we're at the beginning. */
779 if (begin != NULL)
780 {
781 /* Tracing is disabled and re-enabled each time we enter the
782 kernel. Most times, we continue from the same instruction we
783 stopped before. This is indicated via the RESUMED instruction
784 flag. The ENABLED instruction flag means that we continued
785 from some other instruction. Indicate this as a trace gap. */
786 if (insn.enabled)
787 *pend = end = ftrace_new_gap (end, BDE_PT_DISABLED);
788
789 /* Indicate trace overflows. */
790 if (insn.resynced)
791 *pend = end = ftrace_new_gap (end, BDE_PT_OVERFLOW);
792 }
793
794 upd = ftrace_update_function (end, insn.ip);
795 if (upd != end)
796 {
797 *pend = end = upd;
798
799 if (begin == NULL)
800 *pbegin = begin = upd;
801 }
802
803 /* Maintain the function level offset. */
804 *plevel = min (*plevel, end->level);
805
806 btinsn.pc = (CORE_ADDR) insn.ip;
807 btinsn.size = (gdb_byte) insn.size;
808 btinsn.iclass = pt_reclassify_insn (insn.iclass);
da8c46d2 809 btinsn.flags = pt_btrace_insn_flags (&insn);
b20a6524
MM
810
811 ftrace_update_insns (end, &btinsn);
812 }
813
814 if (errcode == -pte_eos)
815 break;
816
817 /* If the gap is at the very beginning, we ignore it - we will have
818 less trace, but we won't have any holes in the trace. */
819 if (begin == NULL)
820 continue;
821
822 pt_insn_get_offset (decoder, &offset);
823
bc504a31 824 warning (_("Failed to decode Intel Processor Trace near trace "
b20a6524
MM
825 "offset 0x%" PRIx64 " near recorded PC 0x%" PRIx64 ": %s."),
826 offset, insn.ip, pt_errstr (pt_errcode (errcode)));
827
828 /* Indicate the gap in the trace. */
829 *pend = end = ftrace_new_gap (end, errcode);
830 *ngaps += 1;
831 }
832
833 if (nerrors > 0)
834 warning (_("The recorded execution trace may have gaps."));
835}
836
837/* A callback function to allow the trace decoder to read the inferior's
838 memory. */
839
840static int
841btrace_pt_readmem_callback (gdb_byte *buffer, size_t size,
80a2b330 842 const struct pt_asid *asid, uint64_t pc,
b20a6524
MM
843 void *context)
844{
43368e1d 845 int result, errcode;
b20a6524 846
43368e1d 847 result = (int) size;
b20a6524
MM
848 TRY
849 {
80a2b330 850 errcode = target_read_code ((CORE_ADDR) pc, buffer, size);
b20a6524 851 if (errcode != 0)
43368e1d 852 result = -pte_nomap;
b20a6524
MM
853 }
854 CATCH (error, RETURN_MASK_ERROR)
855 {
43368e1d 856 result = -pte_nomap;
b20a6524
MM
857 }
858 END_CATCH
859
43368e1d 860 return result;
b20a6524
MM
861}
862
863/* Translate the vendor from one enum to another. */
864
865static enum pt_cpu_vendor
866pt_translate_cpu_vendor (enum btrace_cpu_vendor vendor)
867{
868 switch (vendor)
869 {
870 default:
871 return pcv_unknown;
872
873 case CV_INTEL:
874 return pcv_intel;
875 }
876}
877
878/* Finalize the function branch trace after decode. */
879
880static void btrace_finalize_ftrace_pt (struct pt_insn_decoder *decoder,
881 struct thread_info *tp, int level)
882{
883 pt_insn_free_decoder (decoder);
884
885 /* LEVEL is the minimal function level of all btrace function segments.
886 Define the global level offset to -LEVEL so all function levels are
887 normalized to start at zero. */
888 tp->btrace.level = -level;
889
890 /* Add a single last instruction entry for the current PC.
891 This allows us to compute the backtrace at the current PC using both
892 standard unwind and btrace unwind.
893 This extra entry is ignored by all record commands. */
894 btrace_add_pc (tp);
895}
896
bc504a31
PA
897/* Compute the function branch trace from Intel Processor Trace
898 format. */
b20a6524
MM
899
900static void
901btrace_compute_ftrace_pt (struct thread_info *tp,
902 const struct btrace_data_pt *btrace)
903{
904 struct btrace_thread_info *btinfo;
905 struct pt_insn_decoder *decoder;
906 struct pt_config config;
907 int level, errcode;
908
909 if (btrace->size == 0)
910 return;
911
912 btinfo = &tp->btrace;
913 level = btinfo->begin != NULL ? -btinfo->level : INT_MAX;
914
915 pt_config_init(&config);
916 config.begin = btrace->data;
917 config.end = btrace->data + btrace->size;
918
919 config.cpu.vendor = pt_translate_cpu_vendor (btrace->config.cpu.vendor);
920 config.cpu.family = btrace->config.cpu.family;
921 config.cpu.model = btrace->config.cpu.model;
922 config.cpu.stepping = btrace->config.cpu.stepping;
923
924 errcode = pt_cpu_errata (&config.errata, &config.cpu);
925 if (errcode < 0)
bc504a31 926 error (_("Failed to configure the Intel Processor Trace decoder: %s."),
b20a6524
MM
927 pt_errstr (pt_errcode (errcode)));
928
929 decoder = pt_insn_alloc_decoder (&config);
930 if (decoder == NULL)
bc504a31 931 error (_("Failed to allocate the Intel Processor Trace decoder."));
b20a6524
MM
932
933 TRY
934 {
935 struct pt_image *image;
936
937 image = pt_insn_get_image(decoder);
938 if (image == NULL)
bc504a31 939 error (_("Failed to configure the Intel Processor Trace decoder."));
b20a6524
MM
940
941 errcode = pt_image_set_callback(image, btrace_pt_readmem_callback, NULL);
942 if (errcode < 0)
bc504a31 943 error (_("Failed to configure the Intel Processor Trace decoder: "
b20a6524
MM
944 "%s."), pt_errstr (pt_errcode (errcode)));
945
946 ftrace_add_pt (decoder, &btinfo->begin, &btinfo->end, &level,
947 &btinfo->ngaps);
948 }
949 CATCH (error, RETURN_MASK_ALL)
950 {
951 /* Indicate a gap in the trace if we quit trace processing. */
952 if (error.reason == RETURN_QUIT && btinfo->end != NULL)
953 {
954 btinfo->end = ftrace_new_gap (btinfo->end, BDE_PT_USER_QUIT);
955 btinfo->ngaps++;
956 }
957
958 btrace_finalize_ftrace_pt (decoder, tp, level);
959
960 throw_exception (error);
961 }
962 END_CATCH
963
964 btrace_finalize_ftrace_pt (decoder, tp, level);
965}
966
967#else /* defined (HAVE_LIBIPT) */
968
/* Stub used when GDB is built without libipt; the target should never
   hand us Intel Processor Trace data in that configuration.  */

static void
btrace_compute_ftrace_pt (struct thread_info *tp,
                          const struct btrace_data_pt *btrace)
{
  internal_error (__FILE__, __LINE__, _("Unexpected branch trace format."));
}
975
976#endif /* defined (HAVE_LIBIPT) */
977
734b0e4b
MM
978/* Compute the function branch trace from a block branch trace BTRACE for
979 a thread given by BTINFO. */
980
981static void
76235df1 982btrace_compute_ftrace (struct thread_info *tp, struct btrace_data *btrace)
734b0e4b
MM
983{
984 DEBUG ("compute ftrace");
985
986 switch (btrace->format)
987 {
988 case BTRACE_FORMAT_NONE:
989 return;
990
991 case BTRACE_FORMAT_BTS:
76235df1 992 btrace_compute_ftrace_bts (tp, &btrace->variant.bts);
734b0e4b 993 return;
b20a6524
MM
994
995 case BTRACE_FORMAT_PT:
996 btrace_compute_ftrace_pt (tp, &btrace->variant.pt);
997 return;
734b0e4b
MM
998 }
999
1000 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
1001}
1002
6e07b1d2
MM
1003/* Add an entry for the current PC. */
1004
1005static void
1006btrace_add_pc (struct thread_info *tp)
1007{
734b0e4b 1008 struct btrace_data btrace;
6e07b1d2
MM
1009 struct btrace_block *block;
1010 struct regcache *regcache;
1011 struct cleanup *cleanup;
1012 CORE_ADDR pc;
1013
1014 regcache = get_thread_regcache (tp->ptid);
1015 pc = regcache_read_pc (regcache);
1016
734b0e4b
MM
1017 btrace_data_init (&btrace);
1018 btrace.format = BTRACE_FORMAT_BTS;
1019 btrace.variant.bts.blocks = NULL;
6e07b1d2 1020
734b0e4b
MM
1021 cleanup = make_cleanup_btrace_data (&btrace);
1022
1023 block = VEC_safe_push (btrace_block_s, btrace.variant.bts.blocks, NULL);
6e07b1d2
MM
1024 block->begin = pc;
1025 block->end = pc;
1026
76235df1 1027 btrace_compute_ftrace (tp, &btrace);
6e07b1d2
MM
1028
1029 do_cleanups (cleanup);
1030}
1031
02d27625
MM
1032/* See btrace.h. */
1033
1034void
f4abbc16 1035btrace_enable (struct thread_info *tp, const struct btrace_config *conf)
02d27625
MM
1036{
1037 if (tp->btrace.target != NULL)
1038 return;
1039
46a3515b
MM
1040#if !defined (HAVE_LIBIPT)
1041 if (conf->format == BTRACE_FORMAT_PT)
bc504a31 1042 error (_("GDB does not support Intel Processor Trace."));
46a3515b
MM
1043#endif /* !defined (HAVE_LIBIPT) */
1044
f4abbc16 1045 if (!target_supports_btrace (conf->format))
02d27625
MM
1046 error (_("Target does not support branch tracing."));
1047
43792cf0
PA
1048 DEBUG ("enable thread %s (%s)", print_thread_id (tp),
1049 target_pid_to_str (tp->ptid));
02d27625 1050
f4abbc16 1051 tp->btrace.target = target_enable_btrace (tp->ptid, conf);
6e07b1d2
MM
1052
1053 /* Add an entry for the current PC so we start tracing from where we
1054 enabled it. */
1055 if (tp->btrace.target != NULL)
1056 btrace_add_pc (tp);
02d27625
MM
1057}
1058
1059/* See btrace.h. */
1060
f4abbc16
MM
1061const struct btrace_config *
1062btrace_conf (const struct btrace_thread_info *btinfo)
1063{
1064 if (btinfo->target == NULL)
1065 return NULL;
1066
1067 return target_btrace_conf (btinfo->target);
1068}
1069
1070/* See btrace.h. */
1071
02d27625
MM
1072void
1073btrace_disable (struct thread_info *tp)
1074{
1075 struct btrace_thread_info *btp = &tp->btrace;
1076 int errcode = 0;
1077
1078 if (btp->target == NULL)
1079 return;
1080
43792cf0
PA
1081 DEBUG ("disable thread %s (%s)", print_thread_id (tp),
1082 target_pid_to_str (tp->ptid));
02d27625
MM
1083
1084 target_disable_btrace (btp->target);
1085 btp->target = NULL;
1086
1087 btrace_clear (tp);
1088}
1089
1090/* See btrace.h. */
1091
1092void
1093btrace_teardown (struct thread_info *tp)
1094{
1095 struct btrace_thread_info *btp = &tp->btrace;
1096 int errcode = 0;
1097
1098 if (btp->target == NULL)
1099 return;
1100
43792cf0
PA
1101 DEBUG ("teardown thread %s (%s)", print_thread_id (tp),
1102 target_pid_to_str (tp->ptid));
02d27625
MM
1103
1104 target_teardown_btrace (btp->target);
1105 btp->target = NULL;
1106
1107 btrace_clear (tp);
1108}
1109
/* Stitch branch trace in BTS format.  BTRACE is the delta trace read since
   the last stop; TP holds the existing trace.  Adjusts BTRACE so it can be
   appended to the existing trace.  Returns 0 on success, -1 if the caller
   should fall back to a full trace read.  */

static int
btrace_stitch_bts (struct btrace_data_bts *btrace, struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct btrace_function *last_bfun;
  struct btrace_insn *last_insn;
  btrace_block_s *first_new_block;

  btinfo = &tp->btrace;
  last_bfun = btinfo->end;
  gdb_assert (last_bfun != NULL);
  gdb_assert (!VEC_empty (btrace_block_s, btrace->blocks));

  /* If the existing trace ends with a gap, we just glue the traces
     together.  We need to drop the last (i.e. chronologically first) block
     of the new trace, though, since we can't fill in the start address.  */
  if (VEC_empty (btrace_insn_s, last_bfun->insn))
    {
      VEC_pop (btrace_block_s, btrace->blocks);
      return 0;
    }

  /* Beware that block trace starts with the most recent block, so the
     chronologically first block in the new trace is the last block in
     the new trace's block vector.  */
  first_new_block = VEC_last (btrace_block_s, btrace->blocks);
  last_insn = VEC_last (btrace_insn_s, last_bfun->insn);

  /* If the current PC at the end of the block is the same as in our current
     trace, there are two explanations:
       1. we executed the instruction and some branch brought us back.
       2. we have not made any progress.
     In the first case, the delta trace vector should contain at least two
     entries.
     In the second case, the delta trace vector should contain exactly one
     entry for the partial block containing the current PC.  Remove it.  */
  if (first_new_block->end == last_insn->pc
      && VEC_length (btrace_block_s, btrace->blocks) == 1)
    {
      VEC_pop (btrace_block_s, btrace->blocks);
      return 0;
    }

  DEBUG ("stitching %s to %s", ftrace_print_insn_addr (last_insn),
	 core_addr_to_string_nz (first_new_block->end));

  /* Do a simple sanity check to make sure we don't accidentally end up
     with a bad block.  This should not occur in practice.  */
  if (first_new_block->end < last_insn->pc)
    {
      warning (_("Error while trying to read delta trace. Falling back to "
		 "a full read."));
      return -1;
    }

  /* We adjust the last block to start at the end of our current trace.  */
  gdb_assert (first_new_block->begin == 0);
  first_new_block->begin = last_insn->pc;

  /* We simply pop the last insn so we can insert it again as part of
     the normal branch trace computation.
     Since instruction iterators are based on indices in the instructions
     vector, we don't leave any pointers dangling.  */
  DEBUG ("pruning insn at %s for stitching",
	 ftrace_print_insn_addr (last_insn));

  VEC_pop (btrace_insn_s, last_bfun->insn);

  /* The instructions vector may become empty temporarily if this has
     been the only instruction in this function segment.
     This violates the invariant but will be remedied shortly by
     btrace_compute_ftrace when we add the new trace.  */

  /* The only case where this would hurt is if the entire trace consisted
     of just that one instruction.  If we remove it, we might turn the now
     empty btrace function segment into a gap.  But we don't want gaps at
     the beginning.  To avoid this, we remove the entire old trace.  */
  if (last_bfun == btinfo->begin && VEC_empty (btrace_insn_s, last_bfun->insn))
    btrace_clear (tp);

  return 0;
}
1194
734b0e4b
MM
1195/* Adjust the block trace in order to stitch old and new trace together.
1196 BTRACE is the new delta trace between the last and the current stop.
31fd9caa
MM
1197 TP is the traced thread.
1198 May modifx BTRACE as well as the existing trace in TP.
734b0e4b
MM
1199 Return 0 on success, -1 otherwise. */
1200
1201static int
31fd9caa 1202btrace_stitch_trace (struct btrace_data *btrace, struct thread_info *tp)
734b0e4b
MM
1203{
1204 /* If we don't have trace, there's nothing to do. */
1205 if (btrace_data_empty (btrace))
1206 return 0;
1207
1208 switch (btrace->format)
1209 {
1210 case BTRACE_FORMAT_NONE:
1211 return 0;
1212
1213 case BTRACE_FORMAT_BTS:
31fd9caa 1214 return btrace_stitch_bts (&btrace->variant.bts, tp);
b20a6524
MM
1215
1216 case BTRACE_FORMAT_PT:
1217 /* Delta reads are not supported. */
1218 return -1;
734b0e4b
MM
1219 }
1220
1221 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
1222}
1223
969c39fb
MM
1224/* Clear the branch trace histories in BTINFO. */
1225
1226static void
1227btrace_clear_history (struct btrace_thread_info *btinfo)
1228{
1229 xfree (btinfo->insn_history);
1230 xfree (btinfo->call_history);
1231 xfree (btinfo->replay);
1232
1233 btinfo->insn_history = NULL;
1234 btinfo->call_history = NULL;
1235 btinfo->replay = NULL;
1236}
1237
b0627500
MM
/* Clear the branch trace maintenance histories in BTINFO.  Resets the
   packet-history cursors for the current trace format and, for Intel PT,
   also frees the decoded packet vector.  */

static void
btrace_maint_clear (struct btrace_thread_info *btinfo)
{
  switch (btinfo->data.format)
    {
    default:
      /* Nothing to clear for formats without maintenance data.  */
      break;

    case BTRACE_FORMAT_BTS:
      btinfo->maint.variant.bts.packet_history.begin = 0;
      btinfo->maint.variant.bts.packet_history.end = 0;
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      /* The PT packet vector is only maintained when libipt is available.  */
      xfree (btinfo->maint.variant.pt.packets);

      btinfo->maint.variant.pt.packets = NULL;
      btinfo->maint.variant.pt.packet_history.begin = 0;
      btinfo->maint.variant.pt.packet_history.end = 0;
      break;
#endif /* defined (HAVE_LIBIPT) */
    }
}
1264
02d27625
MM
/* See btrace.h.  Reads new branch trace for TP: first tries a delta read
   stitched onto the existing trace, then a "new" read, and finally falls
   back to a full read, before recomputing the function-level trace.  */

void
btrace_fetch (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct btrace_target_info *tinfo;
  struct btrace_data btrace;
  struct cleanup *cleanup;
  int errcode;

  DEBUG ("fetch thread %s (%s)", print_thread_id (tp),
	 target_pid_to_str (tp->ptid));

  btinfo = &tp->btrace;
  tinfo = btinfo->target;
  if (tinfo == NULL)
    return;

  /* There's no way we could get new trace while replaying.
     On the other hand, delta trace would return a partial record with the
     current PC, which is the replay PC, not the last PC, as expected.  */
  if (btinfo->replay != NULL)
    return;

  btrace_data_init (&btrace);
  cleanup = make_cleanup_btrace_data (&btrace);

  /* Let's first try to extend the trace we already have.  */
  if (btinfo->end != NULL)
    {
      errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_DELTA);
      if (errcode == 0)
	{
	  /* Success.  Let's try to stitch the traces together.  */
	  errcode = btrace_stitch_trace (&btrace, tp);
	}
      else
	{
	  /* We failed to read delta trace.  Let's try to read new trace.  */
	  errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_NEW);

	  /* If we got any new trace, discard what we have.  */
	  if (errcode == 0 && !btrace_data_empty (&btrace))
	    btrace_clear (tp);
	}

      /* If we were not able to read the trace, we start over.  */
      if (errcode != 0)
	{
	  btrace_clear (tp);
	  errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
	}
    }
  else
    errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);

  /* If we were not able to read the branch trace, signal an error.  */
  if (errcode != 0)
    error (_("Failed to read branch trace."));

  /* Compute the trace, provided we have any.  */
  if (!btrace_data_empty (&btrace))
    {
      /* Store the raw trace data.  The stored data will be cleared in
	 btrace_clear, so we always append the new trace.  */
      btrace_data_append (&btinfo->data, &btrace);
      /* The maintenance data refers to BTINFO->DATA, which just changed.  */
      btrace_maint_clear (btinfo);

      btrace_clear_history (btinfo);
      btrace_compute_ftrace (tp, &btrace);
    }

  do_cleanups (cleanup);
}
1340
1341/* See btrace.h. */
1342
1343void
1344btrace_clear (struct thread_info *tp)
1345{
1346 struct btrace_thread_info *btinfo;
23a7fe75 1347 struct btrace_function *it, *trash;
02d27625 1348
43792cf0
PA
1349 DEBUG ("clear thread %s (%s)", print_thread_id (tp),
1350 target_pid_to_str (tp->ptid));
02d27625 1351
0b722aec
MM
1352 /* Make sure btrace frames that may hold a pointer into the branch
1353 trace data are destroyed. */
1354 reinit_frame_cache ();
1355
02d27625
MM
1356 btinfo = &tp->btrace;
1357
23a7fe75
MM
1358 it = btinfo->begin;
1359 while (it != NULL)
1360 {
1361 trash = it;
1362 it = it->flow.next;
02d27625 1363
23a7fe75
MM
1364 xfree (trash);
1365 }
1366
1367 btinfo->begin = NULL;
1368 btinfo->end = NULL;
31fd9caa 1369 btinfo->ngaps = 0;
23a7fe75 1370
b0627500
MM
1371 /* Must clear the maint data before - it depends on BTINFO->DATA. */
1372 btrace_maint_clear (btinfo);
9be54cae 1373 btrace_data_clear (&btinfo->data);
969c39fb 1374 btrace_clear_history (btinfo);
02d27625
MM
1375}
1376
/* See btrace.h.  Called when OBJFILE goes away; drops all threads' branch
   traces, which may reference symbols from the objfile.  */

void
btrace_free_objfile (struct objfile *objfile)
{
  struct thread_info *tp;

  DEBUG ("free objfile");

  /* NOTE(review): the trace is cleared for every thread regardless of
     OBJFILE; the parameter is unused here.  */
  ALL_NON_EXITED_THREADS (tp)
    btrace_clear (tp);
}
c12a2917
MM
1389
1390#if defined (HAVE_LIBEXPAT)
1391
1392/* Check the btrace document version. */
1393
1394static void
1395check_xml_btrace_version (struct gdb_xml_parser *parser,
1396 const struct gdb_xml_element *element,
1397 void *user_data, VEC (gdb_xml_value_s) *attributes)
1398{
9a3c8263
SM
1399 const char *version
1400 = (const char *) xml_find_attribute (attributes, "version")->value;
c12a2917
MM
1401
1402 if (strcmp (version, "1.0") != 0)
1403 gdb_xml_error (parser, _("Unsupported btrace version: \"%s\""), version);
1404}
1405
1406/* Parse a btrace "block" xml record. */
1407
1408static void
1409parse_xml_btrace_block (struct gdb_xml_parser *parser,
1410 const struct gdb_xml_element *element,
1411 void *user_data, VEC (gdb_xml_value_s) *attributes)
1412{
734b0e4b 1413 struct btrace_data *btrace;
c12a2917
MM
1414 struct btrace_block *block;
1415 ULONGEST *begin, *end;
1416
9a3c8263 1417 btrace = (struct btrace_data *) user_data;
734b0e4b
MM
1418
1419 switch (btrace->format)
1420 {
1421 case BTRACE_FORMAT_BTS:
1422 break;
1423
1424 case BTRACE_FORMAT_NONE:
1425 btrace->format = BTRACE_FORMAT_BTS;
1426 btrace->variant.bts.blocks = NULL;
1427 break;
1428
1429 default:
1430 gdb_xml_error (parser, _("Btrace format error."));
1431 }
c12a2917 1432
bc84451b
SM
1433 begin = (ULONGEST *) xml_find_attribute (attributes, "begin")->value;
1434 end = (ULONGEST *) xml_find_attribute (attributes, "end")->value;
c12a2917 1435
734b0e4b 1436 block = VEC_safe_push (btrace_block_s, btrace->variant.bts.blocks, NULL);
c12a2917
MM
1437 block->begin = *begin;
1438 block->end = *end;
1439}
1440
b20a6524
MM
1441/* Parse a "raw" xml record. */
1442
1443static void
1444parse_xml_raw (struct gdb_xml_parser *parser, const char *body_text,
e7b01ce0 1445 gdb_byte **pdata, size_t *psize)
b20a6524
MM
1446{
1447 struct cleanup *cleanup;
1448 gdb_byte *data, *bin;
e7b01ce0 1449 size_t len, size;
b20a6524
MM
1450
1451 len = strlen (body_text);
e7b01ce0 1452 if (len % 2 != 0)
b20a6524
MM
1453 gdb_xml_error (parser, _("Bad raw data size."));
1454
e7b01ce0
MM
1455 size = len / 2;
1456
224c3ddb 1457 bin = data = (gdb_byte *) xmalloc (size);
b20a6524
MM
1458 cleanup = make_cleanup (xfree, data);
1459
1460 /* We use hex encoding - see common/rsp-low.h. */
1461 while (len > 0)
1462 {
1463 char hi, lo;
1464
1465 hi = *body_text++;
1466 lo = *body_text++;
1467
1468 if (hi == 0 || lo == 0)
1469 gdb_xml_error (parser, _("Bad hex encoding."));
1470
1471 *bin++ = fromhex (hi) * 16 + fromhex (lo);
1472 len -= 2;
1473 }
1474
1475 discard_cleanups (cleanup);
1476
1477 *pdata = data;
1478 *psize = size;
1479}
1480
1481/* Parse a btrace pt-config "cpu" xml record. */
1482
1483static void
1484parse_xml_btrace_pt_config_cpu (struct gdb_xml_parser *parser,
1485 const struct gdb_xml_element *element,
1486 void *user_data,
1487 VEC (gdb_xml_value_s) *attributes)
1488{
1489 struct btrace_data *btrace;
1490 const char *vendor;
1491 ULONGEST *family, *model, *stepping;
1492
9a3c8263
SM
1493 vendor = (const char *) xml_find_attribute (attributes, "vendor")->value;
1494 family = (ULONGEST *) xml_find_attribute (attributes, "family")->value;
1495 model = (ULONGEST *) xml_find_attribute (attributes, "model")->value;
1496 stepping = (ULONGEST *) xml_find_attribute (attributes, "stepping")->value;
b20a6524 1497
9a3c8263 1498 btrace = (struct btrace_data *) user_data;
b20a6524
MM
1499
1500 if (strcmp (vendor, "GenuineIntel") == 0)
1501 btrace->variant.pt.config.cpu.vendor = CV_INTEL;
1502
1503 btrace->variant.pt.config.cpu.family = *family;
1504 btrace->variant.pt.config.cpu.model = *model;
1505 btrace->variant.pt.config.cpu.stepping = *stepping;
1506}
1507
1508/* Parse a btrace pt "raw" xml record. */
1509
1510static void
1511parse_xml_btrace_pt_raw (struct gdb_xml_parser *parser,
1512 const struct gdb_xml_element *element,
1513 void *user_data, const char *body_text)
1514{
1515 struct btrace_data *btrace;
1516
9a3c8263 1517 btrace = (struct btrace_data *) user_data;
b20a6524
MM
1518 parse_xml_raw (parser, body_text, &btrace->variant.pt.data,
1519 &btrace->variant.pt.size);
1520}
1521
1522/* Parse a btrace "pt" xml record. */
1523
1524static void
1525parse_xml_btrace_pt (struct gdb_xml_parser *parser,
1526 const struct gdb_xml_element *element,
1527 void *user_data, VEC (gdb_xml_value_s) *attributes)
1528{
1529 struct btrace_data *btrace;
1530
9a3c8263 1531 btrace = (struct btrace_data *) user_data;
b20a6524
MM
1532 btrace->format = BTRACE_FORMAT_PT;
1533 btrace->variant.pt.config.cpu.vendor = CV_UNKNOWN;
1534 btrace->variant.pt.data = NULL;
1535 btrace->variant.pt.size = 0;
1536}
1537
c12a2917
MM
/* Attributes of a btrace "block" element: begin/end addresses.  */

static const struct gdb_xml_attribute block_attributes[] = {
  { "begin", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { "end", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

/* Attributes of a pt-config "cpu" element.  */

static const struct gdb_xml_attribute btrace_pt_config_cpu_attributes[] = {
  { "vendor", GDB_XML_AF_NONE, NULL, NULL },
  { "family", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { "model", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { "stepping", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

/* Children of a "pt-config" element: an optional "cpu" record.  */

static const struct gdb_xml_element btrace_pt_config_children[] = {
  { "cpu", btrace_pt_config_cpu_attributes, NULL, GDB_XML_EF_OPTIONAL,
    parse_xml_btrace_pt_config_cpu, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

/* Children of a "pt" element: optional config and raw payload.  */

static const struct gdb_xml_element btrace_pt_children[] = {
  { "pt-config", NULL, btrace_pt_config_children, GDB_XML_EF_OPTIONAL, NULL,
    NULL },
  { "raw", NULL, NULL, GDB_XML_EF_OPTIONAL, NULL, parse_xml_btrace_pt_raw },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

/* Attributes of the top-level "btrace" element.  */

static const struct gdb_xml_attribute btrace_attributes[] = {
  { "version", GDB_XML_AF_NONE, NULL, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

/* Children of the "btrace" element: BTS "block" records or a "pt" record.  */

static const struct gdb_xml_element btrace_children[] = {
  { "block", block_attributes, NULL,
    GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL, parse_xml_btrace_block, NULL },
  { "pt", NULL, btrace_pt_children, GDB_XML_EF_OPTIONAL, parse_xml_btrace_pt,
    NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

/* The root of the btrace document.  */

static const struct gdb_xml_element btrace_elements[] = {
  { "btrace", btrace_attributes, btrace_children, GDB_XML_EF_NONE,
    check_xml_btrace_version, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};
1583
1584#endif /* defined (HAVE_LIBEXPAT) */
1585
/* See btrace.h.  Parses BUFFER (a btrace XML document) into BTRACE.
   Errors out when expat support is not compiled in.  */

void
parse_xml_btrace (struct btrace_data *btrace, const char *buffer)
{
  struct cleanup *cleanup;
  int errcode;

#if defined (HAVE_LIBEXPAT)

  btrace->format = BTRACE_FORMAT_NONE;

  /* Clean up partially-parsed data if parsing fails below.  */
  cleanup = make_cleanup_btrace_data (btrace);
  errcode = gdb_xml_parse_quick (_("btrace"), "btrace.dtd", btrace_elements,
				 buffer, btrace);
  if (errcode != 0)
    error (_("Error parsing branch trace."));

  /* Keep parse results.  */
  discard_cleanups (cleanup);

#else  /* !defined (HAVE_LIBEXPAT) */

  error (_("Cannot process branch trace. XML parsing is not supported."));

#endif  /* !defined (HAVE_LIBEXPAT) */
}
23a7fe75 1613
f4abbc16
MM
1614#if defined (HAVE_LIBEXPAT)
1615
1616/* Parse a btrace-conf "bts" xml record. */
1617
1618static void
1619parse_xml_btrace_conf_bts (struct gdb_xml_parser *parser,
1620 const struct gdb_xml_element *element,
1621 void *user_data, VEC (gdb_xml_value_s) *attributes)
1622{
1623 struct btrace_config *conf;
d33501a5 1624 struct gdb_xml_value *size;
f4abbc16 1625
9a3c8263 1626 conf = (struct btrace_config *) user_data;
f4abbc16 1627 conf->format = BTRACE_FORMAT_BTS;
d33501a5
MM
1628 conf->bts.size = 0;
1629
1630 size = xml_find_attribute (attributes, "size");
1631 if (size != NULL)
b20a6524 1632 conf->bts.size = (unsigned int) *(ULONGEST *) size->value;
f4abbc16
MM
1633}
1634
b20a6524
MM
1635/* Parse a btrace-conf "pt" xml record. */
1636
1637static void
1638parse_xml_btrace_conf_pt (struct gdb_xml_parser *parser,
1639 const struct gdb_xml_element *element,
1640 void *user_data, VEC (gdb_xml_value_s) *attributes)
1641{
1642 struct btrace_config *conf;
1643 struct gdb_xml_value *size;
1644
9a3c8263 1645 conf = (struct btrace_config *) user_data;
b20a6524
MM
1646 conf->format = BTRACE_FORMAT_PT;
1647 conf->pt.size = 0;
1648
1649 size = xml_find_attribute (attributes, "size");
1650 if (size != NULL)
1651 conf->pt.size = (unsigned int) *(ULONGEST *) size->value;
1652}
1653
/* Attributes of a btrace-conf "pt" element: optional buffer size.  */

static const struct gdb_xml_attribute btrace_conf_pt_attributes[] = {
  { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

/* Attributes of a btrace-conf "bts" element: optional buffer size.  */

static const struct gdb_xml_attribute btrace_conf_bts_attributes[] = {
  { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

/* Children of "btrace-conf": at most one of "bts" or "pt".  */

static const struct gdb_xml_element btrace_conf_children[] = {
  { "bts", btrace_conf_bts_attributes, NULL, GDB_XML_EF_OPTIONAL,
    parse_xml_btrace_conf_bts, NULL },
  { "pt", btrace_conf_pt_attributes, NULL, GDB_XML_EF_OPTIONAL,
    parse_xml_btrace_conf_pt, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

/* Attributes of the top-level "btrace-conf" element.  */

static const struct gdb_xml_attribute btrace_conf_attributes[] = {
  { "version", GDB_XML_AF_NONE, NULL, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

/* The root of the btrace-conf document.  */

static const struct gdb_xml_element btrace_conf_elements[] = {
  { "btrace-conf", btrace_conf_attributes, btrace_conf_children,
    GDB_XML_EF_NONE, NULL, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};
1682
1683#endif /* defined (HAVE_LIBEXPAT) */
1684
/* See btrace.h.  Parses XML (a btrace-conf document) into CONF.  Errors
   out when expat support is not compiled in.  */

void
parse_xml_btrace_conf (struct btrace_config *conf, const char *xml)
{
  int errcode;

#if defined (HAVE_LIBEXPAT)

  errcode = gdb_xml_parse_quick (_("btrace-conf"), "btrace-conf.dtd",
				 btrace_conf_elements, xml, conf);
  if (errcode != 0)
    error (_("Error parsing branch trace configuration."));

#else  /* !defined (HAVE_LIBEXPAT) */

  error (_("XML parsing is not supported."));

#endif  /* !defined (HAVE_LIBEXPAT) */
}
1705
23a7fe75
MM
1706/* See btrace.h. */
1707
1708const struct btrace_insn *
1709btrace_insn_get (const struct btrace_insn_iterator *it)
1710{
1711 const struct btrace_function *bfun;
1712 unsigned int index, end;
1713
1714 index = it->index;
1715 bfun = it->function;
1716
31fd9caa
MM
1717 /* Check if the iterator points to a gap in the trace. */
1718 if (bfun->errcode != 0)
1719 return NULL;
1720
23a7fe75
MM
1721 /* The index is within the bounds of this function's instruction vector. */
1722 end = VEC_length (btrace_insn_s, bfun->insn);
1723 gdb_assert (0 < end);
1724 gdb_assert (index < end);
1725
1726 return VEC_index (btrace_insn_s, bfun->insn, index);
1727}
1728
1729/* See btrace.h. */
1730
1731unsigned int
1732btrace_insn_number (const struct btrace_insn_iterator *it)
1733{
1734 const struct btrace_function *bfun;
1735
1736 bfun = it->function;
31fd9caa
MM
1737
1738 /* Return zero if the iterator points to a gap in the trace. */
1739 if (bfun->errcode != 0)
1740 return 0;
1741
23a7fe75
MM
1742 return bfun->insn_offset + it->index;
1743}
1744
1745/* See btrace.h. */
1746
1747void
1748btrace_insn_begin (struct btrace_insn_iterator *it,
1749 const struct btrace_thread_info *btinfo)
1750{
1751 const struct btrace_function *bfun;
1752
1753 bfun = btinfo->begin;
1754 if (bfun == NULL)
1755 error (_("No trace."));
1756
1757 it->function = bfun;
1758 it->index = 0;
1759}
1760
1761/* See btrace.h. */
1762
1763void
1764btrace_insn_end (struct btrace_insn_iterator *it,
1765 const struct btrace_thread_info *btinfo)
1766{
1767 const struct btrace_function *bfun;
1768 unsigned int length;
1769
1770 bfun = btinfo->end;
1771 if (bfun == NULL)
1772 error (_("No trace."));
1773
23a7fe75
MM
1774 length = VEC_length (btrace_insn_s, bfun->insn);
1775
31fd9caa
MM
1776 /* The last function may either be a gap or it contains the current
1777 instruction, which is one past the end of the execution trace; ignore
1778 it. */
1779 if (length > 0)
1780 length -= 1;
1781
23a7fe75 1782 it->function = bfun;
31fd9caa 1783 it->index = length;
23a7fe75
MM
1784}
1785
/* See btrace.h.  Advances IT by at most STRIDE instructions and returns
   the number of instructions actually stepped.  A gap (empty function
   segment) counts as a single instruction.  */

unsigned int
btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int index, steps;

  bfun = it->function;
  steps = 0;
  index = it->index;

  while (stride != 0)
    {
      unsigned int end, space, adv;

      end = VEC_length (btrace_insn_s, bfun->insn);

      /* An empty function segment represents a gap in the trace.  We count
	 it as one instruction.  */
      if (end == 0)
	{
	  const struct btrace_function *next;

	  next = bfun->flow.next;
	  if (next == NULL)
	    break;

	  stride -= 1;
	  steps += 1;

	  bfun = next;
	  index = 0;

	  continue;
	}

      gdb_assert (0 < end);
      gdb_assert (index < end);

      /* Compute the number of instructions remaining in this segment.  */
      space = end - index;

      /* Advance the iterator as far as possible within this segment.  */
      adv = min (space, stride);
      stride -= adv;
      index += adv;
      steps += adv;

      /* Move to the next function if we're at the end of this one.  */
      if (index == end)
	{
	  const struct btrace_function *next;

	  next = bfun->flow.next;
	  if (next == NULL)
	    {
	      /* We stepped past the last function.

		 Let's adjust the index to point to the last instruction in
		 the previous function.  */
	      index -= 1;
	      steps -= 1;
	      break;
	    }

	  /* We now point to the first instruction in the new function.  */
	  bfun = next;
	  index = 0;
	}

      /* We did make progress.  */
      gdb_assert (adv > 0);
    }

  /* Update the iterator.  */
  it->function = bfun;
  it->index = index;

  return steps;
}
1867
/* See btrace.h.  Moves IT backwards by at most STRIDE instructions and
   returns the number of instructions actually stepped.  A gap (empty
   function segment) counts as a single instruction.  */

unsigned int
btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int index, steps;

  bfun = it->function;
  steps = 0;
  index = it->index;

  while (stride != 0)
    {
      unsigned int adv;

      /* Move to the previous function if we're at the start of this one.  */
      if (index == 0)
	{
	  const struct btrace_function *prev;

	  prev = bfun->flow.prev;
	  if (prev == NULL)
	    break;

	  /* We point to one after the last instruction in the new function.  */
	  bfun = prev;
	  index = VEC_length (btrace_insn_s, bfun->insn);

	  /* An empty function segment represents a gap in the trace.  We count
	     it as one instruction.  */
	  if (index == 0)
	    {
	      stride -= 1;
	      steps += 1;

	      continue;
	    }
	}

      /* Advance the iterator as far as possible within this segment.  */
      adv = min (index, stride);

      stride -= adv;
      index -= adv;
      steps += adv;

      /* We did make progress.  */
      gdb_assert (adv > 0);
    }

  /* Update the iterator.  */
  it->function = bfun;
  it->index = index;

  return steps;
}
1925
/* See btrace.h.  Returns negative/zero/positive when LHS is before/at/after
   RHS in the instruction trace.  */

int
btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
		 const struct btrace_insn_iterator *rhs)
{
  unsigned int lnum, rnum;

  lnum = btrace_insn_number (lhs);
  rnum = btrace_insn_number (rhs);

  /* A gap has an instruction number of zero.  Things are getting more
     complicated if gaps are involved.

     We take the instruction number offset from the iterator's function.
     This is the number of the first instruction after the gap.

     This is OK as long as both lhs and rhs point to gaps.  If only one of
     them does, we need to adjust the number based on the other's regular
     instruction number.  Otherwise, a gap might compare equal to an
     instruction.  */

  if (lnum == 0 && rnum == 0)
    {
      lnum = lhs->function->insn_offset;
      rnum = rhs->function->insn_offset;
    }
  else if (lnum == 0)
    {
      lnum = lhs->function->insn_offset;

      /* The gap sorts strictly before the instruction it precedes.  */
      if (lnum == rnum)
	lnum -= 1;
    }
  else if (rnum == 0)
    {
      rnum = rhs->function->insn_offset;

      if (rnum == lnum)
	rnum -= 1;
    }

  return (int) (lnum - rnum);
}
1970
1971/* See btrace.h. */
1972
1973int
1974btrace_find_insn_by_number (struct btrace_insn_iterator *it,
1975 const struct btrace_thread_info *btinfo,
1976 unsigned int number)
1977{
1978 const struct btrace_function *bfun;
31fd9caa 1979 unsigned int end, length;
23a7fe75
MM
1980
1981 for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
31fd9caa
MM
1982 {
1983 /* Skip gaps. */
1984 if (bfun->errcode != 0)
1985 continue;
1986
1987 if (bfun->insn_offset <= number)
1988 break;
1989 }
23a7fe75
MM
1990
1991 if (bfun == NULL)
1992 return 0;
1993
31fd9caa
MM
1994 length = VEC_length (btrace_insn_s, bfun->insn);
1995 gdb_assert (length > 0);
1996
1997 end = bfun->insn_offset + length;
23a7fe75
MM
1998 if (end <= number)
1999 return 0;
2000
2001 it->function = bfun;
2002 it->index = number - bfun->insn_offset;
2003
2004 return 1;
2005}
2006
/* See btrace.h.  Returns the function segment IT points at; may be NULL
   for the end iterator (see btrace_call_end).  */

const struct btrace_function *
btrace_call_get (const struct btrace_call_iterator *it)
{
  return it->function;
}
2014
/* See btrace.h.  Returns the 1-based number of the function segment IT
   points at; the end iterator maps to one past the last numbered
   segment.  */

unsigned int
btrace_call_number (const struct btrace_call_iterator *it)
{
  const struct btrace_thread_info *btinfo;
  const struct btrace_function *bfun;
  unsigned int insns;

  btinfo = it->btinfo;
  bfun = it->function;
  if (bfun != NULL)
    return bfun->number;

  /* For the end iterator, i.e. bfun == NULL, we return one more than the
     number of the last function.  */
  bfun = btinfo->end;
  insns = VEC_length (btrace_insn_s, bfun->insn);

  /* If the function contains only a single instruction (i.e. the current
     instruction), it will be skipped and its number is already the number
     we seek.  */
  if (insns == 1)
    return bfun->number;

  /* Otherwise, return one more than the number of the last function.  */
  return bfun->number + 1;
}
2043
2044/* See btrace.h. */
2045
2046void
2047btrace_call_begin (struct btrace_call_iterator *it,
2048 const struct btrace_thread_info *btinfo)
2049{
2050 const struct btrace_function *bfun;
2051
2052 bfun = btinfo->begin;
2053 if (bfun == NULL)
2054 error (_("No trace."));
2055
2056 it->btinfo = btinfo;
2057 it->function = bfun;
2058}
2059
2060/* See btrace.h. */
2061
2062void
2063btrace_call_end (struct btrace_call_iterator *it,
2064 const struct btrace_thread_info *btinfo)
2065{
2066 const struct btrace_function *bfun;
2067
2068 bfun = btinfo->end;
2069 if (bfun == NULL)
2070 error (_("No trace."));
2071
2072 it->btinfo = btinfo;
2073 it->function = NULL;
2074}
2075
/* See btrace.h.  Advances IT by at most STRIDE function segments and
   returns the number of segments actually stepped.  */

unsigned int
btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int steps;

  bfun = it->function;
  steps = 0;
  while (bfun != NULL)
    {
      const struct btrace_function *next;
      unsigned int insns;

      next = bfun->flow.next;
      if (next == NULL)
	{
	  /* Ignore the last function if it only contains a single
	     (i.e. the current) instruction.  NOTE(review): STEPS may
	     wrap around (unsigned) if it is still zero here; the
	     "steps += 1" below undoes the wrap before the value is
	     returned.  */
	  insns = VEC_length (btrace_insn_s, bfun->insn);
	  if (insns == 1)
	    steps -= 1;
	}

      if (stride == steps)
	break;

      bfun = next;
      steps += 1;
    }

  it->function = bfun;
  return steps;
}
2111
/* See btrace.h.  Moves IT backwards by at most STRIDE function segments
   and returns the number of segments actually stepped.  */

unsigned int
btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride)
{
  const struct btrace_thread_info *btinfo;
  const struct btrace_function *bfun;
  unsigned int steps;

  bfun = it->function;
  steps = 0;

  /* Stepping back from the end iterator needs special handling.  */
  if (bfun == NULL)
    {
      unsigned int insns;

      btinfo = it->btinfo;
      bfun = btinfo->end;
      if (bfun == NULL)
	return 0;

      /* Ignore the last function if it only contains a single
	 (i.e. the current) instruction.  */
      insns = VEC_length (btrace_insn_s, bfun->insn);
      if (insns == 1)
	bfun = bfun->flow.prev;

      if (bfun == NULL)
	return 0;

      steps += 1;
    }

  while (steps < stride)
    {
      const struct btrace_function *prev;

      prev = bfun->flow.prev;
      if (prev == NULL)
	break;

      bfun = prev;
      steps += 1;
    }

  it->function = bfun;
  return steps;
}
2160
2161/* See btrace.h. */
2162
int
btrace_call_cmp (const struct btrace_call_iterator *lhs,
		 const struct btrace_call_iterator *rhs)
{
  unsigned int lhs_number = btrace_call_number (lhs);
  unsigned int rhs_number = btrace_call_number (rhs);

  /* Negative if LHS comes first, zero if equal, positive otherwise.  */
  return (int) (lhs_number - rhs_number);
}
2174
2175/* See btrace.h. */
2176
2177int
2178btrace_find_call_by_number (struct btrace_call_iterator *it,
2179 const struct btrace_thread_info *btinfo,
2180 unsigned int number)
2181{
2182 const struct btrace_function *bfun;
2183
2184 for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
2185 {
2186 unsigned int bnum;
2187
2188 bnum = bfun->number;
2189 if (number == bnum)
2190 {
2191 it->btinfo = btinfo;
2192 it->function = bfun;
2193 return 1;
2194 }
2195
2196 /* Functions are ordered and numbered consecutively. We could bail out
2197 earlier. On the other hand, it is very unlikely that we search for
2198 a nonexistent function. */
2199 }
2200
2201 return 0;
2202}
2203
2204/* See btrace.h. */
2205
2206void
2207btrace_set_insn_history (struct btrace_thread_info *btinfo,
2208 const struct btrace_insn_iterator *begin,
2209 const struct btrace_insn_iterator *end)
2210{
2211 if (btinfo->insn_history == NULL)
8d749320 2212 btinfo->insn_history = XCNEW (struct btrace_insn_history);
23a7fe75
MM
2213
2214 btinfo->insn_history->begin = *begin;
2215 btinfo->insn_history->end = *end;
2216}
2217
2218/* See btrace.h. */
2219
2220void
2221btrace_set_call_history (struct btrace_thread_info *btinfo,
2222 const struct btrace_call_iterator *begin,
2223 const struct btrace_call_iterator *end)
2224{
2225 gdb_assert (begin->btinfo == end->btinfo);
2226
2227 if (btinfo->call_history == NULL)
8d749320 2228 btinfo->call_history = XCNEW (struct btrace_call_history);
23a7fe75
MM
2229
2230 btinfo->call_history->begin = *begin;
2231 btinfo->call_history->end = *end;
2232}
07bbe694
MM
2233
2234/* See btrace.h. */
2235
2236int
2237btrace_is_replaying (struct thread_info *tp)
2238{
2239 return tp->btrace.replay != NULL;
2240}
6e07b1d2
MM
2241
2242/* See btrace.h. */
2243
2244int
2245btrace_is_empty (struct thread_info *tp)
2246{
2247 struct btrace_insn_iterator begin, end;
2248 struct btrace_thread_info *btinfo;
2249
2250 btinfo = &tp->btrace;
2251
2252 if (btinfo->begin == NULL)
2253 return 1;
2254
2255 btrace_insn_begin (&begin, btinfo);
2256 btrace_insn_end (&end, btinfo);
2257
2258 return btrace_insn_cmp (&begin, &end) == 0;
2259}
734b0e4b
MM
2260
2261/* Forward the cleanup request. */
2262
static void
do_btrace_data_cleanup (void *arg)
{
  struct btrace_data *data = (struct btrace_data *) arg;

  /* Forward the cleanup request to the btrace data destructor.  */
  btrace_data_fini (data);
}
2268
2269/* See btrace.h. */
2270
2271struct cleanup *
2272make_cleanup_btrace_data (struct btrace_data *data)
2273{
2274 return make_cleanup (do_btrace_data_cleanup, data);
2275}
b0627500
MM
2276
2277#if defined (HAVE_LIBIPT)
2278
2279/* Print a single packet. */
2280
static void
pt_print_packet (const struct pt_packet *packet)
{
  /* The mnemonics printed below follow the libipt packet type names
     (ppt_*); payload fields are printed in hex where they represent
     addresses or raw counter values.  */
  switch (packet->type)
    {
    default:
      /* Unknown packet type - print the raw type value.  */
      printf_unfiltered (("[??: %x]"), packet->type);
      break;

    case ppt_psb:
      printf_unfiltered (("psb"));
      break;

    case ppt_psbend:
      printf_unfiltered (("psbend"));
      break;

    case ppt_pad:
      printf_unfiltered (("pad"));
      break;

    case ppt_tip:
      printf_unfiltered (("tip %u: 0x%" PRIx64 ""),
			 packet->payload.ip.ipc,
			 packet->payload.ip.ip);
      break;

    case ppt_tip_pge:
      printf_unfiltered (("tip.pge %u: 0x%" PRIx64 ""),
			 packet->payload.ip.ipc,
			 packet->payload.ip.ip);
      break;

    case ppt_tip_pgd:
      printf_unfiltered (("tip.pgd %u: 0x%" PRIx64 ""),
			 packet->payload.ip.ipc,
			 packet->payload.ip.ip);
      break;

    case ppt_fup:
      printf_unfiltered (("fup %u: 0x%" PRIx64 ""),
			 packet->payload.ip.ipc,
			 packet->payload.ip.ip);
      break;

    case ppt_tnt_8:
      printf_unfiltered (("tnt-8 %u: 0x%" PRIx64 ""),
			 packet->payload.tnt.bit_size,
			 packet->payload.tnt.payload);
      break;

    case ppt_tnt_64:
      printf_unfiltered (("tnt-64 %u: 0x%" PRIx64 ""),
			 packet->payload.tnt.bit_size,
			 packet->payload.tnt.payload);
      break;

    case ppt_pip:
      printf_unfiltered (("pip %" PRIx64 "%s"), packet->payload.pip.cr3,
			 packet->payload.pip.nr ? (" nr") : (""));
      break;

    case ppt_tsc:
      printf_unfiltered (("tsc %" PRIx64 ""), packet->payload.tsc.tsc);
      break;

    case ppt_cbr:
      printf_unfiltered (("cbr %u"), packet->payload.cbr.ratio);
      break;

    case ppt_mode:
      /* MODE packets carry a sub-leaf selecting the payload layout.  */
      switch (packet->payload.mode.leaf)
	{
	default:
	  printf_unfiltered (("mode %u"), packet->payload.mode.leaf);
	  break;

	case pt_mol_exec:
	  printf_unfiltered (("mode.exec%s%s"),
			     packet->payload.mode.bits.exec.csl
			     ? (" cs.l") : (""),
			     packet->payload.mode.bits.exec.csd
			     ? (" cs.d") : (""));
	  break;

	case pt_mol_tsx:
	  printf_unfiltered (("mode.tsx%s%s"),
			     packet->payload.mode.bits.tsx.intx
			     ? (" intx") : (""),
			     packet->payload.mode.bits.tsx.abrt
			     ? (" abrt") : (""));
	  break;
	}
      break;

    case ppt_ovf:
      printf_unfiltered (("ovf"));
      break;

    case ppt_stop:
      printf_unfiltered (("stop"));
      break;

    case ppt_vmcs:
      printf_unfiltered (("vmcs %" PRIx64 ""), packet->payload.vmcs.base);
      break;

    case ppt_tma:
      printf_unfiltered (("tma %x %x"), packet->payload.tma.ctc,
			 packet->payload.tma.fc);
      break;

    case ppt_mtc:
      printf_unfiltered (("mtc %x"), packet->payload.mtc.ctc);
      break;

    case ppt_cyc:
      printf_unfiltered (("cyc %" PRIx64 ""), packet->payload.cyc.value);
      break;

    case ppt_mnt:
      printf_unfiltered (("mnt %" PRIx64 ""), packet->payload.mnt.payload);
      break;
    }
}
2406
2407/* Decode packets into MAINT using DECODER. */
2408
static void
btrace_maint_decode_pt (struct btrace_maint_info *maint,
			struct pt_packet_decoder *decoder)
{
  int errcode;

  /* Outer loop: synchronize onto the trace stream; inner loop: read
     packets until the decoder reports an error.  On a non-EOS error we
     record the bad packet, warn, and try to re-synchronize.  */
  for (;;)
    {
      struct btrace_pt_packet packet;

      errcode = pt_pkt_sync_forward (decoder);
      if (errcode < 0)
	break;

      for (;;)
	{
	  pt_pkt_get_offset (decoder, &packet.offset);

	  errcode = pt_pkt_next (decoder, &packet.packet,
				 sizeof(packet.packet));
	  if (errcode < 0)
	    break;

	  /* Optionally drop PAD packets, per the
	     "maint set btrace pt skip-pad" setting.  */
	  if (maint_btrace_pt_skip_pad == 0 || packet.packet.type != ppt_pad)
	    {
	      packet.errcode = pt_errcode (errcode);
	      VEC_safe_push (btrace_pt_packet_s, maint->variant.pt.packets,
			     &packet);
	    }
	}

      /* End-of-stream terminates decoding normally.  */
      if (errcode == -pte_eos)
	break;

      /* Record the offending packet together with its error code so it
	 shows up in the packet history, then warn and re-sync.  */
      packet.errcode = pt_errcode (errcode);
      VEC_safe_push (btrace_pt_packet_s, maint->variant.pt.packets,
		     &packet);

      warning (_("Error at trace offset 0x%" PRIx64 ": %s."),
	       packet.offset, pt_errstr (packet.errcode));
    }

  if (errcode != -pte_eos)
    warning (_("Failed to synchronize onto the Intel Processor Trace "
	       "stream: %s."), pt_errstr (pt_errcode (errcode)));
}
2455
2456/* Update the packet history in BTINFO. */
2457
2458static void
2459btrace_maint_update_pt_packets (struct btrace_thread_info *btinfo)
2460{
2461 volatile struct gdb_exception except;
2462 struct pt_packet_decoder *decoder;
2463 struct btrace_data_pt *pt;
2464 struct pt_config config;
2465 int errcode;
2466
2467 pt = &btinfo->data.variant.pt;
2468
2469 /* Nothing to do if there is no trace. */
2470 if (pt->size == 0)
2471 return;
2472
2473 memset (&config, 0, sizeof(config));
2474
2475 config.size = sizeof (config);
2476 config.begin = pt->data;
2477 config.end = pt->data + pt->size;
2478
2479 config.cpu.vendor = pt_translate_cpu_vendor (pt->config.cpu.vendor);
2480 config.cpu.family = pt->config.cpu.family;
2481 config.cpu.model = pt->config.cpu.model;
2482 config.cpu.stepping = pt->config.cpu.stepping;
2483
2484 errcode = pt_cpu_errata (&config.errata, &config.cpu);
2485 if (errcode < 0)
bc504a31 2486 error (_("Failed to configure the Intel Processor Trace decoder: %s."),
b0627500
MM
2487 pt_errstr (pt_errcode (errcode)));
2488
2489 decoder = pt_pkt_alloc_decoder (&config);
2490 if (decoder == NULL)
bc504a31 2491 error (_("Failed to allocate the Intel Processor Trace decoder."));
b0627500
MM
2492
2493 TRY
2494 {
2495 btrace_maint_decode_pt (&btinfo->maint, decoder);
2496 }
2497 CATCH (except, RETURN_MASK_ALL)
2498 {
2499 pt_pkt_free_decoder (decoder);
2500
2501 if (except.reason < 0)
2502 throw_exception (except);
2503 }
2504 END_CATCH
2505
2506 pt_pkt_free_decoder (decoder);
2507}
2508
#endif /* defined (HAVE_LIBIPT) */
2510
2511/* Update the packet maintenance information for BTINFO and store the
2512 low and high bounds into BEGIN and END, respectively.
2513 Store the current iterator state into FROM and TO. */
2514
static void
btrace_maint_update_packets (struct btrace_thread_info *btinfo,
			     unsigned int *begin, unsigned int *end,
			     unsigned int *from, unsigned int *to)
{
  switch (btinfo->data.format)
    {
    default:
      /* No packet data for this trace format - report an empty range.  */
      *begin = 0;
      *end = 0;
      *from = 0;
      *to = 0;
      break;

    case BTRACE_FORMAT_BTS:
      /* Nothing to do - we operate directly on BTINFO->DATA.  */
      *begin = 0;
      *end = VEC_length (btrace_block_s, btinfo->data.variant.bts.blocks);
      *from = btinfo->maint.variant.bts.packet_history.begin;
      *to = btinfo->maint.variant.bts.packet_history.end;
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      /* Decode the PT packet cache lazily on first use.  */
      if (VEC_empty (btrace_pt_packet_s, btinfo->maint.variant.pt.packets))
	btrace_maint_update_pt_packets (btinfo);

      *begin = 0;
      *end = VEC_length (btrace_pt_packet_s, btinfo->maint.variant.pt.packets);
      *from = btinfo->maint.variant.pt.packet_history.begin;
      *to = btinfo->maint.variant.pt.packet_history.end;
      break;
#endif /* defined (HAVE_LIBIPT) */
    }
}
2550
2551/* Print packets in BTINFO from BEGIN (inclusive) until END (exclusive) and
2552 update the current iterator position. */
2553
static void
btrace_maint_print_packets (struct btrace_thread_info *btinfo,
			    unsigned int begin, unsigned int end)
{
  switch (btinfo->data.format)
    {
    default:
      /* No packet data to print for this trace format.  */
      break;

    case BTRACE_FORMAT_BTS:
      {
	VEC (btrace_block_s) *blocks;
	unsigned int blk;

	blocks = btinfo->data.variant.bts.blocks;
	for (blk = begin; blk < end; ++blk)
	  {
	    const btrace_block_s *block;

	    block = VEC_index (btrace_block_s, blocks, blk);

	    printf_unfiltered ("%u\tbegin: %s, end: %s\n", blk,
			       core_addr_to_string_nz (block->begin),
			       core_addr_to_string_nz (block->end));
	  }

	/* Remember the printed range for the next
	   "maint btrace packet-history" invocation.  */
	btinfo->maint.variant.bts.packet_history.begin = begin;
	btinfo->maint.variant.bts.packet_history.end = end;
      }
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      {
	VEC (btrace_pt_packet_s) *packets;
	unsigned int pkt;

	packets = btinfo->maint.variant.pt.packets;
	for (pkt = begin; pkt < end; ++pkt)
	  {
	    const struct btrace_pt_packet *packet;

	    packet = VEC_index (btrace_pt_packet_s, packets, pkt);

	    printf_unfiltered ("%u\t", pkt);
	    printf_unfiltered ("0x%" PRIx64 "\t", packet->offset);

	    /* Packets that were recorded with a decode error are shown
	       with the error text instead of their payload.  */
	    if (packet->errcode == pte_ok)
	      pt_print_packet (&packet->packet);
	    else
	      printf_unfiltered ("[error: %s]", pt_errstr (packet->errcode));

	    printf_unfiltered ("\n");
	  }

	/* Remember the printed range for the next
	   "maint btrace packet-history" invocation.  */
	btinfo->maint.variant.pt.packet_history.begin = begin;
	btinfo->maint.variant.pt.packet_history.end = end;
      }
      break;
#endif /* defined (HAVE_LIBIPT) */
    }
}
2616
2617/* Read a number from an argument string. */
2618
static unsigned int
get_uint (char **arg)
{
  char *begin, *end, *pos;
  unsigned long number;

  begin = *arg;
  pos = skip_spaces (begin);

  /* Cast to unsigned char: passing a plain char that may be negative to
     isdigit is undefined behavior (CERT STR37-C).  */
  if (!isdigit ((unsigned char) *pos))
    error (_("Expected positive number, got: %s."), pos);

  number = strtoul (pos, &end, 10);
  if (number > UINT_MAX)
    error (_("Number too big."));

  /* Advance the caller's pointer past the consumed characters.  */
  *arg += (end - begin);

  return (unsigned int) number;
}
2639
2640/* Read a context size from an argument string. */
2641
static int
get_context_size (char **arg)
{
  char *pos;

  pos = skip_spaces (*arg);

  /* Cast to unsigned char: passing a plain char that may be negative to
     isdigit is undefined behavior (CERT STR37-C).  Also removed the
     unused local 'number'.  */
  if (!isdigit ((unsigned char) *pos))
    error (_("Expected positive number, got: %s."), pos);

  /* strtol advances *ARG past the number for the caller.  */
  return strtol (pos, arg, 10);
}
2655
2656/* Complain about junk at the end of an argument string. */
2657
static void
no_chunk (char *arg)
{
  /* Anything but the terminating NUL is unexpected trailing input.  */
  if (*arg != 0)
    error (_("Junk after argument: %s."), arg);
}
2664
2665/* The "maintenance btrace packet-history" command. */
2666
static void
maint_btrace_packet_history_cmd (char *arg, int from_tty)
{
  struct btrace_thread_info *btinfo;
  struct thread_info *tp;
  unsigned int size, begin, end, from, to;

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  /* Default number of packets per print.  */
  size = 10;
  btinfo = &tp->btrace;

  /* BEGIN/END bound the available packets; FROM/TO is the previously
     printed window.  */
  btrace_maint_update_packets (btinfo, &begin, &end, &from, &to);
  if (begin == end)
    {
      printf_unfiltered (_("No trace.\n"));
      return;
    }

  if (arg == NULL || *arg == 0 || strcmp (arg, "+") == 0)
    {
      /* No argument or "+": continue forward from the last window.  */
      from = to;

      if (end - from < size)
	size = end - from;
      to = from + size;
    }
  else if (strcmp (arg, "-") == 0)
    {
      /* "-": print the window preceding the last one.  */
      to = from;

      if (to - begin < size)
	size = to - begin;
      from = to - size;
    }
  else
    {
      /* Explicit start packet, optionally followed by ",<end>",
	 ",+<count>" or ",-<count>".  */
      from = get_uint (&arg);
      if (end <= from)
	error (_("'%u' is out of range."), from);

      arg = skip_spaces (arg);
      if (*arg == ',')
	{
	  arg = skip_spaces (++arg);

	  if (*arg == '+')
	    {
	      /* ",+N": print N packets starting at FROM.  */
	      arg += 1;
	      size = get_context_size (&arg);

	      no_chunk (arg);

	      if (end - from < size)
		size = end - from;
	      to = from + size;
	    }
	  else if (*arg == '-')
	    {
	      /* ",-N": print N packets ending at FROM.  */
	      arg += 1;
	      size = get_context_size (&arg);

	      no_chunk (arg);

	      /* Include the packet given as first argument.  */
	      from += 1;
	      to = from;

	      if (to - begin < size)
		size = to - begin;
	      from = to - size;
	    }
	  else
	    {
	      /* ",N": print packets FROM through N inclusive.  */
	      to = get_uint (&arg);

	      /* Include the packet at the second argument and silently
		 truncate the range.  */
	      if (to < end)
		to += 1;
	      else
		to = end;

	      no_chunk (arg);
	    }
	}
      else
	{
	  no_chunk (arg);

	  if (end - from < size)
	    size = end - from;
	  to = from + size;
	}

      /* NOTE(review): dont_repeat is only called for the explicit-range
	 forms, so a bare <RET> after "+"/"-"/no-argument repeats the
	 command and keeps scrolling — presumably intentional; verify.  */
      dont_repeat ();
    }

  btrace_maint_print_packets (btinfo, from, to);
}
2769
2770/* The "maintenance btrace clear-packet-history" command. */
2771
2772static void
2773maint_btrace_clear_packet_history_cmd (char *args, int from_tty)
2774{
2775 struct btrace_thread_info *btinfo;
2776 struct thread_info *tp;
2777
2778 if (args != NULL && *args != 0)
2779 error (_("Invalid argument."));
2780
2781 tp = find_thread_ptid (inferior_ptid);
2782 if (tp == NULL)
2783 error (_("No thread."));
2784
2785 btinfo = &tp->btrace;
2786
2787 /* Must clear the maint data before - it depends on BTINFO->DATA. */
2788 btrace_maint_clear (btinfo);
2789 btrace_data_clear (&btinfo->data);
2790}
2791
2792/* The "maintenance btrace clear" command. */
2793
2794static void
2795maint_btrace_clear_cmd (char *args, int from_tty)
2796{
2797 struct btrace_thread_info *btinfo;
2798 struct thread_info *tp;
2799
2800 if (args != NULL && *args != 0)
2801 error (_("Invalid argument."));
2802
2803 tp = find_thread_ptid (inferior_ptid);
2804 if (tp == NULL)
2805 error (_("No thread."));
2806
2807 btrace_clear (tp);
2808}
2809
2810/* The "maintenance btrace" command. */
2811
2812static void
2813maint_btrace_cmd (char *args, int from_tty)
2814{
2815 help_list (maint_btrace_cmdlist, "maintenance btrace ", all_commands,
2816 gdb_stdout);
2817}
2818
2819/* The "maintenance set btrace" command. */
2820
2821static void
2822maint_btrace_set_cmd (char *args, int from_tty)
2823{
2824 help_list (maint_btrace_set_cmdlist, "maintenance set btrace ", all_commands,
2825 gdb_stdout);
2826}
2827
2828/* The "maintenance show btrace" command. */
2829
2830static void
2831maint_btrace_show_cmd (char *args, int from_tty)
2832{
2833 help_list (maint_btrace_show_cmdlist, "maintenance show btrace ",
2834 all_commands, gdb_stdout);
2835}
2836
2837/* The "maintenance set btrace pt" command. */
2838
2839static void
2840maint_btrace_pt_set_cmd (char *args, int from_tty)
2841{
2842 help_list (maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ",
2843 all_commands, gdb_stdout);
2844}
2845
2846/* The "maintenance show btrace pt" command. */
2847
2848static void
2849maint_btrace_pt_show_cmd (char *args, int from_tty)
2850{
2851 help_list (maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ",
2852 all_commands, gdb_stdout);
2853}
2854
2855/* The "maintenance info btrace" command. */
2856
static void
maint_info_btrace_cmd (char *args, int from_tty)
{
  struct btrace_thread_info *btinfo;
  struct thread_info *tp;
  const struct btrace_config *conf;

  /* This command takes no arguments.  */
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  btinfo = &tp->btrace;

  conf = btrace_conf (btinfo);
  if (conf == NULL)
    error (_("No btrace configuration."));

  printf_unfiltered (_("Format: %s.\n"),
		     btrace_format_string (conf->format));

  switch (conf->format)
    {
    default:
      /* Nothing format-specific to report.  */
      break;

    case BTRACE_FORMAT_BTS:
      printf_unfiltered (_("Number of packets: %u.\n"),
			 VEC_length (btrace_block_s,
				     btinfo->data.variant.bts.blocks));
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      {
	struct pt_version version;

	version = pt_library_version ();
	printf_unfiltered (_("Version: %u.%u.%u%s.\n"), version.major,
			   version.minor, version.build,
			   version.ext != NULL ? version.ext : "");

	/* Make sure the packet cache is up to date before counting.  */
	btrace_maint_update_pt_packets (btinfo);
	printf_unfiltered (_("Number of packets: %u.\n"),
			   VEC_length (btrace_pt_packet_s,
				       btinfo->maint.variant.pt.packets));
      }
      break;
#endif /* defined (HAVE_LIBIPT) */
    }
}
2910
2911/* The "maint show btrace pt skip-pad" show value function. */
2912
static void
show_maint_btrace_pt_skip_pad (struct ui_file *file, int from_tty,
			       struct cmd_list_element *c,
			       const char *value)
{
  /* VALUE is the already-formatted setting supplied by the framework.  */
  fprintf_filtered (file, _("Skip PAD packets is %s.\n"), value);
}
2920
2921
2922/* Initialize btrace maintenance commands. */
2923
/* Explicit prototype for the module initializer - presumably to satisfy
   -Wmissing-prototypes; confirm against GDB's init convention.  */
void _initialize_btrace (void);
void
_initialize_btrace (void)
{
  /* "maint info btrace".  */
  add_cmd ("btrace", class_maintenance, maint_info_btrace_cmd,
	   _("Info about branch tracing data."), &maintenanceinfolist);

  /* "maint btrace" prefix and its set/show sub-prefixes.  */
  add_prefix_cmd ("btrace", class_maintenance, maint_btrace_cmd,
		  _("Branch tracing maintenance commands."),
		  &maint_btrace_cmdlist, "maintenance btrace ",
		  0, &maintenancelist);

  add_prefix_cmd ("btrace", class_maintenance, maint_btrace_set_cmd, _("\
Set branch tracing specific variables."),
		  &maint_btrace_set_cmdlist, "maintenance set btrace ",
		  0, &maintenance_set_cmdlist);

  add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_set_cmd, _("\
Set Intel Processor Trace specific variables."),
		  &maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ",
		  0, &maint_btrace_set_cmdlist);

  add_prefix_cmd ("btrace", class_maintenance, maint_btrace_show_cmd, _("\
Show branch tracing specific variables."),
		  &maint_btrace_show_cmdlist, "maintenance show btrace ",
		  0, &maintenance_show_cmdlist);

  add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_show_cmd, _("\
Show Intel Processor Trace specific variables."),
		  &maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ",
		  0, &maint_btrace_show_cmdlist);

  /* "maint set/show btrace pt skip-pad" - backed by
     maint_btrace_pt_skip_pad.  */
  add_setshow_boolean_cmd ("skip-pad", class_maintenance,
			   &maint_btrace_pt_skip_pad, _("\
Set whether PAD packets should be skipped in the btrace packet history."), _("\
Show whether PAD packets should be skipped in the btrace packet history."),_("\
When enabled, PAD packets are ignored in the btrace packet history."),
			   NULL, show_maint_btrace_pt_skip_pad,
			   &maint_btrace_pt_set_cmdlist,
			   &maint_btrace_pt_show_cmdlist);

  /* The packet-history browsing and clearing commands.  */
  add_cmd ("packet-history", class_maintenance, maint_btrace_packet_history_cmd,
	   _("Print the raw branch tracing data.\n\
With no argument, print ten more packets after the previous ten-line print.\n\
With '-' as argument print ten packets before a previous ten-line print.\n\
One argument specifies the starting packet of a ten-line print.\n\
Two arguments with comma between specify starting and ending packets to \
print.\n\
Preceded with '+'/'-' the second argument specifies the distance from the \
first.\n"),
	   &maint_btrace_cmdlist);

  add_cmd ("clear-packet-history", class_maintenance,
	   maint_btrace_clear_packet_history_cmd,
	   _("Clears the branch tracing packet history.\n\
Discards the raw branch tracing data but not the execution history data.\n\
"),
	   &maint_btrace_cmdlist);

  add_cmd ("clear", class_maintenance, maint_btrace_clear_cmd,
	   _("Clears the branch tracing data.\n\
Discards the raw branch tracing data and the execution history data.\n\
The next 'record' command will fetch the branch tracing data anew.\n\
"),
	   &maint_btrace_cmdlist);

}
This page took 0.430217 seconds and 4 git commands to generate.