btrace: preserve function level for unexpected returns
gdb/btrace.c (binutils-gdb.git)
1/* Branch trace support for GDB, the GNU debugger.
2
3 Copyright (C) 2013-2016 Free Software Foundation, Inc.
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22#include "defs.h"
23#include "btrace.h"
24#include "gdbthread.h"
25#include "inferior.h"
26#include "target.h"
27#include "record.h"
28#include "symtab.h"
29#include "disasm.h"
30#include "source.h"
31#include "filenames.h"
32#include "xml-support.h"
33#include "regcache.h"
34#include "rsp-low.h"
35#include "gdbcmd.h"
36#include "cli/cli-utils.h"
37
38#include <inttypes.h>
39#include <ctype.h>
40#include <algorithm>
41
42/* Command lists for btrace maintenance commands. */
43static struct cmd_list_element *maint_btrace_cmdlist;
44static struct cmd_list_element *maint_btrace_set_cmdlist;
45static struct cmd_list_element *maint_btrace_show_cmdlist;
46static struct cmd_list_element *maint_btrace_pt_set_cmdlist;
47static struct cmd_list_element *maint_btrace_pt_show_cmdlist;
48
49/* Control whether to skip PAD packets when computing the packet history. */
50static int maint_btrace_pt_skip_pad = 1;
51
52static void btrace_add_pc (struct thread_info *tp);
53
54/* Print a record debug message. Use do ... while (0) to avoid ambiguities
55 when used in if statements. */
56
57#define DEBUG(msg, args...) \
58 do \
59 { \
60 if (record_debug != 0) \
61 fprintf_unfiltered (gdb_stdlog, \
62 "[btrace] " msg "\n", ##args); \
63 } \
64 while (0)
65
66#define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args)
67
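/* Usage sketch (illustrative, not part of the original file): with record
   debugging enabled (record_debug != 0, presumably via "set debug record 1"),
   a call such as

     DEBUG ("enable thread %s", target_pid_to_str (tp->ptid));

   prints "[btrace] enable thread ..." to gdb_stdlog, and
   DEBUG_FTRACE ("new call") prints "[btrace] [ftrace] new call".  */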
68/* Return the function name of a recorded function segment for printing.
69 This function never returns NULL. */
70
71static const char *
72ftrace_print_function_name (const struct btrace_function *bfun)
73{
74 struct minimal_symbol *msym;
75 struct symbol *sym;
76
77 msym = bfun->msym;
78 sym = bfun->sym;
79
80 if (sym != NULL)
81 return SYMBOL_PRINT_NAME (sym);
82
83 if (msym != NULL)
84 return MSYMBOL_PRINT_NAME (msym);
85
86 return "<unknown>";
87}
88
89/* Return the file name of a recorded function segment for printing.
90 This function never returns NULL. */
91
92static const char *
93ftrace_print_filename (const struct btrace_function *bfun)
94{
95 struct symbol *sym;
96 const char *filename;
97
98 sym = bfun->sym;
99
100 if (sym != NULL)
101 filename = symtab_to_filename_for_display (symbol_symtab (sym));
102 else
103 filename = "<unknown>";
104
105 return filename;
106}
107
108/* Return a string representation of the address of an instruction.
109 This function never returns NULL. */
110
111static const char *
112ftrace_print_insn_addr (const struct btrace_insn *insn)
113{
114 if (insn == NULL)
115 return "<nil>";
116
117 return core_addr_to_string_nz (insn->pc);
118}
119
120/* Print an ftrace debug status message. */
121
122static void
123ftrace_debug (const struct btrace_function *bfun, const char *prefix)
124{
125 const char *fun, *file;
126 unsigned int ibegin, iend;
127 int level;
128
129 fun = ftrace_print_function_name (bfun);
130 file = ftrace_print_filename (bfun);
131 level = bfun->level;
132
133 ibegin = bfun->insn_offset;
134 iend = ibegin + VEC_length (btrace_insn_s, bfun->insn);
135
136 DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, insn = [%u; %u)",
137 prefix, fun, file, level, ibegin, iend);
138}
139
140/* Return non-zero if BFUN does not match MFUN and FUN,
141 return zero otherwise. */
142
143static int
144ftrace_function_switched (const struct btrace_function *bfun,
145 const struct minimal_symbol *mfun,
146 const struct symbol *fun)
147{
148 struct minimal_symbol *msym;
149 struct symbol *sym;
150
151 msym = bfun->msym;
152 sym = bfun->sym;
153
154 /* If the minimal symbol changed, we certainly switched functions. */
155 if (mfun != NULL && msym != NULL
156 && strcmp (MSYMBOL_LINKAGE_NAME (mfun), MSYMBOL_LINKAGE_NAME (msym)) != 0)
157 return 1;
158
159 /* If the symbol changed, we certainly switched functions. */
160 if (fun != NULL && sym != NULL)
161 {
162 const char *bfname, *fname;
163
164 /* Check the function name. */
165 if (strcmp (SYMBOL_LINKAGE_NAME (fun), SYMBOL_LINKAGE_NAME (sym)) != 0)
166 return 1;
167
168 /* Check the location of those functions, as well. */
169 bfname = symtab_to_fullname (symbol_symtab (sym));
170 fname = symtab_to_fullname (symbol_symtab (fun));
171 if (filename_cmp (fname, bfname) != 0)
172 return 1;
173 }
174
175 /* If we lost symbol information, we switched functions. */
176 if (!(msym == NULL && sym == NULL) && mfun == NULL && fun == NULL)
177 return 1;
178
179 /* If we gained symbol information, we switched functions. */
180 if (msym == NULL && sym == NULL && !(mfun == NULL && fun == NULL))
181 return 1;
182
183 return 0;
184}
185
186/* Allocate and initialize a new branch trace function segment.
187 PREV is the chronologically preceding function segment.
188 MFUN and FUN are the symbol information we have for this function. */
189
190static struct btrace_function *
191ftrace_new_function (struct btrace_function *prev,
192 struct minimal_symbol *mfun,
193 struct symbol *fun)
194{
195 struct btrace_function *bfun;
196
197 bfun = XCNEW (struct btrace_function);
198
199 bfun->msym = mfun;
200 bfun->sym = fun;
201 bfun->flow.prev = prev;
202
203 if (prev == NULL)
204 {
205 /* Start counting at one. */
206 bfun->number = 1;
207 bfun->insn_offset = 1;
208 }
209 else
210 {
211 gdb_assert (prev->flow.next == NULL);
212 prev->flow.next = bfun;
213
214 bfun->number = prev->number + 1;
215 bfun->insn_offset = (prev->insn_offset
216 + VEC_length (btrace_insn_s, prev->insn));
217 bfun->level = prev->level;
218 }
219
220 return bfun;
221}
222
223/* Update the UP field of a function segment. */
224
225static void
226ftrace_update_caller (struct btrace_function *bfun,
227 struct btrace_function *caller,
228 enum btrace_function_flag flags)
229{
230 if (bfun->up != NULL)
231 ftrace_debug (bfun, "updating caller");
232
233 bfun->up = caller;
234 bfun->flags = flags;
235
236 ftrace_debug (bfun, "set caller");
237}
238
239/* Fix up the caller for all segments of a function. */
240
241static void
242ftrace_fixup_caller (struct btrace_function *bfun,
243 struct btrace_function *caller,
244 enum btrace_function_flag flags)
245{
246 struct btrace_function *prev, *next;
247
248 ftrace_update_caller (bfun, caller, flags);
249
250 /* Update all function segments belonging to the same function. */
251 for (prev = bfun->segment.prev; prev != NULL; prev = prev->segment.prev)
252 ftrace_update_caller (prev, caller, flags);
253
254 for (next = bfun->segment.next; next != NULL; next = next->segment.next)
255 ftrace_update_caller (next, caller, flags);
256}
257
258/* Add a new function segment for a call.
259 CALLER is the chronologically preceding function segment.
260 MFUN and FUN are the symbol information we have for this function. */
261
262static struct btrace_function *
263ftrace_new_call (struct btrace_function *caller,
264 struct minimal_symbol *mfun,
265 struct symbol *fun)
266{
267 struct btrace_function *bfun;
268
269 bfun = ftrace_new_function (caller, mfun, fun);
270 bfun->up = caller;
271 bfun->level += 1;
272
273 ftrace_debug (bfun, "new call");
274
275 return bfun;
276}
277
278/* Add a new function segment for a tail call.
279 CALLER is the chronologically preceding function segment.
280 MFUN and FUN are the symbol information we have for this function. */
281
282static struct btrace_function *
283ftrace_new_tailcall (struct btrace_function *caller,
284 struct minimal_symbol *mfun,
285 struct symbol *fun)
286{
287 struct btrace_function *bfun;
288
289 bfun = ftrace_new_function (caller, mfun, fun);
290 bfun->up = caller;
291 bfun->level += 1;
292 bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;
293
294 ftrace_debug (bfun, "new tail call");
295
296 return bfun;
297}
298
299/* Find the innermost caller in the back trace of BFUN with MFUN/FUN
300 symbol information. */
301
302static struct btrace_function *
303ftrace_find_caller (struct btrace_function *bfun,
304 struct minimal_symbol *mfun,
305 struct symbol *fun)
306{
307 for (; bfun != NULL; bfun = bfun->up)
308 {
309 /* Skip functions with incompatible symbol information. */
310 if (ftrace_function_switched (bfun, mfun, fun))
311 continue;
312
313 /* This is the function segment we're looking for. */
314 break;
315 }
316
317 return bfun;
318}
319
320/* Find the innermost caller in the back trace of BFUN, skipping all
321 function segments that do not end with a call instruction (e.g.
322 tail calls ending with a jump). */
323
324static struct btrace_function *
325ftrace_find_call (struct btrace_function *bfun)
326{
327 for (; bfun != NULL; bfun = bfun->up)
328 {
329 struct btrace_insn *last;
330
331 /* Skip gaps. */
332 if (bfun->errcode != 0)
333 continue;
334
335 last = VEC_last (btrace_insn_s, bfun->insn);
336
337 if (last->iclass == BTRACE_INSN_CALL)
338 break;
339 }
340
341 return bfun;
342}
343
344/* Add a continuation segment for a function into which we return.
345 PREV is the chronologically preceding function segment.
346 MFUN and FUN are the symbol information we have for this function. */
347
348static struct btrace_function *
349ftrace_new_return (struct btrace_function *prev,
350 struct minimal_symbol *mfun,
351 struct symbol *fun)
352{
353 struct btrace_function *bfun, *caller;
354
355 bfun = ftrace_new_function (prev, mfun, fun);
356
357 /* It is important to start at PREV's caller. Otherwise, we might find
358 PREV itself, if PREV is a recursive function. */
359 caller = ftrace_find_caller (prev->up, mfun, fun);
360 if (caller != NULL)
361 {
362 /* The caller of PREV is the preceding btrace function segment in this
363 function instance. */
364 gdb_assert (caller->segment.next == NULL);
365
366 caller->segment.next = bfun;
367 bfun->segment.prev = caller;
368
369 /* Maintain the function level. */
370 bfun->level = caller->level;
371
372 /* Maintain the call stack. */
373 bfun->up = caller->up;
374 bfun->flags = caller->flags;
375
376 ftrace_debug (bfun, "new return");
377 }
378 else
379 {
380 /* We did not find a caller. This could mean that something went
381 wrong or that the call is simply not included in the trace. */
382
383 /* Let's search for some actual call. */
384 caller = ftrace_find_call (prev->up);
385 if (caller == NULL)
386 {
387 /* There is no call in PREV's back trace. We assume that the
388 branch trace did not include it. */
389
390 /* Let's find the topmost function and add a new caller for it.
391 This should handle a series of initial tail calls. */
392 while (prev->up != NULL)
393 prev = prev->up;
394
395 bfun->level = prev->level - 1;
396
397 /* Fix up the call stack for PREV. */
398 ftrace_fixup_caller (prev, bfun, BFUN_UP_LINKS_TO_RET);
399
400 ftrace_debug (bfun, "new return - no caller");
401 }
402 else
403 {
404 /* There is a call in PREV's back trace to which we should have
405 returned but didn't. Let's start a new, separate back trace
406 from PREV's level. */
407 bfun->level = prev->level - 1;
408
409 /* We fix up the back trace for PREV but leave other function segments
410 on the same level as they are.
411 This should handle things like schedule () correctly where we're
412 switching contexts. */
413 prev->up = bfun;
414 prev->flags = BFUN_UP_LINKS_TO_RET;
415
416 ftrace_debug (bfun, "new return - unknown caller");
417 }
418 }
419
420 return bfun;
421}
422
423/* Add a new function segment for a function switch.
424 PREV is the chronologically preceding function segment.
425 MFUN and FUN are the symbol information we have for this function. */
426
427static struct btrace_function *
428ftrace_new_switch (struct btrace_function *prev,
429 struct minimal_symbol *mfun,
430 struct symbol *fun)
431{
432 struct btrace_function *bfun;
433
434 /* This is an unexplained function switch. The call stack will likely
435 be wrong at this point. */
436 bfun = ftrace_new_function (prev, mfun, fun);
437
438 ftrace_debug (bfun, "new switch");
439
440 return bfun;
441}
442
443/* Add a new function segment for a gap in the trace due to a decode error.
444 PREV is the chronologically preceding function segment.
445 ERRCODE is the format-specific error code. */
446
447static struct btrace_function *
448ftrace_new_gap (struct btrace_function *prev, int errcode)
449{
450 struct btrace_function *bfun;
451
452 /* We hijack prev if it was empty. */
453 if (prev != NULL && prev->errcode == 0
454 && VEC_empty (btrace_insn_s, prev->insn))
455 bfun = prev;
456 else
457 bfun = ftrace_new_function (prev, NULL, NULL);
458
459 bfun->errcode = errcode;
460
461 ftrace_debug (bfun, "new gap");
462
463 return bfun;
464}
465
466/* Update BFUN with respect to the instruction at PC. This may create new
467 function segments.
468 Return the chronologically latest function segment, never NULL. */
469
470static struct btrace_function *
471ftrace_update_function (struct btrace_function *bfun, CORE_ADDR pc)
472{
473 struct bound_minimal_symbol bmfun;
474 struct minimal_symbol *mfun;
475 struct symbol *fun;
476 struct btrace_insn *last;
477
478 /* Try to determine the function we're in. We use both types of symbols
479 to avoid surprises when we sometimes get a full symbol and sometimes
480 only a minimal symbol. */
481 fun = find_pc_function (pc);
482 bmfun = lookup_minimal_symbol_by_pc (pc);
483 mfun = bmfun.minsym;
484
485 if (fun == NULL && mfun == NULL)
486 DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));
487
488 /* If we didn't have a function or if we had a gap before, we create one. */
489 if (bfun == NULL || bfun->errcode != 0)
490 return ftrace_new_function (bfun, mfun, fun);
491
492 /* Check the last instruction, if we have one.
493 We do this check first, since it allows us to fill in the call stack
494 links in addition to the normal flow links. */
495 last = NULL;
496 if (!VEC_empty (btrace_insn_s, bfun->insn))
497 last = VEC_last (btrace_insn_s, bfun->insn);
498
499 if (last != NULL)
500 {
501 switch (last->iclass)
502 {
503 case BTRACE_INSN_RETURN:
504 {
505 const char *fname;
506
507 /* On some systems, _dl_runtime_resolve returns to the resolved
508 function instead of jumping to it. From our perspective,
509 however, this is a tailcall.
510 If we treated it as return, we wouldn't be able to find the
511 resolved function in our stack back trace. Hence, we would
512 lose the current stack back trace and start anew with an empty
513 back trace. When the resolved function returns, we would then
514 create a stack back trace with the same function names but
515 different frame id's. This will confuse stepping. */
516 fname = ftrace_print_function_name (bfun);
517 if (strcmp (fname, "_dl_runtime_resolve") == 0)
518 return ftrace_new_tailcall (bfun, mfun, fun);
519
520 return ftrace_new_return (bfun, mfun, fun);
521 }
522
523 case BTRACE_INSN_CALL:
524 /* Ignore calls to the next instruction. They are used for PIC. */
525 if (last->pc + last->size == pc)
526 break;
527
528 return ftrace_new_call (bfun, mfun, fun);
529
530 case BTRACE_INSN_JUMP:
531 {
532 CORE_ADDR start;
533
534 start = get_pc_function_start (pc);
535
536 /* A jump to the start of a function is (typically) a tail call. */
537 if (start == pc)
538 return ftrace_new_tailcall (bfun, mfun, fun);
539
540 /* If we can't determine the function for PC, we treat a jump at
541 the end of the block as tail call if we're switching functions
542 and as an intra-function branch if we don't. */
543 if (start == 0 && ftrace_function_switched (bfun, mfun, fun))
544 return ftrace_new_tailcall (bfun, mfun, fun);
545
546 break;
547 }
548 }
549 }
550
551 /* Check if we're switching functions for some other reason. */
552 if (ftrace_function_switched (bfun, mfun, fun))
553 {
554 DEBUG_FTRACE ("switching from %s in %s at %s",
555 ftrace_print_insn_addr (last),
556 ftrace_print_function_name (bfun),
557 ftrace_print_filename (bfun));
558
559 return ftrace_new_switch (bfun, mfun, fun);
560 }
561
562 return bfun;
563}
564
565/* Add the instruction at PC to BFUN's instructions. */
566
567static void
568ftrace_update_insns (struct btrace_function *bfun,
569 const struct btrace_insn *insn)
23a7fe75 570{
571 VEC_safe_push (btrace_insn_s, bfun->insn, insn);
572
573 if (record_debug > 1)
574 ftrace_debug (bfun, "update insn");
575}
576
577/* Classify the instruction at PC. */
578
579static enum btrace_insn_class
580ftrace_classify_insn (struct gdbarch *gdbarch, CORE_ADDR pc)
581{
582 enum btrace_insn_class iclass;
583
584 iclass = BTRACE_INSN_OTHER;
585 TRY
586 {
587 if (gdbarch_insn_is_call (gdbarch, pc))
588 iclass = BTRACE_INSN_CALL;
589 else if (gdbarch_insn_is_ret (gdbarch, pc))
590 iclass = BTRACE_INSN_RETURN;
591 else if (gdbarch_insn_is_jump (gdbarch, pc))
592 iclass = BTRACE_INSN_JUMP;
593 }
594 CATCH (error, RETURN_MASK_ERROR)
595 {
596 }
597 END_CATCH
598
599 return iclass;
600}
601
602/* Compute the function branch trace from BTS trace. */
603
604static void
605btrace_compute_ftrace_bts (struct thread_info *tp,
606 const struct btrace_data_bts *btrace)
607{
608 struct btrace_thread_info *btinfo;
609 struct btrace_function *begin, *end;
610 struct gdbarch *gdbarch;
611 unsigned int blk, ngaps;
612 int level;
613
614 gdbarch = target_gdbarch ();
615 btinfo = &tp->btrace;
616 begin = btinfo->begin;
617 end = btinfo->end;
618 ngaps = btinfo->ngaps;
619 level = begin != NULL ? -btinfo->level : INT_MAX;
620 blk = VEC_length (btrace_block_s, btrace->blocks);
621
622 while (blk != 0)
623 {
624 btrace_block_s *block;
625 CORE_ADDR pc;
626
627 blk -= 1;
628
629 block = VEC_index (btrace_block_s, btrace->blocks, blk);
630 pc = block->begin;
631
632 for (;;)
633 {
634 struct btrace_insn insn;
635 int size;
636
637 /* We should hit the end of the block. Warn if we went too far. */
638 if (block->end < pc)
639 {
640 /* Indicate the gap in the trace. */
641 end = ftrace_new_gap (end, BDE_BTS_OVERFLOW);
642 if (begin == NULL)
643 begin = end;
644
645 ngaps += 1;
646
647 warning (_("Recorded trace may be corrupted at instruction "
648 "%u (pc = %s)."), end->insn_offset - 1,
649 core_addr_to_string_nz (pc));
650
651 break;
652 }
653
654 end = ftrace_update_function (end, pc);
655 if (begin == NULL)
656 begin = end;
657
658 /* Maintain the function level offset.
659 For all but the last block, we do it here. */
660 if (blk != 0)
661 level = std::min (level, end->level);
662
663 size = 0;
664 TRY
665 {
666 size = gdb_insn_length (gdbarch, pc);
667 }
668 CATCH (error, RETURN_MASK_ERROR)
669 {
670 }
671 END_CATCH
7d5c24b3
MM
672
673 insn.pc = pc;
674 insn.size = size;
675 insn.iclass = ftrace_classify_insn (gdbarch, pc);
da8c46d2 676 insn.flags = 0;
7d5c24b3
MM
677
678 ftrace_update_insns (end, &insn);
23a7fe75
MM
679
680 /* We're done once we pushed the instruction at the end. */
681 if (block->end == pc)
682 break;
683
7d5c24b3 684 /* We can't continue if we fail to compute the size. */
23a7fe75
MM
685 if (size <= 0)
686 {
31fd9caa
MM
687 /* Indicate the gap in the trace. We just added INSN so we're
688 not at the beginning. */
689 end = ftrace_new_gap (end, BDE_BTS_INSN_SIZE);
690 ngaps += 1;
691
63ab433e
MM
692 warning (_("Recorded trace may be incomplete at instruction %u "
693 "(pc = %s)."), end->insn_offset - 1,
694 core_addr_to_string_nz (pc));
695
23a7fe75
MM
696 break;
697 }
698
699 pc += size;
8710b709
MM
700
701 /* Maintain the function level offset.
702 For the last block, we do it here to not consider the last
703 instruction.
704 Since the last instruction corresponds to the current instruction
705 and is not really part of the execution history, it shouldn't
706 affect the level. */
707 if (blk == 0)
708 level = std::min (level, end->level);
709 }
710 }
711
712 btinfo->begin = begin;
713 btinfo->end = end;
714 btinfo->ngaps = ngaps;
715
716 /* LEVEL is the minimal function level of all btrace function segments.
717 Define the global level offset to -LEVEL so all function levels are
718 normalized to start at zero. */
719 btinfo->level = -level;
720}
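/* Worked example of the normalization above (hypothetical trace): if the
   recorded segments end up at levels -2, -1, 0 and 1, then LEVEL is -2
   and btinfo->level becomes 2, so the outermost recorded frame is
   presented at level 0.  */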
721
722#if defined (HAVE_LIBIPT)
723
724static enum btrace_insn_class
725pt_reclassify_insn (enum pt_insn_class iclass)
726{
727 switch (iclass)
728 {
729 case ptic_call:
730 return BTRACE_INSN_CALL;
731
732 case ptic_return:
733 return BTRACE_INSN_RETURN;
734
735 case ptic_jump:
736 return BTRACE_INSN_JUMP;
737
738 default:
739 return BTRACE_INSN_OTHER;
740 }
741}
742
743/* Return the btrace instruction flags for INSN. */
744
745static btrace_insn_flags
746pt_btrace_insn_flags (const struct pt_insn *insn)
747{
748 btrace_insn_flags flags = 0;
749
750 if (insn->speculative)
751 flags |= BTRACE_INSN_FLAG_SPECULATIVE;
752
753 return flags;
754}
755
b20a6524
MM
756/* Add function branch trace using DECODER. */
757
758static void
759ftrace_add_pt (struct pt_insn_decoder *decoder,
760 struct btrace_function **pbegin,
761 struct btrace_function **pend, int *plevel,
762 unsigned int *ngaps)
763{
764 struct btrace_function *begin, *end, *upd;
765 uint64_t offset;
766 int errcode;
767
768 begin = *pbegin;
769 end = *pend;
770 for (;;)
771 {
772 struct btrace_insn btinsn;
773 struct pt_insn insn;
774
775 errcode = pt_insn_sync_forward (decoder);
776 if (errcode < 0)
777 {
778 if (errcode != -pte_eos)
779 warning (_("Failed to synchronize onto the Intel Processor "
780 "Trace stream: %s."), pt_errstr (pt_errcode (errcode)));
781 break;
782 }
783
784 memset (&btinsn, 0, sizeof (btinsn));
785 for (;;)
786 {
787 errcode = pt_insn_next (decoder, &insn, sizeof(insn));
788 if (errcode < 0)
789 break;
790
791 /* Look for gaps in the trace - unless we're at the beginning. */
792 if (begin != NULL)
793 {
794 /* Tracing is disabled and re-enabled each time we enter the
795 kernel. Most times, we continue from the same instruction we
796 stopped before. This is indicated via the RESUMED instruction
797 flag. The ENABLED instruction flag means that we continued
798 from some other instruction. Indicate this as a trace gap. */
799 if (insn.enabled)
800 {
801 *pend = end = ftrace_new_gap (end, BDE_PT_DISABLED);
802 *ngaps += 1;
803
804 pt_insn_get_offset (decoder, &offset);
805
806 warning (_("Non-contiguous trace at instruction %u (offset "
807 "= 0x%" PRIx64 ", pc = 0x%" PRIx64 ")."),
808 end->insn_offset - 1, offset, insn.ip);
809 }
810 }
811
812 /* Indicate trace overflows. */
813 if (insn.resynced)
814 {
815 *pend = end = ftrace_new_gap (end, BDE_PT_OVERFLOW);
816 if (begin == NULL)
817 *pbegin = begin = end;
63ab433e 818
b61ce85c 819 *ngaps += 1;
63ab433e 820
b61ce85c
MM
821 pt_insn_get_offset (decoder, &offset);
822
823 warning (_("Overflow at instruction %u (offset = 0x%" PRIx64
824 ", pc = 0x%" PRIx64 ")."), end->insn_offset - 1,
825 offset, insn.ip);
b20a6524
MM
826 }
827
828 upd = ftrace_update_function (end, insn.ip);
829 if (upd != end)
830 {
831 *pend = end = upd;
832
833 if (begin == NULL)
834 *pbegin = begin = upd;
835 }
836
837 /* Maintain the function level offset. */
838 *plevel = std::min (*plevel, end->level);
839
840 btinsn.pc = (CORE_ADDR) insn.ip;
841 btinsn.size = (gdb_byte) insn.size;
842 btinsn.iclass = pt_reclassify_insn (insn.iclass);
843 btinsn.flags = pt_btrace_insn_flags (&insn);
844
845 ftrace_update_insns (end, &btinsn);
846 }
847
848 if (errcode == -pte_eos)
849 break;
850
851 /* Indicate the gap in the trace. */
852 *pend = end = ftrace_new_gap (end, errcode);
853 if (begin == NULL)
854 *pbegin = begin = end;
855 *ngaps += 1;
856
857 pt_insn_get_offset (decoder, &offset);
858
859 warning (_("Decode error (%d) at instruction %u (offset = 0x%" PRIx64
860 ", pc = 0x%" PRIx64 "): %s."), errcode, end->insn_offset - 1,
861 offset, insn.ip, pt_errstr (pt_errcode (errcode)));
862 }
863}
864
865/* A callback function to allow the trace decoder to read the inferior's
866 memory. */
867
868static int
869btrace_pt_readmem_callback (gdb_byte *buffer, size_t size,
870 const struct pt_asid *asid, uint64_t pc,
871 void *context)
872{
873 int result, errcode;
874
875 result = (int) size;
876 TRY
877 {
878 errcode = target_read_code ((CORE_ADDR) pc, buffer, size);
879 if (errcode != 0)
880 result = -pte_nomap;
881 }
882 CATCH (error, RETURN_MASK_ERROR)
883 {
884 result = -pte_nomap;
885 }
886 END_CATCH
887
888 return result;
889}
890
891/* Translate the vendor from one enum to another. */
892
893static enum pt_cpu_vendor
894pt_translate_cpu_vendor (enum btrace_cpu_vendor vendor)
895{
896 switch (vendor)
897 {
898 default:
899 return pcv_unknown;
900
901 case CV_INTEL:
902 return pcv_intel;
903 }
904}
905
906/* Finalize the function branch trace after decode. */
907
908static void btrace_finalize_ftrace_pt (struct pt_insn_decoder *decoder,
909 struct thread_info *tp, int level)
910{
911 pt_insn_free_decoder (decoder);
912
913 /* LEVEL is the minimal function level of all btrace function segments.
914 Define the global level offset to -LEVEL so all function levels are
915 normalized to start at zero. */
916 tp->btrace.level = -level;
917
918 /* Add a single last instruction entry for the current PC.
919 This allows us to compute the backtrace at the current PC using both
920 standard unwind and btrace unwind.
921 This extra entry is ignored by all record commands. */
922 btrace_add_pc (tp);
923}
924
925/* Compute the function branch trace from Intel Processor Trace
926 format. */
927
928static void
929btrace_compute_ftrace_pt (struct thread_info *tp,
930 const struct btrace_data_pt *btrace)
931{
932 struct btrace_thread_info *btinfo;
933 struct pt_insn_decoder *decoder;
934 struct pt_config config;
935 int level, errcode;
936
937 if (btrace->size == 0)
938 return;
939
940 btinfo = &tp->btrace;
941 level = btinfo->begin != NULL ? -btinfo->level : INT_MAX;
942
943 pt_config_init(&config);
944 config.begin = btrace->data;
945 config.end = btrace->data + btrace->size;
946
947 config.cpu.vendor = pt_translate_cpu_vendor (btrace->config.cpu.vendor);
948 config.cpu.family = btrace->config.cpu.family;
949 config.cpu.model = btrace->config.cpu.model;
950 config.cpu.stepping = btrace->config.cpu.stepping;
951
952 errcode = pt_cpu_errata (&config.errata, &config.cpu);
953 if (errcode < 0)
954 error (_("Failed to configure the Intel Processor Trace decoder: %s."),
955 pt_errstr (pt_errcode (errcode)));
956
957 decoder = pt_insn_alloc_decoder (&config);
958 if (decoder == NULL)
959 error (_("Failed to allocate the Intel Processor Trace decoder."));
960
961 TRY
962 {
963 struct pt_image *image;
964
965 image = pt_insn_get_image(decoder);
966 if (image == NULL)
967 error (_("Failed to configure the Intel Processor Trace decoder."));
968
969 errcode = pt_image_set_callback(image, btrace_pt_readmem_callback, NULL);
970 if (errcode < 0)
971 error (_("Failed to configure the Intel Processor Trace decoder: "
972 "%s."), pt_errstr (pt_errcode (errcode)));
973
974 ftrace_add_pt (decoder, &btinfo->begin, &btinfo->end, &level,
975 &btinfo->ngaps);
976 }
977 CATCH (error, RETURN_MASK_ALL)
978 {
979 /* Indicate a gap in the trace if we quit trace processing. */
980 if (error.reason == RETURN_QUIT && btinfo->end != NULL)
981 {
982 btinfo->end = ftrace_new_gap (btinfo->end, BDE_PT_USER_QUIT);
983 btinfo->ngaps++;
984 }
985
986 btrace_finalize_ftrace_pt (decoder, tp, level);
987
988 throw_exception (error);
989 }
990 END_CATCH
991
992 btrace_finalize_ftrace_pt (decoder, tp, level);
993}
994
995#else /* defined (HAVE_LIBIPT) */
996
997static void
998btrace_compute_ftrace_pt (struct thread_info *tp,
999 const struct btrace_data_pt *btrace)
1000{
1001 internal_error (__FILE__, __LINE__, _("Unexpected branch trace format."));
1002}
1003
1004#endif /* defined (HAVE_LIBIPT) */
1005
1006/* Compute the function branch trace from a block branch trace BTRACE for
1007 a thread given by BTINFO. */
1008
1009static void
76235df1 1010btrace_compute_ftrace (struct thread_info *tp, struct btrace_data *btrace)
734b0e4b
MM
1011{
1012 DEBUG ("compute ftrace");
1013
1014 switch (btrace->format)
1015 {
1016 case BTRACE_FORMAT_NONE:
1017 return;
1018
1019 case BTRACE_FORMAT_BTS:
1020 btrace_compute_ftrace_bts (tp, &btrace->variant.bts);
1021 return;
1022
1023 case BTRACE_FORMAT_PT:
1024 btrace_compute_ftrace_pt (tp, &btrace->variant.pt);
1025 return;
1026 }
1027
1028 internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
1029}
1030
1031/* Add an entry for the current PC. */
1032
1033static void
1034btrace_add_pc (struct thread_info *tp)
1035{
1036 struct btrace_data btrace;
1037 struct btrace_block *block;
1038 struct regcache *regcache;
1039 struct cleanup *cleanup;
1040 CORE_ADDR pc;
1041
1042 regcache = get_thread_regcache (tp->ptid);
1043 pc = regcache_read_pc (regcache);
1044
1045 btrace_data_init (&btrace);
1046 btrace.format = BTRACE_FORMAT_BTS;
1047 btrace.variant.bts.blocks = NULL;
1048
1049 cleanup = make_cleanup_btrace_data (&btrace);
1050
1051 block = VEC_safe_push (btrace_block_s, btrace.variant.bts.blocks, NULL);
1052 block->begin = pc;
1053 block->end = pc;
1054
1055 btrace_compute_ftrace (tp, &btrace);
1056
1057 do_cleanups (cleanup);
1058}
1059
1060/* See btrace.h. */
1061
1062void
1063btrace_enable (struct thread_info *tp, const struct btrace_config *conf)
1064{
1065 if (tp->btrace.target != NULL)
1066 return;
1067
46a3515b
MM
1068#if !defined (HAVE_LIBIPT)
1069 if (conf->format == BTRACE_FORMAT_PT)
bc504a31 1070 error (_("GDB does not support Intel Processor Trace."));
46a3515b
MM
1071#endif /* !defined (HAVE_LIBIPT) */
1072
f4abbc16 1073 if (!target_supports_btrace (conf->format))
02d27625
MM
1074 error (_("Target does not support branch tracing."));
1075
43792cf0
PA
1076 DEBUG ("enable thread %s (%s)", print_thread_id (tp),
1077 target_pid_to_str (tp->ptid));
02d27625 1078
f4abbc16 1079 tp->btrace.target = target_enable_btrace (tp->ptid, conf);
6e07b1d2
MM
1080
1081 /* Add an entry for the current PC so we start tracing from where we
1082 enabled it. */
1083 if (tp->btrace.target != NULL)
1084 btrace_add_pc (tp);
02d27625
MM
1085}
1086
1087/* See btrace.h. */
1088
f4abbc16
MM
1089const struct btrace_config *
1090btrace_conf (const struct btrace_thread_info *btinfo)
1091{
1092 if (btinfo->target == NULL)
1093 return NULL;
1094
1095 return target_btrace_conf (btinfo->target);
1096}
1097
1098/* See btrace.h. */
1099
02d27625
MM
1100void
1101btrace_disable (struct thread_info *tp)
1102{
1103 struct btrace_thread_info *btp = &tp->btrace;
1104 int errcode = 0;
1105
1106 if (btp->target == NULL)
1107 return;
1108
43792cf0
PA
1109 DEBUG ("disable thread %s (%s)", print_thread_id (tp),
1110 target_pid_to_str (tp->ptid));
02d27625
MM
1111
1112 target_disable_btrace (btp->target);
1113 btp->target = NULL;
1114
1115 btrace_clear (tp);
1116}
1117
1118/* See btrace.h. */
1119
1120void
1121btrace_teardown (struct thread_info *tp)
1122{
1123 struct btrace_thread_info *btp = &tp->btrace;
1124 int errcode = 0;
1125
1126 if (btp->target == NULL)
1127 return;
1128
43792cf0
PA
1129 DEBUG ("teardown thread %s (%s)", print_thread_id (tp),
1130 target_pid_to_str (tp->ptid));
02d27625
MM
1131
1132 target_teardown_btrace (btp->target);
1133 btp->target = NULL;
1134
1135 btrace_clear (tp);
1136}
1137
1138/* Stitch branch trace in BTS format. */
1139
1140static int
1141btrace_stitch_bts (struct btrace_data_bts *btrace, struct thread_info *tp)
1142{
1143 struct btrace_thread_info *btinfo;
1144 struct btrace_function *last_bfun;
1145 struct btrace_insn *last_insn;
1146 btrace_block_s *first_new_block;
1147
31fd9caa 1148 btinfo = &tp->btrace;
969c39fb
MM
1149 last_bfun = btinfo->end;
1150 gdb_assert (last_bfun != NULL);
31fd9caa
MM
1151 gdb_assert (!VEC_empty (btrace_block_s, btrace->blocks));
1152
1153 /* If the existing trace ends with a gap, we just glue the traces
1154 together. We need to drop the last (i.e. chronologically first) block
1155 of the new trace, though, since we can't fill in the start address.  */
1156 if (VEC_empty (btrace_insn_s, last_bfun->insn))
1157 {
1158 VEC_pop (btrace_block_s, btrace->blocks);
1159 return 0;
1160 }
969c39fb
MM
1161
1162 /* Beware that block trace starts with the most recent block, so the
1163 chronologically first block in the new trace is the last block in
1164 the new trace's block vector. */
734b0e4b 1165 first_new_block = VEC_last (btrace_block_s, btrace->blocks);
969c39fb
MM
1166 last_insn = VEC_last (btrace_insn_s, last_bfun->insn);
1167
1168 /* If the current PC at the end of the block is the same as in our current
1169 trace, there are two explanations:
1170 1. we executed the instruction and some branch brought us back.
1171 2. we have not made any progress.
1172 In the first case, the delta trace vector should contain at least two
1173 entries.
1174 In the second case, the delta trace vector should contain exactly one
1175 entry for the partial block containing the current PC. Remove it. */
1176 if (first_new_block->end == last_insn->pc
734b0e4b 1177 && VEC_length (btrace_block_s, btrace->blocks) == 1)
969c39fb 1178 {
734b0e4b 1179 VEC_pop (btrace_block_s, btrace->blocks);
969c39fb
MM
1180 return 0;
1181 }
1182
1183 DEBUG ("stitching %s to %s", ftrace_print_insn_addr (last_insn),
1184 core_addr_to_string_nz (first_new_block->end));
1185
1186 /* Do a simple sanity check to make sure we don't accidentally end up
1187 with a bad block. This should not occur in practice. */
1188 if (first_new_block->end < last_insn->pc)
1189 {
1190 warning (_("Error while trying to read delta trace. Falling back to "
1191 "a full read."));
1192 return -1;
1193 }
1194
1195 /* We adjust the last block to start at the end of our current trace. */
1196 gdb_assert (first_new_block->begin == 0);
1197 first_new_block->begin = last_insn->pc;
1198
1199 /* We simply pop the last insn so we can insert it again as part of
1200 the normal branch trace computation.
1201 Since instruction iterators are based on indices in the instructions
1202 vector, we don't leave any pointers dangling. */
1203 DEBUG ("pruning insn at %s for stitching",
1204 ftrace_print_insn_addr (last_insn));
1205
1206 VEC_pop (btrace_insn_s, last_bfun->insn);
1207
1208 /* The instructions vector may become empty temporarily if this has
1209 been the only instruction in this function segment.
1210 This violates the invariant but will be remedied shortly by
1211 btrace_compute_ftrace when we add the new trace. */
31fd9caa
MM
1212
1213 /* The only case where this would hurt is if the entire trace consisted
1214 of just that one instruction. If we remove it, we might turn the now
1215 empty btrace function segment into a gap. But we don't want gaps at
1216 the beginning. To avoid this, we remove the entire old trace. */
1217 if (last_bfun == btinfo->begin && VEC_empty (btrace_insn_s, last_bfun->insn))
1218 btrace_clear (tp);
1219
969c39fb
MM
1220 return 0;
1221}
1222
734b0e4b
MM
1223/* Adjust the block trace in order to stitch old and new trace together.
1224 BTRACE is the new delta trace between the last and the current stop.
31fd9caa
MM
1225 TP is the traced thread.
1226 May modify BTRACE as well as the existing trace in TP.
734b0e4b
MM
1227 Return 0 on success, -1 otherwise. */
1228
1229static int
31fd9caa 1230btrace_stitch_trace (struct btrace_data *btrace, struct thread_info *tp)
734b0e4b
MM
1231{
1232 /* If we don't have trace, there's nothing to do. */
1233 if (btrace_data_empty (btrace))
1234 return 0;
1235
1236 switch (btrace->format)
1237 {
1238 case BTRACE_FORMAT_NONE:
1239 return 0;
1240
1241 case BTRACE_FORMAT_BTS:
31fd9caa 1242 return btrace_stitch_bts (&btrace->variant.bts, tp);
b20a6524
MM
1243
1244 case BTRACE_FORMAT_PT:
1245 /* Delta reads are not supported. */
1246 return -1;
734b0e4b
MM
1247 }
1248
1249 internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
1250}
1251
969c39fb
MM
1252/* Clear the branch trace histories in BTINFO. */
1253
1254static void
1255btrace_clear_history (struct btrace_thread_info *btinfo)
1256{
1257 xfree (btinfo->insn_history);
1258 xfree (btinfo->call_history);
1259 xfree (btinfo->replay);
1260
1261 btinfo->insn_history = NULL;
1262 btinfo->call_history = NULL;
1263 btinfo->replay = NULL;
1264}
1265
b0627500
MM
1266/* Clear the branch trace maintenance histories in BTINFO. */
1267
1268static void
1269btrace_maint_clear (struct btrace_thread_info *btinfo)
1270{
1271 switch (btinfo->data.format)
1272 {
1273 default:
1274 break;
1275
1276 case BTRACE_FORMAT_BTS:
1277 btinfo->maint.variant.bts.packet_history.begin = 0;
1278 btinfo->maint.variant.bts.packet_history.end = 0;
1279 break;
1280
1281#if defined (HAVE_LIBIPT)
1282 case BTRACE_FORMAT_PT:
1283 xfree (btinfo->maint.variant.pt.packets);
1284
1285 btinfo->maint.variant.pt.packets = NULL;
1286 btinfo->maint.variant.pt.packet_history.begin = 0;
1287 btinfo->maint.variant.pt.packet_history.end = 0;
1288 break;
1289#endif /* defined (HAVE_LIBIPT) */
1290 }
1291}
1292
02d27625
MM
1293/* See btrace.h. */
1294
1295void
1296btrace_fetch (struct thread_info *tp)
1297{
1298 struct btrace_thread_info *btinfo;
969c39fb 1299 struct btrace_target_info *tinfo;
734b0e4b 1300 struct btrace_data btrace;
23a7fe75 1301 struct cleanup *cleanup;
969c39fb 1302 int errcode;
02d27625 1303
43792cf0
PA
1304 DEBUG ("fetch thread %s (%s)", print_thread_id (tp),
1305 target_pid_to_str (tp->ptid));
02d27625
MM
1306
1307 btinfo = &tp->btrace;
969c39fb
MM
1308 tinfo = btinfo->target;
1309 if (tinfo == NULL)
1310 return;
1311
1312 /* There's no way we could get new trace while replaying.
1313 On the other hand, delta trace would return a partial record with the
1314 current PC, which is the replay PC, not the last PC, as expected. */
1315 if (btinfo->replay != NULL)
02d27625
MM
1316 return;
1317
734b0e4b
MM
1318 btrace_data_init (&btrace);
1319 cleanup = make_cleanup_btrace_data (&btrace);
02d27625 1320
969c39fb
MM
1321 /* Let's first try to extend the trace we already have. */
1322 if (btinfo->end != NULL)
1323 {
1324 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_DELTA);
1325 if (errcode == 0)
1326 {
1327 /* Success. Let's try to stitch the traces together. */
31fd9caa 1328 errcode = btrace_stitch_trace (&btrace, tp);
969c39fb
MM
1329 }
1330 else
1331 {
1332 /* We failed to read delta trace. Let's try to read new trace. */
1333 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_NEW);
1334
1335 /* If we got any new trace, discard what we have. */
734b0e4b 1336 if (errcode == 0 && !btrace_data_empty (&btrace))
969c39fb
MM
1337 btrace_clear (tp);
1338 }
1339
1340 /* If we were not able to read the trace, we start over. */
1341 if (errcode != 0)
1342 {
1343 btrace_clear (tp);
1344 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
1345 }
1346 }
1347 else
1348 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
1349
1350 /* If we were not able to read the branch trace, signal an error. */
1351 if (errcode != 0)
1352 error (_("Failed to read branch trace."));
1353
1354 /* Compute the trace, provided we have any. */
734b0e4b 1355 if (!btrace_data_empty (&btrace))
23a7fe75 1356 {
9be54cae
MM
1357 /* Store the raw trace data. The stored data will be cleared in
1358 btrace_clear, so we always append the new trace. */
1359 btrace_data_append (&btinfo->data, &btrace);
b0627500 1360 btrace_maint_clear (btinfo);
9be54cae 1361
969c39fb 1362 btrace_clear_history (btinfo);
76235df1 1363 btrace_compute_ftrace (tp, &btrace);
23a7fe75 1364 }
02d27625 1365
23a7fe75 1366 do_cleanups (cleanup);
02d27625
MM
1367}
1368
1369/* See btrace.h. */
1370
1371void
1372btrace_clear (struct thread_info *tp)
1373{
1374 struct btrace_thread_info *btinfo;
23a7fe75 1375 struct btrace_function *it, *trash;
02d27625 1376
43792cf0
PA
1377 DEBUG ("clear thread %s (%s)", print_thread_id (tp),
1378 target_pid_to_str (tp->ptid));
02d27625 1379
0b722aec
MM
1380 /* Make sure btrace frames that may hold a pointer into the branch
1381 trace data are destroyed. */
1382 reinit_frame_cache ();
1383
02d27625
MM
1384 btinfo = &tp->btrace;
1385
23a7fe75
MM
1386 it = btinfo->begin;
1387 while (it != NULL)
1388 {
1389 trash = it;
1390 it = it->flow.next;
02d27625 1391
23a7fe75
MM
1392 xfree (trash);
1393 }
1394
1395 btinfo->begin = NULL;
1396 btinfo->end = NULL;
31fd9caa 1397 btinfo->ngaps = 0;
23a7fe75 1398
b0627500
MM
1399 /* Must clear the maint data before - it depends on BTINFO->DATA. */
1400 btrace_maint_clear (btinfo);
9be54cae 1401 btrace_data_clear (&btinfo->data);
969c39fb 1402 btrace_clear_history (btinfo);
02d27625
MM
1403}
1404
1405/* See btrace.h. */
1406
1407void
1408btrace_free_objfile (struct objfile *objfile)
1409{
1410 struct thread_info *tp;
1411
1412 DEBUG ("free objfile");
1413
034f788c 1414 ALL_NON_EXITED_THREADS (tp)
02d27625
MM
1415 btrace_clear (tp);
1416}
c12a2917
MM
1417
1418#if defined (HAVE_LIBEXPAT)
1419
1420/* Check the btrace document version. */
1421
1422static void
1423check_xml_btrace_version (struct gdb_xml_parser *parser,
1424 const struct gdb_xml_element *element,
1425 void *user_data, VEC (gdb_xml_value_s) *attributes)
1426{
9a3c8263
SM
1427 const char *version
1428 = (const char *) xml_find_attribute (attributes, "version")->value;
c12a2917
MM
1429
1430 if (strcmp (version, "1.0") != 0)
1431 gdb_xml_error (parser, _("Unsupported btrace version: \"%s\""), version);
1432}
1433
1434/* Parse a btrace "block" xml record. */
1435
1436static void
1437parse_xml_btrace_block (struct gdb_xml_parser *parser,
1438 const struct gdb_xml_element *element,
1439 void *user_data, VEC (gdb_xml_value_s) *attributes)
1440{
734b0e4b 1441 struct btrace_data *btrace;
c12a2917
MM
1442 struct btrace_block *block;
1443 ULONGEST *begin, *end;
1444
9a3c8263 1445 btrace = (struct btrace_data *) user_data;
734b0e4b
MM
1446
1447 switch (btrace->format)
1448 {
1449 case BTRACE_FORMAT_BTS:
1450 break;
1451
1452 case BTRACE_FORMAT_NONE:
1453 btrace->format = BTRACE_FORMAT_BTS;
1454 btrace->variant.bts.blocks = NULL;
1455 break;
1456
1457 default:
1458 gdb_xml_error (parser, _("Btrace format error."));
1459 }
c12a2917 1460
bc84451b
SM
1461 begin = (ULONGEST *) xml_find_attribute (attributes, "begin")->value;
1462 end = (ULONGEST *) xml_find_attribute (attributes, "end")->value;
c12a2917 1463
734b0e4b 1464 block = VEC_safe_push (btrace_block_s, btrace->variant.bts.blocks, NULL);
c12a2917
MM
1465 block->begin = *begin;
1466 block->end = *end;
1467}
1468
b20a6524
MM
1469/* Parse a "raw" xml record. */
1470
1471static void
1472parse_xml_raw (struct gdb_xml_parser *parser, const char *body_text,
e7b01ce0 1473 gdb_byte **pdata, size_t *psize)
b20a6524
MM
1474{
1475 struct cleanup *cleanup;
1476 gdb_byte *data, *bin;
e7b01ce0 1477 size_t len, size;
b20a6524
MM
1478
1479 len = strlen (body_text);
e7b01ce0 1480 if (len % 2 != 0)
b20a6524
MM
1481 gdb_xml_error (parser, _("Bad raw data size."));
1482
e7b01ce0
MM
1483 size = len / 2;
1484
224c3ddb 1485 bin = data = (gdb_byte *) xmalloc (size);
b20a6524
MM
1486 cleanup = make_cleanup (xfree, data);
1487
1488 /* We use hex encoding - see common/rsp-low.h. */
1489 while (len > 0)
1490 {
1491 char hi, lo;
1492
1493 hi = *body_text++;
1494 lo = *body_text++;
1495
1496 if (hi == 0 || lo == 0)
1497 gdb_xml_error (parser, _("Bad hex encoding."));
1498
1499 *bin++ = fromhex (hi) * 16 + fromhex (lo);
1500 len -= 2;
1501 }
1502
1503 discard_cleanups (cleanup);
1504
1505 *pdata = data;
1506 *psize = size;
1507}
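/* Decoding sketch for parse_xml_raw above (made-up input): a body text of
   "48656c6c6f" has length 10, so size is 5; each pair of hex digits is
   combined as fromhex (hi) * 16 + fromhex (lo), yielding the bytes
   0x48 0x65 0x6c 0x6c 0x6f.  */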
1508
1509/* Parse a btrace pt-config "cpu" xml record. */
1510
1511static void
1512parse_xml_btrace_pt_config_cpu (struct gdb_xml_parser *parser,
1513 const struct gdb_xml_element *element,
1514 void *user_data,
1515 VEC (gdb_xml_value_s) *attributes)
1516{
1517 struct btrace_data *btrace;
1518 const char *vendor;
1519 ULONGEST *family, *model, *stepping;
1520
9a3c8263
SM
1521 vendor = (const char *) xml_find_attribute (attributes, "vendor")->value;
1522 family = (ULONGEST *) xml_find_attribute (attributes, "family")->value;
1523 model = (ULONGEST *) xml_find_attribute (attributes, "model")->value;
1524 stepping = (ULONGEST *) xml_find_attribute (attributes, "stepping")->value;
b20a6524 1525
9a3c8263 1526 btrace = (struct btrace_data *) user_data;
b20a6524
MM
1527
1528 if (strcmp (vendor, "GenuineIntel") == 0)
1529 btrace->variant.pt.config.cpu.vendor = CV_INTEL;
1530
1531 btrace->variant.pt.config.cpu.family = *family;
1532 btrace->variant.pt.config.cpu.model = *model;
1533 btrace->variant.pt.config.cpu.stepping = *stepping;
1534}
1535
1536/* Parse a btrace pt "raw" xml record. */
1537
1538static void
1539parse_xml_btrace_pt_raw (struct gdb_xml_parser *parser,
1540 const struct gdb_xml_element *element,
1541 void *user_data, const char *body_text)
1542{
1543 struct btrace_data *btrace;
1544
9a3c8263 1545 btrace = (struct btrace_data *) user_data;
b20a6524
MM
1546 parse_xml_raw (parser, body_text, &btrace->variant.pt.data,
1547 &btrace->variant.pt.size);
1548}
1549
1550/* Parse a btrace "pt" xml record. */
1551
1552static void
1553parse_xml_btrace_pt (struct gdb_xml_parser *parser,
1554 const struct gdb_xml_element *element,
1555 void *user_data, VEC (gdb_xml_value_s) *attributes)
1556{
1557 struct btrace_data *btrace;
1558
9a3c8263 1559 btrace = (struct btrace_data *) user_data;
b20a6524
MM
1560 btrace->format = BTRACE_FORMAT_PT;
1561 btrace->variant.pt.config.cpu.vendor = CV_UNKNOWN;
1562 btrace->variant.pt.data = NULL;
1563 btrace->variant.pt.size = 0;
1564}
1565
c12a2917
MM
1566static const struct gdb_xml_attribute block_attributes[] = {
1567 { "begin", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1568 { "end", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1569 { NULL, GDB_XML_AF_NONE, NULL, NULL }
1570};
1571
b20a6524
MM
1572static const struct gdb_xml_attribute btrace_pt_config_cpu_attributes[] = {
1573 { "vendor", GDB_XML_AF_NONE, NULL, NULL },
1574 { "family", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1575 { "model", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1576 { "stepping", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1577 { NULL, GDB_XML_AF_NONE, NULL, NULL }
1578};
1579
1580static const struct gdb_xml_element btrace_pt_config_children[] = {
1581 { "cpu", btrace_pt_config_cpu_attributes, NULL, GDB_XML_EF_OPTIONAL,
1582 parse_xml_btrace_pt_config_cpu, NULL },
1583 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1584};
1585
1586static const struct gdb_xml_element btrace_pt_children[] = {
1587 { "pt-config", NULL, btrace_pt_config_children, GDB_XML_EF_OPTIONAL, NULL,
1588 NULL },
1589 { "raw", NULL, NULL, GDB_XML_EF_OPTIONAL, NULL, parse_xml_btrace_pt_raw },
1590 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1591};
1592
c12a2917
MM
1593static const struct gdb_xml_attribute btrace_attributes[] = {
1594 { "version", GDB_XML_AF_NONE, NULL, NULL },
1595 { NULL, GDB_XML_AF_NONE, NULL, NULL }
1596};
1597
1598static const struct gdb_xml_element btrace_children[] = {
1599 { "block", block_attributes, NULL,
1600 GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL, parse_xml_btrace_block, NULL },
b20a6524
MM
1601 { "pt", NULL, btrace_pt_children, GDB_XML_EF_OPTIONAL, parse_xml_btrace_pt,
1602 NULL },
c12a2917
MM
1603 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1604};
1605
1606static const struct gdb_xml_element btrace_elements[] = {
1607 { "btrace", btrace_attributes, btrace_children, GDB_XML_EF_NONE,
1608 check_xml_btrace_version, NULL },
1609 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1610};
1611
1612#endif /* defined (HAVE_LIBEXPAT) */
1613
1614/* See btrace.h. */
1615
734b0e4b
MM
1616void
1617parse_xml_btrace (struct btrace_data *btrace, const char *buffer)
c12a2917 1618{
c12a2917
MM
1619 struct cleanup *cleanup;
1620 int errcode;
1621
1622#if defined (HAVE_LIBEXPAT)
1623
734b0e4b
MM
1624 btrace->format = BTRACE_FORMAT_NONE;
1625
1626 cleanup = make_cleanup_btrace_data (btrace);
c12a2917 1627 errcode = gdb_xml_parse_quick (_("btrace"), "btrace.dtd", btrace_elements,
734b0e4b 1628 buffer, btrace);
c12a2917 1629 if (errcode != 0)
969c39fb 1630 error (_("Error parsing branch trace."));
c12a2917
MM
1631
1632 /* Keep parse results. */
1633 discard_cleanups (cleanup);
1634
1635#else /* !defined (HAVE_LIBEXPAT) */
1636
1637 error (_("Cannot process branch trace. XML parsing is not supported."));
1638
1639#endif /* !defined (HAVE_LIBEXPAT) */
c12a2917 1640}
23a7fe75 1641
f4abbc16
MM
1642#if defined (HAVE_LIBEXPAT)
1643
1644/* Parse a btrace-conf "bts" xml record. */
1645
1646static void
1647parse_xml_btrace_conf_bts (struct gdb_xml_parser *parser,
1648 const struct gdb_xml_element *element,
1649 void *user_data, VEC (gdb_xml_value_s) *attributes)
1650{
1651 struct btrace_config *conf;
d33501a5 1652 struct gdb_xml_value *size;
f4abbc16 1653
9a3c8263 1654 conf = (struct btrace_config *) user_data;
f4abbc16 1655 conf->format = BTRACE_FORMAT_BTS;
d33501a5
MM
1656 conf->bts.size = 0;
1657
1658 size = xml_find_attribute (attributes, "size");
1659 if (size != NULL)
b20a6524 1660 conf->bts.size = (unsigned int) *(ULONGEST *) size->value;
f4abbc16
MM
1661}
1662
b20a6524
MM
1663/* Parse a btrace-conf "pt" xml record. */
1664
1665static void
1666parse_xml_btrace_conf_pt (struct gdb_xml_parser *parser,
1667 const struct gdb_xml_element *element,
1668 void *user_data, VEC (gdb_xml_value_s) *attributes)
1669{
1670 struct btrace_config *conf;
1671 struct gdb_xml_value *size;
1672
9a3c8263 1673 conf = (struct btrace_config *) user_data;
b20a6524
MM
1674 conf->format = BTRACE_FORMAT_PT;
1675 conf->pt.size = 0;
1676
1677 size = xml_find_attribute (attributes, "size");
1678 if (size != NULL)
1679 conf->pt.size = (unsigned int) *(ULONGEST *) size->value;
1680}
1681
1682static const struct gdb_xml_attribute btrace_conf_pt_attributes[] = {
1683 { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
1684 { NULL, GDB_XML_AF_NONE, NULL, NULL }
1685};
1686
d33501a5
MM
1687static const struct gdb_xml_attribute btrace_conf_bts_attributes[] = {
1688 { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
1689 { NULL, GDB_XML_AF_NONE, NULL, NULL }
1690};
1691
f4abbc16 1692static const struct gdb_xml_element btrace_conf_children[] = {
d33501a5
MM
1693 { "bts", btrace_conf_bts_attributes, NULL, GDB_XML_EF_OPTIONAL,
1694 parse_xml_btrace_conf_bts, NULL },
b20a6524
MM
1695 { "pt", btrace_conf_pt_attributes, NULL, GDB_XML_EF_OPTIONAL,
1696 parse_xml_btrace_conf_pt, NULL },
f4abbc16
MM
1697 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1698};
1699
1700static const struct gdb_xml_attribute btrace_conf_attributes[] = {
1701 { "version", GDB_XML_AF_NONE, NULL, NULL },
1702 { NULL, GDB_XML_AF_NONE, NULL, NULL }
1703};
1704
1705static const struct gdb_xml_element btrace_conf_elements[] = {
1706 { "btrace-conf", btrace_conf_attributes, btrace_conf_children,
1707 GDB_XML_EF_NONE, NULL, NULL },
1708 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1709};
1710
1711#endif /* defined (HAVE_LIBEXPAT) */
1712
1713/* See btrace.h. */
1714
1715void
1716parse_xml_btrace_conf (struct btrace_config *conf, const char *xml)
1717{
1718 int errcode;
1719
1720#if defined (HAVE_LIBEXPAT)
1721
1722 errcode = gdb_xml_parse_quick (_("btrace-conf"), "btrace-conf.dtd",
1723 btrace_conf_elements, xml, conf);
1724 if (errcode != 0)
1725 error (_("Error parsing branch trace configuration."));
1726
1727#else /* !defined (HAVE_LIBEXPAT) */
1728
1729 error (_("XML parsing is not supported."));
1730
1731#endif /* !defined (HAVE_LIBEXPAT) */
1732}
1733
23a7fe75
MM
1734/* See btrace.h. */
1735
1736const struct btrace_insn *
1737btrace_insn_get (const struct btrace_insn_iterator *it)
1738{
1739 const struct btrace_function *bfun;
1740 unsigned int index, end;
1741
1742 index = it->index;
1743 bfun = it->function;
1744
31fd9caa
MM
1745 /* Check if the iterator points to a gap in the trace. */
1746 if (bfun->errcode != 0)
1747 return NULL;
1748
23a7fe75
MM
1749 /* The index is within the bounds of this function's instruction vector. */
1750 end = VEC_length (btrace_insn_s, bfun->insn);
1751 gdb_assert (0 < end);
1752 gdb_assert (index < end);
1753
1754 return VEC_index (btrace_insn_s, bfun->insn, index);
1755}
1756
1757/* See btrace.h. */
1758
1759unsigned int
1760btrace_insn_number (const struct btrace_insn_iterator *it)
1761{
1762 const struct btrace_function *bfun;
1763
1764 bfun = it->function;
31fd9caa
MM
1765
1766 /* Return zero if the iterator points to a gap in the trace. */
1767 if (bfun->errcode != 0)
1768 return 0;
1769
23a7fe75
MM
1770 return bfun->insn_offset + it->index;
1771}
1772
1773/* See btrace.h. */
1774
1775void
1776btrace_insn_begin (struct btrace_insn_iterator *it,
1777 const struct btrace_thread_info *btinfo)
1778{
1779 const struct btrace_function *bfun;
1780
1781 bfun = btinfo->begin;
1782 if (bfun == NULL)
1783 error (_("No trace."));
1784
1785 it->function = bfun;
1786 it->index = 0;
1787}
1788
1789/* See btrace.h. */
1790
1791void
1792btrace_insn_end (struct btrace_insn_iterator *it,
1793 const struct btrace_thread_info *btinfo)
1794{
1795 const struct btrace_function *bfun;
1796 unsigned int length;
1797
1798 bfun = btinfo->end;
1799 if (bfun == NULL)
1800 error (_("No trace."));
1801
23a7fe75
MM
1802 length = VEC_length (btrace_insn_s, bfun->insn);
1803
31fd9caa
MM
1804 /* The last function may either be a gap or it contains the current
1805 instruction, which is one past the end of the execution trace; ignore
1806 it. */
1807 if (length > 0)
1808 length -= 1;
1809
23a7fe75 1810 it->function = bfun;
31fd9caa 1811 it->index = length;
23a7fe75
MM
1812}
1813
1814/* See btrace.h. */
1815
1816unsigned int
1817btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
1818{
1819 const struct btrace_function *bfun;
1820 unsigned int index, steps;
1821
1822 bfun = it->function;
1823 steps = 0;
1824 index = it->index;
1825
1826 while (stride != 0)
1827 {
1828 unsigned int end, space, adv;
1829
1830 end = VEC_length (btrace_insn_s, bfun->insn);
1831
1832 /* An empty function segment represents a gap in the trace. We count
1833 it as one instruction. */
1834 if (end == 0)
1835 {
1836 const struct btrace_function *next;
1837
1838 next = bfun->flow.next;
1839 if (next == NULL)
1840 break;
1841
1842 stride -= 1;
1843 steps += 1;
1844
1845 bfun = next;
1846 index = 0;
1847
1848 continue;
1849 }
1850
1851 gdb_assert (0 < end);
1852 gdb_assert (index < end);
1853
1854 /* Compute the number of instructions remaining in this segment. */
1855 space = end - index;
1856
1857 /* Advance the iterator as far as possible within this segment. */
 1858 adv = std::min (space, stride);
1859 stride -= adv;
1860 index += adv;
1861 steps += adv;
1862
1863 /* Move to the next function if we're at the end of this one. */
1864 if (index == end)
1865 {
1866 const struct btrace_function *next;
1867
1868 next = bfun->flow.next;
1869 if (next == NULL)
1870 {
1871 /* We stepped past the last function.
1872
1873 Let's adjust the index to point to the last instruction in
1874 the previous function. */
1875 index -= 1;
1876 steps -= 1;
1877 break;
1878 }
1879
1880 /* We now point to the first instruction in the new function. */
1881 bfun = next;
1882 index = 0;
1883 }
1884
1885 /* We did make progress. */
1886 gdb_assert (adv > 0);
1887 }
1888
1889 /* Update the iterator. */
1890 it->function = bfun;
1891 it->index = index;
1892
1893 return steps;
1894}
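/* For illustration, a minimal sketch of how the iterator functions above
   can be combined to walk a thread's recorded instructions.  BTINFO is
   assumed to hold valid trace data; handle_insn and handle_gap are
   placeholders for whatever the caller wants to do:

     struct btrace_insn_iterator it;

     btrace_insn_begin (&it, btinfo);
     do
       {
         const struct btrace_insn *insn = btrace_insn_get (&it);

         if (insn != NULL)
           handle_insn (insn);
         else
           handle_gap (it.function);
       }
     while (btrace_insn_next (&it, 1) != 0);  */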
1895
1896/* See btrace.h. */
1897
1898unsigned int
1899btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
1900{
1901 const struct btrace_function *bfun;
1902 unsigned int index, steps;
1903
1904 bfun = it->function;
1905 steps = 0;
1906 index = it->index;
1907
1908 while (stride != 0)
1909 {
1910 unsigned int adv;
1911
1912 /* Move to the previous function if we're at the start of this one. */
1913 if (index == 0)
1914 {
1915 const struct btrace_function *prev;
1916
1917 prev = bfun->flow.prev;
1918 if (prev == NULL)
1919 break;
1920
1921 /* We point to one after the last instruction in the new function. */
1922 bfun = prev;
1923 index = VEC_length (btrace_insn_s, bfun->insn);
1924
1925 /* An empty function segment represents a gap in the trace. We count
1926 it as one instruction. */
1927 if (index == 0)
1928 {
1929 stride -= 1;
1930 steps += 1;
1931
1932 continue;
1933 }
1934 }
1935
1936 /* Advance the iterator as far as possible within this segment. */
 1937 adv = std::min (index, stride);
 1938
1939 stride -= adv;
1940 index -= adv;
1941 steps += adv;
1942
1943 /* We did make progress. */
1944 gdb_assert (adv > 0);
1945 }
1946
1947 /* Update the iterator. */
1948 it->function = bfun;
1949 it->index = index;
1950
1951 return steps;
1952}
1953
1954/* See btrace.h. */
1955
1956int
1957btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
1958 const struct btrace_insn_iterator *rhs)
1959{
1960 unsigned int lnum, rnum;
1961
1962 lnum = btrace_insn_number (lhs);
1963 rnum = btrace_insn_number (rhs);
1964
 1965 /* A gap has an instruction number of zero. Things get more
 1966 complicated when gaps are involved.
1967
1968 We take the instruction number offset from the iterator's function.
1969 This is the number of the first instruction after the gap.
1970
1971 This is OK as long as both lhs and rhs point to gaps. If only one of
1972 them does, we need to adjust the number based on the other's regular
1973 instruction number. Otherwise, a gap might compare equal to an
1974 instruction. */
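/* For example, if LHS points to a gap whose function has insn_offset 10
   and RHS points to instruction number 10, LNUM is first set to 10 and
   then decremented to 9, so the gap sorts strictly before the instruction
   that follows it rather than comparing equal to it.  */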
1975
1976 if (lnum == 0 && rnum == 0)
1977 {
1978 lnum = lhs->function->insn_offset;
1979 rnum = rhs->function->insn_offset;
1980 }
1981 else if (lnum == 0)
1982 {
1983 lnum = lhs->function->insn_offset;
1984
1985 if (lnum == rnum)
1986 lnum -= 1;
1987 }
1988 else if (rnum == 0)
1989 {
1990 rnum = rhs->function->insn_offset;
1991
1992 if (rnum == lnum)
1993 rnum -= 1;
1994 }
1995
1996 return (int) (lnum - rnum);
1997}
1998
1999/* See btrace.h. */
2000
2001int
2002btrace_find_insn_by_number (struct btrace_insn_iterator *it,
2003 const struct btrace_thread_info *btinfo,
2004 unsigned int number)
2005{
2006 const struct btrace_function *bfun;
 2007 unsigned int end, length;
2008
2009 for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
2010 {
2011 /* Skip gaps. */
2012 if (bfun->errcode != 0)
2013 continue;
2014
2015 if (bfun->insn_offset <= number)
2016 break;
2017 }
2018
2019 if (bfun == NULL)
2020 return 0;
2021
2022 length = VEC_length (btrace_insn_s, bfun->insn);
2023 gdb_assert (length > 0);
2024
2025 end = bfun->insn_offset + length;
2026 if (end <= number)
2027 return 0;
2028
2029 it->function = bfun;
2030 it->index = number - bfun->insn_offset;
2031
2032 return 1;
2033}
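/* For positions that do not fall into a gap, this is the inverse of
   btrace_insn_number above.  As a sketch:

     if (btrace_find_insn_by_number (&it, btinfo, 42))
       gdb_assert (btrace_insn_number (&it) == 42);  */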
2034
2035/* See btrace.h. */
2036
2037const struct btrace_function *
2038btrace_call_get (const struct btrace_call_iterator *it)
2039{
2040 return it->function;
2041}
2042
2043/* See btrace.h. */
2044
2045unsigned int
2046btrace_call_number (const struct btrace_call_iterator *it)
2047{
2048 const struct btrace_thread_info *btinfo;
2049 const struct btrace_function *bfun;
2050 unsigned int insns;
2051
2052 btinfo = it->btinfo;
2053 bfun = it->function;
2054 if (bfun != NULL)
2055 return bfun->number;
2056
2057 /* For the end iterator, i.e. bfun == NULL, we return one more than the
2058 number of the last function. */
2059 bfun = btinfo->end;
2060 insns = VEC_length (btrace_insn_s, bfun->insn);
2061
2062 /* If the function contains only a single instruction (i.e. the current
2063 instruction), it will be skipped and its number is already the number
2064 we seek. */
2065 if (insns == 1)
2066 return bfun->number;
2067
2068 /* Otherwise, return one more than the number of the last function. */
2069 return bfun->number + 1;
2070}
2071
2072/* See btrace.h. */
2073
2074void
2075btrace_call_begin (struct btrace_call_iterator *it,
2076 const struct btrace_thread_info *btinfo)
2077{
2078 const struct btrace_function *bfun;
2079
2080 bfun = btinfo->begin;
2081 if (bfun == NULL)
2082 error (_("No trace."));
2083
2084 it->btinfo = btinfo;
2085 it->function = bfun;
2086}
2087
2088/* See btrace.h. */
2089
2090void
2091btrace_call_end (struct btrace_call_iterator *it,
2092 const struct btrace_thread_info *btinfo)
2093{
2094 const struct btrace_function *bfun;
2095
2096 bfun = btinfo->end;
2097 if (bfun == NULL)
2098 error (_("No trace."));
2099
2100 it->btinfo = btinfo;
2101 it->function = NULL;
2102}
2103
2104/* See btrace.h. */
2105
2106unsigned int
2107btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
2108{
2109 const struct btrace_function *bfun;
2110 unsigned int steps;
2111
2112 bfun = it->function;
2113 steps = 0;
2114 while (bfun != NULL)
2115 {
2116 const struct btrace_function *next;
2117 unsigned int insns;
2118
2119 next = bfun->flow.next;
2120 if (next == NULL)
2121 {
2122 /* Ignore the last function if it only contains a single
2123 (i.e. the current) instruction. */
2124 insns = VEC_length (btrace_insn_s, bfun->insn);
2125 if (insns == 1)
2126 steps -= 1;
2127 }
2128
2129 if (stride == steps)
2130 break;
2131
2132 bfun = next;
2133 steps += 1;
2134 }
2135
2136 it->function = bfun;
2137 return steps;
2138}
2139
2140/* See btrace.h. */
2141
2142unsigned int
2143btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride)
2144{
2145 const struct btrace_thread_info *btinfo;
2146 const struct btrace_function *bfun;
2147 unsigned int steps;
2148
2149 bfun = it->function;
2150 steps = 0;
2151
2152 if (bfun == NULL)
2153 {
2154 unsigned int insns;
2155
2156 btinfo = it->btinfo;
2157 bfun = btinfo->end;
2158 if (bfun == NULL)
2159 return 0;
2160
2161 /* Ignore the last function if it only contains a single
2162 (i.e. the current) instruction. */
2163 insns = VEC_length (btrace_insn_s, bfun->insn);
2164 if (insns == 1)
2165 bfun = bfun->flow.prev;
2166
2167 if (bfun == NULL)
2168 return 0;
2169
2170 steps += 1;
2171 }
2172
2173 while (steps < stride)
2174 {
2175 const struct btrace_function *prev;
2176
2177 prev = bfun->flow.prev;
2178 if (prev == NULL)
2179 break;
2180
2181 bfun = prev;
2182 steps += 1;
2183 }
2184
2185 it->function = bfun;
2186 return steps;
2187}
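/* For illustration, a minimal sketch of walking the call history from the
   most recent call backwards, using the iterator functions above.  BTINFO
   is assumed to hold valid trace data; handle_call is a placeholder:

     struct btrace_call_iterator it;

     btrace_call_end (&it, btinfo);
     while (btrace_call_prev (&it, 1) != 0)
       handle_call (btrace_call_get (&it));  */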
2188
2189/* See btrace.h. */
2190
2191int
2192btrace_call_cmp (const struct btrace_call_iterator *lhs,
2193 const struct btrace_call_iterator *rhs)
2194{
2195 unsigned int lnum, rnum;
2196
2197 lnum = btrace_call_number (lhs);
2198 rnum = btrace_call_number (rhs);
2199
2200 return (int) (lnum - rnum);
2201}
2202
2203/* See btrace.h. */
2204
2205int
2206btrace_find_call_by_number (struct btrace_call_iterator *it,
2207 const struct btrace_thread_info *btinfo,
2208 unsigned int number)
2209{
2210 const struct btrace_function *bfun;
2211
2212 for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
2213 {
2214 unsigned int bnum;
2215
2216 bnum = bfun->number;
2217 if (number == bnum)
2218 {
2219 it->btinfo = btinfo;
2220 it->function = bfun;
2221 return 1;
2222 }
2223
2224 /* Functions are ordered and numbered consecutively. We could bail out
2225 earlier. On the other hand, it is very unlikely that we search for
2226 a nonexistent function. */
2227 }
2228
2229 return 0;
2230}
2231
2232/* See btrace.h. */
2233
2234void
2235btrace_set_insn_history (struct btrace_thread_info *btinfo,
2236 const struct btrace_insn_iterator *begin,
2237 const struct btrace_insn_iterator *end)
2238{
2239 if (btinfo->insn_history == NULL)
 2240 btinfo->insn_history = XCNEW (struct btrace_insn_history);
2241
2242 btinfo->insn_history->begin = *begin;
2243 btinfo->insn_history->end = *end;
2244}
2245
2246/* See btrace.h. */
2247
2248void
2249btrace_set_call_history (struct btrace_thread_info *btinfo,
2250 const struct btrace_call_iterator *begin,
2251 const struct btrace_call_iterator *end)
2252{
2253 gdb_assert (begin->btinfo == end->btinfo);
2254
2255 if (btinfo->call_history == NULL)
 2256 btinfo->call_history = XCNEW (struct btrace_call_history);
2257
2258 btinfo->call_history->begin = *begin;
2259 btinfo->call_history->end = *end;
2260}
2261
2262/* See btrace.h. */
2263
2264int
2265btrace_is_replaying (struct thread_info *tp)
2266{
2267 return tp->btrace.replay != NULL;
2268}
2269
2270/* See btrace.h. */
2271
2272int
2273btrace_is_empty (struct thread_info *tp)
2274{
2275 struct btrace_insn_iterator begin, end;
2276 struct btrace_thread_info *btinfo;
2277
2278 btinfo = &tp->btrace;
2279
2280 if (btinfo->begin == NULL)
2281 return 1;
2282
2283 btrace_insn_begin (&begin, btinfo);
2284 btrace_insn_end (&end, btinfo);
2285
2286 return btrace_insn_cmp (&begin, &end) == 0;
2287}
2288
2289/* Forward the cleanup request. */
2290
2291static void
2292do_btrace_data_cleanup (void *arg)
2293{
 2294 btrace_data_fini ((struct btrace_data *) arg);
2295}
2296
2297/* See btrace.h. */
2298
2299struct cleanup *
2300make_cleanup_btrace_data (struct btrace_data *data)
2301{
2302 return make_cleanup (do_btrace_data_cleanup, data);
2303}
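/* For illustration, a typical use of the cleanup above, following GDB's
   usual cleanup idiom (a sketch; the surrounding code is assumed):

     struct btrace_data data;
     struct cleanup *cleanup;

     btrace_data_init (&data);
     cleanup = make_cleanup_btrace_data (&data);
     ... fetch and inspect the trace data ...
     do_cleanups (cleanup);

   The final do_cleanups call runs btrace_data_fini on DATA.  */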
2304
2305#if defined (HAVE_LIBIPT)
2306
2307/* Print a single packet. */
2308
2309static void
2310pt_print_packet (const struct pt_packet *packet)
2311{
2312 switch (packet->type)
2313 {
2314 default:
2315 printf_unfiltered (("[??: %x]"), packet->type);
2316 break;
2317
2318 case ppt_psb:
2319 printf_unfiltered (("psb"));
2320 break;
2321
2322 case ppt_psbend:
2323 printf_unfiltered (("psbend"));
2324 break;
2325
2326 case ppt_pad:
2327 printf_unfiltered (("pad"));
2328 break;
2329
2330 case ppt_tip:
2331 printf_unfiltered (("tip %u: 0x%" PRIx64 ""),
2332 packet->payload.ip.ipc,
2333 packet->payload.ip.ip);
2334 break;
2335
2336 case ppt_tip_pge:
2337 printf_unfiltered (("tip.pge %u: 0x%" PRIx64 ""),
2338 packet->payload.ip.ipc,
2339 packet->payload.ip.ip);
2340 break;
2341
2342 case ppt_tip_pgd:
2343 printf_unfiltered (("tip.pgd %u: 0x%" PRIx64 ""),
2344 packet->payload.ip.ipc,
2345 packet->payload.ip.ip);
2346 break;
2347
2348 case ppt_fup:
2349 printf_unfiltered (("fup %u: 0x%" PRIx64 ""),
2350 packet->payload.ip.ipc,
2351 packet->payload.ip.ip);
2352 break;
2353
2354 case ppt_tnt_8:
2355 printf_unfiltered (("tnt-8 %u: 0x%" PRIx64 ""),
2356 packet->payload.tnt.bit_size,
2357 packet->payload.tnt.payload);
2358 break;
2359
2360 case ppt_tnt_64:
2361 printf_unfiltered (("tnt-64 %u: 0x%" PRIx64 ""),
2362 packet->payload.tnt.bit_size,
2363 packet->payload.tnt.payload);
2364 break;
2365
2366 case ppt_pip:
2367 printf_unfiltered (("pip %" PRIx64 "%s"), packet->payload.pip.cr3,
2368 packet->payload.pip.nr ? (" nr") : (""));
2369 break;
2370
2371 case ppt_tsc:
2372 printf_unfiltered (("tsc %" PRIx64 ""), packet->payload.tsc.tsc);
2373 break;
2374
2375 case ppt_cbr:
2376 printf_unfiltered (("cbr %u"), packet->payload.cbr.ratio);
2377 break;
2378
2379 case ppt_mode:
2380 switch (packet->payload.mode.leaf)
2381 {
2382 default:
2383 printf_unfiltered (("mode %u"), packet->payload.mode.leaf);
2384 break;
2385
2386 case pt_mol_exec:
2387 printf_unfiltered (("mode.exec%s%s"),
2388 packet->payload.mode.bits.exec.csl
2389 ? (" cs.l") : (""),
2390 packet->payload.mode.bits.exec.csd
2391 ? (" cs.d") : (""));
2392 break;
2393
2394 case pt_mol_tsx:
2395 printf_unfiltered (("mode.tsx%s%s"),
2396 packet->payload.mode.bits.tsx.intx
2397 ? (" intx") : (""),
2398 packet->payload.mode.bits.tsx.abrt
2399 ? (" abrt") : (""));
2400 break;
2401 }
2402 break;
2403
2404 case ppt_ovf:
2405 printf_unfiltered (("ovf"));
2406 break;
2407
2408 case ppt_stop:
2409 printf_unfiltered (("stop"));
2410 break;
2411
2412 case ppt_vmcs:
2413 printf_unfiltered (("vmcs %" PRIx64 ""), packet->payload.vmcs.base);
2414 break;
2415
2416 case ppt_tma:
2417 printf_unfiltered (("tma %x %x"), packet->payload.tma.ctc,
2418 packet->payload.tma.fc);
2419 break;
2420
2421 case ppt_mtc:
2422 printf_unfiltered (("mtc %x"), packet->payload.mtc.ctc);
2423 break;
2424
2425 case ppt_cyc:
2426 printf_unfiltered (("cyc %" PRIx64 ""), packet->payload.cyc.value);
2427 break;
2428
2429 case ppt_mnt:
2430 printf_unfiltered (("mnt %" PRIx64 ""), packet->payload.mnt.payload);
2431 break;
2432 }
2433}
2434
2435/* Decode packets into MAINT using DECODER. */
2436
2437static void
2438btrace_maint_decode_pt (struct btrace_maint_info *maint,
2439 struct pt_packet_decoder *decoder)
2440{
2441 int errcode;
2442
2443 for (;;)
2444 {
2445 struct btrace_pt_packet packet;
2446
2447 errcode = pt_pkt_sync_forward (decoder);
2448 if (errcode < 0)
2449 break;
2450
2451 for (;;)
2452 {
2453 pt_pkt_get_offset (decoder, &packet.offset);
2454
2455 errcode = pt_pkt_next (decoder, &packet.packet,
 2456 sizeof (packet.packet));
2457 if (errcode < 0)
2458 break;
2459
2460 if (maint_btrace_pt_skip_pad == 0 || packet.packet.type != ppt_pad)
2461 {
2462 packet.errcode = pt_errcode (errcode);
2463 VEC_safe_push (btrace_pt_packet_s, maint->variant.pt.packets,
2464 &packet);
2465 }
2466 }
2467
2468 if (errcode == -pte_eos)
2469 break;
2470
2471 packet.errcode = pt_errcode (errcode);
2472 VEC_safe_push (btrace_pt_packet_s, maint->variant.pt.packets,
2473 &packet);
2474
2475 warning (_("Error at trace offset 0x%" PRIx64 ": %s."),
2476 packet.offset, pt_errstr (packet.errcode));
2477 }
2478
2479 if (errcode != -pte_eos)
 2480 warning (_("Failed to synchronize onto the Intel Processor Trace "
2481 "stream: %s."), pt_errstr (pt_errcode (errcode)));
2482}
2483
2484/* Update the packet history in BTINFO. */
2485
2486static void
2487btrace_maint_update_pt_packets (struct btrace_thread_info *btinfo)
2488{
2489 volatile struct gdb_exception except;
2490 struct pt_packet_decoder *decoder;
2491 struct btrace_data_pt *pt;
2492 struct pt_config config;
2493 int errcode;
2494
2495 pt = &btinfo->data.variant.pt;
2496
2497 /* Nothing to do if there is no trace. */
2498 if (pt->size == 0)
2499 return;
2500
 2501 memset (&config, 0, sizeof (config));
2502
2503 config.size = sizeof (config);
2504 config.begin = pt->data;
2505 config.end = pt->data + pt->size;
2506
2507 config.cpu.vendor = pt_translate_cpu_vendor (pt->config.cpu.vendor);
2508 config.cpu.family = pt->config.cpu.family;
2509 config.cpu.model = pt->config.cpu.model;
2510 config.cpu.stepping = pt->config.cpu.stepping;
2511
2512 errcode = pt_cpu_errata (&config.errata, &config.cpu);
2513 if (errcode < 0)
 2514 error (_("Failed to configure the Intel Processor Trace decoder: %s."),
2515 pt_errstr (pt_errcode (errcode)));
2516
2517 decoder = pt_pkt_alloc_decoder (&config);
2518 if (decoder == NULL)
 2519 error (_("Failed to allocate the Intel Processor Trace decoder."));
2520
2521 TRY
2522 {
2523 btrace_maint_decode_pt (&btinfo->maint, decoder);
2524 }
2525 CATCH (except, RETURN_MASK_ALL)
2526 {
2527 pt_pkt_free_decoder (decoder);
2528
2529 if (except.reason < 0)
2530 throw_exception (except);
2531 }
2532 END_CATCH
2533
2534 pt_pkt_free_decoder (decoder);
2535}
2536
 2537#endif /* defined (HAVE_LIBIPT) */
2538
2539/* Update the packet maintenance information for BTINFO and store the
2540 low and high bounds into BEGIN and END, respectively.
2541 Store the current iterator state into FROM and TO. */
2542
2543static void
2544btrace_maint_update_packets (struct btrace_thread_info *btinfo,
2545 unsigned int *begin, unsigned int *end,
2546 unsigned int *from, unsigned int *to)
2547{
2548 switch (btinfo->data.format)
2549 {
2550 default:
2551 *begin = 0;
2552 *end = 0;
2553 *from = 0;
2554 *to = 0;
2555 break;
2556
2557 case BTRACE_FORMAT_BTS:
2558 /* Nothing to do - we operate directly on BTINFO->DATA. */
2559 *begin = 0;
2560 *end = VEC_length (btrace_block_s, btinfo->data.variant.bts.blocks);
2561 *from = btinfo->maint.variant.bts.packet_history.begin;
2562 *to = btinfo->maint.variant.bts.packet_history.end;
2563 break;
2564
2565#if defined (HAVE_LIBIPT)
2566 case BTRACE_FORMAT_PT:
2567 if (VEC_empty (btrace_pt_packet_s, btinfo->maint.variant.pt.packets))
2568 btrace_maint_update_pt_packets (btinfo);
2569
2570 *begin = 0;
2571 *end = VEC_length (btrace_pt_packet_s, btinfo->maint.variant.pt.packets);
2572 *from = btinfo->maint.variant.pt.packet_history.begin;
2573 *to = btinfo->maint.variant.pt.packet_history.end;
2574 break;
2575#endif /* defined (HAVE_LIBIPT) */
2576 }
2577}
2578
2579/* Print packets in BTINFO from BEGIN (inclusive) until END (exclusive) and
2580 update the current iterator position. */
2581
2582static void
2583btrace_maint_print_packets (struct btrace_thread_info *btinfo,
2584 unsigned int begin, unsigned int end)
2585{
2586 switch (btinfo->data.format)
2587 {
2588 default:
2589 break;
2590
2591 case BTRACE_FORMAT_BTS:
2592 {
2593 VEC (btrace_block_s) *blocks;
2594 unsigned int blk;
2595
2596 blocks = btinfo->data.variant.bts.blocks;
2597 for (blk = begin; blk < end; ++blk)
2598 {
2599 const btrace_block_s *block;
2600
2601 block = VEC_index (btrace_block_s, blocks, blk);
2602
2603 printf_unfiltered ("%u\tbegin: %s, end: %s\n", blk,
2604 core_addr_to_string_nz (block->begin),
2605 core_addr_to_string_nz (block->end));
2606 }
2607
2608 btinfo->maint.variant.bts.packet_history.begin = begin;
2609 btinfo->maint.variant.bts.packet_history.end = end;
2610 }
2611 break;
2612
2613#if defined (HAVE_LIBIPT)
2614 case BTRACE_FORMAT_PT:
2615 {
2616 VEC (btrace_pt_packet_s) *packets;
2617 unsigned int pkt;
2618
2619 packets = btinfo->maint.variant.pt.packets;
2620 for (pkt = begin; pkt < end; ++pkt)
2621 {
2622 const struct btrace_pt_packet *packet;
2623
2624 packet = VEC_index (btrace_pt_packet_s, packets, pkt);
2625
2626 printf_unfiltered ("%u\t", pkt);
2627 printf_unfiltered ("0x%" PRIx64 "\t", packet->offset);
2628
2629 if (packet->errcode == pte_ok)
2630 pt_print_packet (&packet->packet);
2631 else
2632 printf_unfiltered ("[error: %s]", pt_errstr (packet->errcode));
2633
2634 printf_unfiltered ("\n");
2635 }
2636
2637 btinfo->maint.variant.pt.packet_history.begin = begin;
2638 btinfo->maint.variant.pt.packet_history.end = end;
2639 }
2640 break;
2641#endif /* defined (HAVE_LIBIPT) */
2642 }
2643}
2644
2645/* Read a number from an argument string. */
2646
2647static unsigned int
2648get_uint (char **arg)
2649{
2650 char *begin, *end, *pos;
2651 unsigned long number;
2652
2653 begin = *arg;
2654 pos = skip_spaces (begin);
2655
2656 if (!isdigit (*pos))
2657 error (_("Expected positive number, got: %s."), pos);
2658
2659 number = strtoul (pos, &end, 10);
2660 if (number > UINT_MAX)
2661 error (_("Number too big."));
2662
2663 *arg += (end - begin);
2664
2665 return (unsigned int) number;
2666}
2667
2668/* Read a context size from an argument string. */
2669
2670static int
2671get_context_size (char **arg)
2672{
2673 char *pos;
2674 int number;
2675
2676 pos = skip_spaces (*arg);
2677
2678 if (!isdigit (*pos))
2679 error (_("Expected positive number, got: %s."), pos);
2680
2681 return strtol (pos, arg, 10);
2682}
2683
2684/* Complain about junk at the end of an argument string. */
2685
2686static void
2687no_chunk (char *arg)
2688{
2689 if (*arg != 0)
2690 error (_("Junk after argument: %s."), arg);
2691}
2692
2693/* The "maintenance btrace packet-history" command. */
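/* The argument forms accepted below mirror the help text installed in
   _initialize_btrace at the end of this file:

     (none) or "+"    print the next ten packets
     "-"              print the ten packets before the previous print
     "N"              print ten packets starting at packet N
     "N,M"            print packets N up to and including M
     "N,+K"           print K packets starting at packet N
     "N,-K"           print K packets ending at packet N  */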
2694
2695static void
2696maint_btrace_packet_history_cmd (char *arg, int from_tty)
2697{
2698 struct btrace_thread_info *btinfo;
2699 struct thread_info *tp;
2700 unsigned int size, begin, end, from, to;
2701
2702 tp = find_thread_ptid (inferior_ptid);
2703 if (tp == NULL)
2704 error (_("No thread."));
2705
2706 size = 10;
2707 btinfo = &tp->btrace;
2708
2709 btrace_maint_update_packets (btinfo, &begin, &end, &from, &to);
2710 if (begin == end)
2711 {
2712 printf_unfiltered (_("No trace.\n"));
2713 return;
2714 }
2715
2716 if (arg == NULL || *arg == 0 || strcmp (arg, "+") == 0)
2717 {
2718 from = to;
2719
2720 if (end - from < size)
2721 size = end - from;
2722 to = from + size;
2723 }
2724 else if (strcmp (arg, "-") == 0)
2725 {
2726 to = from;
2727
2728 if (to - begin < size)
2729 size = to - begin;
2730 from = to - size;
2731 }
2732 else
2733 {
2734 from = get_uint (&arg);
2735 if (end <= from)
2736 error (_("'%u' is out of range."), from);
2737
2738 arg = skip_spaces (arg);
2739 if (*arg == ',')
2740 {
2741 arg = skip_spaces (++arg);
2742
2743 if (*arg == '+')
2744 {
2745 arg += 1;
2746 size = get_context_size (&arg);
2747
2748 no_chunk (arg);
2749
2750 if (end - from < size)
2751 size = end - from;
2752 to = from + size;
2753 }
2754 else if (*arg == '-')
2755 {
2756 arg += 1;
2757 size = get_context_size (&arg);
2758
2759 no_chunk (arg);
2760
2761 /* Include the packet given as first argument. */
2762 from += 1;
2763 to = from;
2764
2765 if (to - begin < size)
2766 size = to - begin;
2767 from = to - size;
2768 }
2769 else
2770 {
2771 to = get_uint (&arg);
2772
2773 /* Include the packet at the second argument and silently
2774 truncate the range. */
2775 if (to < end)
2776 to += 1;
2777 else
2778 to = end;
2779
2780 no_chunk (arg);
2781 }
2782 }
2783 else
2784 {
2785 no_chunk (arg);
2786
2787 if (end - from < size)
2788 size = end - from;
2789 to = from + size;
2790 }
2791
2792 dont_repeat ();
2793 }
2794
2795 btrace_maint_print_packets (btinfo, from, to);
2796}
2797
2798/* The "maintenance btrace clear-packet-history" command. */
2799
2800static void
2801maint_btrace_clear_packet_history_cmd (char *args, int from_tty)
2802{
2803 struct btrace_thread_info *btinfo;
2804 struct thread_info *tp;
2805
2806 if (args != NULL && *args != 0)
2807 error (_("Invalid argument."));
2808
2809 tp = find_thread_ptid (inferior_ptid);
2810 if (tp == NULL)
2811 error (_("No thread."));
2812
2813 btinfo = &tp->btrace;
2814
2815 /* Must clear the maint data before - it depends on BTINFO->DATA. */
2816 btrace_maint_clear (btinfo);
2817 btrace_data_clear (&btinfo->data);
2818}
2819
2820/* The "maintenance btrace clear" command. */
2821
2822static void
2823maint_btrace_clear_cmd (char *args, int from_tty)
2824{
2825 struct btrace_thread_info *btinfo;
2826 struct thread_info *tp;
2827
2828 if (args != NULL && *args != 0)
2829 error (_("Invalid argument."));
2830
2831 tp = find_thread_ptid (inferior_ptid);
2832 if (tp == NULL)
2833 error (_("No thread."));
2834
2835 btrace_clear (tp);
2836}
2837
2838/* The "maintenance btrace" command. */
2839
2840static void
2841maint_btrace_cmd (char *args, int from_tty)
2842{
2843 help_list (maint_btrace_cmdlist, "maintenance btrace ", all_commands,
2844 gdb_stdout);
2845}
2846
2847/* The "maintenance set btrace" command. */
2848
2849static void
2850maint_btrace_set_cmd (char *args, int from_tty)
2851{
2852 help_list (maint_btrace_set_cmdlist, "maintenance set btrace ", all_commands,
2853 gdb_stdout);
2854}
2855
2856/* The "maintenance show btrace" command. */
2857
2858static void
2859maint_btrace_show_cmd (char *args, int from_tty)
2860{
2861 help_list (maint_btrace_show_cmdlist, "maintenance show btrace ",
2862 all_commands, gdb_stdout);
2863}
2864
2865/* The "maintenance set btrace pt" command. */
2866
2867static void
2868maint_btrace_pt_set_cmd (char *args, int from_tty)
2869{
2870 help_list (maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ",
2871 all_commands, gdb_stdout);
2872}
2873
2874/* The "maintenance show btrace pt" command. */
2875
2876static void
2877maint_btrace_pt_show_cmd (char *args, int from_tty)
2878{
2879 help_list (maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ",
2880 all_commands, gdb_stdout);
2881}
2882
2883/* The "maintenance info btrace" command. */
2884
2885static void
2886maint_info_btrace_cmd (char *args, int from_tty)
2887{
2888 struct btrace_thread_info *btinfo;
2889 struct thread_info *tp;
2890 const struct btrace_config *conf;
2891
2892 if (args != NULL && *args != 0)
2893 error (_("Invalid argument."));
2894
2895 tp = find_thread_ptid (inferior_ptid);
2896 if (tp == NULL)
2897 error (_("No thread."));
2898
2899 btinfo = &tp->btrace;
2900
2901 conf = btrace_conf (btinfo);
2902 if (conf == NULL)
2903 error (_("No btrace configuration."));
2904
2905 printf_unfiltered (_("Format: %s.\n"),
2906 btrace_format_string (conf->format));
2907
2908 switch (conf->format)
2909 {
2910 default:
2911 break;
2912
2913 case BTRACE_FORMAT_BTS:
2914 printf_unfiltered (_("Number of packets: %u.\n"),
2915 VEC_length (btrace_block_s,
2916 btinfo->data.variant.bts.blocks));
2917 break;
2918
2919#if defined (HAVE_LIBIPT)
2920 case BTRACE_FORMAT_PT:
2921 {
2922 struct pt_version version;
2923
2924 version = pt_library_version ();
2925 printf_unfiltered (_("Version: %u.%u.%u%s.\n"), version.major,
2926 version.minor, version.build,
2927 version.ext != NULL ? version.ext : "");
2928
2929 btrace_maint_update_pt_packets (btinfo);
2930 printf_unfiltered (_("Number of packets: %u.\n"),
2931 VEC_length (btrace_pt_packet_s,
2932 btinfo->maint.variant.pt.packets));
2933 }
2934 break;
2935#endif /* defined (HAVE_LIBIPT) */
2936 }
2937}
2938
2939/* The "maint show btrace pt skip-pad" show value function. */
2940
2941static void
2942show_maint_btrace_pt_skip_pad (struct ui_file *file, int from_tty,
2943 struct cmd_list_element *c,
2944 const char *value)
2945{
2946 fprintf_filtered (file, _("Skip PAD packets is %s.\n"), value);
2947}
2948
2949
2950/* Initialize btrace maintenance commands. */
2951
2952void _initialize_btrace (void);
2953void
2954_initialize_btrace (void)
2955{
2956 add_cmd ("btrace", class_maintenance, maint_info_btrace_cmd,
2957 _("Info about branch tracing data."), &maintenanceinfolist);
2958
2959 add_prefix_cmd ("btrace", class_maintenance, maint_btrace_cmd,
2960 _("Branch tracing maintenance commands."),
2961 &maint_btrace_cmdlist, "maintenance btrace ",
2962 0, &maintenancelist);
2963
2964 add_prefix_cmd ("btrace", class_maintenance, maint_btrace_set_cmd, _("\
2965Set branch tracing specific variables."),
2966 &maint_btrace_set_cmdlist, "maintenance set btrace ",
2967 0, &maintenance_set_cmdlist);
2968
2969 add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_set_cmd, _("\
 2970Set Intel Processor Trace specific variables."),
2971 &maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ",
2972 0, &maint_btrace_set_cmdlist);
2973
2974 add_prefix_cmd ("btrace", class_maintenance, maint_btrace_show_cmd, _("\
2975Show branch tracing specific variables."),
2976 &maint_btrace_show_cmdlist, "maintenance show btrace ",
2977 0, &maintenance_show_cmdlist);
2978
2979 add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_show_cmd, _("\
 2980Show Intel Processor Trace specific variables."),
2981 &maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ",
2982 0, &maint_btrace_show_cmdlist);
2983
2984 add_setshow_boolean_cmd ("skip-pad", class_maintenance,
2985 &maint_btrace_pt_skip_pad, _("\
2986Set whether PAD packets should be skipped in the btrace packet history."), _("\
2987Show whether PAD packets should be skipped in the btrace packet history."),_("\
2988When enabled, PAD packets are ignored in the btrace packet history."),
2989 NULL, show_maint_btrace_pt_skip_pad,
2990 &maint_btrace_pt_set_cmdlist,
2991 &maint_btrace_pt_show_cmdlist);
2992
2993 add_cmd ("packet-history", class_maintenance, maint_btrace_packet_history_cmd,
2994 _("Print the raw branch tracing data.\n\
2995With no argument, print ten more packets after the previous ten-line print.\n\
 2996With '-' as argument, print ten packets before a previous ten-line print.\n\
2997One argument specifies the starting packet of a ten-line print.\n\
2998Two arguments with comma between specify starting and ending packets to \
2999print.\n\
 3000Preceded with '+'/'-', the second argument specifies the distance from the \
 3001first.\n"),
3002 &maint_btrace_cmdlist);
3003
3004 add_cmd ("clear-packet-history", class_maintenance,
3005 maint_btrace_clear_packet_history_cmd,
3006 _("Clears the branch tracing packet history.\n\
3007Discards the raw branch tracing data but not the execution history data.\n\
3008"),
3009 &maint_btrace_cmdlist);
3010
3011 add_cmd ("clear", class_maintenance, maint_btrace_clear_cmd,
3012 _("Clears the branch tracing data.\n\
3013Discards the raw branch tracing data and the execution history data.\n\
3014The next 'record' command will fetch the branch tracing data anew.\n\
3015"),
3016 &maint_btrace_cmdlist);
3017
3018}