btrace: Replace struct btrace_function::segment.
[deliverable/binutils-gdb.git] / gdb / btrace.c
CommitLineData
02d27625
MM
1/* Branch trace support for GDB, the GNU debugger.
2
61baf725 3 Copyright (C) 2013-2017 Free Software Foundation, Inc.
02d27625
MM
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
d41f6d8e 22#include "defs.h"
02d27625
MM
23#include "btrace.h"
24#include "gdbthread.h"
02d27625
MM
25#include "inferior.h"
26#include "target.h"
27#include "record.h"
28#include "symtab.h"
29#include "disasm.h"
30#include "source.h"
31#include "filenames.h"
c12a2917 32#include "xml-support.h"
6e07b1d2 33#include "regcache.h"
b20a6524 34#include "rsp-low.h"
b0627500
MM
35#include "gdbcmd.h"
36#include "cli/cli-utils.h"
b20a6524
MM
37
38#include <inttypes.h>
b0627500 39#include <ctype.h>
325fac50 40#include <algorithm>
b0627500
MM
41
42/* Command lists for btrace maintenance commands. */
43static struct cmd_list_element *maint_btrace_cmdlist;
44static struct cmd_list_element *maint_btrace_set_cmdlist;
45static struct cmd_list_element *maint_btrace_show_cmdlist;
46static struct cmd_list_element *maint_btrace_pt_set_cmdlist;
47static struct cmd_list_element *maint_btrace_pt_show_cmdlist;
48
49/* Control whether to skip PAD packets when computing the packet history. */
50static int maint_btrace_pt_skip_pad = 1;
b20a6524 51
d87fdac3
MM
52/* A vector of function segments. */
53typedef struct btrace_function * bfun_s;
54DEF_VEC_P (bfun_s);
55
b20a6524 56static void btrace_add_pc (struct thread_info *tp);
02d27625
MM
57
58/* Print a record debug message. Use do ... while (0) to avoid ambiguities
59 when used in if statements. */
60
61#define DEBUG(msg, args...) \
62 do \
63 { \
64 if (record_debug != 0) \
65 fprintf_unfiltered (gdb_stdlog, \
66 "[btrace] " msg "\n", ##args); \
67 } \
68 while (0)
69
70#define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args)
71
02d27625
MM
72/* Return the function name of a recorded function segment for printing.
73 This function never returns NULL. */
74
75static const char *
23a7fe75 76ftrace_print_function_name (const struct btrace_function *bfun)
02d27625
MM
77{
78 struct minimal_symbol *msym;
79 struct symbol *sym;
80
81 msym = bfun->msym;
82 sym = bfun->sym;
83
84 if (sym != NULL)
85 return SYMBOL_PRINT_NAME (sym);
86
87 if (msym != NULL)
efd66ac6 88 return MSYMBOL_PRINT_NAME (msym);
02d27625
MM
89
90 return "<unknown>";
91}
92
93/* Return the file name of a recorded function segment for printing.
94 This function never returns NULL. */
95
96static const char *
23a7fe75 97ftrace_print_filename (const struct btrace_function *bfun)
02d27625
MM
98{
99 struct symbol *sym;
100 const char *filename;
101
102 sym = bfun->sym;
103
104 if (sym != NULL)
08be3fe3 105 filename = symtab_to_filename_for_display (symbol_symtab (sym));
02d27625
MM
106 else
107 filename = "<unknown>";
108
109 return filename;
110}
111
23a7fe75
MM
112/* Return a string representation of the address of an instruction.
113 This function never returns NULL. */
02d27625 114
23a7fe75
MM
115static const char *
116ftrace_print_insn_addr (const struct btrace_insn *insn)
02d27625 117{
23a7fe75
MM
118 if (insn == NULL)
119 return "<nil>";
120
121 return core_addr_to_string_nz (insn->pc);
02d27625
MM
122}
123
23a7fe75 124/* Print an ftrace debug status message. */
02d27625
MM
125
126static void
23a7fe75 127ftrace_debug (const struct btrace_function *bfun, const char *prefix)
02d27625 128{
23a7fe75
MM
129 const char *fun, *file;
130 unsigned int ibegin, iend;
ce0dfbea 131 int level;
23a7fe75
MM
132
133 fun = ftrace_print_function_name (bfun);
134 file = ftrace_print_filename (bfun);
135 level = bfun->level;
136
23a7fe75
MM
137 ibegin = bfun->insn_offset;
138 iend = ibegin + VEC_length (btrace_insn_s, bfun->insn);
139
ce0dfbea
MM
140 DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, insn = [%u; %u)",
141 prefix, fun, file, level, ibegin, iend);
02d27625
MM
142}
143
69090cee
TW
144/* Return the number of instructions in a given function call segment. */
145
146static unsigned int
147ftrace_call_num_insn (const struct btrace_function* bfun)
148{
149 if (bfun == NULL)
150 return 0;
151
152 /* A gap is always counted as one instruction. */
153 if (bfun->errcode != 0)
154 return 1;
155
156 return VEC_length (btrace_insn_s, bfun->insn);
157}
158
42bfe59e
TW
159/* Return the function segment with the given NUMBER or NULL if no such segment
160 exists. BTINFO is the branch trace information for the current thread. */
161
162static struct btrace_function *
163ftrace_find_call_by_number (const struct btrace_thread_info *btinfo,
164 unsigned int number)
165{
166 if (number == 0 || number > btinfo->functions.size ())
167 return NULL;
168
169 return btinfo->functions[number - 1];
170}
171
23a7fe75
MM
/* Return non-zero if BFUN does not match MFUN and FUN,
   return zero otherwise.  Used to detect that the traced program moved
   into a different function than the one BFUN was created for.  */

static int
ftrace_function_switched (const struct btrace_function *bfun,
			  const struct minimal_symbol *mfun,
			  const struct symbol *fun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  msym = bfun->msym;
  sym = bfun->sym;

  /* If the minimal symbol changed, we certainly switched functions.  */
  if (mfun != NULL && msym != NULL
      && strcmp (MSYMBOL_LINKAGE_NAME (mfun), MSYMBOL_LINKAGE_NAME (msym)) != 0)
    return 1;

  /* If the symbol changed, we certainly switched functions.  */
  if (fun != NULL && sym != NULL)
    {
      const char *bfname, *fname;

      /* Check the function name.  */
      if (strcmp (SYMBOL_LINKAGE_NAME (fun), SYMBOL_LINKAGE_NAME (sym)) != 0)
	return 1;

      /* Check the location of those functions, as well.  Identically-named
	 static functions may exist in different source files.  */
      bfname = symtab_to_fullname (symbol_symtab (sym));
      fname = symtab_to_fullname (symbol_symtab (fun));
      if (filename_cmp (fname, bfname) != 0)
	return 1;
    }

  /* If we lost symbol information, we switched functions.  */
  if (!(msym == NULL && sym == NULL) && mfun == NULL && fun == NULL)
    return 1;

  /* If we gained symbol information, we switched functions.  */
  if (msym == NULL && sym == NULL && !(mfun == NULL && fun == NULL))
    return 1;

  return 0;
}
217
8286623c
TW
/* Allocate and initialize a new branch trace function segment at the end of
   the trace.
   BTINFO is the branch trace information for the current thread.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_function (struct btrace_thread_info *btinfo,
		     struct minimal_symbol *mfun,
		     struct symbol *fun)
{
  struct btrace_function *bfun;

  bfun = XCNEW (struct btrace_function);

  bfun->msym = mfun;
  bfun->sym = fun;

  if (btinfo->functions.empty ())
    {
      /* Start counting at one.  Segment number 0 and instruction offset 0
	 are reserved to mean "none".  */
      bfun->number = 1;
      bfun->insn_offset = 1;
    }
  else
    {
      struct btrace_function *prev = btinfo->functions.back ();

      /* Continue the global numbering from the previous segment.  A gap
	 segment contributes one pseudo-instruction (see
	 ftrace_call_num_insn).  */
      bfun->number = prev->number + 1;
      bfun->insn_offset = prev->insn_offset + ftrace_call_num_insn (prev);
      bfun->level = prev->level;
    }

  btinfo->functions.push_back (bfun);
  return bfun;
}
253
23a7fe75 254/* Update the UP field of a function segment. */
02d27625 255
23a7fe75
MM
256static void
257ftrace_update_caller (struct btrace_function *bfun,
258 struct btrace_function *caller,
259 enum btrace_function_flag flags)
02d27625 260{
42bfe59e 261 if (bfun->up != 0)
23a7fe75 262 ftrace_debug (bfun, "updating caller");
02d27625 263
42bfe59e 264 bfun->up = caller->number;
23a7fe75
MM
265 bfun->flags = flags;
266
267 ftrace_debug (bfun, "set caller");
d87fdac3 268 ftrace_debug (caller, "..to");
23a7fe75
MM
269}
270
/* Fix up the caller for all segments of a function.
   BTINFO is the branch trace information for the current thread.
   Sets CALLER (with FLAGS) as the caller of BFUN and of every other
   segment linked to BFUN via the prev/next chain.  */

static void
ftrace_fixup_caller (struct btrace_thread_info *btinfo,
		     struct btrace_function *bfun,
		     struct btrace_function *caller,
		     enum btrace_function_flag flags)
{
  unsigned int prev, next;

  /* Remember both chain directions before BFUN is overwritten below.  */
  prev = bfun->prev;
  next = bfun->next;
  ftrace_update_caller (bfun, caller, flags);

  /* Update all function segments belonging to the same function.  */
  for (; prev != 0; prev = bfun->prev)
    {
      bfun = ftrace_find_call_by_number (btinfo, prev);
      ftrace_update_caller (bfun, caller, flags);
    }

  for (; next != 0; next = bfun->next)
    {
      bfun = ftrace_find_call_by_number (btinfo, next);
      ftrace_update_caller (bfun, caller, flags);
    }
}
298
8286623c 299/* Add a new function segment for a call at the end of the trace.
17b89b34 300 BTINFO is the branch trace information for the current thread.
23a7fe75
MM
301 MFUN and FUN are the symbol information we have for this function. */
302
303static struct btrace_function *
17b89b34 304ftrace_new_call (struct btrace_thread_info *btinfo,
23a7fe75
MM
305 struct minimal_symbol *mfun,
306 struct symbol *fun)
307{
b54b03bd 308 const unsigned int length = btinfo->functions.size ();
8286623c 309 struct btrace_function *bfun = ftrace_new_function (btinfo, mfun, fun);
23a7fe75 310
42bfe59e 311 bfun->up = length;
31fd9caa 312 bfun->level += 1;
23a7fe75
MM
313
314 ftrace_debug (bfun, "new call");
315
316 return bfun;
317}
318
8286623c 319/* Add a new function segment for a tail call at the end of the trace.
17b89b34 320 BTINFO is the branch trace information for the current thread.
23a7fe75
MM
321 MFUN and FUN are the symbol information we have for this function. */
322
323static struct btrace_function *
17b89b34 324ftrace_new_tailcall (struct btrace_thread_info *btinfo,
23a7fe75
MM
325 struct minimal_symbol *mfun,
326 struct symbol *fun)
327{
b54b03bd 328 const unsigned int length = btinfo->functions.size ();
8286623c 329 struct btrace_function *bfun = ftrace_new_function (btinfo, mfun, fun);
02d27625 330
42bfe59e 331 bfun->up = length;
31fd9caa 332 bfun->level += 1;
23a7fe75 333 bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;
02d27625 334
23a7fe75
MM
335 ftrace_debug (bfun, "new tail call");
336
337 return bfun;
338}
339
d87fdac3 340/* Return the caller of BFUN or NULL if there is none. This function skips
42bfe59e
TW
341 tail calls in the call chain. BTINFO is the branch trace information for
342 the current thread. */
d87fdac3 343static struct btrace_function *
42bfe59e
TW
344ftrace_get_caller (struct btrace_thread_info *btinfo,
345 struct btrace_function *bfun)
d87fdac3 346{
42bfe59e 347 for (; bfun != NULL; bfun = ftrace_find_call_by_number (btinfo, bfun->up))
d87fdac3 348 if ((bfun->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
42bfe59e 349 return ftrace_find_call_by_number (btinfo, bfun->up);
d87fdac3
MM
350
351 return NULL;
352}
353
23a7fe75 354/* Find the innermost caller in the back trace of BFUN with MFUN/FUN
42bfe59e
TW
355 symbol information. BTINFO is the branch trace information for the current
356 thread. */
23a7fe75
MM
357
358static struct btrace_function *
42bfe59e
TW
359ftrace_find_caller (struct btrace_thread_info *btinfo,
360 struct btrace_function *bfun,
23a7fe75
MM
361 struct minimal_symbol *mfun,
362 struct symbol *fun)
363{
42bfe59e 364 for (; bfun != NULL; bfun = ftrace_find_call_by_number (btinfo, bfun->up))
23a7fe75
MM
365 {
366 /* Skip functions with incompatible symbol information. */
367 if (ftrace_function_switched (bfun, mfun, fun))
368 continue;
369
370 /* This is the function segment we're looking for. */
371 break;
372 }
373
374 return bfun;
375}
376
377/* Find the innermost caller in the back trace of BFUN, skipping all
378 function segments that do not end with a call instruction (e.g.
42bfe59e
TW
379 tail calls ending with a jump). BTINFO is the branch trace information for
380 the current thread. */
23a7fe75
MM
381
382static struct btrace_function *
42bfe59e
TW
383ftrace_find_call (struct btrace_thread_info *btinfo,
384 struct btrace_function *bfun)
23a7fe75 385{
42bfe59e 386 for (; bfun != NULL; bfun = ftrace_find_call_by_number (btinfo, bfun->up))
02d27625 387 {
23a7fe75 388 struct btrace_insn *last;
02d27625 389
31fd9caa
MM
390 /* Skip gaps. */
391 if (bfun->errcode != 0)
392 continue;
23a7fe75
MM
393
394 last = VEC_last (btrace_insn_s, bfun->insn);
02d27625 395
7d5c24b3 396 if (last->iclass == BTRACE_INSN_CALL)
23a7fe75
MM
397 break;
398 }
399
400 return bfun;
401}
402
8286623c
TW
403/* Add a continuation segment for a function into which we return at the end of
404 the trace.
17b89b34 405 BTINFO is the branch trace information for the current thread.
23a7fe75
MM
406 MFUN and FUN are the symbol information we have for this function. */
407
408static struct btrace_function *
17b89b34 409ftrace_new_return (struct btrace_thread_info *btinfo,
23a7fe75
MM
410 struct minimal_symbol *mfun,
411 struct symbol *fun)
412{
b54b03bd 413 struct btrace_function *prev = btinfo->functions.back ();
23a7fe75
MM
414 struct btrace_function *bfun, *caller;
415
8286623c 416 bfun = ftrace_new_function (btinfo, mfun, fun);
23a7fe75
MM
417
418 /* It is important to start at PREV's caller. Otherwise, we might find
419 PREV itself, if PREV is a recursive function. */
42bfe59e
TW
420 caller = ftrace_find_call_by_number (btinfo, prev->up);
421 caller = ftrace_find_caller (btinfo, caller, mfun, fun);
23a7fe75
MM
422 if (caller != NULL)
423 {
424 /* The caller of PREV is the preceding btrace function segment in this
425 function instance. */
4aeb0dfc 426 gdb_assert (caller->next == 0);
23a7fe75 427
4aeb0dfc
TW
428 caller->next = bfun->number;
429 bfun->prev = caller->number;
23a7fe75
MM
430
431 /* Maintain the function level. */
432 bfun->level = caller->level;
433
434 /* Maintain the call stack. */
435 bfun->up = caller->up;
436 bfun->flags = caller->flags;
437
438 ftrace_debug (bfun, "new return");
439 }
440 else
441 {
442 /* We did not find a caller. This could mean that something went
443 wrong or that the call is simply not included in the trace. */
02d27625 444
23a7fe75 445 /* Let's search for some actual call. */
42bfe59e
TW
446 caller = ftrace_find_call_by_number (btinfo, prev->up);
447 caller = ftrace_find_call (btinfo, caller);
23a7fe75 448 if (caller == NULL)
02d27625 449 {
23a7fe75
MM
450 /* There is no call in PREV's back trace. We assume that the
451 branch trace did not include it. */
452
259ba1e8
MM
453 /* Let's find the topmost function and add a new caller for it.
454 This should handle a series of initial tail calls. */
42bfe59e
TW
455 while (prev->up != 0)
456 prev = ftrace_find_call_by_number (btinfo, prev->up);
02d27625 457
259ba1e8 458 bfun->level = prev->level - 1;
23a7fe75
MM
459
460 /* Fix up the call stack for PREV. */
4aeb0dfc 461 ftrace_fixup_caller (btinfo, prev, bfun, BFUN_UP_LINKS_TO_RET);
23a7fe75
MM
462
463 ftrace_debug (bfun, "new return - no caller");
464 }
465 else
02d27625 466 {
23a7fe75 467 /* There is a call in PREV's back trace to which we should have
259ba1e8
MM
468 returned but didn't. Let's start a new, separate back trace
469 from PREV's level. */
470 bfun->level = prev->level - 1;
471
472 /* We fix up the back trace for PREV but leave other function segments
473 on the same level as they are.
474 This should handle things like schedule () correctly where we're
475 switching contexts. */
42bfe59e 476 prev->up = bfun->number;
259ba1e8 477 prev->flags = BFUN_UP_LINKS_TO_RET;
02d27625 478
23a7fe75 479 ftrace_debug (bfun, "new return - unknown caller");
02d27625 480 }
23a7fe75
MM
481 }
482
483 return bfun;
484}
485
8286623c 486/* Add a new function segment for a function switch at the end of the trace.
17b89b34 487 BTINFO is the branch trace information for the current thread.
23a7fe75
MM
488 MFUN and FUN are the symbol information we have for this function. */
489
490static struct btrace_function *
17b89b34 491ftrace_new_switch (struct btrace_thread_info *btinfo,
23a7fe75
MM
492 struct minimal_symbol *mfun,
493 struct symbol *fun)
494{
b54b03bd 495 struct btrace_function *prev = btinfo->functions.back ();
23a7fe75
MM
496 struct btrace_function *bfun;
497
4c2c7ac6
MM
498 /* This is an unexplained function switch. We can't really be sure about the
499 call stack, yet the best I can think of right now is to preserve it. */
8286623c 500 bfun = ftrace_new_function (btinfo, mfun, fun);
4c2c7ac6
MM
501 bfun->up = prev->up;
502 bfun->flags = prev->flags;
02d27625 503
23a7fe75
MM
504 ftrace_debug (bfun, "new switch");
505
506 return bfun;
507}
508
8286623c
TW
509/* Add a new function segment for a gap in the trace due to a decode error at
510 the end of the trace.
17b89b34 511 BTINFO is the branch trace information for the current thread.
31fd9caa
MM
512 ERRCODE is the format-specific error code. */
513
514static struct btrace_function *
8286623c 515ftrace_new_gap (struct btrace_thread_info *btinfo, int errcode)
31fd9caa
MM
516{
517 struct btrace_function *bfun;
518
b54b03bd 519 if (btinfo->functions.empty ())
8286623c 520 bfun = ftrace_new_function (btinfo, NULL, NULL);
b54b03bd
TW
521 else
522 {
523 /* We hijack the previous function segment if it was empty. */
524 bfun = btinfo->functions.back ();
525 if (bfun->errcode != 0 || !VEC_empty (btrace_insn_s, bfun->insn))
526 bfun = ftrace_new_function (btinfo, NULL, NULL);
527 }
31fd9caa
MM
528
529 bfun->errcode = errcode;
530
531 ftrace_debug (bfun, "new gap");
532
533 return bfun;
534}
535
8286623c
TW
536/* Update the current function segment at the end of the trace in BTINFO with
537 respect to the instruction at PC. This may create new function segments.
23a7fe75
MM
538 Return the chronologically latest function segment, never NULL. */
539
540static struct btrace_function *
8286623c 541ftrace_update_function (struct btrace_thread_info *btinfo, CORE_ADDR pc)
23a7fe75
MM
542{
543 struct bound_minimal_symbol bmfun;
544 struct minimal_symbol *mfun;
545 struct symbol *fun;
546 struct btrace_insn *last;
b54b03bd 547 struct btrace_function *bfun;
23a7fe75
MM
548
549 /* Try to determine the function we're in. We use both types of symbols
550 to avoid surprises when we sometimes get a full symbol and sometimes
551 only a minimal symbol. */
552 fun = find_pc_function (pc);
553 bmfun = lookup_minimal_symbol_by_pc (pc);
554 mfun = bmfun.minsym;
555
556 if (fun == NULL && mfun == NULL)
557 DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));
558
b54b03bd
TW
559 /* If we didn't have a function, we create one. */
560 if (btinfo->functions.empty ())
561 return ftrace_new_function (btinfo, mfun, fun);
562
563 /* If we had a gap before, we create a function. */
564 bfun = btinfo->functions.back ();
565 if (bfun->errcode != 0)
8286623c 566 return ftrace_new_function (btinfo, mfun, fun);
23a7fe75
MM
567
568 /* Check the last instruction, if we have one.
569 We do this check first, since it allows us to fill in the call stack
570 links in addition to the normal flow links. */
571 last = NULL;
572 if (!VEC_empty (btrace_insn_s, bfun->insn))
573 last = VEC_last (btrace_insn_s, bfun->insn);
574
575 if (last != NULL)
576 {
7d5c24b3
MM
577 switch (last->iclass)
578 {
579 case BTRACE_INSN_RETURN:
986b6601
MM
580 {
581 const char *fname;
582
583 /* On some systems, _dl_runtime_resolve returns to the resolved
584 function instead of jumping to it. From our perspective,
585 however, this is a tailcall.
586 If we treated it as return, we wouldn't be able to find the
587 resolved function in our stack back trace. Hence, we would
588 lose the current stack back trace and start anew with an empty
589 back trace. When the resolved function returns, we would then
590 create a stack back trace with the same function names but
591 different frame id's. This will confuse stepping. */
592 fname = ftrace_print_function_name (bfun);
593 if (strcmp (fname, "_dl_runtime_resolve") == 0)
8286623c 594 return ftrace_new_tailcall (btinfo, mfun, fun);
986b6601 595
8286623c 596 return ftrace_new_return (btinfo, mfun, fun);
986b6601 597 }
23a7fe75 598
7d5c24b3
MM
599 case BTRACE_INSN_CALL:
600 /* Ignore calls to the next instruction. They are used for PIC. */
601 if (last->pc + last->size == pc)
602 break;
23a7fe75 603
8286623c 604 return ftrace_new_call (btinfo, mfun, fun);
23a7fe75 605
7d5c24b3
MM
606 case BTRACE_INSN_JUMP:
607 {
608 CORE_ADDR start;
23a7fe75 609
7d5c24b3 610 start = get_pc_function_start (pc);
23a7fe75 611
2dfdb47a
MM
612 /* A jump to the start of a function is (typically) a tail call. */
613 if (start == pc)
8286623c 614 return ftrace_new_tailcall (btinfo, mfun, fun);
2dfdb47a 615
7d5c24b3 616 /* If we can't determine the function for PC, we treat a jump at
2dfdb47a
MM
617 the end of the block as tail call if we're switching functions
618 and as an intra-function branch if we don't. */
619 if (start == 0 && ftrace_function_switched (bfun, mfun, fun))
8286623c 620 return ftrace_new_tailcall (btinfo, mfun, fun);
2dfdb47a
MM
621
622 break;
7d5c24b3 623 }
02d27625 624 }
23a7fe75
MM
625 }
626
627 /* Check if we're switching functions for some other reason. */
628 if (ftrace_function_switched (bfun, mfun, fun))
629 {
630 DEBUG_FTRACE ("switching from %s in %s at %s",
631 ftrace_print_insn_addr (last),
632 ftrace_print_function_name (bfun),
633 ftrace_print_filename (bfun));
02d27625 634
8286623c 635 return ftrace_new_switch (btinfo, mfun, fun);
23a7fe75
MM
636 }
637
638 return bfun;
639}
640
23a7fe75
MM
/* Add the instruction INSN to BFUN's instruction vector.  */

static void
ftrace_update_insns (struct btrace_function *bfun,
		     const struct btrace_insn *insn)
{
  VEC_safe_push (btrace_insn_s, bfun->insn, insn);

  /* Only trace per-instruction updates at the higher debug verbosity.  */
  if (record_debug > 1)
    ftrace_debug (bfun, "update insn");
}
652
7d5c24b3
MM
653/* Classify the instruction at PC. */
654
655static enum btrace_insn_class
656ftrace_classify_insn (struct gdbarch *gdbarch, CORE_ADDR pc)
657{
7d5c24b3
MM
658 enum btrace_insn_class iclass;
659
660 iclass = BTRACE_INSN_OTHER;
492d29ea 661 TRY
7d5c24b3
MM
662 {
663 if (gdbarch_insn_is_call (gdbarch, pc))
664 iclass = BTRACE_INSN_CALL;
665 else if (gdbarch_insn_is_ret (gdbarch, pc))
666 iclass = BTRACE_INSN_RETURN;
667 else if (gdbarch_insn_is_jump (gdbarch, pc))
668 iclass = BTRACE_INSN_JUMP;
669 }
492d29ea
PA
670 CATCH (error, RETURN_MASK_ERROR)
671 {
672 }
673 END_CATCH
7d5c24b3
MM
674
675 return iclass;
676}
677
d87fdac3
MM
678/* Try to match the back trace at LHS to the back trace at RHS. Returns the
679 number of matching function segments or zero if the back traces do not
42bfe59e 680 match. BTINFO is the branch trace information for the current thread. */
d87fdac3
MM
681
682static int
42bfe59e
TW
683ftrace_match_backtrace (struct btrace_thread_info *btinfo,
684 struct btrace_function *lhs,
d87fdac3
MM
685 struct btrace_function *rhs)
686{
687 int matches;
688
689 for (matches = 0; lhs != NULL && rhs != NULL; ++matches)
690 {
691 if (ftrace_function_switched (lhs, rhs->msym, rhs->sym))
692 return 0;
693
42bfe59e
TW
694 lhs = ftrace_get_caller (btinfo, lhs);
695 rhs = ftrace_get_caller (btinfo, rhs);
d87fdac3
MM
696 }
697
698 return matches;
699}
700
eb8f2b9c
TW
701/* Add ADJUSTMENT to the level of BFUN and succeeding function segments.
702 BTINFO is the branch trace information for the current thread. */
d87fdac3
MM
703
704static void
eb8f2b9c
TW
705ftrace_fixup_level (struct btrace_thread_info *btinfo,
706 struct btrace_function *bfun, int adjustment)
d87fdac3
MM
707{
708 if (adjustment == 0)
709 return;
710
711 DEBUG_FTRACE ("fixup level (%+d)", adjustment);
712 ftrace_debug (bfun, "..bfun");
713
eb8f2b9c
TW
714 while (bfun != NULL)
715 {
716 bfun->level += adjustment;
717 bfun = ftrace_find_call_by_number (btinfo, bfun->number + 1);
718 }
d87fdac3
MM
719}
720
/* Recompute the global level offset.  Traverse the function trace and compute
   the global level offset as the negative of the minimal function level.  */

static void
ftrace_compute_global_level_offset (struct btrace_thread_info *btinfo)
{
  int level = INT_MAX;

  if (btinfo == NULL)
    return;

  if (btinfo->functions.empty ())
    return;

  /* Scan all segments except the last one (hence SIZE - 1).  */
  unsigned int length = btinfo->functions.size() - 1;
  for (unsigned int i = 0; i < length; ++i)
    level = std::min (level, btinfo->functions[i]->level);

  /* The last function segment contains the current instruction, which is not
     really part of the trace.  If it contains just this one instruction, we
     ignore the segment.  */
  struct btrace_function *last = btinfo->functions.back();
  if (VEC_length (btrace_insn_s, last->insn) != 1)
    level = std::min (level, last->level);

  DEBUG_FTRACE ("setting global level offset: %d", -level);
  btinfo->level = -level;
}
749
/* Connect the function segments PREV and NEXT in a bottom-to-top walk as in
   ftrace_connect_backtrace.  BTINFO is the branch trace information for the
   current thread.  */

static void
ftrace_connect_bfun (struct btrace_thread_info *btinfo,
		     struct btrace_function *prev,
		     struct btrace_function *next)
{
  DEBUG_FTRACE ("connecting...");
  ftrace_debug (prev, "..prev");
  ftrace_debug (next, "..next");

  /* The function segments are not yet connected.  */
  gdb_assert (prev->next == 0);
  gdb_assert (next->prev == 0);

  prev->next = next->number;
  next->prev = prev->number;

  /* We may have moved NEXT to a different function level.  */
  ftrace_fixup_level (btinfo, next, prev->level - next->level);

  /* If we run out of back trace for one, let's use the other's.  */
  if (prev->up == 0)
    {
      const btrace_function_flags flags = next->flags;

      next = ftrace_find_call_by_number (btinfo, next->up);
      if (next != NULL)
	{
	  DEBUG_FTRACE ("using next's callers");
	  ftrace_fixup_caller (btinfo, prev, next, flags);
	}
    }
  else if (next->up == 0)
    {
      const btrace_function_flags flags = prev->flags;

      prev = ftrace_find_call_by_number (btinfo, prev->up);
      if (prev != NULL)
	{
	  DEBUG_FTRACE ("using prev's callers");
	  ftrace_fixup_caller (btinfo, next, prev, flags);
	}
    }
  else
    {
      /* PREV may have a tailcall caller, NEXT can't.  If it does, fixup the up
	 link to add the tail callers to NEXT's back trace.

	 This removes NEXT->UP from NEXT's back trace.  It will be added back
	 when connecting NEXT and PREV's callers - provided they exist.

	 If PREV's back trace consists of a series of tail calls without an
	 actual call, there will be no further connection and NEXT's caller will
	 be removed for good.  To catch this case, we handle it here and connect
	 the top of PREV's back trace to NEXT's caller.  */
      if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
	{
	  struct btrace_function *caller;
	  btrace_function_flags next_flags, prev_flags;

	  /* We checked NEXT->UP above so CALLER can't be NULL.  */
	  caller = ftrace_find_call_by_number (btinfo, next->up);
	  next_flags = next->flags;
	  prev_flags = prev->flags;

	  DEBUG_FTRACE ("adding prev's tail calls to next");

	  prev = ftrace_find_call_by_number (btinfo, prev->up);
	  ftrace_fixup_caller (btinfo, next, prev, prev_flags);

	  for (; prev != NULL; prev = ftrace_find_call_by_number (btinfo,
								  prev->up))
	    {
	      /* At the end of PREV's back trace, continue with CALLER.  */
	      if (prev->up == 0)
		{
		  DEBUG_FTRACE ("fixing up link for tailcall chain");
		  ftrace_debug (prev, "..top");
		  ftrace_debug (caller, "..up");

		  ftrace_fixup_caller (btinfo, prev, caller, next_flags);

		  /* If we skipped any tail calls, this may move CALLER to a
		     different function level.

		     Note that changing CALLER's level is only OK because we
		     know that this is the last iteration of the bottom-to-top
		     walk in ftrace_connect_backtrace.

		     Otherwise we will fix up CALLER's level when we connect it
		     to PREV's caller in the next iteration.  */
		  ftrace_fixup_level (btinfo, caller,
				      prev->level - caller->level - 1);
		  break;
		}

	      /* There's nothing to do if we find a real call.  */
	      if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
		{
		  DEBUG_FTRACE ("will fix up link in next iteration");
		  break;
		}
	    }
	}
    }
}
859
/* Connect function segments on the same level in the back trace at LHS and RHS.
   The back traces at LHS and RHS are expected to match according to
   ftrace_match_backtrace.  BTINFO is the branch trace information for the
   current thread.  */

static void
ftrace_connect_backtrace (struct btrace_thread_info *btinfo,
			  struct btrace_function *lhs,
			  struct btrace_function *rhs)
{
  while (lhs != NULL && rhs != NULL)
    {
      struct btrace_function *prev, *next;

      gdb_assert (!ftrace_function_switched (lhs, rhs->msym, rhs->sym));

      /* Connecting LHS and RHS may change the up link.  Advance the walk
	 before connecting the current pair.  */
      prev = lhs;
      next = rhs;

      lhs = ftrace_get_caller (btinfo, lhs);
      rhs = ftrace_get_caller (btinfo, rhs);

      ftrace_connect_bfun (btinfo, prev, next);
    }
}
886
/* Bridge the gap between two function segments left and right of a gap if their
   respective back traces match in at least MIN_MATCHES functions.  BTINFO is
   the branch trace information for the current thread.

   Returns non-zero if the gap could be bridged, zero otherwise.  */

static int
ftrace_bridge_gap (struct btrace_thread_info *btinfo,
		   struct btrace_function *lhs, struct btrace_function *rhs,
		   int min_matches)
{
  struct btrace_function *best_l, *best_r, *cand_l, *cand_r;
  int best_matches;

  DEBUG_FTRACE ("checking gap at insn %u (req matches: %d)",
		rhs->insn_offset - 1, min_matches);

  best_matches = 0;
  best_l = NULL;
  best_r = NULL;

  /* We search the back traces of LHS and RHS for valid connections and connect
     the two function segments that give the longest combined back trace.  */

  for (cand_l = lhs; cand_l != NULL;
       cand_l = ftrace_get_caller (btinfo, cand_l))
    for (cand_r = rhs; cand_r != NULL;
	 cand_r = ftrace_get_caller (btinfo, cand_r))
      {
	int matches;

	matches = ftrace_match_backtrace (btinfo, cand_l, cand_r);
	if (best_matches < matches)
	  {
	    best_matches = matches;
	    best_l = cand_l;
	    best_r = cand_r;
	  }
      }

  /* We need at least MIN_MATCHES matches.  */
  gdb_assert (min_matches > 0);
  if (best_matches < min_matches)
    return 0;

  DEBUG_FTRACE ("..matches: %d", best_matches);

  /* We will fix up the level of BEST_R and succeeding function segments such
     that BEST_R's level matches BEST_L's when we connect BEST_L to BEST_R.

     This will ignore the level of RHS and following if BEST_R != RHS.  I.e. if
     BEST_R is a successor of RHS in the back trace of RHS (phases 1 and 3).

     To catch this, we already fix up the level here where we can start at RHS
     instead of at BEST_R.  We will ignore the level fixup when connecting
     BEST_L to BEST_R as they will already be on the same level.  */
  ftrace_fixup_level (btinfo, rhs, best_l->level - best_r->level);

  ftrace_connect_backtrace (btinfo, best_l, best_r);

  return best_matches;
}
949
950/* Try to bridge gaps due to overflow or decode errors by connecting the
951 function segments that are separated by the gap. */
952
953static void
954btrace_bridge_gaps (struct thread_info *tp, VEC (bfun_s) **gaps)
955{
4aeb0dfc 956 struct btrace_thread_info *btinfo = &tp->btrace;
d87fdac3
MM
957 VEC (bfun_s) *remaining;
958 struct cleanup *old_chain;
959 int min_matches;
960
961 DEBUG ("bridge gaps");
962
963 remaining = NULL;
964 old_chain = make_cleanup (VEC_cleanup (bfun_s), &remaining);
965
966 /* We require a minimum amount of matches for bridging a gap. The number of
967 required matches will be lowered with each iteration.
968
969 The more matches the higher our confidence that the bridging is correct.
970 For big gaps or small traces, however, it may not be feasible to require a
971 high number of matches. */
972 for (min_matches = 5; min_matches > 0; --min_matches)
973 {
974 /* Let's try to bridge as many gaps as we can. In some cases, we need to
975 skip a gap and revisit it again after we closed later gaps. */
976 while (!VEC_empty (bfun_s, *gaps))
977 {
978 struct btrace_function *gap;
979 unsigned int idx;
980
981 for (idx = 0; VEC_iterate (bfun_s, *gaps, idx, gap); ++idx)
982 {
983 struct btrace_function *lhs, *rhs;
984 int bridged;
985
986 /* We may have a sequence of gaps if we run from one error into
987 the next as we try to re-sync onto the trace stream. Ignore
988 all but the leftmost gap in such a sequence.
989
990 Also ignore gaps at the beginning of the trace. */
eb8f2b9c 991 lhs = ftrace_find_call_by_number (btinfo, gap->number - 1);
d87fdac3
MM
992 if (lhs == NULL || lhs->errcode != 0)
993 continue;
994
995 /* Skip gaps to the right. */
eb8f2b9c
TW
996 rhs = ftrace_find_call_by_number (btinfo, gap->number + 1);
997 while (rhs != NULL && rhs->errcode != 0)
998 rhs = ftrace_find_call_by_number (btinfo, rhs->number + 1);
d87fdac3
MM
999
1000 /* Ignore gaps at the end of the trace. */
1001 if (rhs == NULL)
1002 continue;
1003
eb8f2b9c 1004 bridged = ftrace_bridge_gap (btinfo, lhs, rhs, min_matches);
d87fdac3
MM
1005
1006 /* Keep track of gaps we were not able to bridge and try again.
1007 If we just pushed them to the end of GAPS we would risk an
1008 infinite loop in case we simply cannot bridge a gap. */
1009 if (bridged == 0)
1010 VEC_safe_push (bfun_s, remaining, gap);
1011 }
1012
1013 /* Let's see if we made any progress. */
1014 if (VEC_length (bfun_s, remaining) == VEC_length (bfun_s, *gaps))
1015 break;
1016
1017 VEC_free (bfun_s, *gaps);
1018
1019 *gaps = remaining;
1020 remaining = NULL;
1021 }
1022
1023 /* We get here if either GAPS is empty or if GAPS equals REMAINING. */
1024 if (VEC_empty (bfun_s, *gaps))
1025 break;
1026
1027 VEC_free (bfun_s, remaining);
1028 }
1029
1030 do_cleanups (old_chain);
1031
1032 /* We may omit this in some cases. Not sure it is worth the extra
1033 complication, though. */
eb8f2b9c 1034 ftrace_compute_global_level_offset (btinfo);
d87fdac3
MM
1035}
1036
734b0e4b 1037/* Compute the function branch trace from BTS trace. */
23a7fe75
MM
1038
1039static void
76235df1 1040btrace_compute_ftrace_bts (struct thread_info *tp,
d87fdac3
MM
1041 const struct btrace_data_bts *btrace,
1042 VEC (bfun_s) **gaps)
23a7fe75 1043{
76235df1 1044 struct btrace_thread_info *btinfo;
23a7fe75 1045 struct gdbarch *gdbarch;
d87fdac3 1046 unsigned int blk;
23a7fe75
MM
1047 int level;
1048
23a7fe75 1049 gdbarch = target_gdbarch ();
76235df1 1050 btinfo = &tp->btrace;
734b0e4b 1051 blk = VEC_length (btrace_block_s, btrace->blocks);
23a7fe75 1052
b54b03bd
TW
1053 if (btinfo->functions.empty ())
1054 level = INT_MAX;
1055 else
1056 level = -btinfo->level;
1057
23a7fe75
MM
1058 while (blk != 0)
1059 {
1060 btrace_block_s *block;
1061 CORE_ADDR pc;
1062
1063 blk -= 1;
1064
734b0e4b 1065 block = VEC_index (btrace_block_s, btrace->blocks, blk);
23a7fe75
MM
1066 pc = block->begin;
1067
1068 for (;;)
1069 {
b54b03bd 1070 struct btrace_function *bfun;
7d5c24b3 1071 struct btrace_insn insn;
23a7fe75
MM
1072 int size;
1073
1074 /* We should hit the end of the block. Warn if we went too far. */
1075 if (block->end < pc)
1076 {
b61ce85c 1077 /* Indicate the gap in the trace. */
b54b03bd 1078 bfun = ftrace_new_gap (btinfo, BDE_BTS_OVERFLOW);
b61ce85c 1079
b54b03bd 1080 VEC_safe_push (bfun_s, *gaps, bfun);
b61ce85c
MM
1081
1082 warning (_("Recorded trace may be corrupted at instruction "
b54b03bd 1083 "%u (pc = %s)."), bfun->insn_offset - 1,
b61ce85c 1084 core_addr_to_string_nz (pc));
63ab433e 1085
23a7fe75
MM
1086 break;
1087 }
1088
b54b03bd 1089 bfun = ftrace_update_function (btinfo, pc);
23a7fe75 1090
8710b709
MM
1091 /* Maintain the function level offset.
1092 For all but the last block, we do it here. */
1093 if (blk != 0)
b54b03bd 1094 level = std::min (level, bfun->level);
23a7fe75 1095
7d5c24b3 1096 size = 0;
492d29ea
PA
1097 TRY
1098 {
1099 size = gdb_insn_length (gdbarch, pc);
1100 }
1101 CATCH (error, RETURN_MASK_ERROR)
1102 {
1103 }
1104 END_CATCH
7d5c24b3
MM
1105
1106 insn.pc = pc;
1107 insn.size = size;
1108 insn.iclass = ftrace_classify_insn (gdbarch, pc);
da8c46d2 1109 insn.flags = 0;
7d5c24b3 1110
b54b03bd 1111 ftrace_update_insns (bfun, &insn);
23a7fe75
MM
1112
1113 /* We're done once we pushed the instruction at the end. */
1114 if (block->end == pc)
1115 break;
1116
7d5c24b3 1117 /* We can't continue if we fail to compute the size. */
23a7fe75
MM
1118 if (size <= 0)
1119 {
31fd9caa
MM
1120 /* Indicate the gap in the trace. We just added INSN so we're
1121 not at the beginning. */
b54b03bd 1122 bfun = ftrace_new_gap (btinfo, BDE_BTS_INSN_SIZE);
d87fdac3 1123
b54b03bd 1124 VEC_safe_push (bfun_s, *gaps, bfun);
31fd9caa 1125
63ab433e 1126 warning (_("Recorded trace may be incomplete at instruction %u "
b54b03bd 1127 "(pc = %s)."), bfun->insn_offset - 1,
63ab433e
MM
1128 core_addr_to_string_nz (pc));
1129
23a7fe75
MM
1130 break;
1131 }
1132
1133 pc += size;
8710b709
MM
1134
1135 /* Maintain the function level offset.
1136 For the last block, we do it here to not consider the last
1137 instruction.
1138 Since the last instruction corresponds to the current instruction
1139 and is not really part of the execution history, it shouldn't
1140 affect the level. */
1141 if (blk == 0)
b54b03bd 1142 level = std::min (level, bfun->level);
23a7fe75 1143 }
02d27625
MM
1144 }
1145
23a7fe75
MM
1146 /* LEVEL is the minimal function level of all btrace function segments.
1147 Define the global level offset to -LEVEL so all function levels are
1148 normalized to start at zero. */
1149 btinfo->level = -level;
02d27625
MM
1150}
1151
b20a6524
MM
1152#if defined (HAVE_LIBIPT)
1153
1154static enum btrace_insn_class
1155pt_reclassify_insn (enum pt_insn_class iclass)
1156{
1157 switch (iclass)
1158 {
1159 case ptic_call:
1160 return BTRACE_INSN_CALL;
1161
1162 case ptic_return:
1163 return BTRACE_INSN_RETURN;
1164
1165 case ptic_jump:
1166 return BTRACE_INSN_JUMP;
1167
1168 default:
1169 return BTRACE_INSN_OTHER;
1170 }
1171}
1172
da8c46d2
MM
1173/* Return the btrace instruction flags for INSN. */
1174
d7abe101 1175static btrace_insn_flags
b5c36682 1176pt_btrace_insn_flags (const struct pt_insn &insn)
da8c46d2 1177{
d7abe101 1178 btrace_insn_flags flags = 0;
da8c46d2 1179
b5c36682 1180 if (insn.speculative)
da8c46d2
MM
1181 flags |= BTRACE_INSN_FLAG_SPECULATIVE;
1182
1183 return flags;
1184}
1185
b5c36682
PA
1186/* Return the btrace instruction for INSN. */
1187
1188static btrace_insn
1189pt_btrace_insn (const struct pt_insn &insn)
1190{
1191 return {(CORE_ADDR) insn.ip, (gdb_byte) insn.size,
1192 pt_reclassify_insn (insn.iclass),
1193 pt_btrace_insn_flags (insn)};
1194}
1195
1196
17b89b34 1197/* Add function branch trace to BTINFO using DECODER. */
b20a6524
MM
1198
1199static void
17b89b34
TW
1200ftrace_add_pt (struct btrace_thread_info *btinfo,
1201 struct pt_insn_decoder *decoder,
b54b03bd 1202 int *plevel,
d87fdac3 1203 VEC (bfun_s) **gaps)
b20a6524 1204{
b54b03bd 1205 struct btrace_function *bfun;
b20a6524 1206 uint64_t offset;
63ab433e 1207 int errcode;
b20a6524 1208
b20a6524
MM
1209 for (;;)
1210 {
b20a6524
MM
1211 struct pt_insn insn;
1212
1213 errcode = pt_insn_sync_forward (decoder);
1214 if (errcode < 0)
1215 {
1216 if (errcode != -pte_eos)
bc504a31 1217 warning (_("Failed to synchronize onto the Intel Processor "
b20a6524
MM
1218 "Trace stream: %s."), pt_errstr (pt_errcode (errcode)));
1219 break;
1220 }
1221
b20a6524
MM
1222 for (;;)
1223 {
1224 errcode = pt_insn_next (decoder, &insn, sizeof(insn));
1225 if (errcode < 0)
1226 break;
1227
1228 /* Look for gaps in the trace - unless we're at the beginning. */
b54b03bd 1229 if (!btinfo->functions.empty ())
b20a6524
MM
1230 {
1231 /* Tracing is disabled and re-enabled each time we enter the
1232 kernel. Most times, we continue from the same instruction we
1233 stopped before. This is indicated via the RESUMED instruction
1234 flag. The ENABLED instruction flag means that we continued
1235 from some other instruction. Indicate this as a trace gap. */
1236 if (insn.enabled)
63ab433e 1237 {
b54b03bd 1238 bfun = ftrace_new_gap (btinfo, BDE_PT_DISABLED);
d87fdac3 1239
b54b03bd 1240 VEC_safe_push (bfun_s, *gaps, bfun);
63ab433e
MM
1241
1242 pt_insn_get_offset (decoder, &offset);
1243
1244 warning (_("Non-contiguous trace at instruction %u (offset "
1245 "= 0x%" PRIx64 ", pc = 0x%" PRIx64 ")."),
b54b03bd 1246 bfun->insn_offset - 1, offset, insn.ip);
63ab433e 1247 }
b61ce85c 1248 }
b20a6524 1249
b61ce85c
MM
1250 /* Indicate trace overflows. */
1251 if (insn.resynced)
1252 {
b54b03bd 1253 bfun = ftrace_new_gap (btinfo, BDE_PT_OVERFLOW);
63ab433e 1254
b54b03bd 1255 VEC_safe_push (bfun_s, *gaps, bfun);
63ab433e 1256
b61ce85c
MM
1257 pt_insn_get_offset (decoder, &offset);
1258
1259 warning (_("Overflow at instruction %u (offset = 0x%" PRIx64
b54b03bd 1260 ", pc = 0x%" PRIx64 ")."), bfun->insn_offset - 1,
b61ce85c 1261 offset, insn.ip);
b20a6524
MM
1262 }
1263
b54b03bd 1264 bfun = ftrace_update_function (btinfo, insn.ip);
b20a6524
MM
1265
1266 /* Maintain the function level offset. */
b54b03bd 1267 *plevel = std::min (*plevel, bfun->level);
b20a6524 1268
b5c36682 1269 btrace_insn btinsn = pt_btrace_insn (insn);
b54b03bd 1270 ftrace_update_insns (bfun, &btinsn);
b20a6524
MM
1271 }
1272
1273 if (errcode == -pte_eos)
1274 break;
1275
b20a6524 1276 /* Indicate the gap in the trace. */
b54b03bd 1277 bfun = ftrace_new_gap (btinfo, errcode);
d87fdac3 1278
b54b03bd 1279 VEC_safe_push (bfun_s, *gaps, bfun);
b20a6524 1280
63ab433e
MM
1281 pt_insn_get_offset (decoder, &offset);
1282
1283 warning (_("Decode error (%d) at instruction %u (offset = 0x%" PRIx64
b54b03bd 1284 ", pc = 0x%" PRIx64 "): %s."), errcode, bfun->insn_offset - 1,
63ab433e
MM
1285 offset, insn.ip, pt_errstr (pt_errcode (errcode)));
1286 }
b20a6524
MM
1287}
1288
1289/* A callback function to allow the trace decoder to read the inferior's
1290 memory. */
1291
1292static int
1293btrace_pt_readmem_callback (gdb_byte *buffer, size_t size,
80a2b330 1294 const struct pt_asid *asid, uint64_t pc,
b20a6524
MM
1295 void *context)
1296{
43368e1d 1297 int result, errcode;
b20a6524 1298
43368e1d 1299 result = (int) size;
b20a6524
MM
1300 TRY
1301 {
80a2b330 1302 errcode = target_read_code ((CORE_ADDR) pc, buffer, size);
b20a6524 1303 if (errcode != 0)
43368e1d 1304 result = -pte_nomap;
b20a6524
MM
1305 }
1306 CATCH (error, RETURN_MASK_ERROR)
1307 {
43368e1d 1308 result = -pte_nomap;
b20a6524
MM
1309 }
1310 END_CATCH
1311
43368e1d 1312 return result;
b20a6524
MM
1313}
1314
1315/* Translate the vendor from one enum to another. */
1316
1317static enum pt_cpu_vendor
1318pt_translate_cpu_vendor (enum btrace_cpu_vendor vendor)
1319{
1320 switch (vendor)
1321 {
1322 default:
1323 return pcv_unknown;
1324
1325 case CV_INTEL:
1326 return pcv_intel;
1327 }
1328}
1329
1330/* Finalize the function branch trace after decode. */
1331
1332static void btrace_finalize_ftrace_pt (struct pt_insn_decoder *decoder,
1333 struct thread_info *tp, int level)
1334{
1335 pt_insn_free_decoder (decoder);
1336
1337 /* LEVEL is the minimal function level of all btrace function segments.
1338 Define the global level offset to -LEVEL so all function levels are
1339 normalized to start at zero. */
1340 tp->btrace.level = -level;
1341
1342 /* Add a single last instruction entry for the current PC.
1343 This allows us to compute the backtrace at the current PC using both
1344 standard unwind and btrace unwind.
1345 This extra entry is ignored by all record commands. */
1346 btrace_add_pc (tp);
1347}
1348
bc504a31
PA
1349/* Compute the function branch trace from Intel Processor Trace
1350 format. */
b20a6524
MM
1351
1352static void
1353btrace_compute_ftrace_pt (struct thread_info *tp,
d87fdac3
MM
1354 const struct btrace_data_pt *btrace,
1355 VEC (bfun_s) **gaps)
b20a6524
MM
1356{
1357 struct btrace_thread_info *btinfo;
1358 struct pt_insn_decoder *decoder;
1359 struct pt_config config;
1360 int level, errcode;
1361
1362 if (btrace->size == 0)
1363 return;
1364
1365 btinfo = &tp->btrace;
b54b03bd
TW
1366 if (btinfo->functions.empty ())
1367 level = INT_MAX;
1368 else
1369 level = -btinfo->level;
b20a6524
MM
1370
1371 pt_config_init(&config);
1372 config.begin = btrace->data;
1373 config.end = btrace->data + btrace->size;
1374
1375 config.cpu.vendor = pt_translate_cpu_vendor (btrace->config.cpu.vendor);
1376 config.cpu.family = btrace->config.cpu.family;
1377 config.cpu.model = btrace->config.cpu.model;
1378 config.cpu.stepping = btrace->config.cpu.stepping;
1379
1380 errcode = pt_cpu_errata (&config.errata, &config.cpu);
1381 if (errcode < 0)
bc504a31 1382 error (_("Failed to configure the Intel Processor Trace decoder: %s."),
b20a6524
MM
1383 pt_errstr (pt_errcode (errcode)));
1384
1385 decoder = pt_insn_alloc_decoder (&config);
1386 if (decoder == NULL)
bc504a31 1387 error (_("Failed to allocate the Intel Processor Trace decoder."));
b20a6524
MM
1388
1389 TRY
1390 {
1391 struct pt_image *image;
1392
1393 image = pt_insn_get_image(decoder);
1394 if (image == NULL)
bc504a31 1395 error (_("Failed to configure the Intel Processor Trace decoder."));
b20a6524
MM
1396
1397 errcode = pt_image_set_callback(image, btrace_pt_readmem_callback, NULL);
1398 if (errcode < 0)
bc504a31 1399 error (_("Failed to configure the Intel Processor Trace decoder: "
b20a6524
MM
1400 "%s."), pt_errstr (pt_errcode (errcode)));
1401
b54b03bd 1402 ftrace_add_pt (btinfo, decoder, &level, gaps);
b20a6524
MM
1403 }
1404 CATCH (error, RETURN_MASK_ALL)
1405 {
1406 /* Indicate a gap in the trace if we quit trace processing. */
b54b03bd 1407 if (error.reason == RETURN_QUIT && !btinfo->functions.empty ())
b20a6524 1408 {
b54b03bd 1409 struct btrace_function *bfun;
d87fdac3 1410
b54b03bd
TW
1411 bfun = ftrace_new_gap (btinfo, BDE_PT_USER_QUIT);
1412
1413 VEC_safe_push (bfun_s, *gaps, bfun);
b20a6524
MM
1414 }
1415
1416 btrace_finalize_ftrace_pt (decoder, tp, level);
1417
1418 throw_exception (error);
1419 }
1420 END_CATCH
1421
1422 btrace_finalize_ftrace_pt (decoder, tp, level);
1423}
1424
1425#else /* defined (HAVE_LIBIPT) */
1426
1427static void
1428btrace_compute_ftrace_pt (struct thread_info *tp,
d87fdac3
MM
1429 const struct btrace_data_pt *btrace,
1430 VEC (bfun_s) **gaps)
b20a6524
MM
1431{
1432 internal_error (__FILE__, __LINE__, _("Unexpected branch trace format."));
1433}
1434
1435#endif /* defined (HAVE_LIBIPT) */
1436
734b0e4b
MM
1437/* Compute the function branch trace from a block branch trace BTRACE for
1438 a thread given by BTINFO. */
1439
1440static void
d87fdac3
MM
1441btrace_compute_ftrace_1 (struct thread_info *tp, struct btrace_data *btrace,
1442 VEC (bfun_s) **gaps)
734b0e4b
MM
1443{
1444 DEBUG ("compute ftrace");
1445
1446 switch (btrace->format)
1447 {
1448 case BTRACE_FORMAT_NONE:
1449 return;
1450
1451 case BTRACE_FORMAT_BTS:
d87fdac3 1452 btrace_compute_ftrace_bts (tp, &btrace->variant.bts, gaps);
734b0e4b 1453 return;
b20a6524
MM
1454
1455 case BTRACE_FORMAT_PT:
d87fdac3 1456 btrace_compute_ftrace_pt (tp, &btrace->variant.pt, gaps);
b20a6524 1457 return;
734b0e4b
MM
1458 }
1459
1460 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
1461}
1462
d87fdac3
MM
1463static void
1464btrace_finalize_ftrace (struct thread_info *tp, VEC (bfun_s) **gaps)
1465{
1466 if (!VEC_empty (bfun_s, *gaps))
1467 {
1468 tp->btrace.ngaps += VEC_length (bfun_s, *gaps);
1469 btrace_bridge_gaps (tp, gaps);
1470 }
1471}
1472
1473static void
1474btrace_compute_ftrace (struct thread_info *tp, struct btrace_data *btrace)
1475{
1476 VEC (bfun_s) *gaps;
1477 struct cleanup *old_chain;
1478
1479 gaps = NULL;
1480 old_chain = make_cleanup (VEC_cleanup (bfun_s), &gaps);
1481
1482 TRY
1483 {
1484 btrace_compute_ftrace_1 (tp, btrace, &gaps);
1485 }
1486 CATCH (error, RETURN_MASK_ALL)
1487 {
1488 btrace_finalize_ftrace (tp, &gaps);
1489
1490 throw_exception (error);
1491 }
1492 END_CATCH
1493
1494 btrace_finalize_ftrace (tp, &gaps);
1495
1496 do_cleanups (old_chain);
1497}
1498
6e07b1d2
MM
1499/* Add an entry for the current PC. */
1500
1501static void
1502btrace_add_pc (struct thread_info *tp)
1503{
734b0e4b 1504 struct btrace_data btrace;
6e07b1d2
MM
1505 struct btrace_block *block;
1506 struct regcache *regcache;
1507 struct cleanup *cleanup;
1508 CORE_ADDR pc;
1509
1510 regcache = get_thread_regcache (tp->ptid);
1511 pc = regcache_read_pc (regcache);
1512
734b0e4b
MM
1513 btrace_data_init (&btrace);
1514 btrace.format = BTRACE_FORMAT_BTS;
1515 btrace.variant.bts.blocks = NULL;
6e07b1d2 1516
734b0e4b
MM
1517 cleanup = make_cleanup_btrace_data (&btrace);
1518
1519 block = VEC_safe_push (btrace_block_s, btrace.variant.bts.blocks, NULL);
6e07b1d2
MM
1520 block->begin = pc;
1521 block->end = pc;
1522
76235df1 1523 btrace_compute_ftrace (tp, &btrace);
6e07b1d2
MM
1524
1525 do_cleanups (cleanup);
1526}
1527
02d27625
MM
1528/* See btrace.h. */
1529
1530void
f4abbc16 1531btrace_enable (struct thread_info *tp, const struct btrace_config *conf)
02d27625
MM
1532{
1533 if (tp->btrace.target != NULL)
1534 return;
1535
46a3515b
MM
1536#if !defined (HAVE_LIBIPT)
1537 if (conf->format == BTRACE_FORMAT_PT)
bc504a31 1538 error (_("GDB does not support Intel Processor Trace."));
46a3515b
MM
1539#endif /* !defined (HAVE_LIBIPT) */
1540
f4abbc16 1541 if (!target_supports_btrace (conf->format))
02d27625
MM
1542 error (_("Target does not support branch tracing."));
1543
43792cf0
PA
1544 DEBUG ("enable thread %s (%s)", print_thread_id (tp),
1545 target_pid_to_str (tp->ptid));
02d27625 1546
f4abbc16 1547 tp->btrace.target = target_enable_btrace (tp->ptid, conf);
6e07b1d2 1548
cd4007e4
MM
1549 /* We're done if we failed to enable tracing. */
1550 if (tp->btrace.target == NULL)
1551 return;
1552
1553 /* We need to undo the enable in case of errors. */
1554 TRY
1555 {
1556 /* Add an entry for the current PC so we start tracing from where we
1557 enabled it.
1558
1559 If we can't access TP's registers, TP is most likely running. In this
1560 case, we can't really say where tracing was enabled so it should be
1561 safe to simply skip this step.
1562
1563 This is not relevant for BTRACE_FORMAT_PT since the trace will already
1564 start at the PC at which tracing was enabled. */
1565 if (conf->format != BTRACE_FORMAT_PT
1566 && can_access_registers_ptid (tp->ptid))
1567 btrace_add_pc (tp);
1568 }
1569 CATCH (exception, RETURN_MASK_ALL)
1570 {
1571 btrace_disable (tp);
1572
1573 throw_exception (exception);
1574 }
1575 END_CATCH
02d27625
MM
1576}
1577
1578/* See btrace.h. */
1579
f4abbc16
MM
1580const struct btrace_config *
1581btrace_conf (const struct btrace_thread_info *btinfo)
1582{
1583 if (btinfo->target == NULL)
1584 return NULL;
1585
1586 return target_btrace_conf (btinfo->target);
1587}
1588
1589/* See btrace.h. */
1590
02d27625
MM
1591void
1592btrace_disable (struct thread_info *tp)
1593{
1594 struct btrace_thread_info *btp = &tp->btrace;
1595 int errcode = 0;
1596
1597 if (btp->target == NULL)
1598 return;
1599
43792cf0
PA
1600 DEBUG ("disable thread %s (%s)", print_thread_id (tp),
1601 target_pid_to_str (tp->ptid));
02d27625
MM
1602
1603 target_disable_btrace (btp->target);
1604 btp->target = NULL;
1605
1606 btrace_clear (tp);
1607}
1608
1609/* See btrace.h. */
1610
1611void
1612btrace_teardown (struct thread_info *tp)
1613{
1614 struct btrace_thread_info *btp = &tp->btrace;
1615 int errcode = 0;
1616
1617 if (btp->target == NULL)
1618 return;
1619
43792cf0
PA
1620 DEBUG ("teardown thread %s (%s)", print_thread_id (tp),
1621 target_pid_to_str (tp->ptid));
02d27625
MM
1622
1623 target_teardown_btrace (btp->target);
1624 btp->target = NULL;
1625
1626 btrace_clear (tp);
1627}
1628
734b0e4b 1629/* Stitch branch trace in BTS format. */
969c39fb
MM
1630
1631static int
31fd9caa 1632btrace_stitch_bts (struct btrace_data_bts *btrace, struct thread_info *tp)
969c39fb 1633{
31fd9caa 1634 struct btrace_thread_info *btinfo;
969c39fb
MM
1635 struct btrace_function *last_bfun;
1636 struct btrace_insn *last_insn;
1637 btrace_block_s *first_new_block;
1638
31fd9caa 1639 btinfo = &tp->btrace;
b54b03bd 1640 gdb_assert (!btinfo->functions.empty ());
31fd9caa
MM
1641 gdb_assert (!VEC_empty (btrace_block_s, btrace->blocks));
1642
b54b03bd
TW
1643 last_bfun = btinfo->functions.back ();
1644
31fd9caa
MM
1645 /* If the existing trace ends with a gap, we just glue the traces
1646 together. We need to drop the last (i.e. chronologically first) block
1647 of the new trace, though, since we can't fill in the start address.*/
1648 if (VEC_empty (btrace_insn_s, last_bfun->insn))
1649 {
1650 VEC_pop (btrace_block_s, btrace->blocks);
1651 return 0;
1652 }
969c39fb
MM
1653
1654 /* Beware that block trace starts with the most recent block, so the
1655 chronologically first block in the new trace is the last block in
1656 the new trace's block vector. */
734b0e4b 1657 first_new_block = VEC_last (btrace_block_s, btrace->blocks);
969c39fb
MM
1658 last_insn = VEC_last (btrace_insn_s, last_bfun->insn);
1659
1660 /* If the current PC at the end of the block is the same as in our current
1661 trace, there are two explanations:
1662 1. we executed the instruction and some branch brought us back.
1663 2. we have not made any progress.
1664 In the first case, the delta trace vector should contain at least two
1665 entries.
1666 In the second case, the delta trace vector should contain exactly one
1667 entry for the partial block containing the current PC. Remove it. */
1668 if (first_new_block->end == last_insn->pc
734b0e4b 1669 && VEC_length (btrace_block_s, btrace->blocks) == 1)
969c39fb 1670 {
734b0e4b 1671 VEC_pop (btrace_block_s, btrace->blocks);
969c39fb
MM
1672 return 0;
1673 }
1674
1675 DEBUG ("stitching %s to %s", ftrace_print_insn_addr (last_insn),
1676 core_addr_to_string_nz (first_new_block->end));
1677
1678 /* Do a simple sanity check to make sure we don't accidentally end up
1679 with a bad block. This should not occur in practice. */
1680 if (first_new_block->end < last_insn->pc)
1681 {
1682 warning (_("Error while trying to read delta trace. Falling back to "
1683 "a full read."));
1684 return -1;
1685 }
1686
1687 /* We adjust the last block to start at the end of our current trace. */
1688 gdb_assert (first_new_block->begin == 0);
1689 first_new_block->begin = last_insn->pc;
1690
1691 /* We simply pop the last insn so we can insert it again as part of
1692 the normal branch trace computation.
1693 Since instruction iterators are based on indices in the instructions
1694 vector, we don't leave any pointers dangling. */
1695 DEBUG ("pruning insn at %s for stitching",
1696 ftrace_print_insn_addr (last_insn));
1697
1698 VEC_pop (btrace_insn_s, last_bfun->insn);
1699
1700 /* The instructions vector may become empty temporarily if this has
1701 been the only instruction in this function segment.
1702 This violates the invariant but will be remedied shortly by
1703 btrace_compute_ftrace when we add the new trace. */
31fd9caa
MM
1704
1705 /* The only case where this would hurt is if the entire trace consisted
1706 of just that one instruction. If we remove it, we might turn the now
1707 empty btrace function segment into a gap. But we don't want gaps at
1708 the beginning. To avoid this, we remove the entire old trace. */
b54b03bd 1709 if (last_bfun->number == 1 && VEC_empty (btrace_insn_s, last_bfun->insn))
31fd9caa
MM
1710 btrace_clear (tp);
1711
969c39fb
MM
1712 return 0;
1713}
1714
734b0e4b
MM
1715/* Adjust the block trace in order to stitch old and new trace together.
1716 BTRACE is the new delta trace between the last and the current stop.
31fd9caa
MM
1717 TP is the traced thread.
1718 May modifx BTRACE as well as the existing trace in TP.
734b0e4b
MM
1719 Return 0 on success, -1 otherwise. */
1720
1721static int
31fd9caa 1722btrace_stitch_trace (struct btrace_data *btrace, struct thread_info *tp)
734b0e4b
MM
1723{
1724 /* If we don't have trace, there's nothing to do. */
1725 if (btrace_data_empty (btrace))
1726 return 0;
1727
1728 switch (btrace->format)
1729 {
1730 case BTRACE_FORMAT_NONE:
1731 return 0;
1732
1733 case BTRACE_FORMAT_BTS:
31fd9caa 1734 return btrace_stitch_bts (&btrace->variant.bts, tp);
b20a6524
MM
1735
1736 case BTRACE_FORMAT_PT:
1737 /* Delta reads are not supported. */
1738 return -1;
734b0e4b
MM
1739 }
1740
1741 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
1742}
1743
969c39fb
MM
1744/* Clear the branch trace histories in BTINFO. */
1745
1746static void
1747btrace_clear_history (struct btrace_thread_info *btinfo)
1748{
1749 xfree (btinfo->insn_history);
1750 xfree (btinfo->call_history);
1751 xfree (btinfo->replay);
1752
1753 btinfo->insn_history = NULL;
1754 btinfo->call_history = NULL;
1755 btinfo->replay = NULL;
1756}
1757
b0627500
MM
1758/* Clear the branch trace maintenance histories in BTINFO. */
1759
1760static void
1761btrace_maint_clear (struct btrace_thread_info *btinfo)
1762{
1763 switch (btinfo->data.format)
1764 {
1765 default:
1766 break;
1767
1768 case BTRACE_FORMAT_BTS:
1769 btinfo->maint.variant.bts.packet_history.begin = 0;
1770 btinfo->maint.variant.bts.packet_history.end = 0;
1771 break;
1772
1773#if defined (HAVE_LIBIPT)
1774 case BTRACE_FORMAT_PT:
1775 xfree (btinfo->maint.variant.pt.packets);
1776
1777 btinfo->maint.variant.pt.packets = NULL;
1778 btinfo->maint.variant.pt.packet_history.begin = 0;
1779 btinfo->maint.variant.pt.packet_history.end = 0;
1780 break;
1781#endif /* defined (HAVE_LIBIPT) */
1782 }
1783}
1784
02d27625
MM
1785/* See btrace.h. */
1786
508352a9
TW
1787const char *
1788btrace_decode_error (enum btrace_format format, int errcode)
1789{
1790 switch (format)
1791 {
1792 case BTRACE_FORMAT_BTS:
1793 switch (errcode)
1794 {
1795 case BDE_BTS_OVERFLOW:
1796 return _("instruction overflow");
1797
1798 case BDE_BTS_INSN_SIZE:
1799 return _("unknown instruction");
1800
1801 default:
1802 break;
1803 }
1804 break;
1805
1806#if defined (HAVE_LIBIPT)
1807 case BTRACE_FORMAT_PT:
1808 switch (errcode)
1809 {
1810 case BDE_PT_USER_QUIT:
1811 return _("trace decode cancelled");
1812
1813 case BDE_PT_DISABLED:
1814 return _("disabled");
1815
1816 case BDE_PT_OVERFLOW:
1817 return _("overflow");
1818
1819 default:
1820 if (errcode < 0)
1821 return pt_errstr (pt_errcode (errcode));
1822 break;
1823 }
1824 break;
1825#endif /* defined (HAVE_LIBIPT) */
1826
1827 default:
1828 break;
1829 }
1830
1831 return _("unknown");
1832}
1833
1834/* See btrace.h. */
1835
02d27625
MM
1836void
1837btrace_fetch (struct thread_info *tp)
1838{
1839 struct btrace_thread_info *btinfo;
969c39fb 1840 struct btrace_target_info *tinfo;
734b0e4b 1841 struct btrace_data btrace;
23a7fe75 1842 struct cleanup *cleanup;
969c39fb 1843 int errcode;
02d27625 1844
43792cf0
PA
1845 DEBUG ("fetch thread %s (%s)", print_thread_id (tp),
1846 target_pid_to_str (tp->ptid));
02d27625
MM
1847
1848 btinfo = &tp->btrace;
969c39fb
MM
1849 tinfo = btinfo->target;
1850 if (tinfo == NULL)
1851 return;
1852
1853 /* There's no way we could get new trace while replaying.
1854 On the other hand, delta trace would return a partial record with the
1855 current PC, which is the replay PC, not the last PC, as expected. */
1856 if (btinfo->replay != NULL)
02d27625
MM
1857 return;
1858
ae20e79a
TW
1859 /* With CLI usage, TP->PTID always equals INFERIOR_PTID here. Now that we
1860 can store a gdb.Record object in Python referring to a different thread
1861 than the current one, temporarily set INFERIOR_PTID. */
1862 cleanup = save_inferior_ptid ();
1863 inferior_ptid = tp->ptid;
1864
cd4007e4
MM
1865 /* We should not be called on running or exited threads. */
1866 gdb_assert (can_access_registers_ptid (tp->ptid));
1867
734b0e4b 1868 btrace_data_init (&btrace);
ae20e79a 1869 make_cleanup_btrace_data (&btrace);
02d27625 1870
969c39fb 1871 /* Let's first try to extend the trace we already have. */
b54b03bd 1872 if (!btinfo->functions.empty ())
969c39fb
MM
1873 {
1874 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_DELTA);
1875 if (errcode == 0)
1876 {
1877 /* Success. Let's try to stitch the traces together. */
31fd9caa 1878 errcode = btrace_stitch_trace (&btrace, tp);
969c39fb
MM
1879 }
1880 else
1881 {
1882 /* We failed to read delta trace. Let's try to read new trace. */
1883 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_NEW);
1884
1885 /* If we got any new trace, discard what we have. */
734b0e4b 1886 if (errcode == 0 && !btrace_data_empty (&btrace))
969c39fb
MM
1887 btrace_clear (tp);
1888 }
1889
1890 /* If we were not able to read the trace, we start over. */
1891 if (errcode != 0)
1892 {
1893 btrace_clear (tp);
1894 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
1895 }
1896 }
1897 else
1898 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
1899
1900 /* If we were not able to read the branch trace, signal an error. */
1901 if (errcode != 0)
1902 error (_("Failed to read branch trace."));
1903
1904 /* Compute the trace, provided we have any. */
734b0e4b 1905 if (!btrace_data_empty (&btrace))
23a7fe75 1906 {
9be54cae
MM
1907 /* Store the raw trace data. The stored data will be cleared in
1908 btrace_clear, so we always append the new trace. */
1909 btrace_data_append (&btinfo->data, &btrace);
b0627500 1910 btrace_maint_clear (btinfo);
9be54cae 1911
969c39fb 1912 btrace_clear_history (btinfo);
76235df1 1913 btrace_compute_ftrace (tp, &btrace);
23a7fe75 1914 }
02d27625 1915
23a7fe75 1916 do_cleanups (cleanup);
02d27625
MM
1917}
1918
1919/* See btrace.h. */
1920
1921void
1922btrace_clear (struct thread_info *tp)
1923{
1924 struct btrace_thread_info *btinfo;
1925
43792cf0
PA
1926 DEBUG ("clear thread %s (%s)", print_thread_id (tp),
1927 target_pid_to_str (tp->ptid));
02d27625 1928
0b722aec
MM
1929 /* Make sure btrace frames that may hold a pointer into the branch
1930 trace data are destroyed. */
1931 reinit_frame_cache ();
1932
02d27625 1933 btinfo = &tp->btrace;
17b89b34 1934 for (auto &bfun : btinfo->functions)
23a7fe75 1935 {
17b89b34
TW
1936 VEC_free (btrace_insn_s, bfun->insn);
1937 xfree (bfun);
23a7fe75
MM
1938 }
1939
17b89b34 1940 btinfo->functions.clear ();
31fd9caa 1941 btinfo->ngaps = 0;
23a7fe75 1942
b0627500
MM
1943 /* Must clear the maint data before - it depends on BTINFO->DATA. */
1944 btrace_maint_clear (btinfo);
9be54cae 1945 btrace_data_clear (&btinfo->data);
969c39fb 1946 btrace_clear_history (btinfo);
02d27625
MM
1947}
1948
1949/* See btrace.h. */
1950
1951void
1952btrace_free_objfile (struct objfile *objfile)
1953{
1954 struct thread_info *tp;
1955
1956 DEBUG ("free objfile");
1957
034f788c 1958 ALL_NON_EXITED_THREADS (tp)
02d27625
MM
1959 btrace_clear (tp);
1960}
c12a2917
MM
1961
1962#if defined (HAVE_LIBEXPAT)
1963
1964/* Check the btrace document version. */
1965
1966static void
1967check_xml_btrace_version (struct gdb_xml_parser *parser,
1968 const struct gdb_xml_element *element,
1969 void *user_data, VEC (gdb_xml_value_s) *attributes)
1970{
9a3c8263
SM
1971 const char *version
1972 = (const char *) xml_find_attribute (attributes, "version")->value;
c12a2917
MM
1973
1974 if (strcmp (version, "1.0") != 0)
1975 gdb_xml_error (parser, _("Unsupported btrace version: \"%s\""), version);
1976}
1977
1978/* Parse a btrace "block" xml record. */
1979
1980static void
1981parse_xml_btrace_block (struct gdb_xml_parser *parser,
1982 const struct gdb_xml_element *element,
1983 void *user_data, VEC (gdb_xml_value_s) *attributes)
1984{
734b0e4b 1985 struct btrace_data *btrace;
c12a2917
MM
1986 struct btrace_block *block;
1987 ULONGEST *begin, *end;
1988
9a3c8263 1989 btrace = (struct btrace_data *) user_data;
734b0e4b
MM
1990
1991 switch (btrace->format)
1992 {
1993 case BTRACE_FORMAT_BTS:
1994 break;
1995
1996 case BTRACE_FORMAT_NONE:
1997 btrace->format = BTRACE_FORMAT_BTS;
1998 btrace->variant.bts.blocks = NULL;
1999 break;
2000
2001 default:
2002 gdb_xml_error (parser, _("Btrace format error."));
2003 }
c12a2917 2004
bc84451b
SM
2005 begin = (ULONGEST *) xml_find_attribute (attributes, "begin")->value;
2006 end = (ULONGEST *) xml_find_attribute (attributes, "end")->value;
c12a2917 2007
734b0e4b 2008 block = VEC_safe_push (btrace_block_s, btrace->variant.bts.blocks, NULL);
c12a2917
MM
2009 block->begin = *begin;
2010 block->end = *end;
2011}
2012
b20a6524
MM
2013/* Parse a "raw" xml record. */
2014
2015static void
2016parse_xml_raw (struct gdb_xml_parser *parser, const char *body_text,
e7b01ce0 2017 gdb_byte **pdata, size_t *psize)
b20a6524
MM
2018{
2019 struct cleanup *cleanup;
2020 gdb_byte *data, *bin;
e7b01ce0 2021 size_t len, size;
b20a6524
MM
2022
2023 len = strlen (body_text);
e7b01ce0 2024 if (len % 2 != 0)
b20a6524
MM
2025 gdb_xml_error (parser, _("Bad raw data size."));
2026
e7b01ce0
MM
2027 size = len / 2;
2028
224c3ddb 2029 bin = data = (gdb_byte *) xmalloc (size);
b20a6524
MM
2030 cleanup = make_cleanup (xfree, data);
2031
2032 /* We use hex encoding - see common/rsp-low.h. */
2033 while (len > 0)
2034 {
2035 char hi, lo;
2036
2037 hi = *body_text++;
2038 lo = *body_text++;
2039
2040 if (hi == 0 || lo == 0)
2041 gdb_xml_error (parser, _("Bad hex encoding."));
2042
2043 *bin++ = fromhex (hi) * 16 + fromhex (lo);
2044 len -= 2;
2045 }
2046
2047 discard_cleanups (cleanup);
2048
2049 *pdata = data;
2050 *psize = size;
2051}
2052
2053/* Parse a btrace pt-config "cpu" xml record. */
2054
2055static void
2056parse_xml_btrace_pt_config_cpu (struct gdb_xml_parser *parser,
2057 const struct gdb_xml_element *element,
2058 void *user_data,
2059 VEC (gdb_xml_value_s) *attributes)
2060{
2061 struct btrace_data *btrace;
2062 const char *vendor;
2063 ULONGEST *family, *model, *stepping;
2064
9a3c8263
SM
2065 vendor = (const char *) xml_find_attribute (attributes, "vendor")->value;
2066 family = (ULONGEST *) xml_find_attribute (attributes, "family")->value;
2067 model = (ULONGEST *) xml_find_attribute (attributes, "model")->value;
2068 stepping = (ULONGEST *) xml_find_attribute (attributes, "stepping")->value;
b20a6524 2069
9a3c8263 2070 btrace = (struct btrace_data *) user_data;
b20a6524
MM
2071
2072 if (strcmp (vendor, "GenuineIntel") == 0)
2073 btrace->variant.pt.config.cpu.vendor = CV_INTEL;
2074
2075 btrace->variant.pt.config.cpu.family = *family;
2076 btrace->variant.pt.config.cpu.model = *model;
2077 btrace->variant.pt.config.cpu.stepping = *stepping;
2078}
2079
2080/* Parse a btrace pt "raw" xml record. */
2081
2082static void
2083parse_xml_btrace_pt_raw (struct gdb_xml_parser *parser,
2084 const struct gdb_xml_element *element,
2085 void *user_data, const char *body_text)
2086{
2087 struct btrace_data *btrace;
2088
9a3c8263 2089 btrace = (struct btrace_data *) user_data;
b20a6524
MM
2090 parse_xml_raw (parser, body_text, &btrace->variant.pt.data,
2091 &btrace->variant.pt.size);
2092}
2093
2094/* Parse a btrace "pt" xml record. */
2095
2096static void
2097parse_xml_btrace_pt (struct gdb_xml_parser *parser,
2098 const struct gdb_xml_element *element,
2099 void *user_data, VEC (gdb_xml_value_s) *attributes)
2100{
2101 struct btrace_data *btrace;
2102
9a3c8263 2103 btrace = (struct btrace_data *) user_data;
b20a6524
MM
2104 btrace->format = BTRACE_FORMAT_PT;
2105 btrace->variant.pt.config.cpu.vendor = CV_UNKNOWN;
2106 btrace->variant.pt.data = NULL;
2107 btrace->variant.pt.size = 0;
2108}
2109
c12a2917
MM
/* Attributes of a btrace "block" element.  */

static const struct gdb_xml_attribute block_attributes[] = {
  { "begin", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { "end", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

/* Attributes of a btrace pt-config "cpu" element.  */

static const struct gdb_xml_attribute btrace_pt_config_cpu_attributes[] = {
  { "vendor", GDB_XML_AF_NONE, NULL, NULL },
  { "family", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { "model", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { "stepping", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

/* Children of a btrace "pt-config" element.  */

static const struct gdb_xml_element btrace_pt_config_children[] = {
  { "cpu", btrace_pt_config_cpu_attributes, NULL, GDB_XML_EF_OPTIONAL,
    parse_xml_btrace_pt_config_cpu, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

/* Children of a btrace "pt" element.  */

static const struct gdb_xml_element btrace_pt_children[] = {
  { "pt-config", NULL, btrace_pt_config_children, GDB_XML_EF_OPTIONAL, NULL,
    NULL },
  { "raw", NULL, NULL, GDB_XML_EF_OPTIONAL, NULL, parse_xml_btrace_pt_raw },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

/* Attributes of the top-level "btrace" element.  */

static const struct gdb_xml_attribute btrace_attributes[] = {
  { "version", GDB_XML_AF_NONE, NULL, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

/* Children of the top-level "btrace" element.  */

static const struct gdb_xml_element btrace_children[] = {
  { "block", block_attributes, NULL,
    GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL, parse_xml_btrace_block, NULL },
  { "pt", NULL, btrace_pt_children, GDB_XML_EF_OPTIONAL, parse_xml_btrace_pt,
    NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

/* The btrace document; version is checked in check_xml_btrace_version.  */

static const struct gdb_xml_element btrace_elements[] = {
  { "btrace", btrace_attributes, btrace_children, GDB_XML_EF_NONE,
    check_xml_btrace_version, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};
2155
2156#endif /* defined (HAVE_LIBEXPAT) */
2157
2158/* See btrace.h. */
2159
734b0e4b
MM
2160void
2161parse_xml_btrace (struct btrace_data *btrace, const char *buffer)
c12a2917 2162{
c12a2917
MM
2163 struct cleanup *cleanup;
2164 int errcode;
2165
2166#if defined (HAVE_LIBEXPAT)
2167
734b0e4b
MM
2168 btrace->format = BTRACE_FORMAT_NONE;
2169
2170 cleanup = make_cleanup_btrace_data (btrace);
c12a2917 2171 errcode = gdb_xml_parse_quick (_("btrace"), "btrace.dtd", btrace_elements,
734b0e4b 2172 buffer, btrace);
c12a2917 2173 if (errcode != 0)
969c39fb 2174 error (_("Error parsing branch trace."));
c12a2917
MM
2175
2176 /* Keep parse results. */
2177 discard_cleanups (cleanup);
2178
2179#else /* !defined (HAVE_LIBEXPAT) */
2180
2181 error (_("Cannot process branch trace. XML parsing is not supported."));
2182
2183#endif /* !defined (HAVE_LIBEXPAT) */
c12a2917 2184}
23a7fe75 2185
f4abbc16
MM
2186#if defined (HAVE_LIBEXPAT)
2187
2188/* Parse a btrace-conf "bts" xml record. */
2189
2190static void
2191parse_xml_btrace_conf_bts (struct gdb_xml_parser *parser,
2192 const struct gdb_xml_element *element,
2193 void *user_data, VEC (gdb_xml_value_s) *attributes)
2194{
2195 struct btrace_config *conf;
d33501a5 2196 struct gdb_xml_value *size;
f4abbc16 2197
9a3c8263 2198 conf = (struct btrace_config *) user_data;
f4abbc16 2199 conf->format = BTRACE_FORMAT_BTS;
d33501a5
MM
2200 conf->bts.size = 0;
2201
2202 size = xml_find_attribute (attributes, "size");
2203 if (size != NULL)
b20a6524 2204 conf->bts.size = (unsigned int) *(ULONGEST *) size->value;
f4abbc16
MM
2205}
2206
b20a6524
MM
2207/* Parse a btrace-conf "pt" xml record. */
2208
2209static void
2210parse_xml_btrace_conf_pt (struct gdb_xml_parser *parser,
2211 const struct gdb_xml_element *element,
2212 void *user_data, VEC (gdb_xml_value_s) *attributes)
2213{
2214 struct btrace_config *conf;
2215 struct gdb_xml_value *size;
2216
9a3c8263 2217 conf = (struct btrace_config *) user_data;
b20a6524
MM
2218 conf->format = BTRACE_FORMAT_PT;
2219 conf->pt.size = 0;
2220
2221 size = xml_find_attribute (attributes, "size");
2222 if (size != NULL)
2223 conf->pt.size = (unsigned int) *(ULONGEST *) size->value;
2224}
2225
/* Attributes of a btrace-conf "pt" element.  */

static const struct gdb_xml_attribute btrace_conf_pt_attributes[] = {
  { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

/* Attributes of a btrace-conf "bts" element.  */

static const struct gdb_xml_attribute btrace_conf_bts_attributes[] = {
  { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

/* Children of the top-level "btrace-conf" element.  */

static const struct gdb_xml_element btrace_conf_children[] = {
  { "bts", btrace_conf_bts_attributes, NULL, GDB_XML_EF_OPTIONAL,
    parse_xml_btrace_conf_bts, NULL },
  { "pt", btrace_conf_pt_attributes, NULL, GDB_XML_EF_OPTIONAL,
    parse_xml_btrace_conf_pt, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

/* Attributes of the top-level "btrace-conf" element.  */

static const struct gdb_xml_attribute btrace_conf_attributes[] = {
  { "version", GDB_XML_AF_NONE, NULL, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

/* The btrace-conf document.  */

static const struct gdb_xml_element btrace_conf_elements[] = {
  { "btrace-conf", btrace_conf_attributes, btrace_conf_children,
    GDB_XML_EF_NONE, NULL, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};
2254
2255#endif /* defined (HAVE_LIBEXPAT) */
2256
2257/* See btrace.h. */
2258
2259void
2260parse_xml_btrace_conf (struct btrace_config *conf, const char *xml)
2261{
2262 int errcode;
2263
2264#if defined (HAVE_LIBEXPAT)
2265
2266 errcode = gdb_xml_parse_quick (_("btrace-conf"), "btrace-conf.dtd",
2267 btrace_conf_elements, xml, conf);
2268 if (errcode != 0)
2269 error (_("Error parsing branch trace configuration."));
2270
2271#else /* !defined (HAVE_LIBEXPAT) */
2272
2273 error (_("XML parsing is not supported."));
2274
2275#endif /* !defined (HAVE_LIBEXPAT) */
2276}
2277
23a7fe75
MM
2278/* See btrace.h. */
2279
2280const struct btrace_insn *
2281btrace_insn_get (const struct btrace_insn_iterator *it)
2282{
2283 const struct btrace_function *bfun;
2284 unsigned int index, end;
2285
a0f1b963
TW
2286 index = it->insn_index;
2287 bfun = it->btinfo->functions[it->call_index];
23a7fe75 2288
31fd9caa
MM
2289 /* Check if the iterator points to a gap in the trace. */
2290 if (bfun->errcode != 0)
2291 return NULL;
2292
23a7fe75
MM
2293 /* The index is within the bounds of this function's instruction vector. */
2294 end = VEC_length (btrace_insn_s, bfun->insn);
2295 gdb_assert (0 < end);
2296 gdb_assert (index < end);
2297
2298 return VEC_index (btrace_insn_s, bfun->insn, index);
2299}
2300
2301/* See btrace.h. */
2302
69090cee
TW
2303int
2304btrace_insn_get_error (const struct btrace_insn_iterator *it)
23a7fe75 2305{
a0f1b963
TW
2306 const struct btrace_function *bfun;
2307
2308 bfun = it->btinfo->functions[it->call_index];
2309 return bfun->errcode;
69090cee 2310}
31fd9caa 2311
69090cee 2312/* See btrace.h. */
31fd9caa 2313
69090cee
TW
2314unsigned int
2315btrace_insn_number (const struct btrace_insn_iterator *it)
2316{
a0f1b963
TW
2317 const struct btrace_function *bfun;
2318
2319 bfun = it->btinfo->functions[it->call_index];
2320 return bfun->insn_offset + it->insn_index;
23a7fe75
MM
2321}
2322
2323/* See btrace.h. */
2324
2325void
2326btrace_insn_begin (struct btrace_insn_iterator *it,
2327 const struct btrace_thread_info *btinfo)
2328{
b54b03bd 2329 if (btinfo->functions.empty ())
23a7fe75
MM
2330 error (_("No trace."));
2331
521103fd 2332 it->btinfo = btinfo;
a0f1b963
TW
2333 it->call_index = 0;
2334 it->insn_index = 0;
23a7fe75
MM
2335}
2336
2337/* See btrace.h. */
2338
2339void
2340btrace_insn_end (struct btrace_insn_iterator *it,
2341 const struct btrace_thread_info *btinfo)
2342{
2343 const struct btrace_function *bfun;
2344 unsigned int length;
2345
b54b03bd 2346 if (btinfo->functions.empty ())
23a7fe75
MM
2347 error (_("No trace."));
2348
b54b03bd 2349 bfun = btinfo->functions.back ();
23a7fe75
MM
2350 length = VEC_length (btrace_insn_s, bfun->insn);
2351
31fd9caa
MM
2352 /* The last function may either be a gap or it contains the current
2353 instruction, which is one past the end of the execution trace; ignore
2354 it. */
2355 if (length > 0)
2356 length -= 1;
2357
521103fd 2358 it->btinfo = btinfo;
a0f1b963
TW
2359 it->call_index = bfun->number - 1;
2360 it->insn_index = length;
23a7fe75
MM
2361}
2362
2363/* See btrace.h. */
2364
2365unsigned int
2366btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
2367{
2368 const struct btrace_function *bfun;
2369 unsigned int index, steps;
2370
a0f1b963 2371 bfun = it->btinfo->functions[it->call_index];
23a7fe75 2372 steps = 0;
a0f1b963 2373 index = it->insn_index;
23a7fe75
MM
2374
2375 while (stride != 0)
2376 {
2377 unsigned int end, space, adv;
2378
2379 end = VEC_length (btrace_insn_s, bfun->insn);
2380
31fd9caa
MM
2381 /* An empty function segment represents a gap in the trace. We count
2382 it as one instruction. */
2383 if (end == 0)
2384 {
2385 const struct btrace_function *next;
2386
eb8f2b9c 2387 next = ftrace_find_call_by_number (it->btinfo, bfun->number + 1);
31fd9caa
MM
2388 if (next == NULL)
2389 break;
2390
2391 stride -= 1;
2392 steps += 1;
2393
2394 bfun = next;
2395 index = 0;
2396
2397 continue;
2398 }
2399
23a7fe75
MM
2400 gdb_assert (0 < end);
2401 gdb_assert (index < end);
2402
2403 /* Compute the number of instructions remaining in this segment. */
2404 space = end - index;
2405
2406 /* Advance the iterator as far as possible within this segment. */
325fac50 2407 adv = std::min (space, stride);
23a7fe75
MM
2408 stride -= adv;
2409 index += adv;
2410 steps += adv;
2411
2412 /* Move to the next function if we're at the end of this one. */
2413 if (index == end)
2414 {
2415 const struct btrace_function *next;
2416
eb8f2b9c 2417 next = ftrace_find_call_by_number (it->btinfo, bfun->number + 1);
23a7fe75
MM
2418 if (next == NULL)
2419 {
2420 /* We stepped past the last function.
2421
2422 Let's adjust the index to point to the last instruction in
2423 the previous function. */
2424 index -= 1;
2425 steps -= 1;
2426 break;
2427 }
2428
2429 /* We now point to the first instruction in the new function. */
2430 bfun = next;
2431 index = 0;
2432 }
2433
2434 /* We did make progress. */
2435 gdb_assert (adv > 0);
2436 }
2437
2438 /* Update the iterator. */
a0f1b963
TW
2439 it->call_index = bfun->number - 1;
2440 it->insn_index = index;
23a7fe75
MM
2441
2442 return steps;
2443}
2444
2445/* See btrace.h. */
2446
2447unsigned int
2448btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
2449{
2450 const struct btrace_function *bfun;
2451 unsigned int index, steps;
2452
a0f1b963 2453 bfun = it->btinfo->functions[it->call_index];
23a7fe75 2454 steps = 0;
a0f1b963 2455 index = it->insn_index;
23a7fe75
MM
2456
2457 while (stride != 0)
2458 {
2459 unsigned int adv;
2460
2461 /* Move to the previous function if we're at the start of this one. */
2462 if (index == 0)
2463 {
2464 const struct btrace_function *prev;
2465
eb8f2b9c 2466 prev = ftrace_find_call_by_number (it->btinfo, bfun->number - 1);
23a7fe75
MM
2467 if (prev == NULL)
2468 break;
2469
2470 /* We point to one after the last instruction in the new function. */
2471 bfun = prev;
2472 index = VEC_length (btrace_insn_s, bfun->insn);
2473
31fd9caa
MM
2474 /* An empty function segment represents a gap in the trace. We count
2475 it as one instruction. */
2476 if (index == 0)
2477 {
2478 stride -= 1;
2479 steps += 1;
2480
2481 continue;
2482 }
23a7fe75
MM
2483 }
2484
2485 /* Advance the iterator as far as possible within this segment. */
325fac50 2486 adv = std::min (index, stride);
31fd9caa 2487
23a7fe75
MM
2488 stride -= adv;
2489 index -= adv;
2490 steps += adv;
2491
2492 /* We did make progress. */
2493 gdb_assert (adv > 0);
2494 }
2495
2496 /* Update the iterator. */
a0f1b963
TW
2497 it->call_index = bfun->number - 1;
2498 it->insn_index = index;
23a7fe75
MM
2499
2500 return steps;
2501}
2502
2503/* See btrace.h. */
2504
2505int
2506btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
2507 const struct btrace_insn_iterator *rhs)
2508{
a0f1b963 2509 gdb_assert (lhs->btinfo == rhs->btinfo);
23a7fe75 2510
a0f1b963
TW
2511 if (lhs->call_index != rhs->call_index)
2512 return lhs->call_index - rhs->call_index;
23a7fe75 2513
a0f1b963 2514 return lhs->insn_index - rhs->insn_index;
23a7fe75
MM
2515}
2516
2517/* See btrace.h. */
2518
2519int
2520btrace_find_insn_by_number (struct btrace_insn_iterator *it,
2521 const struct btrace_thread_info *btinfo,
2522 unsigned int number)
2523{
2524 const struct btrace_function *bfun;
fdd2bd92 2525 unsigned int upper, lower;
23a7fe75 2526
2b51eddc 2527 if (btinfo->functions.empty ())
fdd2bd92 2528 return 0;
23a7fe75 2529
fdd2bd92 2530 lower = 0;
2b51eddc 2531 bfun = btinfo->functions[lower];
fdd2bd92 2532 if (number < bfun->insn_offset)
23a7fe75
MM
2533 return 0;
2534
2b51eddc
TW
2535 upper = btinfo->functions.size () - 1;
2536 bfun = btinfo->functions[upper];
fdd2bd92 2537 if (number >= bfun->insn_offset + ftrace_call_num_insn (bfun))
23a7fe75
MM
2538 return 0;
2539
fdd2bd92
TW
2540 /* We assume that there are no holes in the numbering. */
2541 for (;;)
2542 {
2543 const unsigned int average = lower + (upper - lower) / 2;
2544
2b51eddc 2545 bfun = btinfo->functions[average];
fdd2bd92
TW
2546
2547 if (number < bfun->insn_offset)
2548 {
2549 upper = average - 1;
2550 continue;
2551 }
2552
2553 if (number >= bfun->insn_offset + ftrace_call_num_insn (bfun))
2554 {
2555 lower = average + 1;
2556 continue;
2557 }
2558
2559 break;
2560 }
2561
521103fd 2562 it->btinfo = btinfo;
a0f1b963
TW
2563 it->call_index = bfun->number - 1;
2564 it->insn_index = number - bfun->insn_offset;
23a7fe75
MM
2565 return 1;
2566}
2567
f158f208
TW
2568/* Returns true if the recording ends with a function segment that
2569 contains only a single (i.e. the current) instruction. */
2570
2571static bool
2572btrace_ends_with_single_insn (const struct btrace_thread_info *btinfo)
2573{
2574 const btrace_function *bfun;
2575
2576 if (btinfo->functions.empty ())
2577 return false;
2578
2579 bfun = btinfo->functions.back ();
2580 if (bfun->errcode != 0)
2581 return false;
2582
2583 return ftrace_call_num_insn (bfun) == 1;
2584}
2585
23a7fe75
MM
2586/* See btrace.h. */
2587
2588const struct btrace_function *
2589btrace_call_get (const struct btrace_call_iterator *it)
2590{
f158f208
TW
2591 if (it->index >= it->btinfo->functions.size ())
2592 return NULL;
2593
2594 return it->btinfo->functions[it->index];
23a7fe75
MM
2595}
2596
2597/* See btrace.h. */
2598
2599unsigned int
2600btrace_call_number (const struct btrace_call_iterator *it)
2601{
f158f208 2602 const unsigned int length = it->btinfo->functions.size ();
23a7fe75 2603
f158f208
TW
2604 /* If the last function segment contains only a single instruction (i.e. the
2605 current instruction), skip it. */
2606 if ((it->index == length) && btrace_ends_with_single_insn (it->btinfo))
2607 return length;
23a7fe75 2608
f158f208 2609 return it->index + 1;
23a7fe75
MM
2610}
2611
2612/* See btrace.h. */
2613
2614void
2615btrace_call_begin (struct btrace_call_iterator *it,
2616 const struct btrace_thread_info *btinfo)
2617{
f158f208 2618 if (btinfo->functions.empty ())
23a7fe75
MM
2619 error (_("No trace."));
2620
2621 it->btinfo = btinfo;
f158f208 2622 it->index = 0;
23a7fe75
MM
2623}
2624
2625/* See btrace.h. */
2626
2627void
2628btrace_call_end (struct btrace_call_iterator *it,
2629 const struct btrace_thread_info *btinfo)
2630{
f158f208 2631 if (btinfo->functions.empty ())
23a7fe75
MM
2632 error (_("No trace."));
2633
2634 it->btinfo = btinfo;
f158f208 2635 it->index = btinfo->functions.size ();
23a7fe75
MM
2636}
2637
2638/* See btrace.h. */
2639
2640unsigned int
2641btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
2642{
f158f208 2643 const unsigned int length = it->btinfo->functions.size ();
23a7fe75 2644
f158f208
TW
2645 if (it->index + stride < length - 1)
2646 /* Default case: Simply advance the iterator. */
2647 it->index += stride;
2648 else if (it->index + stride == length - 1)
23a7fe75 2649 {
f158f208
TW
2650 /* We land exactly at the last function segment. If it contains only one
2651 instruction (i.e. the current instruction) it is not actually part of
2652 the trace. */
2653 if (btrace_ends_with_single_insn (it->btinfo))
2654 it->index = length;
2655 else
2656 it->index = length - 1;
2657 }
2658 else
2659 {
2660 /* We land past the last function segment and have to adjust the stride.
2661 If the last function segment contains only one instruction (i.e. the
2662 current instruction) it is not actually part of the trace. */
2663 if (btrace_ends_with_single_insn (it->btinfo))
2664 stride = length - it->index - 1;
2665 else
2666 stride = length - it->index;
23a7fe75 2667
f158f208 2668 it->index = length;
23a7fe75
MM
2669 }
2670
f158f208 2671 return stride;
23a7fe75
MM
2672}
2673
2674/* See btrace.h. */
2675
2676unsigned int
2677btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride)
2678{
f158f208
TW
2679 const unsigned int length = it->btinfo->functions.size ();
2680 int steps = 0;
23a7fe75 2681
f158f208 2682 gdb_assert (it->index <= length);
23a7fe75 2683
f158f208
TW
2684 if (stride == 0 || it->index == 0)
2685 return 0;
23a7fe75 2686
f158f208
TW
2687 /* If we are at the end, the first step is a special case. If the last
2688 function segment contains only one instruction (i.e. the current
2689 instruction) it is not actually part of the trace. To be able to step
2690 over this instruction, we need at least one more function segment. */
2691 if ((it->index == length) && (length > 1))
23a7fe75 2692 {
f158f208
TW
2693 if (btrace_ends_with_single_insn (it->btinfo))
2694 it->index = length - 2;
2695 else
2696 it->index = length - 1;
23a7fe75 2697
f158f208
TW
2698 steps = 1;
2699 stride -= 1;
23a7fe75
MM
2700 }
2701
f158f208
TW
2702 stride = std::min (stride, it->index);
2703
2704 it->index -= stride;
2705 return steps + stride;
23a7fe75
MM
2706}
2707
2708/* See btrace.h. */
2709
2710int
2711btrace_call_cmp (const struct btrace_call_iterator *lhs,
2712 const struct btrace_call_iterator *rhs)
2713{
f158f208
TW
2714 gdb_assert (lhs->btinfo == rhs->btinfo);
2715 return (int) (lhs->index - rhs->index);
23a7fe75
MM
2716}
2717
2718/* See btrace.h. */
2719
2720int
2721btrace_find_call_by_number (struct btrace_call_iterator *it,
2722 const struct btrace_thread_info *btinfo,
2723 unsigned int number)
2724{
f158f208 2725 const unsigned int length = btinfo->functions.size ();
23a7fe75 2726
f158f208
TW
2727 if ((number == 0) || (number > length))
2728 return 0;
23a7fe75 2729
f158f208
TW
2730 it->btinfo = btinfo;
2731 it->index = number - 1;
2732 return 1;
23a7fe75
MM
2733}
2734
2735/* See btrace.h. */
2736
2737void
2738btrace_set_insn_history (struct btrace_thread_info *btinfo,
2739 const struct btrace_insn_iterator *begin,
2740 const struct btrace_insn_iterator *end)
2741{
2742 if (btinfo->insn_history == NULL)
8d749320 2743 btinfo->insn_history = XCNEW (struct btrace_insn_history);
23a7fe75
MM
2744
2745 btinfo->insn_history->begin = *begin;
2746 btinfo->insn_history->end = *end;
2747}
2748
2749/* See btrace.h. */
2750
2751void
2752btrace_set_call_history (struct btrace_thread_info *btinfo,
2753 const struct btrace_call_iterator *begin,
2754 const struct btrace_call_iterator *end)
2755{
2756 gdb_assert (begin->btinfo == end->btinfo);
2757
2758 if (btinfo->call_history == NULL)
8d749320 2759 btinfo->call_history = XCNEW (struct btrace_call_history);
23a7fe75
MM
2760
2761 btinfo->call_history->begin = *begin;
2762 btinfo->call_history->end = *end;
2763}
07bbe694
MM
2764
2765/* See btrace.h. */
2766
2767int
2768btrace_is_replaying (struct thread_info *tp)
2769{
2770 return tp->btrace.replay != NULL;
2771}
6e07b1d2
MM
2772
2773/* See btrace.h. */
2774
2775int
2776btrace_is_empty (struct thread_info *tp)
2777{
2778 struct btrace_insn_iterator begin, end;
2779 struct btrace_thread_info *btinfo;
2780
2781 btinfo = &tp->btrace;
2782
b54b03bd 2783 if (btinfo->functions.empty ())
6e07b1d2
MM
2784 return 1;
2785
2786 btrace_insn_begin (&begin, btinfo);
2787 btrace_insn_end (&end, btinfo);
2788
2789 return btrace_insn_cmp (&begin, &end) == 0;
2790}
734b0e4b
MM
2791
2792/* Forward the cleanup request. */
2793
2794static void
2795do_btrace_data_cleanup (void *arg)
2796{
9a3c8263 2797 btrace_data_fini ((struct btrace_data *) arg);
734b0e4b
MM
2798}
2799
2800/* See btrace.h. */
2801
2802struct cleanup *
2803make_cleanup_btrace_data (struct btrace_data *data)
2804{
2805 return make_cleanup (do_btrace_data_cleanup, data);
2806}
b0627500
MM
2807
2808#if defined (HAVE_LIBIPT)
2809
/* Print a single Intel PT packet in human-readable form.  One case per
   libipt packet type; unknown types fall into the default case.  */

static void
pt_print_packet (const struct pt_packet *packet)
{
  switch (packet->type)
    {
    default:
      printf_unfiltered (("[??: %x]"), packet->type);
      break;

    case ppt_psb:
      printf_unfiltered (("psb"));
      break;

    case ppt_psbend:
      printf_unfiltered (("psbend"));
      break;

    case ppt_pad:
      printf_unfiltered (("pad"));
      break;

    case ppt_tip:
      printf_unfiltered (("tip %u: 0x%" PRIx64 ""),
			 packet->payload.ip.ipc,
			 packet->payload.ip.ip);
      break;

    case ppt_tip_pge:
      printf_unfiltered (("tip.pge %u: 0x%" PRIx64 ""),
			 packet->payload.ip.ipc,
			 packet->payload.ip.ip);
      break;

    case ppt_tip_pgd:
      printf_unfiltered (("tip.pgd %u: 0x%" PRIx64 ""),
			 packet->payload.ip.ipc,
			 packet->payload.ip.ip);
      break;

    case ppt_fup:
      printf_unfiltered (("fup %u: 0x%" PRIx64 ""),
			 packet->payload.ip.ipc,
			 packet->payload.ip.ip);
      break;

    case ppt_tnt_8:
      printf_unfiltered (("tnt-8 %u: 0x%" PRIx64 ""),
			 packet->payload.tnt.bit_size,
			 packet->payload.tnt.payload);
      break;

    case ppt_tnt_64:
      printf_unfiltered (("tnt-64 %u: 0x%" PRIx64 ""),
			 packet->payload.tnt.bit_size,
			 packet->payload.tnt.payload);
      break;

    case ppt_pip:
      printf_unfiltered (("pip %" PRIx64 "%s"), packet->payload.pip.cr3,
			 packet->payload.pip.nr ? (" nr") : (""));
      break;

    case ppt_tsc:
      printf_unfiltered (("tsc %" PRIx64 ""), packet->payload.tsc.tsc);
      break;

    case ppt_cbr:
      printf_unfiltered (("cbr %u"), packet->payload.cbr.ratio);
      break;

    case ppt_mode:
      /* Mode packets carry a sub-leaf selecting the payload layout.  */
      switch (packet->payload.mode.leaf)
	{
	default:
	  printf_unfiltered (("mode %u"), packet->payload.mode.leaf);
	  break;

	case pt_mol_exec:
	  printf_unfiltered (("mode.exec%s%s"),
			     packet->payload.mode.bits.exec.csl
			     ? (" cs.l") : (""),
			     packet->payload.mode.bits.exec.csd
			     ? (" cs.d") : (""));
	  break;

	case pt_mol_tsx:
	  printf_unfiltered (("mode.tsx%s%s"),
			     packet->payload.mode.bits.tsx.intx
			     ? (" intx") : (""),
			     packet->payload.mode.bits.tsx.abrt
			     ? (" abrt") : (""));
	  break;
	}
      break;

    case ppt_ovf:
      printf_unfiltered (("ovf"));
      break;

    case ppt_stop:
      printf_unfiltered (("stop"));
      break;

    case ppt_vmcs:
      printf_unfiltered (("vmcs %" PRIx64 ""), packet->payload.vmcs.base);
      break;

    case ppt_tma:
      printf_unfiltered (("tma %x %x"), packet->payload.tma.ctc,
			 packet->payload.tma.fc);
      break;

    case ppt_mtc:
      printf_unfiltered (("mtc %x"), packet->payload.mtc.ctc);
      break;

    case ppt_cyc:
      printf_unfiltered (("cyc %" PRIx64 ""), packet->payload.cyc.value);
      break;

    case ppt_mnt:
      printf_unfiltered (("mnt %" PRIx64 ""), packet->payload.mnt.payload);
      break;
    }
}
2937
/* Decode packets into MAINT using DECODER.  The outer loop synchronizes
   onto successive PSB points; the inner loop reads packets until the next
   decode error.  Decode errors other than end-of-stream are recorded as
   error packets and reported as warnings.  */

static void
btrace_maint_decode_pt (struct btrace_maint_info *maint,
			struct pt_packet_decoder *decoder)
{
  int errcode;

  for (;;)
    {
      struct btrace_pt_packet packet;

      /* Synchronize onto the next PSB; a negative value is an error.  */
      errcode = pt_pkt_sync_forward (decoder);
      if (errcode < 0)
	break;

      for (;;)
	{
	  pt_pkt_get_offset (decoder, &packet.offset);

	  errcode = pt_pkt_next (decoder, &packet.packet,
				 sizeof(packet.packet));
	  if (errcode < 0)
	    break;

	  /* Optionally drop PAD packets to keep the history compact.  */
	  if (maint_btrace_pt_skip_pad == 0 || packet.packet.type != ppt_pad)
	    {
	      packet.errcode = pt_errcode (errcode);
	      VEC_safe_push (btrace_pt_packet_s, maint->variant.pt.packets,
			     &packet);
	    }
	}

      if (errcode == -pte_eos)
	break;

      /* Record the decode error at its offset, then try to re-sync.  */
      packet.errcode = pt_errcode (errcode);
      VEC_safe_push (btrace_pt_packet_s, maint->variant.pt.packets,
		     &packet);

      warning (_("Error at trace offset 0x%" PRIx64 ": %s."),
	       packet.offset, pt_errstr (packet.errcode));
    }

  if (errcode != -pte_eos)
    warning (_("Failed to synchronize onto the Intel Processor Trace "
	       "stream: %s."), pt_errstr (pt_errcode (errcode)));
}
2986
2987/* Update the packet history in BTINFO. */
2988
2989static void
2990btrace_maint_update_pt_packets (struct btrace_thread_info *btinfo)
2991{
2992 volatile struct gdb_exception except;
2993 struct pt_packet_decoder *decoder;
2994 struct btrace_data_pt *pt;
2995 struct pt_config config;
2996 int errcode;
2997
2998 pt = &btinfo->data.variant.pt;
2999
3000 /* Nothing to do if there is no trace. */
3001 if (pt->size == 0)
3002 return;
3003
3004 memset (&config, 0, sizeof(config));
3005
3006 config.size = sizeof (config);
3007 config.begin = pt->data;
3008 config.end = pt->data + pt->size;
3009
3010 config.cpu.vendor = pt_translate_cpu_vendor (pt->config.cpu.vendor);
3011 config.cpu.family = pt->config.cpu.family;
3012 config.cpu.model = pt->config.cpu.model;
3013 config.cpu.stepping = pt->config.cpu.stepping;
3014
3015 errcode = pt_cpu_errata (&config.errata, &config.cpu);
3016 if (errcode < 0)
bc504a31 3017 error (_("Failed to configure the Intel Processor Trace decoder: %s."),
b0627500
MM
3018 pt_errstr (pt_errcode (errcode)));
3019
3020 decoder = pt_pkt_alloc_decoder (&config);
3021 if (decoder == NULL)
bc504a31 3022 error (_("Failed to allocate the Intel Processor Trace decoder."));
b0627500
MM
3023
3024 TRY
3025 {
3026 btrace_maint_decode_pt (&btinfo->maint, decoder);
3027 }
3028 CATCH (except, RETURN_MASK_ALL)
3029 {
3030 pt_pkt_free_decoder (decoder);
3031
3032 if (except.reason < 0)
3033 throw_exception (except);
3034 }
3035 END_CATCH
3036
3037 pt_pkt_free_decoder (decoder);
3038}
3039
#endif /* defined (HAVE_LIBIPT) */
3041
/* Update the packet maintenance information for BTINFO and store the
   low and high bounds into BEGIN and END, respectively.
   Store the current iterator state into FROM and TO.
   For formats without a packet history everything is reported as zero.  */

static void
btrace_maint_update_packets (struct btrace_thread_info *btinfo,
			     unsigned int *begin, unsigned int *end,
			     unsigned int *from, unsigned int *to)
{
  switch (btinfo->data.format)
    {
    default:
      /* No packet history for this trace format.  */
      *begin = 0;
      *end = 0;
      *from = 0;
      *to = 0;
      break;

    case BTRACE_FORMAT_BTS:
      /* Nothing to do - we operate directly on BTINFO->DATA.  */
      *begin = 0;
      *end = VEC_length (btrace_block_s, btinfo->data.variant.bts.blocks);
      *from = btinfo->maint.variant.bts.packet_history.begin;
      *to = btinfo->maint.variant.bts.packet_history.end;
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      /* Decode the raw trace lazily, on first use.  */
      if (VEC_empty (btrace_pt_packet_s, btinfo->maint.variant.pt.packets))
	btrace_maint_update_pt_packets (btinfo);

      *begin = 0;
      *end = VEC_length (btrace_pt_packet_s, btinfo->maint.variant.pt.packets);
      *from = btinfo->maint.variant.pt.packet_history.begin;
      *to = btinfo->maint.variant.pt.packet_history.end;
      break;
#endif /* defined (HAVE_LIBIPT) */
    }
}
3081
/* Print packets in BTINFO from BEGIN (inclusive) until END (exclusive) and
   update the current iterator position.

   BEGIN and END index into the per-format packet vector; callers obtain
   valid bounds from btrace_maint_update_packets.  Formats other than BTS
   and (when available) PT are silently ignored.  */

static void
btrace_maint_print_packets (struct btrace_thread_info *btinfo,
			    unsigned int begin, unsigned int end)
{
  switch (btinfo->data.format)
    {
    default:
      break;

    case BTRACE_FORMAT_BTS:
      {
	VEC (btrace_block_s) *blocks;
	unsigned int blk;

	blocks = btinfo->data.variant.bts.blocks;
	for (blk = begin; blk < end; ++blk)
	  {
	    const btrace_block_s *block;

	    block = VEC_index (btrace_block_s, blocks, blk);

	    /* One line per block: index, then the block's code range.  */
	    printf_unfiltered ("%u\tbegin: %s, end: %s\n", blk,
			       core_addr_to_string_nz (block->begin),
			       core_addr_to_string_nz (block->end));
	  }

	/* Remember the printed range as the new iterator position.  */
	btinfo->maint.variant.bts.packet_history.begin = begin;
	btinfo->maint.variant.bts.packet_history.end = end;
      }
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      {
	VEC (btrace_pt_packet_s) *packets;
	unsigned int pkt;

	packets = btinfo->maint.variant.pt.packets;
	for (pkt = begin; pkt < end; ++pkt)
	  {
	    const struct btrace_pt_packet *packet;

	    packet = VEC_index (btrace_pt_packet_s, packets, pkt);

	    /* Index and byte offset of the packet in the raw buffer.  */
	    printf_unfiltered ("%u\t", pkt);
	    printf_unfiltered ("0x%" PRIx64 "\t", packet->offset);

	    /* Packets that failed to decode carry the decode error.  */
	    if (packet->errcode == pte_ok)
	      pt_print_packet (&packet->packet);
	    else
	      printf_unfiltered ("[error: %s]", pt_errstr (packet->errcode));

	    printf_unfiltered ("\n");
	  }

	/* Remember the printed range as the new iterator position.  */
	btinfo->maint.variant.pt.packet_history.begin = begin;
	btinfo->maint.variant.pt.packet_history.end = end;
      }
      break;
#endif /* defined (HAVE_LIBIPT) */
    }
}
3147
/* Read a number from an argument string.

   Skip leading whitespace, parse an unsigned decimal number, and advance
   *ARG past the parsed text.  Calls error () if the text does not start
   with a digit or if the value does not fit into an unsigned int.  */

static unsigned int
get_uint (char **arg)
{
  char *begin, *end, *pos;
  unsigned long number;

  begin = *arg;
  pos = skip_spaces (begin);

  /* Cast to unsigned char: passing a plain (possibly negative) char to
     isdigit is undefined behavior (CERT STR37-C).  */
  if (!isdigit ((unsigned char) *pos))
    error (_("Expected positive number, got: %s."), pos);

  number = strtoul (pos, &end, 10);
  if (number > UINT_MAX)
    error (_("Number too big."));

  /* Advance past the skipped whitespace and the parsed digits.  */
  *arg += (end - begin);

  return (unsigned int) number;
}
3170
/* Read a context size from an argument string.

   Skip leading whitespace and parse a decimal number, advancing *ARG
   past the parsed text.  Calls error () if the text does not start with
   a digit.  */

static int
get_context_size (char **arg)
{
  char *pos;

  pos = skip_spaces (*arg);

  /* Cast to unsigned char: passing a plain (possibly negative) char to
     isdigit is undefined behavior (CERT STR37-C).  The unused local
     'number' was removed.  */
  if (!isdigit ((unsigned char) *pos))
    error (_("Expected positive number, got: %s."), pos);

  return strtol (pos, arg, 10);
}
3186
/* Complain about junk at the end of an argument string.

   Does nothing when ARG is exhausted; otherwise reports the trailing
   text via error ().  */

static void
no_chunk (char *arg)
{
  if (*arg == '\0')
    return;

  error (_("Junk after argument: %s."), arg);
}
3195
/* The "maintenance btrace packet-history" command.

   ARG selects the packet range to print:
     (none) or "+"    - the next SIZE (10) packets after the last print
     "-"              - the SIZE packets before the last print
     "N"              - SIZE packets starting at packet N
     "N,M"            - packets N through M (M silently clamped)
     "N,+K" / "N,-K"  - K packets after/up-to-and-including N.  */

static void
maint_btrace_packet_history_cmd (char *arg, int from_tty)
{
  struct btrace_thread_info *btinfo;
  struct thread_info *tp;
  unsigned int size, begin, end, from, to;

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  /* Default window of ten packets, matching the command's help text.  */
  size = 10;
  btinfo = &tp->btrace;

  /* BEGIN/END bound the packet vector; FROM/TO is the last-printed
     range (the iterator).  */
  btrace_maint_update_packets (btinfo, &begin, &end, &from, &to);
  if (begin == end)
    {
      printf_unfiltered (_("No trace.\n"));
      return;
    }

  if (arg == NULL || *arg == 0 || strcmp (arg, "+") == 0)
    {
      /* Continue forward from the previous print, clamped to END.  */
      from = to;

      if (end - from < size)
	size = end - from;
      to = from + size;
    }
  else if (strcmp (arg, "-") == 0)
    {
      /* Step backward from the previous print, clamped to BEGIN.  */
      to = from;

      if (to - begin < size)
	size = to - begin;
      from = to - size;
    }
  else
    {
      from = get_uint (&arg);
      if (end <= from)
	error (_("'%u' is out of range."), from);

      arg = skip_spaces (arg);
      if (*arg == ',')
	{
	  arg = skip_spaces (++arg);

	  if (*arg == '+')
	    {
	      /* "N,+K": K packets starting at N, clamped to END.  */
	      arg += 1;
	      size = get_context_size (&arg);

	      no_chunk (arg);

	      if (end - from < size)
		size = end - from;
	      to = from + size;
	    }
	  else if (*arg == '-')
	    {
	      /* "N,-K": K packets ending at (and including) N.  */
	      arg += 1;
	      size = get_context_size (&arg);

	      no_chunk (arg);

	      /* Include the packet given as first argument.  */
	      from += 1;
	      to = from;

	      if (to - begin < size)
		size = to - begin;
	      from = to - size;
	    }
	  else
	    {
	      /* "N,M": explicit inclusive range.  */
	      to = get_uint (&arg);

	      /* Include the packet at the second argument and silently
		 truncate the range.  */
	      if (to < end)
		to += 1;
	      else
		to = end;

	      no_chunk (arg);
	    }
	}
      else
	{
	  /* "N": a default-sized window starting at N.  */
	  no_chunk (arg);

	  if (end - from < size)
	    size = end - from;
	  to = from + size;
	}

      /* Repeating an explicit range would print the same packets again;
	 the "+"/"-"/empty forms above remain repeatable.  */
      dont_repeat ();
    }

  btrace_maint_print_packets (btinfo, from, to);
}
3300
3301/* The "maintenance btrace clear-packet-history" command. */
3302
3303static void
3304maint_btrace_clear_packet_history_cmd (char *args, int from_tty)
3305{
3306 struct btrace_thread_info *btinfo;
3307 struct thread_info *tp;
3308
3309 if (args != NULL && *args != 0)
3310 error (_("Invalid argument."));
3311
3312 tp = find_thread_ptid (inferior_ptid);
3313 if (tp == NULL)
3314 error (_("No thread."));
3315
3316 btinfo = &tp->btrace;
3317
3318 /* Must clear the maint data before - it depends on BTINFO->DATA. */
3319 btrace_maint_clear (btinfo);
3320 btrace_data_clear (&btinfo->data);
3321}
3322
3323/* The "maintenance btrace clear" command. */
3324
3325static void
3326maint_btrace_clear_cmd (char *args, int from_tty)
3327{
3328 struct btrace_thread_info *btinfo;
3329 struct thread_info *tp;
3330
3331 if (args != NULL && *args != 0)
3332 error (_("Invalid argument."));
3333
3334 tp = find_thread_ptid (inferior_ptid);
3335 if (tp == NULL)
3336 error (_("No thread."));
3337
3338 btrace_clear (tp);
3339}
3340
3341/* The "maintenance btrace" command. */
3342
3343static void
3344maint_btrace_cmd (char *args, int from_tty)
3345{
3346 help_list (maint_btrace_cmdlist, "maintenance btrace ", all_commands,
3347 gdb_stdout);
3348}
3349
3350/* The "maintenance set btrace" command. */
3351
3352static void
3353maint_btrace_set_cmd (char *args, int from_tty)
3354{
3355 help_list (maint_btrace_set_cmdlist, "maintenance set btrace ", all_commands,
3356 gdb_stdout);
3357}
3358
3359/* The "maintenance show btrace" command. */
3360
3361static void
3362maint_btrace_show_cmd (char *args, int from_tty)
3363{
3364 help_list (maint_btrace_show_cmdlist, "maintenance show btrace ",
3365 all_commands, gdb_stdout);
3366}
3367
3368/* The "maintenance set btrace pt" command. */
3369
3370static void
3371maint_btrace_pt_set_cmd (char *args, int from_tty)
3372{
3373 help_list (maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ",
3374 all_commands, gdb_stdout);
3375}
3376
3377/* The "maintenance show btrace pt" command. */
3378
3379static void
3380maint_btrace_pt_show_cmd (char *args, int from_tty)
3381{
3382 help_list (maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ",
3383 all_commands, gdb_stdout);
3384}
3385
/* The "maintenance info btrace" command.

   Prints the branch trace configuration of the current thread: the
   trace format and, per format, the number of packets (plus, for PT,
   the libipt library version).  */

static void
maint_info_btrace_cmd (char *args, int from_tty)
{
  struct btrace_thread_info *btinfo;
  struct thread_info *tp;
  const struct btrace_config *conf;

  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  btinfo = &tp->btrace;

  conf = btrace_conf (btinfo);
  if (conf == NULL)
    error (_("No btrace configuration."));

  printf_unfiltered (_("Format: %s.\n"),
		     btrace_format_string (conf->format));

  switch (conf->format)
    {
    default:
      break;

    case BTRACE_FORMAT_BTS:
      /* For BTS the packets are the blocks in BTINFO->DATA.  */
      printf_unfiltered (_("Number of packets: %u.\n"),
			 VEC_length (btrace_block_s,
				     btinfo->data.variant.bts.blocks));
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      {
	struct pt_version version;

	version = pt_library_version ();
	printf_unfiltered (_("Version: %u.%u.%u%s.\n"), version.major,
			   version.minor, version.build,
			   version.ext != NULL ? version.ext : "");

	/* Decode the raw trace (if not done yet) so we can count the
	   packets.  */
	btrace_maint_update_pt_packets (btinfo);
	printf_unfiltered (_("Number of packets: %u.\n"),
			   VEC_length (btrace_pt_packet_s,
				       btinfo->maint.variant.pt.packets));
      }
      break;
#endif /* defined (HAVE_LIBIPT) */
    }
}
3441
/* The "maint show btrace pt skip-pad" show value function.

   VALUE is the printable form of maint_btrace_pt_skip_pad, provided by
   the set/show machinery.  */

static void
show_maint_btrace_pt_skip_pad  (struct ui_file *file, int from_tty,
				  struct cmd_list_element *c,
				  const char *value)
{
  fprintf_filtered (file, _("Skip PAD packets is %s.\n"), value);
}
3451
3452
/* Initialize btrace maintenance commands.

   Registers the "maintenance (info|set|show) btrace" command trees and
   the packet-history commands.  Called once at GDB startup via the
   _initialize_* auto-registration mechanism.  */

void _initialize_btrace (void);
void
_initialize_btrace (void)
{
  /* "maintenance info btrace" - show the trace configuration.  */
  add_cmd ("btrace", class_maintenance, maint_info_btrace_cmd,
	   _("Info about branch tracing data."), &maintenanceinfolist);

  /* The "maintenance btrace" prefix and its set/show siblings.  */
  add_prefix_cmd ("btrace", class_maintenance, maint_btrace_cmd,
		  _("Branch tracing maintenance commands."),
		  &maint_btrace_cmdlist, "maintenance btrace ",
		  0, &maintenancelist);

  add_prefix_cmd ("btrace", class_maintenance, maint_btrace_set_cmd, _("\
Set branch tracing specific variables."),
		  &maint_btrace_set_cmdlist, "maintenance set btrace ",
		  0, &maintenance_set_cmdlist);

  add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_set_cmd, _("\
Set Intel Processor Trace specific variables."),
		  &maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ",
		  0, &maint_btrace_set_cmdlist);

  add_prefix_cmd ("btrace", class_maintenance, maint_btrace_show_cmd, _("\
Show branch tracing specific variables."),
		  &maint_btrace_show_cmdlist, "maintenance show btrace ",
		  0, &maintenance_show_cmdlist);

  add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_show_cmd, _("\
Show Intel Processor Trace specific variables."),
		  &maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ",
		  0, &maint_btrace_show_cmdlist);

  /* "maintenance set/show btrace pt skip-pad" - backed by the
     maint_btrace_pt_skip_pad flag consulted during packet decode.  */
  add_setshow_boolean_cmd ("skip-pad", class_maintenance,
			   &maint_btrace_pt_skip_pad, _("\
Set whether PAD packets should be skipped in the btrace packet history."), _("\
Show whether PAD packets should be skipped in the btrace packet history."),_("\
When enabled, PAD packets are ignored in the btrace packet history."),
			   NULL, show_maint_btrace_pt_skip_pad,
			   &maint_btrace_pt_set_cmdlist,
			   &maint_btrace_pt_show_cmdlist);

  /* Packet-history browsing and clearing commands.  */
  add_cmd ("packet-history", class_maintenance, maint_btrace_packet_history_cmd,
	   _("Print the raw branch tracing data.\n\
With no argument, print ten more packets after the previous ten-line print.\n\
With '-' as argument print ten packets before a previous ten-line print.\n\
One argument specifies the starting packet of a ten-line print.\n\
Two arguments with comma between specify starting and ending packets to \
print.\n\
Preceded with '+'/'-' the second argument specifies the distance from the \
first.\n"),
	   &maint_btrace_cmdlist);

  add_cmd ("clear-packet-history", class_maintenance,
	   maint_btrace_clear_packet_history_cmd,
	   _("Clears the branch tracing packet history.\n\
Discards the raw branch tracing data but not the execution history data.\n\
"),
	   &maint_btrace_cmdlist);

  add_cmd ("clear", class_maintenance, maint_btrace_clear_cmd,
	   _("Clears the branch tracing data.\n\
Discards the raw branch tracing data and the execution history data.\n\
The next 'record' command will fetch the branch tracing data anew.\n\
"),
	   &maint_btrace_cmdlist);

}
This page took 0.626341 seconds and 4 git commands to generate.