/* Branch trace support for GDB, the GNU debugger.

   Copyright (C) 2013-2019 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#ifndef BTRACE_H
#define BTRACE_H

/* Branch tracing (btrace) is a per-thread control-flow execution trace of the
   inferior.  For presentation purposes, the branch trace is represented as a
   list of sequential control-flow blocks, one such list per thread.  */

#include "gdbsupport/btrace-common.h"
#include "target/waitstatus.h" /* For enum target_stop_reason.  */
#include "gdbsupport/enum-flags.h"

#if defined (HAVE_LIBIPT)
# include <intel-pt.h>
#endif

#include <vector>

struct thread_info;
struct btrace_function;

/* A coarse instruction classification.  */
enum btrace_insn_class
{
  /* The instruction is something not listed below.  */
  BTRACE_INSN_OTHER,

  /* The instruction is a function call.  */
  BTRACE_INSN_CALL,

  /* The instruction is a function return.  */
  BTRACE_INSN_RETURN,

  /* The instruction is an unconditional jump.  */
  BTRACE_INSN_JUMP
};

/* Instruction flags.  */
enum btrace_insn_flag
{
  /* The instruction has been executed speculatively.  */
  BTRACE_INSN_FLAG_SPECULATIVE = (1 << 0)
};
/* Define btrace_insn_flags as a type-safe bit combination of the above.  */
DEF_ENUM_FLAGS_TYPE (enum btrace_insn_flag, btrace_insn_flags);

/* A branch trace instruction.

   This represents a single instruction in a branch trace.  */
struct btrace_insn
{
  /* The address of this instruction.  */
  CORE_ADDR pc;

  /* The size of this instruction in bytes.  */
  gdb_byte size;

  /* The instruction class of this instruction.  */
  enum btrace_insn_class iclass;

  /* A bit vector of BTRACE_INSN_FLAGS.  */
  btrace_insn_flags flags;
};

/* Flags for btrace function segments.  */
enum btrace_function_flag
{
  /* The 'up' link interpretation.
     If set, it points to the function segment we returned to.
     If clear, it points to the function segment we called from.  */
  BFUN_UP_LINKS_TO_RET = (1 << 0),

  /* The 'up' link points to a tail call.  This obviously only makes sense
     if BFUN_UP_LINKS_TO_RET is clear.  */
  BFUN_UP_LINKS_TO_TAILCALL = (1 << 1)
};
/* Define btrace_function_flags as a type-safe bit combination of the above.  */
DEF_ENUM_FLAGS_TYPE (enum btrace_function_flag, btrace_function_flags);

/* Decode errors for the BTS recording format.  */
enum btrace_bts_error
{
  /* The instruction trace overflowed the end of the trace block.  */
  BDE_BTS_OVERFLOW = 1,

  /* The instruction size could not be determined.  */
  BDE_BTS_INSN_SIZE
};

/* Decode errors for the Intel Processor Trace recording format.  */
enum btrace_pt_error
{
  /* The user cancelled trace processing.  */
  BDE_PT_USER_QUIT = 1,

  /* Tracing was temporarily disabled.  */
  BDE_PT_DISABLED,

  /* Trace recording overflowed.  */
  BDE_PT_OVERFLOW

  /* Negative numbers are used by the decoder library.  */
};

/* A branch trace function segment.

   This represents a function segment in a branch trace, i.e. a consecutive
   number of instructions belonging to the same function.

   In case of decode errors, we add an empty function segment to indicate
   the gap in the trace.

   We do not allow function segments without instructions otherwise.  */
struct btrace_function
{
  btrace_function (struct minimal_symbol *msym_, struct symbol *sym_,
		   unsigned int number_, unsigned int insn_offset_, int level_)
    : msym (msym_), sym (sym_), insn_offset (insn_offset_), number (number_),
      level (level_)
  {
  }

  /* The full and minimal symbol for the function.  Both may be NULL.  */
  struct minimal_symbol *msym;
  struct symbol *sym;

  /* The function segment numbers of the previous and next segment belonging to
     the same function.  If a function calls another function, the former will
     have at least two segments: one before the call and another after the
     return.  Will be zero if there is no such function segment.  */
  unsigned int prev = 0;
  unsigned int next = 0;

  /* The function segment number of the directly preceding function segment in
     a (fake) call stack.  Will be zero if there is no such function segment in
     the record.  */
  unsigned int up = 0;

  /* The instructions in this function segment.
     The instruction vector will be empty if the function segment
     represents a decode error.  */
  std::vector<btrace_insn> insn;

  /* The error code of a decode error that led to a gap.
     Must be zero unless INSN is empty; non-zero otherwise.  */
  int errcode = 0;

  /* The instruction number offset for the first instruction in this
     function segment.
     If INSN is empty this is the insn_offset of the succeeding function
     segment in control-flow order.  */
  unsigned int insn_offset;

  /* The 1-based function number in control-flow order.
     If INSN is empty indicating a gap in the trace due to a decode error,
     we still count the gap as a function.  */
  unsigned int number;

  /* The function level in a back trace across the entire branch trace.
     A caller's level is one lower than the level of its callee.

     Levels can be negative if we see returns for which we have not seen
     the corresponding calls.  The branch trace thread information provides
     a fixup to normalize function levels so the smallest level is zero.  */
  int level;

  /* A bit-vector of btrace_function_flag.  */
  btrace_function_flags flags = 0;
};

/* A branch trace instruction iterator.  */
struct btrace_insn_iterator
{
  /* The branch trace information for this thread.  Will never be NULL.  */
  const struct btrace_thread_info *btinfo;

  /* The index of the function segment in BTINFO->FUNCTIONS.  */
  unsigned int call_index;

  /* The index into the function segment's instruction vector.  */
  unsigned int insn_index;
};

/* A branch trace function call iterator.  */
struct btrace_call_iterator
{
  /* The branch trace information for this thread.  Will never be NULL.  */
  const struct btrace_thread_info *btinfo;

  /* The index of the function segment in BTINFO->FUNCTIONS.  */
  unsigned int index;
};

/* Branch trace iteration state for "record instruction-history".  */
struct btrace_insn_history
{
  /* The branch trace instruction range from BEGIN (inclusive) to
     END (exclusive) that has been covered last time.  */
  struct btrace_insn_iterator begin;
  struct btrace_insn_iterator end;
};

/* Branch trace iteration state for "record function-call-history".  */
struct btrace_call_history
{
  /* The branch trace function range from BEGIN (inclusive) to END (exclusive)
     that has been covered last time.  */
  struct btrace_call_iterator begin;
  struct btrace_call_iterator end;
};

/* Branch trace thread flags.  */
enum btrace_thread_flag : unsigned
{
  /* The thread is to be stepped forwards.  */
  BTHR_STEP = (1 << 0),

  /* The thread is to be stepped backwards.  */
  BTHR_RSTEP = (1 << 1),

  /* The thread is to be continued forwards.  */
  BTHR_CONT = (1 << 2),

  /* The thread is to be continued backwards.  */
  BTHR_RCONT = (1 << 3),

  /* The thread is to be moved.  Mask covering all four stepping/continuing
     directions above.  */
  BTHR_MOVE = (BTHR_STEP | BTHR_RSTEP | BTHR_CONT | BTHR_RCONT),

  /* The thread is to be stopped.  */
  BTHR_STOP = (1 << 4)
};
/* Define btrace_thread_flags as a type-safe bit combination of the above.  */
DEF_ENUM_FLAGS_TYPE (enum btrace_thread_flag, btrace_thread_flags);

#if defined (HAVE_LIBIPT)
/* A single decoded Intel PT packet.  */
struct btrace_pt_packet
{
  /* The offset in the trace stream.  */
  uint64_t offset;

  /* The decode error code.  */
  enum pt_error_code errcode;

  /* The decoded packet.  Only valid if ERRCODE == pte_ok.  */
  struct pt_packet packet;
};

/* Define functions operating on a vector of packets.
   NOTE(review): this uses GDB's legacy VEC API; presumably a candidate for
   std::vector like the rest of this header — confirm against gdb/common.  */
typedef struct btrace_pt_packet btrace_pt_packet_s;
DEF_VEC_O (btrace_pt_packet_s);
#endif /* defined (HAVE_LIBIPT) */

/* Branch trace iteration state for "maintenance btrace packet-history".  */
struct btrace_maint_packet_history
{
  /* The branch trace packet range from BEGIN (inclusive) to
     END (exclusive) that has been covered last time.  */
  unsigned int begin;
  unsigned int end;
};

/* Branch trace maintenance information per thread.

   This information is used by "maintenance btrace" commands.  */
struct btrace_maint_info
{
  /* Most information is format-specific.
     The format can be found in the BTRACE.DATA.FORMAT field of each thread.  */
  union
  {
    /* BTRACE.DATA.FORMAT == BTRACE_FORMAT_BTS  */
    struct
    {
      /* The packet history iterator.
	 We are iterating over BTRACE.DATA.FORMAT.VARIANT.BTS.BLOCKS.  */
      struct btrace_maint_packet_history packet_history;
    } bts;

#if defined (HAVE_LIBIPT)
    /* BTRACE.DATA.FORMAT == BTRACE_FORMAT_PT  */
    struct
    {
      /* A vector of decoded packets.  */
      VEC (btrace_pt_packet_s) *packets;

      /* The packet history iterator.
	 We are iterating over the above PACKETS vector.  */
      struct btrace_maint_packet_history packet_history;
    } pt;
#endif /* defined (HAVE_LIBIPT) */
  } variant;
};

/* Branch trace information per thread.

   This represents the branch trace configuration as well as the entry point
   into the branch trace data.  For the latter, it also contains the index into
   an array of branch trace blocks used for iterating through the branch trace
   blocks of a thread.  */
struct btrace_thread_info
{
  /* The target branch trace information for this thread.

     This contains the branch trace configuration as well as any
     target-specific information necessary for implementing branch tracing on
     the underlying architecture.  */
  struct btrace_target_info *target;

  /* The raw branch trace data for the below branch trace.  */
  struct btrace_data data;

  /* Vector of decoded function segments in execution flow order.
     Note that the numbering for btrace function segments starts with 1, so
     function segment i will be at index (i - 1).  */
  std::vector<btrace_function> functions;

  /* The function level offset.  When added to each function's LEVEL,
     this normalizes the function levels such that the smallest level
     becomes zero.  */
  int level;

  /* The number of gaps in the trace.  */
  unsigned int ngaps;

  /* A bit-vector of btrace_thread_flag.  */
  btrace_thread_flags flags;

  /* The instruction history iterator.  */
  struct btrace_insn_history *insn_history;

  /* The function call history iterator.  */
  struct btrace_call_history *call_history;

  /* The current replay position.  NULL if not replaying.
     Gaps are skipped during replay, so REPLAY always points to a valid
     instruction.  */
  struct btrace_insn_iterator *replay;

  /* Why the thread stopped, if we need to track it.  */
  enum target_stop_reason stop_reason;

  /* Maintenance information.  */
  struct btrace_maint_info maint;
};

/* Enable branch tracing for a thread.  */
extern void btrace_enable (struct thread_info *tp,
			   const struct btrace_config *conf);

/* Get the branch trace configuration for a thread.
   Return NULL if branch tracing is not enabled for that thread.  */
extern const struct btrace_config *
  btrace_conf (const struct btrace_thread_info *);

/* Disable branch tracing for a thread.
   This will also delete the current branch trace data.  */
extern void btrace_disable (struct thread_info *);

/* Disable branch tracing for a thread during teardown.
   This is similar to btrace_disable, except that it will use
   target_teardown_btrace instead of target_disable_btrace.  */
extern void btrace_teardown (struct thread_info *);

/* Return a human readable error string for the given ERRCODE in FORMAT.
   The pointer will never be NULL and must not be freed.  */

extern const char *btrace_decode_error (enum btrace_format format, int errcode);

/* Fetch the branch trace for a single thread.  If CPU is not NULL, assume
   CPU for trace decode.  */
extern void btrace_fetch (struct thread_info *,
			  const struct btrace_cpu *cpu);

/* Clear the branch trace for a single thread.  */
extern void btrace_clear (struct thread_info *);

/* Clear the branch trace for all threads when an object file goes away.  */
extern void btrace_free_objfile (struct objfile *);

/* Parse a branch trace xml document XML into DATA.  */
extern void parse_xml_btrace (struct btrace_data *data, const char *xml);

/* Parse a branch trace configuration xml document XML into CONF.  */
extern void parse_xml_btrace_conf (struct btrace_config *conf, const char *xml);

/* Dereference a branch trace instruction iterator.  Return a pointer to the
   instruction the iterator points to.
   May return NULL if the iterator points to a gap in the trace.  */
extern const struct btrace_insn *
  btrace_insn_get (const struct btrace_insn_iterator *);

/* Return the error code for a branch trace instruction iterator.  Returns zero
   if there is no error, i.e. the instruction is valid.  */
extern int btrace_insn_get_error (const struct btrace_insn_iterator *);

/* Return the instruction number for a branch trace iterator.
   Returns one past the maximum instruction number for the end iterator.  */
extern unsigned int btrace_insn_number (const struct btrace_insn_iterator *);

/* Initialize a branch trace instruction iterator to point to the begin/end of
   the branch trace.  Throws an error if there is no branch trace.  */
extern void btrace_insn_begin (struct btrace_insn_iterator *,
			       const struct btrace_thread_info *);
extern void btrace_insn_end (struct btrace_insn_iterator *,
			     const struct btrace_thread_info *);

/* Increment/decrement a branch trace instruction iterator by at most STRIDE
   instructions.  Return the number of instructions by which the instruction
   iterator has been advanced.
   Returns zero, if the operation failed or STRIDE had been zero.  */
extern unsigned int btrace_insn_next (struct btrace_insn_iterator *,
				      unsigned int stride);
extern unsigned int btrace_insn_prev (struct btrace_insn_iterator *,
				      unsigned int stride);

/* Compare two branch trace instruction iterators.
   Return a negative number if LHS < RHS.
   Return zero if LHS == RHS.
   Return a positive number if LHS > RHS.  */
extern int btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
			    const struct btrace_insn_iterator *rhs);

/* Find an instruction or gap in the function branch trace by its number.
   If the instruction is found, initialize the branch trace instruction
   iterator to point to this instruction and return non-zero.
   Return zero otherwise.  */
extern int btrace_find_insn_by_number (struct btrace_insn_iterator *,
				       const struct btrace_thread_info *,
				       unsigned int number);

/* Dereference a branch trace call iterator.  Return a pointer to the
   function the iterator points to or NULL if the iterator points past
   the end of the branch trace.  */
extern const struct btrace_function *
  btrace_call_get (const struct btrace_call_iterator *);

/* Return the function number for a branch trace call iterator.
   Returns one past the maximum function number for the end iterator.
   Returns zero if the iterator does not point to a valid function.  */
extern unsigned int btrace_call_number (const struct btrace_call_iterator *);

/* Initialize a branch trace call iterator to point to the begin/end of
   the branch trace.  Throws an error if there is no branch trace.  */
extern void btrace_call_begin (struct btrace_call_iterator *,
			       const struct btrace_thread_info *);
extern void btrace_call_end (struct btrace_call_iterator *,
			     const struct btrace_thread_info *);

/* Increment/decrement a branch trace call iterator by at most STRIDE function
   segments.  Return the number of function segments by which the call
   iterator has been advanced.
   Returns zero, if the operation failed or STRIDE had been zero.  */
extern unsigned int btrace_call_next (struct btrace_call_iterator *,
				      unsigned int stride);
extern unsigned int btrace_call_prev (struct btrace_call_iterator *,
				      unsigned int stride);

/* Compare two branch trace call iterators.
   Return a negative number if LHS < RHS.
   Return zero if LHS == RHS.
   Return a positive number if LHS > RHS.  */
extern int btrace_call_cmp (const struct btrace_call_iterator *lhs,
			    const struct btrace_call_iterator *rhs);

/* Find a function in the function branch trace by its NUMBER.
   If the function is found, initialize the branch trace call
   iterator to point to this function and return non-zero.
   Return zero otherwise.  */
extern int btrace_find_call_by_number (struct btrace_call_iterator *,
				       const struct btrace_thread_info *,
				       unsigned int number);

/* Set the branch trace instruction history from BEGIN (inclusive) to
   END (exclusive).  */
extern void btrace_set_insn_history (struct btrace_thread_info *,
				     const struct btrace_insn_iterator *begin,
				     const struct btrace_insn_iterator *end);

/* Set the branch trace function call history from BEGIN (inclusive) to
   END (exclusive).  */
extern void btrace_set_call_history (struct btrace_thread_info *,
				     const struct btrace_call_iterator *begin,
				     const struct btrace_call_iterator *end);

/* Determine if branch tracing is currently replaying TP.  */
extern int btrace_is_replaying (struct thread_info *tp);

/* Return non-zero if the branch trace for TP is empty; zero otherwise.  */
extern int btrace_is_empty (struct thread_info *tp);

#endif /* BTRACE_H */