/* Branch trace support for GDB, the GNU debugger.

   Copyright (C) 2013-2017 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#ifndef BTRACE_H
#define BTRACE_H

/* Branch tracing (btrace) is a per-thread control-flow execution trace of the
   inferior.  For presentation purposes, the branch trace is represented as a
   list of sequential control-flow blocks, one such list per thread.  */

#include "btrace-common.h"
#include "target/waitstatus.h" /* For enum target_stop_reason.  */
#include "common/enum-flags.h"

#if defined (HAVE_LIBIPT)
#  include <intel-pt.h>
#endif

struct thread_info;
struct btrace_function;

/* A coarse instruction classification.  */
enum btrace_insn_class
{
  /* The instruction is something not listed below.  */
  BTRACE_INSN_OTHER,

  /* The instruction is a function call.  */
  BTRACE_INSN_CALL,

  /* The instruction is a function return.  */
  BTRACE_INSN_RETURN,

  /* The instruction is an unconditional jump.  */
  BTRACE_INSN_JUMP
};

/* Instruction flags.  */
enum btrace_insn_flag
{
  /* The instruction has been executed speculatively.  */
  BTRACE_INSN_FLAG_SPECULATIVE = (1 << 0)
};
DEF_ENUM_FLAGS_TYPE (enum btrace_insn_flag, btrace_insn_flags);

/* A branch trace instruction.

   This represents a single instruction in a branch trace.  */
struct btrace_insn
{
  /* The address of this instruction.  */
  CORE_ADDR pc;

  /* The size of this instruction in bytes.  */
  gdb_byte size;

  /* The instruction class of this instruction.  */
  enum btrace_insn_class iclass;

  /* A bit vector of BTRACE_INSN_FLAGS.  */
  btrace_insn_flags flags;
};

/* A vector of branch trace instructions.  */
typedef struct btrace_insn btrace_insn_s;
DEF_VEC_O (btrace_insn_s);
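
/* Usage sketch (illustrative): count the speculatively executed instructions
   in a vector INSNS of this type.  INSNS and NSPEC are hypothetical; only the
   VEC accessors and the flag defined above are assumed.

     unsigned int idx, nspec = 0;

     for (idx = 0; idx < VEC_length (btrace_insn_s, insns); ++idx)
       {
         const struct btrace_insn *insn = VEC_index (btrace_insn_s, insns, idx);

         if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
           nspec += 1;
       }
*/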

/* A doubly-linked list of branch trace function segments.  */
struct btrace_func_link
{
  struct btrace_function *prev;
  struct btrace_function *next;
};

/* Flags for btrace function segments.  */
enum btrace_function_flag
{
  /* The 'up' link interpretation.
     If set, it points to the function segment we returned to.
     If clear, it points to the function segment we called from.  */
  BFUN_UP_LINKS_TO_RET = (1 << 0),

  /* The 'up' link points to a tail call.  This obviously only makes sense
     if BFUN_UP_LINKS_TO_RET is clear.  */
  BFUN_UP_LINKS_TO_TAILCALL = (1 << 1)
};
DEF_ENUM_FLAGS_TYPE (enum btrace_function_flag, btrace_function_flags);

/* Decode errors for the BTS recording format.  */
enum btrace_bts_error
{
  /* The instruction trace overflowed the end of the trace block.  */
  BDE_BTS_OVERFLOW = 1,

  /* The instruction size could not be determined.  */
  BDE_BTS_INSN_SIZE
};

/* Decode errors for the Intel Processor Trace recording format.  */
enum btrace_pt_error
{
  /* The user cancelled trace processing.  */
  BDE_PT_USER_QUIT = 1,

  /* Tracing was temporarily disabled.  */
  BDE_PT_DISABLED,

  /* Trace recording overflowed.  */
  BDE_PT_OVERFLOW

  /* Negative numbers are used by the decoder library.  */
};

/* A branch trace function segment.

   This represents a function segment in a branch trace, i.e. a consecutive
   number of instructions belonging to the same function.

   In case of decode errors, we add an empty function segment to indicate
   the gap in the trace.

   We do not allow function segments without instructions otherwise.  */
struct btrace_function
{
  /* The full and minimal symbol for the function.  Both may be NULL.  */
  struct minimal_symbol *msym;
  struct symbol *sym;

  /* The previous and next segment belonging to the same function.
     If a function calls another function, the former will have at least
     two segments: one before the call and another after the return.  */
  struct btrace_func_link segment;

  /* The previous and next function in control flow order.  */
  struct btrace_func_link flow;

  /* The directly preceding function segment in a (fake) call stack.  */
  struct btrace_function *up;

  /* The instructions in this function segment.
     The instruction vector will be empty if the function segment
     represents a decode error.  */
  VEC (btrace_insn_s) *insn;

  /* The error code of a decode error that led to a gap.
     This is zero if INSN is non-empty and non-zero if INSN is empty,
     i.e. if this segment represents a gap in the trace.  */
  int errcode;

  /* The instruction number offset for the first instruction in this
     function segment.
     If INSN is empty, this is the insn_offset of the succeeding function
     segment in control-flow order.  */
  unsigned int insn_offset;

  /* The function number in control-flow order.
     If INSN is empty indicating a gap in the trace due to a decode error,
     we still count the gap as a function.  */
  unsigned int number;

  /* The function level in a back trace across the entire branch trace.
     A caller's level is one lower than the level of its callee.

     Levels can be negative if we see returns for which we have not seen
     the corresponding calls.  The branch trace thread information provides
     a fixup to normalize function levels so the smallest level is zero.  */
  int level;

  /* A bit-vector of btrace_function_flag.  */
  btrace_function_flags flags;
};
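
/* Usage sketch (illustrative): classify a function segment BFUN following the
   comments above.  BFUN, HANDLE_GAP and HANDLE_CALLER are hypothetical; only
   the fields and flags defined above are assumed.

     if (VEC_empty (btrace_insn_s, bfun->insn))
       handle_gap (bfun->errcode, bfun->insn_offset, bfun->number);
     else if (bfun->up != NULL
              && (bfun->flags & BFUN_UP_LINKS_TO_RET) == 0)
       handle_caller (bfun->up,
                      (bfun->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0);
*/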

typedef struct btrace_function *btrace_fun_p;
DEF_VEC_P (btrace_fun_p);

/* A branch trace instruction iterator.  */
struct btrace_insn_iterator
{
  /* The branch trace function segment containing the instruction.
     Will never be NULL.  */
  const struct btrace_function *function;

  /* The index into the function segment's instruction vector.  */
  unsigned int index;
};

/* A branch trace function call iterator.  */
struct btrace_call_iterator
{
  /* The branch trace information for this thread.  Will never be NULL.  */
  const struct btrace_thread_info *btinfo;

  /* The branch trace function segment.
     This will be NULL for the iterator pointing to the end of the trace.  */
  const struct btrace_function *function;
};

/* Branch trace iteration state for "record instruction-history".  */
struct btrace_insn_history
{
  /* The branch trace instruction range from BEGIN (inclusive) to
     END (exclusive) that has been covered last time.  */
  struct btrace_insn_iterator begin;
  struct btrace_insn_iterator end;
};

/* Branch trace iteration state for "record function-call-history".  */
struct btrace_call_history
{
  /* The branch trace function range from BEGIN (inclusive) to END (exclusive)
     that has been covered last time.  */
  struct btrace_call_iterator begin;
  struct btrace_call_iterator end;
};

/* Branch trace thread flags.  */
enum btrace_thread_flag
{
  /* The thread is to be stepped forwards.  */
  BTHR_STEP = (1 << 0),

  /* The thread is to be stepped backwards.  */
  BTHR_RSTEP = (1 << 1),

  /* The thread is to be continued forwards.  */
  BTHR_CONT = (1 << 2),

  /* The thread is to be continued backwards.  */
  BTHR_RCONT = (1 << 3),

  /* The thread is to be moved.  */
  BTHR_MOVE = (BTHR_STEP | BTHR_RSTEP | BTHR_CONT | BTHR_RCONT),

  /* The thread is to be stopped.  */
  BTHR_STOP = (1 << 4)
};
DEF_ENUM_FLAGS_TYPE (enum btrace_thread_flag, btrace_thread_flags);
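
/* Usage sketch (illustrative): request a reverse single-step for a thread and
   later test whether any move was requested.  BTINFO and PROCESS_MOVE are
   hypothetical; only the flags type defined above is assumed.

     btinfo->flags |= BTHR_RSTEP;

     if ((btinfo->flags & BTHR_MOVE) != 0)
       process_move (btinfo);
*/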

#if defined (HAVE_LIBIPT)
/* A packet.  */
struct btrace_pt_packet
{
  /* The offset in the trace stream.  */
  uint64_t offset;

  /* The decode error code.  */
  enum pt_error_code errcode;

  /* The decoded packet.  Only valid if ERRCODE == pte_ok.  */
  struct pt_packet packet;
};

/* Define functions operating on a vector of packets.  */
typedef struct btrace_pt_packet btrace_pt_packet_s;
DEF_VEC_O (btrace_pt_packet_s);
#endif /* defined (HAVE_LIBIPT) */

/* Branch trace iteration state for "maintenance btrace packet-history".  */
struct btrace_maint_packet_history
{
  /* The branch trace packet range from BEGIN (inclusive) to
     END (exclusive) that has been covered last time.  */
  unsigned int begin;
  unsigned int end;
};

/* Branch trace maintenance information per thread.

   This information is used by "maintenance btrace" commands.  */
struct btrace_maint_info
{
  /* Most information is format-specific.
     The format can be found in the BTRACE.DATA.FORMAT field of each thread.  */
  union
  {
    /* BTRACE.DATA.FORMAT == BTRACE_FORMAT_BTS  */
    struct
    {
      /* The packet history iterator.
         We are iterating over BTRACE.DATA.FORMAT.VARIANT.BTS.BLOCKS.  */
      struct btrace_maint_packet_history packet_history;
    } bts;

#if defined (HAVE_LIBIPT)
    /* BTRACE.DATA.FORMAT == BTRACE_FORMAT_PT  */
    struct
    {
      /* A vector of decoded packets.  */
      VEC (btrace_pt_packet_s) *packets;

      /* The packet history iterator.
         We are iterating over the above PACKETS vector.  */
      struct btrace_maint_packet_history packet_history;
    } pt;
#endif /* defined (HAVE_LIBIPT) */
  } variant;
};

/* Branch trace information per thread.

   This represents the branch trace configuration as well as the entry point
   into the branch trace data.  For the latter, it also contains the index into
   an array of branch trace blocks used for iterating through the branch trace
   blocks of a thread.  */
struct btrace_thread_info
{
  /* The target branch trace information for this thread.

     This contains the branch trace configuration as well as any
     target-specific information necessary for implementing branch tracing on
     the underlying architecture.  */
  struct btrace_target_info *target;

  /* The raw branch trace data for the below branch trace.  */
  struct btrace_data data;

  /* The current branch trace for this thread (both inclusive).

     The last instruction of END is the current instruction, which is not
     part of the execution history.
     Both will be NULL if there is no branch trace available.  If there is
     branch trace available, both will be non-NULL.  */
  struct btrace_function *begin;
  struct btrace_function *end;

  /* Vector of pointers to decoded function segments.  These are in execution
     order with the first element == BEGIN and the last element == END.  */
  VEC (btrace_fun_p) *functions;

  /* The function level offset.  When added to each function's LEVEL,
     this normalizes the function levels such that the smallest level
     becomes zero.  */
  int level;

  /* The number of gaps in the trace.  */
  unsigned int ngaps;

  /* A bit-vector of btrace_thread_flag.  */
  btrace_thread_flags flags;

  /* The instruction history iterator.  */
  struct btrace_insn_history *insn_history;

  /* The function call history iterator.  */
  struct btrace_call_history *call_history;

  /* The current replay position.  NULL if not replaying.
     Gaps are skipped during replay, so REPLAY always points to a valid
     instruction.  */
  struct btrace_insn_iterator *replay;

  /* Why the thread stopped, if we need to track it.  */
  enum target_stop_reason stop_reason;

  /* Maintenance information.  */
  struct btrace_maint_info maint;
};
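
/* Usage sketch (illustrative): compute the normalized back-trace level of a
   function segment BFUN within its thread's trace BTINFO, per the LEVEL
   comments above.  Both names are hypothetical.

     int normalized_level = btinfo->level + bfun->level;

   After this fixup the smallest level in the trace is zero.
*/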

/* Enable branch tracing for a thread.  */
extern void btrace_enable (struct thread_info *tp,
                           const struct btrace_config *conf);

/* Get the branch trace configuration for a thread.
   Return NULL if branch tracing is not enabled for that thread.  */
extern const struct btrace_config *
  btrace_conf (const struct btrace_thread_info *);

/* Disable branch tracing for a thread.
   This will also delete the current branch trace data.  */
extern void btrace_disable (struct thread_info *);

/* Disable branch tracing for a thread during teardown.
   This is similar to btrace_disable, except that it will use
   target_teardown_btrace instead of target_disable_btrace.  */
extern void btrace_teardown (struct thread_info *);

/* Return a human readable error string for the given ERRCODE in FORMAT.
   The pointer will never be NULL and must not be freed.  */

extern const char *btrace_decode_error (enum btrace_format format, int errcode);

/* Fetch the branch trace for a single thread.  */
extern void btrace_fetch (struct thread_info *);

/* Clear the branch trace for a single thread.  */
extern void btrace_clear (struct thread_info *);

/* Clear the branch trace for all threads when an object file goes away.  */
extern void btrace_free_objfile (struct objfile *);

/* Parse a branch trace xml document XML into DATA.  */
extern void parse_xml_btrace (struct btrace_data *data, const char *xml);

/* Parse a branch trace configuration xml document XML into CONF.  */
extern void parse_xml_btrace_conf (struct btrace_config *conf, const char *xml);

/* Dereference a branch trace instruction iterator.  Return a pointer to the
   instruction the iterator points to.
   May return NULL if the iterator points to a gap in the trace.  */
extern const struct btrace_insn *
  btrace_insn_get (const struct btrace_insn_iterator *);

/* Return the error code for a branch trace instruction iterator.  Returns zero
   if there is no error, i.e. the instruction is valid.  */
extern int btrace_insn_get_error (const struct btrace_insn_iterator *);

/* Return the instruction number for a branch trace iterator.
   Returns one past the maximum instruction number for the end iterator.  */
extern unsigned int btrace_insn_number (const struct btrace_insn_iterator *);

/* Initialize a branch trace instruction iterator to point to the begin/end of
   the branch trace.  Throws an error if there is no branch trace.  */
extern void btrace_insn_begin (struct btrace_insn_iterator *,
                               const struct btrace_thread_info *);
extern void btrace_insn_end (struct btrace_insn_iterator *,
                             const struct btrace_thread_info *);

/* Increment/decrement a branch trace instruction iterator by at most STRIDE
   instructions.  Return the number of instructions by which the instruction
   iterator has been advanced.
   Returns zero if the operation failed or if STRIDE was zero.  */
extern unsigned int btrace_insn_next (struct btrace_insn_iterator *,
                                      unsigned int stride);
extern unsigned int btrace_insn_prev (struct btrace_insn_iterator *,
                                      unsigned int stride);

/* Compare two branch trace instruction iterators.
   Return a negative number if LHS < RHS.
   Return zero if LHS == RHS.
   Return a positive number if LHS > RHS.  */
extern int btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
                            const struct btrace_insn_iterator *rhs);
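
/* Usage sketch (illustrative): visit every instruction in the recorded
   history of a thread TP, one instruction at a time.  TP, VISIT_INSN and
   VISIT_GAP are hypothetical, and TP->BTRACE is assumed to be the thread's
   btrace_thread_info; the trace is assumed to have been fetched already
   (see btrace_fetch above).

     struct btrace_insn_iterator it, end;

     btrace_insn_begin (&it, &tp->btrace);
     btrace_insn_end (&end, &tp->btrace);

     for (; btrace_insn_cmp (&it, &end) < 0; btrace_insn_next (&it, 1))
       {
         const struct btrace_insn *insn = btrace_insn_get (&it);

         if (insn == NULL)
           visit_gap (btrace_insn_get_error (&it));
         else
           visit_insn (btrace_insn_number (&it), insn->pc);
       }
*/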

/* Find an instruction or gap in the function branch trace by its number.
   If the instruction is found, initialize the branch trace instruction
   iterator to point to this instruction and return non-zero.
   Return zero otherwise.  */
extern int btrace_find_insn_by_number (struct btrace_insn_iterator *,
                                       const struct btrace_thread_info *,
                                       unsigned int number);

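/* Usage sketch (illustrative): position an iterator on a given instruction
   NUMBER, e.g. one typed by the user.  BTINFO and NUMBER are hypothetical;
   error and _() are GDB's usual error-reporting facilities.

     struct btrace_insn_iterator it;

     if (btrace_find_insn_by_number (&it, btinfo, number) == 0)
       error (_("No such instruction."));
*/
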
/* Dereference a branch trace call iterator.  Return a pointer to the
   function the iterator points to or NULL if the iterator points past
   the end of the branch trace.  */
extern const struct btrace_function *
  btrace_call_get (const struct btrace_call_iterator *);

/* Return the function number for a branch trace call iterator.
   Returns one past the maximum function number for the end iterator.
   Returns zero if the iterator does not point to a valid function.  */
extern unsigned int btrace_call_number (const struct btrace_call_iterator *);

/* Initialize a branch trace call iterator to point to the begin/end of
   the branch trace.  Throws an error if there is no branch trace.  */
extern void btrace_call_begin (struct btrace_call_iterator *,
                               const struct btrace_thread_info *);
extern void btrace_call_end (struct btrace_call_iterator *,
                             const struct btrace_thread_info *);

/* Increment/decrement a branch trace call iterator by at most STRIDE function
   segments.  Return the number of function segments by which the call
   iterator has been advanced.
   Returns zero if the operation failed or if STRIDE was zero.  */
extern unsigned int btrace_call_next (struct btrace_call_iterator *,
                                      unsigned int stride);
extern unsigned int btrace_call_prev (struct btrace_call_iterator *,
                                      unsigned int stride);

/* Compare two branch trace call iterators.
   Return a negative number if LHS < RHS.
   Return zero if LHS == RHS.
   Return a positive number if LHS > RHS.  */
extern int btrace_call_cmp (const struct btrace_call_iterator *lhs,
                            const struct btrace_call_iterator *rhs);
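
/* Usage sketch (illustrative): visit every function segment in the recorded
   call history.  BTINFO and VISIT_CALL are hypothetical; BTINFO is assumed
   to point to a valid btrace_thread_info whose trace has been fetched.

     struct btrace_call_iterator it, end;

     btrace_call_begin (&it, btinfo);
     btrace_call_end (&end, btinfo);

     for (; btrace_call_cmp (&it, &end) < 0; btrace_call_next (&it, 1))
       {
         const struct btrace_function *bfun = btrace_call_get (&it);

         visit_call (btrace_call_number (&it), bfun->msym, bfun->sym,
                     bfun->level);
       }
*/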

/* Find a function in the function branch trace by its NUMBER.
   If the function is found, initialize the branch trace call
   iterator to point to this function and return non-zero.
   Return zero otherwise.  */
extern int btrace_find_call_by_number (struct btrace_call_iterator *,
                                       const struct btrace_thread_info *,
                                       unsigned int number);

/* Set the branch trace instruction history from BEGIN (inclusive) to
   END (exclusive).  */
extern void btrace_set_insn_history (struct btrace_thread_info *,
                                     const struct btrace_insn_iterator *begin,
                                     const struct btrace_insn_iterator *end);

/* Set the branch trace function call history from BEGIN (inclusive) to
   END (exclusive).  */
extern void btrace_set_call_history (struct btrace_thread_info *,
                                     const struct btrace_call_iterator *begin,
                                     const struct btrace_call_iterator *end);

/* Determine if branch tracing is currently replaying TP.  */
extern int btrace_is_replaying (struct thread_info *tp);

/* Return non-zero if the branch trace for TP is empty; zero otherwise.  */
extern int btrace_is_empty (struct thread_info *tp);

/* Create a cleanup for DATA.  */
extern struct cleanup *make_cleanup_btrace_data (struct btrace_data *data);
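
/* Usage sketch (illustrative): own a btrace_data object for the lifetime of
   a scope so it is freed even if an error is thrown.  XML is a hypothetical
   document string; btrace_data_init comes from btrace-common.h and
   do_cleanups from GDB's cleanup machinery.

     struct btrace_data data;
     struct cleanup *cleanup;

     btrace_data_init (&data);
     cleanup = make_cleanup_btrace_data (&data);

     parse_xml_btrace (&data, xml);

     do_cleanups (cleanup);
*/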

#endif /* BTRACE_H */