/* Linux-dependent part of branch trace support for GDB, and GDBserver.

   Copyright (C) 2013 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#ifdef GDBSERVER
#include "server.h"
#else
#include "defs.h"
#endif

#include "linux-btrace.h"
#include "common-utils.h"
#include "gdb_assert.h"
#include "regcache.h"
#include "gdbthread.h"

#if HAVE_LINUX_PERF_EVENT_H

#include <errno.h>
#include <string.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/mman.h>
#include <sys/user.h>

/* A branch trace record in perf_event.  */
struct perf_event_bts
{
  /* The linear address of the branch source.  */
  uint64_t from;

  /* The linear address of the branch destination.  */
  uint64_t to;
};

/* A perf_event branch trace sample.  */
struct perf_event_sample
{
  /* The perf_event sample header.  */
  struct perf_event_header header;

  /* The perf_event branch tracing payload.  */
  struct perf_event_bts bts;
};
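
/* With the event configuration used in linux_enable_btrace below
   (sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR, sample_period = 1), the
   kernel emits one such sample per branch: the sample header, then the
   64-bit instruction pointer (the branch source), then the 64-bit address
   (the branch destination).  */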

/* Get the perf_event header.  */

static inline volatile struct perf_event_mmap_page *
perf_event_header (struct btrace_target_info* tinfo)
{
  return tinfo->buffer;
}

/* Get the size of the perf_event mmap buffer.  */

static inline size_t
perf_event_mmap_size (const struct btrace_target_info *tinfo)
{
  /* The branch trace buffer is preceded by a configuration page.  */
  return (tinfo->size + 1) * PAGE_SIZE;
}

/* Get the size of the perf_event buffer.  */

static inline size_t
perf_event_buffer_size (struct btrace_target_info* tinfo)
{
  return tinfo->size * PAGE_SIZE;
}

/* Get the start address of the perf_event buffer.  */

static inline const uint8_t *
perf_event_buffer_begin (struct btrace_target_info* tinfo)
{
  return ((const uint8_t *) tinfo->buffer) + PAGE_SIZE;
}

/* Get the end address of the perf_event buffer.  */

static inline const uint8_t *
perf_event_buffer_end (struct btrace_target_info* tinfo)
{
  return perf_event_buffer_begin (tinfo) + perf_event_buffer_size (tinfo);
}

/* Check whether an address is in the kernel.  */

static inline int
perf_event_is_kernel_addr (const struct btrace_target_info *tinfo,
                           uint64_t addr)
{
  uint64_t mask;

  /* If we don't know the size of a pointer, we can't check.  Let's assume it's
     not a kernel address in this case.  */
  if (tinfo->ptr_bits == 0)
    return 0;

  /* A bit mask for the most significant bit in an address.  */
  mask = (uint64_t) 1 << (tinfo->ptr_bits - 1);
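  /* E.g. for 64-bit addresses (ptr_bits == 64) this is 1 << 63.  Linux
     places the kernel in the upper half of the address space, so kernel
     addresses have this bit set.  */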

  /* Check whether the most significant bit in the address is set.  */
  return (addr & mask) != 0;
}

/* Check whether a perf event record should be skipped.  */

static inline int
perf_event_skip_record (const struct btrace_target_info *tinfo,
                        const struct perf_event_bts *bts)
{
  /* The hardware may report branches from kernel into user space.  Branches
     from user into kernel space will be suppressed.  We filter the former to
     provide a consistent branch trace excluding kernel.  */
  return perf_event_is_kernel_addr (tinfo, bts->from);
}

/* Perform a few consistency checks on a perf event sample record.  This is
   meant to catch cases when we get out of sync with the perf event stream.  */

static inline int
perf_event_sample_ok (const struct perf_event_sample *sample)
{
  if (sample->header.type != PERF_RECORD_SAMPLE)
    return 0;

  if (sample->header.size != sizeof (*sample))
    return 0;

  return 1;
}

/* Branch trace is collected in a circular buffer [begin; end) as pairs of from
   and to addresses (plus a header).

   Start points into that buffer at the next sample position.
   We read the collected samples backwards from start.

   While reading the samples, we convert the information into a list of blocks.
   For two adjacent samples s1 and s2, we form a block b such that b.begin =
   s1.to and b.end = s2.from.

   In case the buffer overflows during sampling, one sample may have its lower
   part at the end and its upper part at the beginning of the buffer.  */
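
/* For example (hypothetical addresses), assume the current pc is 0x40103a
   and the two most recent samples are

     s1 = { from = 0x401000, to = 0x401020 }   (older)
     s2 = { from = 0x401028, to = 0x401030 }   (newer)

   Reading backwards, we would first record the block [0x401030; 0x40103a]
   and then the block [0x401020; 0x401028].  */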

static VEC (btrace_block_s) *
perf_event_read_bts (struct btrace_target_info* tinfo, const uint8_t *begin,
                     const uint8_t *end, const uint8_t *start)
{
  VEC (btrace_block_s) *btrace = NULL;
  struct perf_event_sample sample;
  size_t read = 0, size = (end - begin);
  struct btrace_block block = { 0, 0 };
  struct regcache *regcache;

  gdb_assert (begin <= start);
  gdb_assert (start <= end);

  /* The first block ends at the current pc.  */
#ifdef GDBSERVER
  regcache = get_thread_regcache (find_thread_ptid (tinfo->ptid), 1);
#else
  regcache = get_thread_regcache (tinfo->ptid);
#endif
  block.end = regcache_read_pc (regcache);

  /* The buffer may contain a partial record as its last entry (i.e. when the
     buffer size is not a multiple of the sample size).  */
  read = sizeof (sample) - 1;

  for (; read < size; read += sizeof (sample))
    {
      const struct perf_event_sample *psample;

      /* Find the next perf_event sample in a backwards traversal.  */
      start -= sizeof (sample);

      /* If we're still inside the buffer, we can use the sample directly.  */
      if (begin <= start)
        psample = (const struct perf_event_sample *) start;
      else
        {
          int missing;

          /* We're to the left of the ring buffer; we will wrap around and
             reappear at the very right of the ring buffer.  */

          missing = (begin - start);
          start = (end - missing);
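          /* E.g. if the sample started 8 bytes before BEGIN, its first 8
             bytes are now found at END - 8.  */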

          /* If the entire sample wrapped around, it lies contiguously at the
             very end of the buffer.  */
          if (missing == sizeof (sample))
            psample = (const struct perf_event_sample *) start;
          else
            {
              uint8_t *stack;

              /* The sample wrapped around.  The lower part is at the end and
                 the upper part is at the beginning of the buffer.  */
              stack = (uint8_t *) &sample;

              /* Copy the two parts so we have a contiguous sample.  */
              memcpy (stack, start, missing);
              memcpy (stack + missing, begin, sizeof (sample) - missing);

              psample = &sample;
            }
        }

      if (!perf_event_sample_ok (psample))
        {
          warning (_("Branch trace may be incomplete."));
          break;
        }

      if (perf_event_skip_record (tinfo, &psample->bts))
        continue;

      /* We found a valid sample, so we can complete the current block.  */
      block.begin = psample->bts.to;

      VEC_safe_push (btrace_block_s, btrace, &block);

      /* Start the next block.  */
      block.end = psample->bts.from;
    }
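
  /* The last block we started above (its end is the FROM address of the
     oldest sample read) is never pushed; we do not know where it begins.  */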

  return btrace;
}

/* See linux-btrace.h.  */

int
linux_supports_btrace (void)
{
  return 1;
}

/* See linux-btrace.h.  */

struct btrace_target_info *
linux_enable_btrace (ptid_t ptid)
{
  struct btrace_target_info *tinfo;
  int pid;

  tinfo = xzalloc (sizeof (*tinfo));
  tinfo->ptid = ptid;

  tinfo->attr.size = sizeof (tinfo->attr);
  tinfo->attr.type = PERF_TYPE_HARDWARE;
  tinfo->attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
  tinfo->attr.sample_period = 1;

  /* We sample the branch source and destination addresses.  */
  tinfo->attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR;

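  /* Restrict sampling to user space; branches in kernel, hypervisor, or idle
     context are not counted.  */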
  tinfo->attr.exclude_kernel = 1;
  tinfo->attr.exclude_hv = 1;
  tinfo->attr.exclude_idle = 1;

  tinfo->ptr_bits = 0;

  pid = ptid_get_lwp (ptid);
  if (pid == 0)
    pid = ptid_get_pid (ptid);

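  /* perf_event_open (attr, pid, cpu, group_fd, flags): count branches for
     the given thread on any cpu (-1), without an event group (-1) and
     without special flags (0).  There is no glibc wrapper for this system
     call, hence the raw syscall.  */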
  errno = 0;
  tinfo->file = syscall (SYS_perf_event_open, &tinfo->attr, pid, -1, -1, 0);
  if (tinfo->file < 0)
    goto err;

  /* We hard-code the trace buffer size.
     At some later time, we should make this configurable.  */
  tinfo->size = 1;
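  /* Map the perf_event configuration page plus the trace data page
     read-only; the kernel keeps appending branch records to it while the
     thread runs.  */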
  tinfo->buffer = mmap (NULL, perf_event_mmap_size (tinfo),
                        PROT_READ, MAP_SHARED, tinfo->file, 0);
  if (tinfo->buffer == MAP_FAILED)
    goto err_file;

  return tinfo;

 err_file:
  close (tinfo->file);

 err:
  xfree (tinfo);
  return NULL;
}

/* See linux-btrace.h.  */

int
linux_disable_btrace (struct btrace_target_info *tinfo)
{
  int errcode;

  errno = 0;
  errcode = munmap (tinfo->buffer, perf_event_mmap_size (tinfo));
  if (errcode != 0)
    return errno;

  close (tinfo->file);
  xfree (tinfo);

  return 0;
}

/* Check whether the branch trace has changed.  */

static int
linux_btrace_has_changed (struct btrace_target_info *tinfo)
{
  volatile struct perf_event_mmap_page *header = perf_event_header (tinfo);

  return header->data_head != tinfo->data_head;
}

/* See linux-btrace.h.  */

VEC (btrace_block_s) *
linux_read_btrace (struct btrace_target_info *tinfo,
                   enum btrace_read_type type)
{
  VEC (btrace_block_s) *btrace = NULL;
  volatile struct perf_event_mmap_page *header;
  const uint8_t *begin, *end, *start;
  unsigned long data_head, retries = 5;
  size_t buffer_size;

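  /* For an incremental read, return early if nothing has been added since
     the last read; other read types reread the whole buffer.  */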
  if (type == btrace_read_new && !linux_btrace_has_changed (tinfo))
    return NULL;

  header = perf_event_header (tinfo);
  buffer_size = perf_event_buffer_size (tinfo);

  /* We may need to retry reading the trace.  See below.  */
  while (retries--)
    {
      data_head = header->data_head;

      /* If there's new trace, let's read it.  */
      if (data_head != tinfo->data_head)
        {
          /* Data_head keeps growing; the buffer itself is circular.  */
          begin = perf_event_buffer_begin (tinfo);
          start = begin + data_head % buffer_size;

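          /* If DATA_HEAD has not yet exceeded the buffer size, the buffer
             has not wrapped around and only [begin; start) contains trace
             data; otherwise the entire circular buffer does.  */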
          if (data_head <= buffer_size)
            end = start;
          else
            end = perf_event_buffer_end (tinfo);

          btrace = perf_event_read_bts (tinfo, begin, end, start);
        }

      /* The stopping thread notifies its ptracer before it is scheduled out.
         On multi-core systems, the debugger might therefore run while the
         kernel is still writing the last branch trace records.

         Let's check whether the data head moved while we read the trace.  */
      if (data_head == header->data_head)
        break;
    }

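  /* Remember how far we have read so that a subsequent incremental read only
     reports newly added trace.  */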
  tinfo->data_head = data_head;

  return btrace;
}

#else /* !HAVE_LINUX_PERF_EVENT_H */

/* See linux-btrace.h.  */

int
linux_supports_btrace (void)
{
  return 0;
}

/* See linux-btrace.h.  */

struct btrace_target_info *
linux_enable_btrace (ptid_t ptid)
{
  return NULL;
}

/* See linux-btrace.h.  */

int
linux_disable_btrace (struct btrace_target_info *tinfo)
{
  return ENOSYS;
}

/* See linux-btrace.h.  */

VEC (btrace_block_s) *
linux_read_btrace (struct btrace_target_info *tinfo,
                   enum btrace_read_type type)
{
  return NULL;
}

#endif /* !HAVE_LINUX_PERF_EVENT_H */