gdb/nat/linux-btrace.c
/* Linux-dependent part of branch trace support for GDB, and GDBserver.

   Copyright (C) 2013-2020 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "gdbsupport/common-defs.h"

#undef PACKAGE
#undef PACKAGE_NAME
#undef PACKAGE_VERSION
#undef PACKAGE_STRING
#undef PACKAGE_TARNAME

#include <config.h>
#include "linux-btrace.h"
#include "gdbsupport/common-regcache.h"
#include "gdbsupport/gdb_wait.h"
#include "x86-cpuid.h"
#include "gdbsupport/filestuff.h"
#include "gdbsupport/scoped_fd.h"
#include "gdbsupport/scoped_mmap.h"

#include <inttypes.h>

#include <sys/syscall.h>

#if HAVE_LINUX_PERF_EVENT_H && defined(SYS_perf_event_open)
#include <unistd.h>
#include <sys/mman.h>
#include <sys/user.h>
#include "nat/gdb_ptrace.h"
#include <sys/types.h>
#include <signal.h>

/* A branch trace record in perf_event.  */
struct perf_event_bts
{
  /* The linear address of the branch source.  */
  uint64_t from;

  /* The linear address of the branch destination.  */
  uint64_t to;
};

/* A perf_event branch trace sample.  */
struct perf_event_sample
{
  /* The perf_event sample header.  */
  struct perf_event_header header;

  /* The perf_event branch tracing payload.  */
  struct perf_event_bts bts;
};
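
/* A note added for illustration (not in the original source): with the
   usual 8-byte struct perf_event_header (a __u32 type plus two __u16
   fields) and no padding, a perf_event_sample occupies 8 + 16 = 24 bytes.
   perf_event_sample_ok below checks header.size against exactly this
   sizeof to detect when we are out of sync with the event stream.  */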

/* Identify the cpu we're running on.  */
static struct btrace_cpu
btrace_this_cpu (void)
{
  struct btrace_cpu cpu;
  unsigned int eax, ebx, ecx, edx;
  int ok;

  memset (&cpu, 0, sizeof (cpu));

  ok = x86_cpuid (0, &eax, &ebx, &ecx, &edx);
  if (ok != 0)
    {
      if (ebx == signature_INTEL_ebx && ecx == signature_INTEL_ecx
          && edx == signature_INTEL_edx)
        {
          unsigned int cpuid, ignore;

          ok = x86_cpuid (1, &cpuid, &ignore, &ignore, &ignore);
          if (ok != 0)
            {
              cpu.vendor = CV_INTEL;

              cpu.family = (cpuid >> 8) & 0xf;
              cpu.model = (cpuid >> 4) & 0xf;

              if (cpu.family == 0x6)
                cpu.model += (cpuid >> 12) & 0xf0;
            }
        }
    }

  return cpu;
}
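
/* Worked example (added for illustration, not from the original source):
   CPUID leaf 1 returning EAX = 0x000306a9 decodes as stepping 9, model
   0xa, family 0x6, extended model 0x3.  The computation above then yields
   cpu.model = 0xa + ((0x000306a9 >> 12) & 0xf0) = 0xa + 0x30 = 0x3a,
   which intel_supports_bts below recognizes as Ivy Bridge.  */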

/* Return non-zero if there is new data in PEVENT; zero otherwise.  */

static int
perf_event_new_data (const struct perf_event_buffer *pev)
{
  return *pev->data_head != pev->last_head;
}

/* Copy the last SIZE bytes from PEV ending at DATA_HEAD and return a pointer
   to the memory holding the copy.
   The caller is responsible for freeing the memory.  */

static gdb_byte *
perf_event_read (const struct perf_event_buffer *pev, __u64 data_head,
                 size_t size)
{
  const gdb_byte *begin, *end, *start, *stop;
  gdb_byte *buffer;
  size_t buffer_size;
  __u64 data_tail;

  if (size == 0)
    return NULL;

  /* We should never ask for more data than the buffer can hold.  */
  buffer_size = pev->size;
  gdb_assert (size <= buffer_size);

  /* If we ask for more data than we seem to have, we wrap around and read
     data from the end of the buffer.  This is already handled by the %
     BUFFER_SIZE operation, below.  Here, we just need to make sure that we
     don't underflow.

     Note that this is perfectly OK for perf event buffers where data_head
     doesn't grow indefinitely and instead wraps around to remain within the
     buffer's boundaries.  */
  if (data_head < size)
    data_head += buffer_size;

  gdb_assert (size <= data_head);
  data_tail = data_head - size;

  begin = pev->mem;
  start = begin + data_tail % buffer_size;
  stop = begin + data_head % buffer_size;

  buffer = (gdb_byte *) xmalloc (size);

  if (start < stop)
    memcpy (buffer, start, stop - start);
  else
    {
      end = begin + buffer_size;

      memcpy (buffer, start, end - start);
      memcpy (buffer + (end - start), begin, stop - begin);
    }

  return buffer;
}
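
/* Worked example (illustrative, not from the original source): with
   buffer_size = 8, data_head = 10 and size = 4 we get data_tail = 6,
   start = mem + 6 and stop = mem + 2.  Since start >= stop, the copy is
   split into two memcpy calls: mem[6..8) followed by mem[0..2), i.e. the
   last four bytes written, in their original order.  */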

/* Copy the perf event buffer data from PEV.
   Store a pointer to the copy into DATA and its size in PSIZE.  */

static void
perf_event_read_all (struct perf_event_buffer *pev, gdb_byte **data,
                     size_t *psize)
{
  size_t size;
  __u64 data_head;

  data_head = *pev->data_head;
  size = pev->size;

  *data = perf_event_read (pev, data_head, size);
  *psize = size;

  pev->last_head = data_head;
}

/* Try to determine the start address of the Linux kernel.  */

static uint64_t
linux_determine_kernel_start (void)
{
  static uint64_t kernel_start;
  static int cached;

  if (cached != 0)
    return kernel_start;

  cached = 1;

  gdb_file_up file = gdb_fopen_cloexec ("/proc/kallsyms", "r");
  if (file == NULL)
    return kernel_start;

  while (!feof (file.get ()))
    {
      char buffer[1024], symbol[8], *line;
      uint64_t addr;
      int match;

      line = fgets (buffer, sizeof (buffer), file.get ());
      if (line == NULL)
        break;

      match = sscanf (line, "%" SCNx64 " %*[tT] %7s", &addr, symbol);
      if (match != 2)
        continue;

      if (strcmp (symbol, "_text") == 0)
        {
          kernel_start = addr;
          break;
        }
    }

  return kernel_start;
}
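
/* For illustration (not from the original source), a matching
   /proc/kallsyms line typically looks like

     ffffffff81000000 T _text

   which the sscanf above parses into ADDR = 0xffffffff81000000 and
   SYMBOL = "_text".  With kptr_restrict in effect the addresses read as
   zero, kernel_start stays 0, and perf_event_is_kernel_addr below falls
   back to its most-significant-bit check.  */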

/* Check whether an address is in the kernel.  */

static inline int
perf_event_is_kernel_addr (uint64_t addr)
{
  uint64_t kernel_start;

  kernel_start = linux_determine_kernel_start ();
  if (kernel_start != 0ull)
    return (addr >= kernel_start);

  /* If we don't know the kernel's start address, let's check the most
     significant bit.  This will work at least for 64-bit kernels.  */
  return ((addr & (1ull << 63)) != 0);
}
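
/* Illustration (not from the original source): on x86-64, kernel text
   lives in the upper canonical half of the address space, e.g.
   0xffffffff81000000, so its most significant bit is set, whereas user
   addresses such as 0x00007f0000001000 have it clear.  */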

/* Check whether a perf event record should be skipped.  */

static inline int
perf_event_skip_bts_record (const struct perf_event_bts *bts)
{
  /* The hardware may report branches from kernel into user space.  Branches
     from user into kernel space will be suppressed.  We filter the former to
     provide a consistent branch trace excluding kernel.  */
  return perf_event_is_kernel_addr (bts->from);
}

/* Perform a few consistency checks on a perf event sample record.  This is
   meant to catch cases when we get out of sync with the perf event stream.  */

static inline int
perf_event_sample_ok (const struct perf_event_sample *sample)
{
  if (sample->header.type != PERF_RECORD_SAMPLE)
    return 0;

  if (sample->header.size != sizeof (*sample))
    return 0;

  return 1;
}

/* Branch trace is collected in a circular buffer [begin; end) as pairs of from
   and to addresses (plus a header).

   Start points into that buffer at the next sample position.
   We read the collected samples backwards from start.

   While reading the samples, we convert the information into a list of blocks.
   For two adjacent samples s1 and s2, we form a block b such that b.begin =
   s1.to and b.end = s2.from.

   In case the buffer overflows during sampling, one sample may have its lower
   part at the end and its upper part at the beginning of the buffer.  */

static std::vector<btrace_block> *
perf_event_read_bts (struct btrace_target_info *tinfo, const uint8_t *begin,
                     const uint8_t *end, const uint8_t *start, size_t size)
{
  std::vector<btrace_block> *btrace = new std::vector<btrace_block>;
  struct perf_event_sample sample;
  size_t read = 0;
  struct btrace_block block = { 0, 0 };
  struct regcache *regcache;

  gdb_assert (begin <= start);
  gdb_assert (start <= end);

  /* The first block ends at the current pc.  */
  regcache = get_thread_regcache_for_ptid (tinfo->ptid);
  block.end = regcache_read_pc (regcache);

  /* The buffer may contain a partial record as its last entry (i.e. when the
     buffer size is not a multiple of the sample size).  */
  read = sizeof (sample) - 1;

  for (; read < size; read += sizeof (sample))
    {
      const struct perf_event_sample *psample;

      /* Find the next perf_event sample in a backwards traversal.  */
      start -= sizeof (sample);

      /* If we're still inside the buffer, we're done.  */
      if (begin <= start)
        psample = (const struct perf_event_sample *) start;
      else
        {
          int missing;

          /* We're to the left of the ring buffer, we will wrap around and
             reappear at the very right of the ring buffer.  */

          missing = (begin - start);
          start = (end - missing);

          /* If the entire sample is missing, we're done.  */
          if (missing == sizeof (sample))
            psample = (const struct perf_event_sample *) start;
          else
            {
              uint8_t *stack;

              /* The sample wrapped around.  The lower part is at the end and
                 the upper part is at the beginning of the buffer.  */
              stack = (uint8_t *) &sample;

              /* Copy the two parts so we have a contiguous sample.  */
              memcpy (stack, start, missing);
              memcpy (stack + missing, begin, sizeof (sample) - missing);

              psample = &sample;
            }
        }

      if (!perf_event_sample_ok (psample))
        {
          warning (_("Branch trace may be incomplete."));
          break;
        }

      if (perf_event_skip_bts_record (&psample->bts))
        continue;

      /* We found a valid sample, so we can complete the current block.  */
      block.begin = psample->bts.to;

      btrace->push_back (block);

      /* Start the next block.  */
      block.end = psample->bts.from;
    }

  /* Push the last block (i.e. the first one of inferior execution), as well.
     We don't know where it ends, but we know where it starts.  If we're
     reading delta trace, we can fill in the start address later on.
     Otherwise we will prune it.  */
  block.begin = 0;
  btrace->push_back (block);

  return btrace;
}
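
/* Worked example (illustrative, not from the original source): assume the
   newest sample is s2 = { from = 0x400100, to = 0x400200 } and the one
   before it is s1 = { from = 0x400000, to = 0x400080 }.  The backwards
   traversal above first emits [s2.to; pc], then [s1.to; s2.from], i.e.
   the straight-line code executed between the two branches, and finally
   the open block [0; s1.from] that the caller prunes or completes.  */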

/* Check whether an Intel cpu supports BTS.  */

static int
intel_supports_bts (const struct btrace_cpu *cpu)
{
  switch (cpu->family)
    {
    case 0x6:
      switch (cpu->model)
        {
        case 0x1a: /* Nehalem */
        case 0x1f:
        case 0x1e:
        case 0x2e:
        case 0x25: /* Westmere */
        case 0x2c:
        case 0x2f:
        case 0x2a: /* Sandy Bridge */
        case 0x2d:
        case 0x3a: /* Ivy Bridge */

          /* AAJ122: LBR, BTM, or BTS records may have incorrect branch
             "from" information after an EIST transition, T-states, C1E, or
             Adaptive Thermal Throttling.  */
          return 0;
        }
    }

  return 1;
}

/* Check whether the cpu supports BTS.  */

static int
cpu_supports_bts (void)
{
  struct btrace_cpu cpu;

  cpu = btrace_this_cpu ();
  switch (cpu.vendor)
    {
    default:
      /* Don't know about others.  Let's assume they do.  */
      return 1;

    case CV_INTEL:
      return intel_supports_bts (&cpu);
    }
}

/* The perf_event_open syscall failed.  Try to print a helpful error
   message.  */

static void
diagnose_perf_event_open_fail ()
{
  switch (errno)
    {
    case EPERM:
    case EACCES:
      {
        static const char filename[] = "/proc/sys/kernel/perf_event_paranoid";
        gdb_file_up file = gdb_fopen_cloexec (filename, "r");
        if (file.get () == nullptr)
          break;

        int level, found = fscanf (file.get (), "%d", &level);
        if (found == 1 && level > 2)
          error (_("You do not have permission to record the process.  "
                   "Try setting %s to 2 or less."), filename);
      }

      break;
    }

  error (_("Failed to start recording: %s"), safe_strerror (errno));
}
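
/* Background note (added; not part of the original source): mainline
   kernels document perf_event_paranoid levels -1 (no restrictions)
   through 2 (no kernel profiling for unprivileged users).  Some
   distributions add a stricter level 3 that disallows perf_event_open
   entirely for unprivileged users, which is why the check above treats
   any level above 2 as a permission problem.  */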

/* Enable branch tracing in BTS format.  */

static struct btrace_target_info *
linux_enable_bts (ptid_t ptid, const struct btrace_config_bts *conf)
{
  struct btrace_tinfo_bts *bts;
  size_t size, pages;
  __u64 data_offset;
  int pid, pg;

  if (!cpu_supports_bts ())
    error (_("BTS support has been disabled for the target cpu."));

  gdb::unique_xmalloc_ptr<btrace_target_info> tinfo
    (XCNEW (btrace_target_info));
  tinfo->ptid = ptid;

  tinfo->conf.format = BTRACE_FORMAT_BTS;
  bts = &tinfo->variant.bts;

  bts->attr.size = sizeof (bts->attr);
  bts->attr.type = PERF_TYPE_HARDWARE;
  bts->attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
  bts->attr.sample_period = 1;

  /* We sample from and to address.  */
  bts->attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR;

  bts->attr.exclude_kernel = 1;
  bts->attr.exclude_hv = 1;
  bts->attr.exclude_idle = 1;

  pid = ptid.lwp ();
  if (pid == 0)
    pid = ptid.pid ();

  errno = 0;
  scoped_fd fd (syscall (SYS_perf_event_open, &bts->attr, pid, -1, -1, 0));
  if (fd.get () < 0)
    diagnose_perf_event_open_fail ();

  /* Convert the requested size in bytes to pages (rounding up).  */
  pages = ((size_t) conf->size / PAGE_SIZE
           + ((conf->size % PAGE_SIZE) == 0 ? 0 : 1));
  /* We need at least one page.  */
  if (pages == 0)
    pages = 1;

  /* The buffer size can be requested in powers of two pages.  Adjust PAGES
     to the next power of two.  */
  for (pg = 0; pages != ((size_t) 1 << pg); ++pg)
    if ((pages & ((size_t) 1 << pg)) != 0)
      pages += ((size_t) 1 << pg);
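
  /* Worked example (added for illustration): a request of 5 pages proceeds
     5 -> 6 (adding 1 << 0) -> 8 (adding 1 << 1) and stops once
     8 == 1 << 3, rounding PAGES up to the next power of two.  */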

  /* We try to allocate the requested size.
     If that fails, try to get as much as we can.  */
  scoped_mmap data;
  for (; pages > 0; pages >>= 1)
    {
      size_t length;
      __u64 data_size;

      data_size = (__u64) pages * PAGE_SIZE;

      /* Don't ask for more than we can represent in the configuration.  */
      if ((__u64) UINT_MAX < data_size)
        continue;

      size = (size_t) data_size;
      length = size + PAGE_SIZE;

      /* Check for overflows.  */
      if ((__u64) length != data_size + PAGE_SIZE)
        continue;

      errno = 0;
      /* The number of pages we request needs to be a power of two.  */
      data.reset (nullptr, length, PROT_READ, MAP_SHARED, fd.get (), 0);
      if (data.get () != MAP_FAILED)
        break;
    }

  if (pages == 0)
    error (_("Failed to map trace buffer: %s."), safe_strerror (errno));

  struct perf_event_mmap_page *header = (struct perf_event_mmap_page *)
    data.get ();
  data_offset = PAGE_SIZE;

#if defined (PERF_ATTR_SIZE_VER5)
  if (offsetof (struct perf_event_mmap_page, data_size) <= header->size)
    {
      __u64 data_size;

      data_offset = header->data_offset;
      data_size = header->data_size;

      size = (unsigned int) data_size;

      /* Check for overflows.  */
      if ((__u64) size != data_size)
        error (_("Failed to determine trace buffer size."));
    }
#endif /* defined (PERF_ATTR_SIZE_VER5) */

  bts->bts.size = size;
  bts->bts.data_head = &header->data_head;
  bts->bts.mem = (const uint8_t *) data.release () + data_offset;
  bts->bts.last_head = 0ull;
  bts->header = header;
  bts->file = fd.release ();

  tinfo->conf.bts.size = (unsigned int) size;
  return tinfo.release ();
}

#if defined (PERF_ATTR_SIZE_VER5)

/* Determine the event type.  */

static int
perf_event_pt_event_type ()
{
  static const char filename[] = "/sys/bus/event_source/devices/intel_pt/type";

  errno = 0;
  gdb_file_up file = gdb_fopen_cloexec (filename, "r");
  if (file.get () == nullptr)
    error (_("Failed to open %s: %s."), filename, safe_strerror (errno));

  int type, found = fscanf (file.get (), "%d", &type);
  if (found != 1)
    error (_("Failed to read the PT event type from %s."), filename);

  return type;
}

/* Enable branch tracing in Intel Processor Trace format.  */

static struct btrace_target_info *
linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
{
  struct btrace_tinfo_pt *pt;
  size_t pages;
  int pid, pg;

  pid = ptid.lwp ();
  if (pid == 0)
    pid = ptid.pid ();

  gdb::unique_xmalloc_ptr<btrace_target_info> tinfo
    (XCNEW (btrace_target_info));
  tinfo->ptid = ptid;

  tinfo->conf.format = BTRACE_FORMAT_PT;
  pt = &tinfo->variant.pt;

  pt->attr.size = sizeof (pt->attr);
  pt->attr.type = perf_event_pt_event_type ();

  pt->attr.exclude_kernel = 1;
  pt->attr.exclude_hv = 1;
  pt->attr.exclude_idle = 1;

  errno = 0;
  scoped_fd fd (syscall (SYS_perf_event_open, &pt->attr, pid, -1, -1, 0));
  if (fd.get () < 0)
    diagnose_perf_event_open_fail ();

  /* Allocate the configuration page.  */
  scoped_mmap data (nullptr, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
                    fd.get (), 0);
  if (data.get () == MAP_FAILED)
    error (_("Failed to map trace user page: %s."), safe_strerror (errno));

  struct perf_event_mmap_page *header = (struct perf_event_mmap_page *)
    data.get ();

  header->aux_offset = header->data_offset + header->data_size;

  /* Convert the requested size in bytes to pages (rounding up).  */
  pages = ((size_t) conf->size / PAGE_SIZE
           + ((conf->size % PAGE_SIZE) == 0 ? 0 : 1));
  /* We need at least one page.  */
  if (pages == 0)
    pages = 1;

  /* The buffer size can be requested in powers of two pages.  Adjust PAGES
     to the next power of two.  */
  for (pg = 0; pages != ((size_t) 1 << pg); ++pg)
    if ((pages & ((size_t) 1 << pg)) != 0)
      pages += ((size_t) 1 << pg);

  /* We try to allocate the requested size.
     If that fails, try to get as much as we can.  */
  scoped_mmap aux;
  for (; pages > 0; pages >>= 1)
    {
      size_t length;
      __u64 data_size;

      data_size = (__u64) pages * PAGE_SIZE;

      /* Don't ask for more than we can represent in the configuration.  */
      if ((__u64) UINT_MAX < data_size)
        continue;

      length = (size_t) data_size;

      /* Check for overflows.  */
      if ((__u64) length != data_size)
        continue;

      header->aux_size = data_size;

      errno = 0;
      aux.reset (nullptr, length, PROT_READ, MAP_SHARED, fd.get (),
                 header->aux_offset);
      if (aux.get () != MAP_FAILED)
        break;
    }

  if (pages == 0)
    error (_("Failed to map trace buffer: %s."), safe_strerror (errno));

  pt->pt.size = aux.size ();
  pt->pt.mem = (const uint8_t *) aux.release ();
  pt->pt.data_head = &header->aux_head;
  pt->header = (struct perf_event_mmap_page *) data.release ();
  gdb_assert (pt->header == header);
  pt->file = fd.release ();

  tinfo->conf.pt.size = (unsigned int) pt->pt.size;
  return tinfo.release ();
}
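
/* A note added for illustration (not in the original source): Intel PT
   uses two separate mappings on the same file descriptor: a single
   configuration page (struct perf_event_mmap_page) mapped read/write so
   that aux_offset and aux_size can be written before the aux buffer is
   mapped, and the read-only aux buffer itself at aux_offset, which
   receives the actual trace data.  */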

#else /* !defined (PERF_ATTR_SIZE_VER5) */

static struct btrace_target_info *
linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
{
  error (_("Intel Processor Trace support was disabled at compile time."));
}

#endif /* !defined (PERF_ATTR_SIZE_VER5) */

/* See linux-btrace.h.  */

struct btrace_target_info *
linux_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
{
  switch (conf->format)
    {
    case BTRACE_FORMAT_NONE:
      error (_("Bad branch trace format."));

    default:
      error (_("Unknown branch trace format."));

    case BTRACE_FORMAT_BTS:
      return linux_enable_bts (ptid, &conf->bts);

    case BTRACE_FORMAT_PT:
      return linux_enable_pt (ptid, &conf->pt);
    }
}

/* Disable BTS tracing.  */

static enum btrace_error
linux_disable_bts (struct btrace_tinfo_bts *tinfo)
{
  munmap ((void *) tinfo->header, tinfo->bts.size + PAGE_SIZE);
  close (tinfo->file);

  return BTRACE_ERR_NONE;
}

/* Disable Intel Processor Trace recording.  */

static enum btrace_error
linux_disable_pt (struct btrace_tinfo_pt *tinfo)
{
  munmap ((void *) tinfo->pt.mem, tinfo->pt.size);
  munmap ((void *) tinfo->header, PAGE_SIZE);
  close (tinfo->file);

  return BTRACE_ERR_NONE;
}

/* See linux-btrace.h.  */

enum btrace_error
linux_disable_btrace (struct btrace_target_info *tinfo)
{
  enum btrace_error errcode;

  errcode = BTRACE_ERR_NOT_SUPPORTED;
  switch (tinfo->conf.format)
    {
    case BTRACE_FORMAT_NONE:
      break;

    case BTRACE_FORMAT_BTS:
      errcode = linux_disable_bts (&tinfo->variant.bts);
      break;

    case BTRACE_FORMAT_PT:
      errcode = linux_disable_pt (&tinfo->variant.pt);
      break;
    }

  if (errcode == BTRACE_ERR_NONE)
    xfree (tinfo);

  return errcode;
}

/* Read branch trace data in BTS format for the thread given by TINFO into
   BTRACE using the TYPE reading method.  */

static enum btrace_error
linux_read_bts (struct btrace_data_bts *btrace,
                struct btrace_target_info *tinfo,
                enum btrace_read_type type)
{
  struct perf_event_buffer *pevent;
  const uint8_t *begin, *end, *start;
  size_t buffer_size, size;
  __u64 data_head, data_tail;
  unsigned int retries = 5;

  pevent = &tinfo->variant.bts.bts;

  /* For delta reads, we return at least the partial last block containing
     the current PC.  */
  if (type == BTRACE_READ_NEW && !perf_event_new_data (pevent))
    return BTRACE_ERR_NONE;

  buffer_size = pevent->size;
  data_tail = pevent->last_head;

  /* We may need to retry reading the trace.  See below.  */
  while (retries--)
    {
      data_head = *pevent->data_head;

      /* Delete any leftover trace from the previous iteration.  */
      delete btrace->blocks;
      btrace->blocks = nullptr;

      if (type == BTRACE_READ_DELTA)
        {
          __u64 data_size;

          /* Determine the number of bytes to read and check for buffer
             overflows.  */

          /* Check for data head overflows.  We might be able to recover from
             those but they are very unlikely and it's not really worth the
             effort, I think.  */
          if (data_head < data_tail)
            return BTRACE_ERR_OVERFLOW;

          /* If the buffer is smaller than the trace delta, we overflowed.  */
          data_size = data_head - data_tail;
          if (buffer_size < data_size)
            return BTRACE_ERR_OVERFLOW;

          /* DATA_SIZE <= BUFFER_SIZE and therefore fits into a size_t.  */
          size = (size_t) data_size;
        }
      else
        {
          /* Read the entire buffer.  */
          size = buffer_size;

          /* Adjust the size if the buffer has not overflowed, yet.  */
          if (data_head < size)
            size = (size_t) data_head;
        }

      /* Data_head keeps growing; the buffer itself is circular.  */
      begin = pevent->mem;
      start = begin + data_head % buffer_size;

      if (data_head <= buffer_size)
        end = start;
      else
        end = begin + pevent->size;

      btrace->blocks = perf_event_read_bts (tinfo, begin, end, start, size);

      /* The stopping thread notifies its ptracer before it is scheduled out.
         On multi-core systems, the debugger might therefore run while the
         kernel might be writing the last branch trace records.

         Let's check whether the data head moved while we read the trace.  */
      if (data_head == *pevent->data_head)
        break;
    }

  pevent->last_head = data_head;

  /* Prune the incomplete last block (i.e. the first one of inferior execution)
     if we're not doing a delta read.  There is no way of filling in its zeroed
     BEGIN element.  */
  if (!btrace->blocks->empty () && type != BTRACE_READ_DELTA)
    btrace->blocks->pop_back ();

  return BTRACE_ERR_NONE;
}
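
/* Worked example (illustrative, not from the original source): with
   buffer_size = 4096, last_head = 1000 and *data_head = 1500, a delta
   read copies the 500 bytes between the two positions; an overflow is
   reported if the delta ever exceeds the buffer size, since the oldest
   of those bytes have already been overwritten.  */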

/* Fill in the Intel Processor Trace configuration information.  */

static void
linux_fill_btrace_pt_config (struct btrace_data_pt_config *conf)
{
  conf->cpu = btrace_this_cpu ();
}

/* Read branch trace data in Intel Processor Trace format for the thread
   given by TINFO into BTRACE using the TYPE reading method.  */

static enum btrace_error
linux_read_pt (struct btrace_data_pt *btrace,
               struct btrace_target_info *tinfo,
               enum btrace_read_type type)
{
  struct perf_event_buffer *pt;

  pt = &tinfo->variant.pt.pt;

  linux_fill_btrace_pt_config (&btrace->config);

  switch (type)
    {
    case BTRACE_READ_DELTA:
      /* We don't support delta reads.  The data head (i.e. aux_head) wraps
         around to stay inside the aux buffer.  */
      return BTRACE_ERR_NOT_SUPPORTED;

    case BTRACE_READ_NEW:
      if (!perf_event_new_data (pt))
        return BTRACE_ERR_NONE;

      /* Fall through.  */
    case BTRACE_READ_ALL:
      perf_event_read_all (pt, &btrace->data, &btrace->size);
      return BTRACE_ERR_NONE;
    }

  internal_error (__FILE__, __LINE__, _("Unknown btrace read type."));
}

/* See linux-btrace.h.  */

enum btrace_error
linux_read_btrace (struct btrace_data *btrace,
                   struct btrace_target_info *tinfo,
                   enum btrace_read_type type)
{
  switch (tinfo->conf.format)
    {
    case BTRACE_FORMAT_NONE:
      return BTRACE_ERR_NOT_SUPPORTED;

    case BTRACE_FORMAT_BTS:
      /* We read btrace in BTS format.  */
      btrace->format = BTRACE_FORMAT_BTS;
      btrace->variant.bts.blocks = NULL;

      return linux_read_bts (&btrace->variant.bts, tinfo, type);

    case BTRACE_FORMAT_PT:
      /* We read btrace in Intel Processor Trace format.  */
      btrace->format = BTRACE_FORMAT_PT;
      btrace->variant.pt.data = NULL;
      btrace->variant.pt.size = 0;

      return linux_read_pt (&btrace->variant.pt, tinfo, type);
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}

/* See linux-btrace.h.  */

const struct btrace_config *
linux_btrace_conf (const struct btrace_target_info *tinfo)
{
  return &tinfo->conf;
}

#else /* !HAVE_LINUX_PERF_EVENT_H */

/* See linux-btrace.h.  */

struct btrace_target_info *
linux_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
{
  return NULL;
}

/* See linux-btrace.h.  */

enum btrace_error
linux_disable_btrace (struct btrace_target_info *tinfo)
{
  return BTRACE_ERR_NOT_SUPPORTED;
}

/* See linux-btrace.h.  */

enum btrace_error
linux_read_btrace (struct btrace_data *btrace,
                   struct btrace_target_info *tinfo,
                   enum btrace_read_type type)
{
  return BTRACE_ERR_NOT_SUPPORTED;
}

/* See linux-btrace.h.  */

const struct btrace_config *
linux_btrace_conf (const struct btrace_target_info *tinfo)
{
  return NULL;
}

#endif /* !HAVE_LINUX_PERF_EVENT_H */