/* Linux-dependent part of branch trace support for GDB, and GDBserver.

   Copyright (C) 2013-2022 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "gdbsupport/common-defs.h"
#include "linux-btrace.h"
#include "gdbsupport/common-regcache.h"
#include "gdbsupport/gdb_wait.h"
#include "x86-cpuid.h"
#include "gdbsupport/filestuff.h"
#include "gdbsupport/scoped_fd.h"
#include "gdbsupport/scoped_mmap.h"

#include <inttypes.h>

#include <sys/syscall.h>

#if HAVE_LINUX_PERF_EVENT_H && defined(SYS_perf_event_open)
#include <unistd.h>
#include <sys/mman.h>
#include <sys/user.h>
#include "nat/gdb_ptrace.h"
#include <sys/types.h>
#include <signal.h>

/* A branch trace record in perf_event.  */
struct perf_event_bts
{
  /* The linear address of the branch source.  */
  uint64_t from;

  /* The linear address of the branch destination.  */
  uint64_t to;
};

/* A perf_event branch trace sample.  */
struct perf_event_sample
{
  /* The perf_event sample header.  */
  struct perf_event_header header;

  /* The perf_event branch tracing payload.  */
  struct perf_event_bts bts;
};

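/* A sizing note (assuming the usual 8-byte perf_event_header and no
   padding, which holds for the 8-byte-aligned members above): a complete
   sample occupies 8 + 16 = 24 bytes, which is what perf_event_sample_ok
   below checks via sizeof.  Since a 4 KiB page is not a multiple of 24
   (4096 = 170 * 24 + 16), the last record in a buffer may be partial;
   see perf_event_read_bts.  */
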
/* Identify the cpu we're running on.  */
static struct btrace_cpu
btrace_this_cpu (void)
{
  struct btrace_cpu cpu;
  unsigned int eax, ebx, ecx, edx;
  int ok;

  memset (&cpu, 0, sizeof (cpu));

  ok = x86_cpuid (0, &eax, &ebx, &ecx, &edx);
  if (ok != 0)
    {
      if (ebx == signature_INTEL_ebx && ecx == signature_INTEL_ecx
          && edx == signature_INTEL_edx)
        {
          unsigned int cpuid, ignore;

          ok = x86_cpuid (1, &cpuid, &ignore, &ignore, &ignore);
          if (ok != 0)
            {
              cpu.vendor = CV_INTEL;

              cpu.family = (cpuid >> 8) & 0xf;
              cpu.model = (cpuid >> 4) & 0xf;

              if (cpu.family == 0x6)
                cpu.model += (cpuid >> 12) & 0xf0;
            }
        }
      else if (ebx == signature_AMD_ebx && ecx == signature_AMD_ecx
               && edx == signature_AMD_edx)
        cpu.vendor = CV_AMD;
    }

  return cpu;
}

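/* For illustration, take an Ivy Bridge cpu reporting EAX = 0x000306a9 for
   CPUID leaf 1 (the value is an example):

     family = (0x306a9 >> 8) & 0xf            = 0x6
     model  = (0x306a9 >> 4) & 0xf            = 0xa
     model += (0x306a9 >> 12) & 0xf0   (0x30) = 0x3a

   0x3a is the Ivy Bridge model number checked in intel_supports_bts
   below.  */
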
/* Return non-zero if there is new data in PEVENT; zero otherwise.  */

static int
perf_event_new_data (const struct perf_event_buffer *pev)
{
  return *pev->data_head != pev->last_head;
}

/* Copy the last SIZE bytes from PEV ending at DATA_HEAD and return a pointer
   to the memory holding the copy.
   The caller is responsible for freeing the memory.  */

static gdb_byte *
perf_event_read (const struct perf_event_buffer *pev, __u64 data_head,
                 size_t size)
{
  const gdb_byte *begin, *end, *start, *stop;
  gdb_byte *buffer;
  size_t buffer_size;
  __u64 data_tail;

  if (size == 0)
    return NULL;

  /* We should never ask for more data than the buffer can hold.  */
  buffer_size = pev->size;
  gdb_assert (size <= buffer_size);

  /* If we ask for more data than we seem to have, we wrap around and read
     data from the end of the buffer.  This is already handled by the %
     BUFFER_SIZE operation, below.  Here, we just need to make sure that we
     don't underflow.

     Note that this is perfectly OK for perf event buffers where data_head
     doesn't grow indefinitely and instead wraps around to remain within the
     buffer's boundaries.  */
  if (data_head < size)
    data_head += buffer_size;

  gdb_assert (size <= data_head);
  data_tail = data_head - size;

  begin = pev->mem;
  start = begin + data_tail % buffer_size;
  stop = begin + data_head % buffer_size;

  buffer = (gdb_byte *) xmalloc (size);

  if (start < stop)
    memcpy (buffer, start, stop - start);
  else
    {
      end = begin + buffer_size;

      memcpy (buffer, start, end - start);
      memcpy (buffer + (end - start), begin, stop - begin);
    }

  return buffer;
}

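/* As an example of the wrap-around case above (numbers are illustrative):
   with BUFFER_SIZE = 4096, DATA_HEAD = 5000 and SIZE = 2000, we get
   DATA_TAIL = 3000, START = BEGIN + 3000 and STOP = BEGIN + 904.  Since
   START >= STOP, the copy is done in two parts: 1096 bytes from START to
   the end of the buffer, followed by 904 bytes from BEGIN.  */
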
/* Copy the perf event buffer data from PEV.
   Store a pointer to the copy into DATA and its size in SIZE.  */

static void
perf_event_read_all (struct perf_event_buffer *pev, gdb_byte **data,
                     size_t *psize)
{
  size_t size;
  __u64 data_head;

  data_head = *pev->data_head;
  size = pev->size;

  *data = perf_event_read (pev, data_head, size);
  *psize = size;

  pev->last_head = data_head;
}

/* Try to determine the start address of the Linux kernel.  */

static uint64_t
linux_determine_kernel_start (void)
{
  static uint64_t kernel_start;
  static int cached;

  if (cached != 0)
    return kernel_start;

  cached = 1;

  gdb_file_up file = gdb_fopen_cloexec ("/proc/kallsyms", "r");
  if (file == NULL)
    return kernel_start;

  while (!feof (file.get ()))
    {
      char buffer[1024], symbol[8], *line;
      uint64_t addr;
      int match;

      line = fgets (buffer, sizeof (buffer), file.get ());
      if (line == NULL)
        break;

      match = sscanf (line, "%" SCNx64 " %*[tT] %7s", &addr, symbol);
      if (match != 2)
        continue;

      if (strcmp (symbol, "_text") == 0)
        {
          kernel_start = addr;
          break;
        }
    }

  return kernel_start;
}

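/* The sscanf pattern above matches /proc/kallsyms lines of the form

     ffffffff81000000 T _text

   (the address is illustrative).  Note that with kptr_restrict enabled,
   unprivileged readers see all-zero addresses; KERNEL_START then remains
   zero and the most-significant-bit heuristic below is used instead.  */
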
/* Check whether an address is in the kernel.  */

static inline int
perf_event_is_kernel_addr (uint64_t addr)
{
  uint64_t kernel_start;

  kernel_start = linux_determine_kernel_start ();
  if (kernel_start != 0ull)
    return (addr >= kernel_start);

  /* If we don't know the kernel's start address, let's check the most
     significant bit.  This will work at least for 64-bit kernels.  */
  return ((addr & (1ull << 63)) != 0);
}

/* Check whether a perf event record should be skipped.  */

static inline int
perf_event_skip_bts_record (const struct perf_event_bts *bts)
{
  /* The hardware may report branches from kernel into user space.  Branches
     from user into kernel space will be suppressed.  We filter the former to
     provide a consistent branch trace excluding kernel.  */
  return perf_event_is_kernel_addr (bts->from);
}

/* Perform a few consistency checks on a perf event sample record.  This is
   meant to catch cases when we get out of sync with the perf event stream.  */

static inline int
perf_event_sample_ok (const struct perf_event_sample *sample)
{
  if (sample->header.type != PERF_RECORD_SAMPLE)
    return 0;

  if (sample->header.size != sizeof (*sample))
    return 0;

  return 1;
}

/* Branch trace is collected in a circular buffer [begin; end) as pairs of from
   and to addresses (plus a header).

   Start points into that buffer at the next sample position.
   We read the collected samples backwards from start.

   While reading the samples, we convert the information into a list of blocks.
   For two adjacent samples s1 and s2, we form a block b such that b.begin =
   s1.to and b.end = s2.from.

   In case the buffer overflows during sampling, one sample may have its lower
   part at the end and its upper part at the beginning of the buffer.  */

static std::vector<btrace_block> *
perf_event_read_bts (struct btrace_target_info* tinfo, const uint8_t *begin,
                     const uint8_t *end, const uint8_t *start, size_t size)
{
  std::vector<btrace_block> *btrace = new std::vector<btrace_block>;
  struct perf_event_sample sample;
  size_t read = 0;
  struct btrace_block block = { 0, 0 };
  struct regcache *regcache;

  gdb_assert (begin <= start);
  gdb_assert (start <= end);

  /* The first block ends at the current pc.  */
  regcache = get_thread_regcache_for_ptid (tinfo->ptid);
  block.end = regcache_read_pc (regcache);

  /* The buffer may contain a partial record as its last entry (i.e. when the
     buffer size is not a multiple of the sample size).  */
  read = sizeof (sample) - 1;

  for (; read < size; read += sizeof (sample))
    {
      const struct perf_event_sample *psample;

      /* Find the next perf_event sample in a backwards traversal.  */
      start -= sizeof (sample);

      /* If we're still inside the buffer, we're done.  */
      if (begin <= start)
        psample = (const struct perf_event_sample *) start;
      else
        {
          int missing;

          /* We're to the left of the ring buffer, we will wrap around and
             reappear at the very right of the ring buffer.  */

          missing = (begin - start);
          start = (end - missing);

          /* If the entire sample is missing, we're done.  */
          if (missing == sizeof (sample))
            psample = (const struct perf_event_sample *) start;
          else
            {
              uint8_t *stack;

              /* The sample wrapped around.  The lower part is at the end and
                 the upper part is at the beginning of the buffer.  */
              stack = (uint8_t *) &sample;

              /* Copy the two parts so we have a contiguous sample.  */
              memcpy (stack, start, missing);
              memcpy (stack + missing, begin, sizeof (sample) - missing);

              psample = &sample;
            }
        }

      if (!perf_event_sample_ok (psample))
        {
          warning (_("Branch trace may be incomplete."));
          break;
        }

      if (perf_event_skip_bts_record (&psample->bts))
        continue;

      /* We found a valid sample, so we can complete the current block.  */
      block.begin = psample->bts.to;

      btrace->push_back (block);

      /* Start the next block.  */
      block.end = psample->bts.from;
    }

  /* Push the last block (i.e. the first one of inferior execution), as well.
     We don't know where it ends, but we know where it starts.  If we're
     reading delta trace, we can fill in the start address later on.
     Otherwise we will prune it.  */
  block.begin = 0;
  btrace->push_back (block);

  return btrace;
}

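/* To illustrate the block formation above with made-up addresses: for
   adjacent samples s1 = { from = 0x400100, to = 0x4004d0 } and
   s2 = { from = 0x400520, to = 0x400800 }, we form the block
   b = { begin = 0x4004d0, end = 0x400520 }: execution entered at s1's
   branch destination and ran sequentially up to s2's branch source.  */
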
/* Check whether an Intel cpu supports BTS.  */

static int
intel_supports_bts (const struct btrace_cpu *cpu)
{
  switch (cpu->family)
    {
    case 0x6:
      switch (cpu->model)
        {
        case 0x1a: /* Nehalem */
        case 0x1f:
        case 0x1e:
        case 0x2e:
        case 0x25: /* Westmere */
        case 0x2c:
        case 0x2f:
        case 0x2a: /* Sandy Bridge */
        case 0x2d:
        case 0x3a: /* Ivy Bridge */

          /* AAJ122: LBR, BTM, or BTS records may have incorrect branch
             "from" information after an EIST transition, T-states, C1E, or
             Adaptive Thermal Throttling.  */
          return 0;
        }
    }

  return 1;
}

/* Check whether the cpu supports BTS.  */

static int
cpu_supports_bts (void)
{
  struct btrace_cpu cpu;

  cpu = btrace_this_cpu ();
  switch (cpu.vendor)
    {
    default:
      /* Don't know about others.  Let's assume they do.  */
      return 1;

    case CV_INTEL:
      return intel_supports_bts (&cpu);

    case CV_AMD:
      return 0;
    }
}

/* The perf_event_open syscall failed.  Try to print a helpful error
   message.  */

static void
diagnose_perf_event_open_fail ()
{
  switch (errno)
    {
    case EPERM:
    case EACCES:
      {
        static const char filename[] = "/proc/sys/kernel/perf_event_paranoid";
        gdb_file_up file = gdb_fopen_cloexec (filename, "r");
        if (file.get () == nullptr)
          break;

        int level, found = fscanf (file.get (), "%d", &level);
        if (found == 1 && level > 2)
          error (_("You do not have permission to record the process.  "
                   "Try setting %s to 2 or less."), filename);
      }

      break;
    }

  error (_("Failed to start recording: %s"), safe_strerror (errno));
}

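/* For reference (kernel semantics; only LEVEL > 2 is checked above):
   perf_event_paranoid = 2 still allows unprivileged users to trace their
   own user-space processes, which is all we need here, while higher
   levels, on kernels that support them, disallow unprivileged
   perf_event_open entirely.  A privileged user can relax the setting
   with, e.g.,

     # sysctl kernel.perf_event_paranoid=2  */
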
/* Enable branch tracing in BTS format.  */

static struct btrace_target_info *
linux_enable_bts (ptid_t ptid, const struct btrace_config_bts *conf)
{
  struct btrace_tinfo_bts *bts;
  size_t size, pages;
  __u64 data_offset;
  int pid, pg;

  if (!cpu_supports_bts ())
    error (_("BTS support has been disabled for the target cpu."));

  gdb::unique_xmalloc_ptr<btrace_target_info> tinfo
    (XCNEW (btrace_target_info));
  tinfo->ptid = ptid;

  tinfo->conf.format = BTRACE_FORMAT_BTS;
  bts = &tinfo->variant.bts;

  bts->attr.size = sizeof (bts->attr);
  bts->attr.type = PERF_TYPE_HARDWARE;
  bts->attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
  bts->attr.sample_period = 1;

  /* We sample the from and to addresses.  */
  bts->attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR;

  bts->attr.exclude_kernel = 1;
  bts->attr.exclude_hv = 1;
  bts->attr.exclude_idle = 1;

  pid = ptid.lwp ();
  if (pid == 0)
    pid = ptid.pid ();

  errno = 0;
  scoped_fd fd (syscall (SYS_perf_event_open, &bts->attr, pid, -1, -1, 0));
  if (fd.get () < 0)
    diagnose_perf_event_open_fail ();

  /* Convert the requested size in bytes to pages (rounding up).  */
  pages = ((size_t) conf->size / PAGE_SIZE
           + ((conf->size % PAGE_SIZE) == 0 ? 0 : 1));
  /* We need at least one page.  */
  if (pages == 0)
    pages = 1;

  /* The buffer size can be requested in powers of two pages.  Adjust PAGES
     to the next power of two.  */
  for (pg = 0; pages != ((size_t) 1 << pg); ++pg)
    if ((pages & ((size_t) 1 << pg)) != 0)
      pages += ((size_t) 1 << pg);
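  /* For illustration: a request of 5 pages (101b) is adjusted to 6 by
     adding bit 0, then to 8 (1000b) by adding bit 1; the loop terminates
     once PAGES == 1 << 3.  */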

  /* We try to allocate the requested size.
     If that fails, try to get as much as we can.  */
  scoped_mmap data;
  for (; pages > 0; pages >>= 1)
    {
      size_t length;
      __u64 data_size;

      data_size = (__u64) pages * PAGE_SIZE;

      /* Don't ask for more than we can represent in the configuration.  */
      if ((__u64) UINT_MAX < data_size)
        continue;

      size = (size_t) data_size;
      length = size + PAGE_SIZE;

      /* Check for overflows.  */
      if ((__u64) length != data_size + PAGE_SIZE)
        continue;

      errno = 0;
      /* The number of pages we request needs to be a power of two.  */
      data.reset (nullptr, length, PROT_READ, MAP_SHARED, fd.get (), 0);
      if (data.get () != MAP_FAILED)
        break;
    }

  if (pages == 0)
    error (_("Failed to map trace buffer: %s."), safe_strerror (errno));

  struct perf_event_mmap_page *header = (struct perf_event_mmap_page *)
    data.get ();
  data_offset = PAGE_SIZE;

#if defined (PERF_ATTR_SIZE_VER5)
  if (offsetof (struct perf_event_mmap_page, data_size) <= header->size)
    {
      __u64 data_size;

      data_offset = header->data_offset;
      data_size = header->data_size;

      size = (unsigned int) data_size;

      /* Check for overflows.  */
      if ((__u64) size != data_size)
        error (_("Failed to determine trace buffer size."));
    }
#endif /* defined (PERF_ATTR_SIZE_VER5) */

  bts->bts.size = size;
  bts->bts.data_head = &header->data_head;
  bts->bts.mem = (const uint8_t *) data.release () + data_offset;
  bts->bts.last_head = 0ull;
  bts->header = header;
  bts->file = fd.release ();

  tinfo->conf.bts.size = (unsigned int) size;
  return tinfo.release ();
}

#if defined (PERF_ATTR_SIZE_VER5)

/* Determine the event type.  */

static int
perf_event_pt_event_type ()
{
  static const char filename[] = "/sys/bus/event_source/devices/intel_pt/type";

  errno = 0;
  gdb_file_up file = gdb_fopen_cloexec (filename, "r");
  if (file.get () == nullptr)
    error (_("Failed to open %s: %s."), filename, safe_strerror (errno));

  int type, found = fscanf (file.get (), "%d", &type);
  if (found != 1)
    error (_("Failed to read the PT event type from %s."), filename);

  return type;
}

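/* On a typical system, the sysfs file contains a single small integer,
   e.g.

     $ cat /sys/bus/event_source/devices/intel_pt/type
     8

   (the value is assigned dynamically by the kernel and may differ).  It
   is the dynamic PMU type to pass in perf_event_attr.type below.  */
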
/* Enable branch tracing in Intel Processor Trace format.  */

static struct btrace_target_info *
linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
{
  struct btrace_tinfo_pt *pt;
  size_t pages;
  int pid, pg;

  pid = ptid.lwp ();
  if (pid == 0)
    pid = ptid.pid ();

  gdb::unique_xmalloc_ptr<btrace_target_info> tinfo
    (XCNEW (btrace_target_info));
  tinfo->ptid = ptid;

  tinfo->conf.format = BTRACE_FORMAT_PT;
  pt = &tinfo->variant.pt;

  pt->attr.size = sizeof (pt->attr);
  pt->attr.type = perf_event_pt_event_type ();

  pt->attr.exclude_kernel = 1;
  pt->attr.exclude_hv = 1;
  pt->attr.exclude_idle = 1;

  errno = 0;
  scoped_fd fd (syscall (SYS_perf_event_open, &pt->attr, pid, -1, -1, 0));
  if (fd.get () < 0)
    diagnose_perf_event_open_fail ();

  /* Allocate the configuration page.  */
  scoped_mmap data (nullptr, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
                    fd.get (), 0);
  if (data.get () == MAP_FAILED)
    error (_("Failed to map trace user page: %s."), safe_strerror (errno));

  struct perf_event_mmap_page *header = (struct perf_event_mmap_page *)
    data.get ();

  header->aux_offset = header->data_offset + header->data_size;

  /* Convert the requested size in bytes to pages (rounding up).  */
  pages = ((size_t) conf->size / PAGE_SIZE
           + ((conf->size % PAGE_SIZE) == 0 ? 0 : 1));
  /* We need at least one page.  */
  if (pages == 0)
    pages = 1;

  /* The buffer size can be requested in powers of two pages.  Adjust PAGES
     to the next power of two.  */
  for (pg = 0; pages != ((size_t) 1 << pg); ++pg)
    if ((pages & ((size_t) 1 << pg)) != 0)
      pages += ((size_t) 1 << pg);

  /* We try to allocate the requested size.
     If that fails, try to get as much as we can.  */
  scoped_mmap aux;
  for (; pages > 0; pages >>= 1)
    {
      size_t length;
      __u64 data_size;

      data_size = (__u64) pages * PAGE_SIZE;

      /* Don't ask for more than we can represent in the configuration.  */
      if ((__u64) UINT_MAX < data_size)
        continue;

      length = (size_t) data_size;

      /* Check for overflows.  */
      if ((__u64) length != data_size)
        continue;

      header->aux_size = data_size;

      errno = 0;
      aux.reset (nullptr, length, PROT_READ, MAP_SHARED, fd.get (),
                 header->aux_offset);
      if (aux.get () != MAP_FAILED)
        break;
    }

  if (pages == 0)
    error (_("Failed to map trace buffer: %s."), safe_strerror (errno));

  pt->pt.size = aux.size ();
  pt->pt.mem = (const uint8_t *) aux.release ();
  pt->pt.data_head = &header->aux_head;
  pt->header = (struct perf_event_mmap_page *) data.release ();
  gdb_assert (pt->header == header);
  pt->file = fd.release ();

  tinfo->conf.pt.size = (unsigned int) pt->pt.size;
  return tinfo.release ();
}

#else /* !defined (PERF_ATTR_SIZE_VER5) */

static struct btrace_target_info *
linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
{
  error (_("Intel Processor Trace support was disabled at compile time."));
}

#endif /* !defined (PERF_ATTR_SIZE_VER5) */

/* See linux-btrace.h.  */

struct btrace_target_info *
linux_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
{
  switch (conf->format)
    {
    case BTRACE_FORMAT_NONE:
      error (_("Bad branch trace format."));

    default:
      error (_("Unknown branch trace format."));

    case BTRACE_FORMAT_BTS:
      return linux_enable_bts (ptid, &conf->bts);

    case BTRACE_FORMAT_PT:
      return linux_enable_pt (ptid, &conf->pt);
    }
}

/* Disable BTS tracing.  */

static enum btrace_error
linux_disable_bts (struct btrace_tinfo_bts *tinfo)
{
  munmap ((void *) tinfo->header, tinfo->bts.size + PAGE_SIZE);
  close (tinfo->file);

  return BTRACE_ERR_NONE;
}

/* Disable Intel Processor Trace tracing.  */

static enum btrace_error
linux_disable_pt (struct btrace_tinfo_pt *tinfo)
{
  munmap ((void *) tinfo->pt.mem, tinfo->pt.size);
  munmap ((void *) tinfo->header, PAGE_SIZE);
  close (tinfo->file);

  return BTRACE_ERR_NONE;
}

/* See linux-btrace.h.  */

enum btrace_error
linux_disable_btrace (struct btrace_target_info *tinfo)
{
  enum btrace_error errcode;

  errcode = BTRACE_ERR_NOT_SUPPORTED;
  switch (tinfo->conf.format)
    {
    case BTRACE_FORMAT_NONE:
      break;

    case BTRACE_FORMAT_BTS:
      errcode = linux_disable_bts (&tinfo->variant.bts);
      break;

    case BTRACE_FORMAT_PT:
      errcode = linux_disable_pt (&tinfo->variant.pt);
      break;
    }

  if (errcode == BTRACE_ERR_NONE)
    xfree (tinfo);

  return errcode;
}

/* Read branch trace data in BTS format for the thread given by TINFO into
   BTRACE using the TYPE reading method.  */

static enum btrace_error
linux_read_bts (struct btrace_data_bts *btrace,
                struct btrace_target_info *tinfo,
                enum btrace_read_type type)
{
  struct perf_event_buffer *pevent;
  const uint8_t *begin, *end, *start;
  size_t buffer_size, size;
  __u64 data_head, data_tail;
  unsigned int retries = 5;

  pevent = &tinfo->variant.bts.bts;

  /* For delta reads, we return at least the partial last block containing
     the current PC.  */
  if (type == BTRACE_READ_NEW && !perf_event_new_data (pevent))
    return BTRACE_ERR_NONE;

  buffer_size = pevent->size;
  data_tail = pevent->last_head;

  /* We may need to retry reading the trace.  See below.  */
  while (retries--)
    {
      data_head = *pevent->data_head;

      /* Delete any leftover trace from the previous iteration.  */
      delete btrace->blocks;
      btrace->blocks = nullptr;

      if (type == BTRACE_READ_DELTA)
        {
          __u64 data_size;

          /* Determine the number of bytes to read and check for buffer
             overflows.  */

          /* Check for data head overflows.  We might be able to recover from
             those but they are very unlikely and it's not really worth the
             effort, I think.  */
          if (data_head < data_tail)
            return BTRACE_ERR_OVERFLOW;

          /* If the buffer is smaller than the trace delta, we overflowed.  */
          data_size = data_head - data_tail;
          if (buffer_size < data_size)
            return BTRACE_ERR_OVERFLOW;

          /* DATA_SIZE <= BUFFER_SIZE and therefore fits into a size_t.  */
          size = (size_t) data_size;
        }
      else
        {
          /* Read the entire buffer.  */
          size = buffer_size;

          /* Adjust the size if the buffer has not overflowed, yet.  */
          if (data_head < size)
            size = (size_t) data_head;
        }

      /* Data_head keeps growing; the buffer itself is circular.  */
      begin = pevent->mem;
      start = begin + data_head % buffer_size;

      if (data_head <= buffer_size)
        end = start;
      else
        end = begin + pevent->size;

      btrace->blocks = perf_event_read_bts (tinfo, begin, end, start, size);

      /* The stopping thread notifies its ptracer before it is scheduled out.
         On multi-core systems, the debugger might therefore run while the
         kernel might be writing the last branch trace records.

         Let's check whether the data head moved while we read the trace.  */
      if (data_head == *pevent->data_head)
        break;
    }

  pevent->last_head = data_head;

  /* Prune the incomplete last block (i.e. the first one of inferior execution)
     if we're not doing a delta read.  There is no way of filling in its zeroed
     BEGIN element.  */
  if (!btrace->blocks->empty () && type != BTRACE_READ_DELTA)
    btrace->blocks->pop_back ();

  return BTRACE_ERR_NONE;
}

/* Fill in the Intel Processor Trace configuration information.  */

static void
linux_fill_btrace_pt_config (struct btrace_data_pt_config *conf)
{
  conf->cpu = btrace_this_cpu ();
}

/* Read branch trace data in Intel Processor Trace format for the thread
   given by TINFO into BTRACE using the TYPE reading method.  */

static enum btrace_error
linux_read_pt (struct btrace_data_pt *btrace,
               struct btrace_target_info *tinfo,
               enum btrace_read_type type)
{
  struct perf_event_buffer *pt;

  pt = &tinfo->variant.pt.pt;

  linux_fill_btrace_pt_config (&btrace->config);

  switch (type)
    {
    case BTRACE_READ_DELTA:
      /* We don't support delta reads.  The data head (i.e. aux_head) wraps
         around to stay inside the aux buffer.  */
      return BTRACE_ERR_NOT_SUPPORTED;

    case BTRACE_READ_NEW:
      if (!perf_event_new_data (pt))
        return BTRACE_ERR_NONE;

      /* Fall through.  */
    case BTRACE_READ_ALL:
      perf_event_read_all (pt, &btrace->data, &btrace->size);
      return BTRACE_ERR_NONE;
    }

  internal_error (__FILE__, __LINE__, _("Unknown btrace read type."));
}

/* See linux-btrace.h.  */

enum btrace_error
linux_read_btrace (struct btrace_data *btrace,
                   struct btrace_target_info *tinfo,
                   enum btrace_read_type type)
{
  switch (tinfo->conf.format)
    {
    case BTRACE_FORMAT_NONE:
      return BTRACE_ERR_NOT_SUPPORTED;

    case BTRACE_FORMAT_BTS:
      /* We read btrace in BTS format.  */
      btrace->format = BTRACE_FORMAT_BTS;
      btrace->variant.bts.blocks = NULL;

      return linux_read_bts (&btrace->variant.bts, tinfo, type);

    case BTRACE_FORMAT_PT:
      /* We read btrace in Intel Processor Trace format.  */
      btrace->format = BTRACE_FORMAT_PT;
      btrace->variant.pt.data = NULL;
      btrace->variant.pt.size = 0;

      return linux_read_pt (&btrace->variant.pt, tinfo, type);
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}

/* See linux-btrace.h.  */

const struct btrace_config *
linux_btrace_conf (const struct btrace_target_info *tinfo)
{
  return &tinfo->conf;
}

#else /* !HAVE_LINUX_PERF_EVENT_H */

/* See linux-btrace.h.  */

struct btrace_target_info *
linux_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
{
  return NULL;
}

/* See linux-btrace.h.  */

enum btrace_error
linux_disable_btrace (struct btrace_target_info *tinfo)
{
  return BTRACE_ERR_NOT_SUPPORTED;
}

/* See linux-btrace.h.  */

enum btrace_error
linux_read_btrace (struct btrace_data *btrace,
                   struct btrace_target_info *tinfo,
                   enum btrace_read_type type)
{
  return BTRACE_ERR_NOT_SUPPORTED;
}

/* See linux-btrace.h.  */

const struct btrace_config *
linux_btrace_conf (const struct btrace_target_info *tinfo)
{
  return NULL;
}

#endif /* !HAVE_LINUX_PERF_EVENT_H */