btrace, gdbserver: use exceptions to convey btrace enable/disable errors
gdb/nat/linux-btrace.c

/* Linux-dependent part of branch trace support for GDB, and GDBserver.

   Copyright (C) 2013-2018 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "common-defs.h"
#include "linux-btrace.h"
#include "common-regcache.h"
#include "gdb_wait.h"
#include "x86-cpuid.h"
#include "filestuff.h"
#include "common/scoped_fd.h"
#include "common/scoped_mmap.h"

#include <inttypes.h>

#include <sys/syscall.h>

#if HAVE_LINUX_PERF_EVENT_H && defined(SYS_perf_event_open)
#include <unistd.h>
#include <sys/mman.h>
#include <sys/user.h>
#include "nat/gdb_ptrace.h"
#include <sys/types.h>
#include <signal.h>

/* A branch trace record in perf_event.  */
struct perf_event_bts
{
  /* The linear address of the branch source.  */
  uint64_t from;

  /* The linear address of the branch destination.  */
  uint64_t to;
};

/* A perf_event branch trace sample.  */
struct perf_event_sample
{
  /* The perf_event sample header.  */
  struct perf_event_header header;

  /* The perf_event branch tracing payload.  */
  struct perf_event_bts bts;
};
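
/* Note: perf_event_sample_ok, below, relies on sample records having
   exactly this layout; a record whose header.size differs from
   sizeof (struct perf_event_sample) means we lost synchronization with
   the event stream.  */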

/* Identify the cpu we're running on.  */
static struct btrace_cpu
btrace_this_cpu (void)
{
  struct btrace_cpu cpu;
  unsigned int eax, ebx, ecx, edx;
  int ok;

  memset (&cpu, 0, sizeof (cpu));

  ok = x86_cpuid (0, &eax, &ebx, &ecx, &edx);
  if (ok != 0)
    {
      if (ebx == signature_INTEL_ebx && ecx == signature_INTEL_ecx
          && edx == signature_INTEL_edx)
        {
          unsigned int cpuid, ignore;

          ok = x86_cpuid (1, &cpuid, &ignore, &ignore, &ignore);
          if (ok != 0)
            {
              cpu.vendor = CV_INTEL;

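              /* CPUID leaf 1 returns the signature in EAX: family in
                 bits 8-11, model in bits 4-7, and the extended model in
                 bits 16-19.  For family 0x6, (cpuid >> 12) & 0xf0 moves
                 the extended model into the high nibble of MODEL.  */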
              cpu.family = (cpuid >> 8) & 0xf;
              cpu.model = (cpuid >> 4) & 0xf;

              if (cpu.family == 0x6)
                cpu.model += (cpuid >> 12) & 0xf0;
            }
        }
    }

  return cpu;
}

/* Return non-zero if there is new data in PEVENT; zero otherwise.  */

static int
perf_event_new_data (const struct perf_event_buffer *pev)
{
  return *pev->data_head != pev->last_head;
}

/* Copy the last SIZE bytes from PEV ending at DATA_HEAD and return a pointer
   to the memory holding the copy.
   The caller is responsible for freeing the memory.  */

static gdb_byte *
perf_event_read (const struct perf_event_buffer *pev, __u64 data_head,
                 size_t size)
{
  const gdb_byte *begin, *end, *start, *stop;
  gdb_byte *buffer;
  size_t buffer_size;
  __u64 data_tail;

  if (size == 0)
    return NULL;

  /* We should never ask for more data than the buffer can hold.  */
  buffer_size = pev->size;
  gdb_assert (size <= buffer_size);

  /* If we ask for more data than we seem to have, we wrap around and read
     data from the end of the buffer.  This is already handled by the %
     BUFFER_SIZE operation, below.  Here, we just need to make sure that we
     don't underflow.

     Note that this is perfectly OK for perf event buffers where data_head
     doesn't grow indefinitely and instead wraps around to remain within the
     buffer's boundaries.  */
  if (data_head < size)
    data_head += buffer_size;

  gdb_assert (size <= data_head);
  data_tail = data_head - size;

  begin = pev->mem;
  start = begin + data_tail % buffer_size;
  stop = begin + data_head % buffer_size;

  buffer = (gdb_byte *) xmalloc (size);

  if (start < stop)
    memcpy (buffer, start, stop - start);
  else
    {
      end = begin + buffer_size;

      memcpy (buffer, start, end - start);
      memcpy (buffer + (end - start), begin, stop - begin);
    }
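
  /* For example, with BUFFER_SIZE == 16, DATA_HEAD == 20 and SIZE == 8,
     DATA_TAIL is 12, so START points at offset 12 and STOP at offset 4;
     START > STOP, and the two memcpy calls above reassemble offsets
     [12, 16) and [0, 4) into one contiguous 8-byte range.  */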

  return buffer;
}

/* Copy the perf event buffer data from PEV.
   Store a pointer to the copy into DATA and its size in PSIZE.  */

static void
perf_event_read_all (struct perf_event_buffer *pev, gdb_byte **data,
                     size_t *psize)
{
  size_t size;
  __u64 data_head;

  data_head = *pev->data_head;
  size = pev->size;

  *data = perf_event_read (pev, data_head, size);
  *psize = size;

  pev->last_head = data_head;
}

/* Determine the event type.
   Returns zero on success and fills in TYPE; returns -1 otherwise.  */

static int
perf_event_pt_event_type (int *type)
{
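  /* The intel_pt PMU advertises its dynamically assigned event type via
     sysfs; this number is what perf_event_attr.type must be set to when
     opening an Intel PT event.  */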
  gdb_file_up file
    = gdb_fopen_cloexec ("/sys/bus/event_source/devices/intel_pt/type", "r");
  if (file == nullptr)
    return -1;

  int found = fscanf (file.get (), "%d", type);
  if (found == 1)
    return 0;
  return -1;
}

/* Try to determine the start address of the Linux kernel.  */

static uint64_t
linux_determine_kernel_start (void)
{
  static uint64_t kernel_start;
  static int cached;

  if (cached != 0)
    return kernel_start;

  cached = 1;

  gdb_file_up file = gdb_fopen_cloexec ("/proc/kallsyms", "r");
  if (file == NULL)
    return kernel_start;

  while (!feof (file.get ()))
    {
      char buffer[1024], symbol[8], *line;
      uint64_t addr;
      int match;

      line = fgets (buffer, sizeof (buffer), file.get ());
      if (line == NULL)
        break;

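      /* Each kallsyms line has the form "<hex address> <type> <symbol>".
         We only match code symbols (type 't' or 'T') and read at most
         seven characters of the name, enough to recognize "_text".  */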
      match = sscanf (line, "%" SCNx64 " %*[tT] %7s", &addr, symbol);
      if (match != 2)
        continue;

      if (strcmp (symbol, "_text") == 0)
        {
          kernel_start = addr;
          break;
        }
    }

  return kernel_start;
}

/* Check whether an address is in the kernel.  */

static inline int
perf_event_is_kernel_addr (uint64_t addr)
{
  uint64_t kernel_start;

  kernel_start = linux_determine_kernel_start ();
  if (kernel_start != 0ull)
    return (addr >= kernel_start);

  /* If we don't know the kernel's start address, let's check the most
     significant bit.  This will work at least for 64-bit kernels.  */
  return ((addr & (1ull << 63)) != 0);
}

/* Check whether a perf event record should be skipped.  */

static inline int
perf_event_skip_bts_record (const struct perf_event_bts *bts)
{
  /* The hardware may report branches from kernel into user space.  Branches
     from user into kernel space will be suppressed.  We filter the former to
     provide a consistent branch trace excluding kernel.  */
  return perf_event_is_kernel_addr (bts->from);
}

/* Perform a few consistency checks on a perf event sample record.  This is
   meant to catch cases when we get out of sync with the perf event stream.  */

static inline int
perf_event_sample_ok (const struct perf_event_sample *sample)
{
  if (sample->header.type != PERF_RECORD_SAMPLE)
    return 0;

  if (sample->header.size != sizeof (*sample))
    return 0;

  return 1;
}

/* Branch trace is collected in a circular buffer [begin; end) as pairs of from
   and to addresses (plus a header).

   Start points into that buffer at the next sample position.
   We read the collected samples backwards from start.

   While reading the samples, we convert the information into a list of blocks.
   For two adjacent samples s1 and s2, we form a block b such that b.begin =
   s1.to and b.end = s2.from.

   In case the buffer overflows during sampling, one sample may have its lower
   part at the end and its upper part at the beginning of the buffer.  */

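/* For example, samples s1 = (from1, to1) and s2 = (from2, to2), read
   backwards starting with s2, yield the blocks [to2, current pc] and
   [to1, from2], and finally [0, from1], whose unknown begin address is
   filled in or pruned by the caller.  */
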
static VEC (btrace_block_s) *
perf_event_read_bts (struct btrace_target_info *tinfo, const uint8_t *begin,
                     const uint8_t *end, const uint8_t *start, size_t size)
{
  VEC (btrace_block_s) *btrace = NULL;
  struct perf_event_sample sample;
  size_t read = 0;
  struct btrace_block block = { 0, 0 };
  struct regcache *regcache;

  gdb_assert (begin <= start);
  gdb_assert (start <= end);

  /* The first block ends at the current pc.  */
  regcache = get_thread_regcache_for_ptid (tinfo->ptid);
  block.end = regcache_read_pc (regcache);

  /* The buffer may contain a partial record as its last entry (i.e. when the
     buffer size is not a multiple of the sample size).  */
  read = sizeof (sample) - 1;

  for (; read < size; read += sizeof (sample))
    {
      const struct perf_event_sample *psample;

      /* Find the next perf_event sample in a backwards traversal.  */
      start -= sizeof (sample);

      /* If we're still inside the buffer, we're done.  */
      if (begin <= start)
        psample = (const struct perf_event_sample *) start;
      else
        {
          int missing;

          /* We're to the left of the ring buffer, we will wrap around and
             reappear at the very right of the ring buffer.  */

          missing = (begin - start);
          start = (end - missing);

          /* If the entire sample is missing, we're done.  */
          if (missing == sizeof (sample))
            psample = (const struct perf_event_sample *) start;
          else
            {
              uint8_t *stack;

              /* The sample wrapped around.  The lower part is at the end and
                 the upper part is at the beginning of the buffer.  */
              stack = (uint8_t *) &sample;

              /* Copy the two parts so we have a contiguous sample.  */
              memcpy (stack, start, missing);
              memcpy (stack + missing, begin, sizeof (sample) - missing);

              psample = &sample;
            }
        }

      if (!perf_event_sample_ok (psample))
        {
          warning (_("Branch trace may be incomplete."));
          break;
        }

      if (perf_event_skip_bts_record (&psample->bts))
        continue;

      /* We found a valid sample, so we can complete the current block.  */
      block.begin = psample->bts.to;

      VEC_safe_push (btrace_block_s, btrace, &block);

      /* Start the next block.  */
      block.end = psample->bts.from;
    }

  /* Push the last block (i.e. the first one of inferior execution), as well.
     We don't know where it ends, but we know where it starts.  If we're
     reading delta trace, we can fill in the start address later on.
     Otherwise we will prune it.  */
  block.begin = 0;
  VEC_safe_push (btrace_block_s, btrace, &block);

  return btrace;
}

/* Check whether the kernel supports BTS.  */

static int
kernel_supports_bts (void)
{
  struct perf_event_attr attr;
  pid_t child, pid;
  int status, file;

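  /* We probe by opening an actual BTS event on a forked, stopped child:
     the child traps itself via PTRACE_TRACEME and SIGTRAP, giving us a
     quiesced target pid for perf_event_open without disturbing any real
     inferior.  If the open succeeds, the kernel supports BTS.  */
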
  errno = 0;
  child = fork ();
  switch (child)
    {
    case -1:
      warning (_("test bts: cannot fork: %s."), safe_strerror (errno));
      return 0;

    case 0:
      status = ptrace (PTRACE_TRACEME, 0, NULL, NULL);
      if (status != 0)
        {
          warning (_("test bts: cannot PTRACE_TRACEME: %s."),
                   safe_strerror (errno));
          _exit (1);
        }

      status = raise (SIGTRAP);
      if (status != 0)
        {
          warning (_("test bts: cannot raise SIGTRAP: %s."),
                   safe_strerror (errno));
          _exit (1);
        }

      _exit (1);

    default:
      pid = waitpid (child, &status, 0);
      if (pid != child)
        {
          warning (_("test bts: bad pid %ld, error: %s."),
                   (long) pid, safe_strerror (errno));
          return 0;
        }

      if (!WIFSTOPPED (status))
        {
          warning (_("test bts: expected stop. status: %d."),
                   status);
          return 0;
        }

      memset (&attr, 0, sizeof (attr));

      attr.type = PERF_TYPE_HARDWARE;
      attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
      attr.sample_period = 1;
      attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR;
      attr.exclude_kernel = 1;
      attr.exclude_hv = 1;
      attr.exclude_idle = 1;

      file = syscall (SYS_perf_event_open, &attr, child, -1, -1, 0);
      if (file >= 0)
        close (file);

      kill (child, SIGKILL);
      ptrace (PTRACE_KILL, child, NULL, NULL);

      pid = waitpid (child, &status, 0);
      if (pid != child)
        {
          warning (_("test bts: bad pid %ld, error: %s."),
                   (long) pid, safe_strerror (errno));
          if (!WIFSIGNALED (status))
            warning (_("test bts: expected killed. status: %d."),
                     status);
        }

      return (file >= 0);
    }
}

/* Check whether the kernel supports Intel Processor Trace.  */

static int
kernel_supports_pt (void)
{
  struct perf_event_attr attr;
  pid_t child, pid;
  int status, file, type;

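  /* Same probing scheme as in kernel_supports_bts, above: fork a stopped
     child and try to open an Intel PT event on it.  */
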
  errno = 0;
  child = fork ();
  switch (child)
    {
    case -1:
      warning (_("test pt: cannot fork: %s."), safe_strerror (errno));
      return 0;

    case 0:
      status = ptrace (PTRACE_TRACEME, 0, NULL, NULL);
      if (status != 0)
        {
          warning (_("test pt: cannot PTRACE_TRACEME: %s."),
                   safe_strerror (errno));
          _exit (1);
        }

      status = raise (SIGTRAP);
      if (status != 0)
        {
          warning (_("test pt: cannot raise SIGTRAP: %s."),
                   safe_strerror (errno));
          _exit (1);
        }

      _exit (1);

    default:
      pid = waitpid (child, &status, 0);
      if (pid != child)
        {
          warning (_("test pt: bad pid %ld, error: %s."),
                   (long) pid, safe_strerror (errno));
          return 0;
        }

      if (!WIFSTOPPED (status))
        {
          warning (_("test pt: expected stop. status: %d."),
                   status);
          return 0;
        }

      status = perf_event_pt_event_type (&type);
      if (status != 0)
        file = -1;
      else
        {
          memset (&attr, 0, sizeof (attr));

          attr.size = sizeof (attr);
          attr.type = type;
          attr.exclude_kernel = 1;
          attr.exclude_hv = 1;
          attr.exclude_idle = 1;

          file = syscall (SYS_perf_event_open, &attr, child, -1, -1, 0);
          if (file >= 0)
            close (file);
        }

      kill (child, SIGKILL);
      ptrace (PTRACE_KILL, child, NULL, NULL);

      pid = waitpid (child, &status, 0);
      if (pid != child)
        {
          warning (_("test pt: bad pid %ld, error: %s."),
                   (long) pid, safe_strerror (errno));
          if (!WIFSIGNALED (status))
            warning (_("test pt: expected killed. status: %d."),
                     status);
        }

      return (file >= 0);
    }
}

/* Check whether an Intel cpu supports BTS.  */

static int
intel_supports_bts (const struct btrace_cpu *cpu)
{
  switch (cpu->family)
    {
    case 0x6:
      switch (cpu->model)
        {
        case 0x1a: /* Nehalem */
        case 0x1f:
        case 0x1e:
        case 0x2e:
        case 0x25: /* Westmere */
        case 0x2c:
        case 0x2f:
        case 0x2a: /* Sandy Bridge */
        case 0x2d:
        case 0x3a: /* Ivy Bridge */

          /* AAJ122: LBR, BTM, or BTS records may have incorrect branch
             "from" information after an EIST transition, T-states, C1E, or
             Adaptive Thermal Throttling.  */
          return 0;
        }
    }

  return 1;
}

/* Check whether the cpu supports BTS.  */

static int
cpu_supports_bts (void)
{
  struct btrace_cpu cpu;

  cpu = btrace_this_cpu ();
  switch (cpu.vendor)
    {
    default:
      /* Don't know about others.  Let's assume they do.  */
      return 1;

    case CV_INTEL:
      return intel_supports_bts (&cpu);
    }
}

/* Check whether the linux target supports BTS.  */

static int
linux_supports_bts (void)
{
  static int cached;

  if (cached == 0)
    {
      if (!kernel_supports_bts ())
        cached = -1;
      else if (!cpu_supports_bts ())
        cached = -1;
      else
        cached = 1;
    }

  return cached > 0;
}

/* Check whether the linux target supports Intel Processor Trace.  */

static int
linux_supports_pt (void)
{
  static int cached;

  if (cached == 0)
    {
      if (!kernel_supports_pt ())
        cached = -1;
      else
        cached = 1;
    }

  return cached > 0;
}

/* See linux-btrace.h.  */

int
linux_supports_btrace (struct target_ops *ops, enum btrace_format format)
{
  switch (format)
    {
    case BTRACE_FORMAT_NONE:
      return 0;

    case BTRACE_FORMAT_BTS:
      return linux_supports_bts ();

    case BTRACE_FORMAT_PT:
      return linux_supports_pt ();
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format"));
}

/* Enable branch tracing in BTS format.  */

static struct btrace_target_info *
linux_enable_bts (ptid_t ptid, const struct btrace_config_bts *conf)
{
  struct btrace_tinfo_bts *bts;
  size_t size, pages;
  __u64 data_offset;
  int pid, pg;

  gdb::unique_xmalloc_ptr<btrace_target_info> tinfo
    (XCNEW (btrace_target_info));
  tinfo->ptid = ptid;

  tinfo->conf.format = BTRACE_FORMAT_BTS;
  bts = &tinfo->variant.bts;

  bts->attr.size = sizeof (bts->attr);
  bts->attr.type = PERF_TYPE_HARDWARE;
  bts->attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
  bts->attr.sample_period = 1;

  /* We sample from and to address.  */
  bts->attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR;

  bts->attr.exclude_kernel = 1;
  bts->attr.exclude_hv = 1;
  bts->attr.exclude_idle = 1;

  pid = ptid_get_lwp (ptid);
  if (pid == 0)
    pid = ptid_get_pid (ptid);

  errno = 0;
  scoped_fd fd (syscall (SYS_perf_event_open, &bts->attr, pid, -1, -1, 0));
  if (fd.get () < 0)
    return nullptr;

  /* Convert the requested size in bytes to pages (rounding up).  */
  pages = ((size_t) conf->size / PAGE_SIZE
           + ((conf->size % PAGE_SIZE) == 0 ? 0 : 1));
  /* We need at least one page.  */
  if (pages == 0)
    pages = 1;

  /* The buffer size can be requested in powers of two pages.  Adjust PAGES
     to the next power of two.  */
  for (pg = 0; pages != ((size_t) 1 << pg); ++pg)
    if ((pages & ((size_t) 1 << pg)) != 0)
      pages += ((size_t) 1 << pg);

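  /* For example, a request of 5 pages (binary 101) becomes 6 (110) at
     PG == 0 and 8 (1000) at PG == 1: adding the lowest set bit carries
     upward until a single bit remains, and the loop stops once PAGES
     equals 1 << PG, the next power of two.  */
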
  /* We try to allocate the requested size.
     If that fails, try to get as much as we can.  */
  scoped_mmap data;
  for (; pages > 0; pages >>= 1)
    {
      size_t length;
      __u64 data_size;

      data_size = (__u64) pages * PAGE_SIZE;

      /* Don't ask for more than we can represent in the configuration.  */
      if ((__u64) UINT_MAX < data_size)
        continue;

      size = (size_t) data_size;
      length = size + PAGE_SIZE;

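      /* LENGTH covers one extra page because the perf mmap area starts
         with a metadata page (struct perf_event_mmap_page) followed by
         the data buffer itself; DATA_OFFSET below accounts for it.  */
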
      /* Check for overflows.  */
      if ((__u64) length != data_size + PAGE_SIZE)
        continue;

      /* The number of pages we request needs to be a power of two.  */
      data.reset (nullptr, length, PROT_READ, MAP_SHARED, fd.get (), 0);
      if (data.get () != MAP_FAILED)
        break;
    }

  if (pages == 0)
    return nullptr;

  struct perf_event_mmap_page *header = (struct perf_event_mmap_page *)
    data.get ();
  data_offset = PAGE_SIZE;

#if defined (PERF_ATTR_SIZE_VER5)
  if (offsetof (struct perf_event_mmap_page, data_size) <= header->size)
    {
      __u64 data_size;

      data_offset = header->data_offset;
      data_size = header->data_size;

      size = (unsigned int) data_size;

      /* Check for overflows.  */
      if ((__u64) size != data_size)
        return nullptr;
    }
#endif /* defined (PERF_ATTR_SIZE_VER5) */

  bts->bts.size = size;
  bts->bts.data_head = &header->data_head;
  bts->bts.mem = (const uint8_t *) data.get () + data_offset;
  bts->bts.last_head = 0ull;
  bts->header = header;
  bts->file = fd.release ();

  data.release ();

  tinfo->conf.bts.size = (unsigned int) size;
  return tinfo.release ();
}

#if defined (PERF_ATTR_SIZE_VER5)

/* Enable branch tracing in Intel Processor Trace format.  */

static struct btrace_target_info *
linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
{
  struct btrace_tinfo_pt *pt;
  size_t pages;
  int pid, pg, errcode, type;

  if (conf->size == 0)
    return NULL;

  errcode = perf_event_pt_event_type (&type);
  if (errcode != 0)
    return NULL;

  pid = ptid_get_lwp (ptid);
  if (pid == 0)
    pid = ptid_get_pid (ptid);

  gdb::unique_xmalloc_ptr<btrace_target_info> tinfo
    (XCNEW (btrace_target_info));
  tinfo->ptid = ptid;

  tinfo->conf.format = BTRACE_FORMAT_PT;
  pt = &tinfo->variant.pt;

  pt->attr.size = sizeof (pt->attr);
  pt->attr.type = type;

  pt->attr.exclude_kernel = 1;
  pt->attr.exclude_hv = 1;
  pt->attr.exclude_idle = 1;

  errno = 0;
  scoped_fd fd (syscall (SYS_perf_event_open, &pt->attr, pid, -1, -1, 0));
  if (fd.get () < 0)
    return nullptr;

  /* Allocate the configuration page.  */
  scoped_mmap data (nullptr, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
                    fd.get (), 0);
  if (data.get () == MAP_FAILED)
    return nullptr;

  struct perf_event_mmap_page *header = (struct perf_event_mmap_page *)
    data.get ();

  header->aux_offset = header->data_offset + header->data_size;

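  /* Intel PT trace data is collected in a separate "aux" area.  Its
     offset and size are communicated to the kernel through the writable
     header page: we place it right after the data area and mmap it at
     AUX_OFFSET below.  */
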
  /* Convert the requested size in bytes to pages (rounding up).  */
  pages = ((size_t) conf->size / PAGE_SIZE
           + ((conf->size % PAGE_SIZE) == 0 ? 0 : 1));
  /* We need at least one page.  */
  if (pages == 0)
    pages = 1;

  /* The buffer size can be requested in powers of two pages.  Adjust PAGES
     to the next power of two.  */
  for (pg = 0; pages != ((size_t) 1 << pg); ++pg)
    if ((pages & ((size_t) 1 << pg)) != 0)
      pages += ((size_t) 1 << pg);

  /* We try to allocate the requested size.
     If that fails, try to get as much as we can.  */
  scoped_mmap aux;
  for (; pages > 0; pages >>= 1)
    {
      size_t length;
      __u64 data_size;

      data_size = (__u64) pages * PAGE_SIZE;

      /* Don't ask for more than we can represent in the configuration.  */
      if ((__u64) UINT_MAX < data_size)
        continue;

      length = (size_t) data_size;

      /* Check for overflows.  */
      if ((__u64) length != data_size)
        continue;

      header->aux_size = data_size;

      aux.reset (nullptr, length, PROT_READ, MAP_SHARED, fd.get (),
                 header->aux_offset);
      if (aux.get () != MAP_FAILED)
        break;
    }

  if (pages == 0)
    return nullptr;

  pt->pt.size = aux.size ();
  pt->pt.mem = (const uint8_t *) aux.release ();
  pt->pt.data_head = &header->aux_head;
  pt->header = header;
  pt->file = fd.release ();

  data.release ();

  tinfo->conf.pt.size = (unsigned int) pt->pt.size;
  return tinfo.release ();
}

#else /* !defined (PERF_ATTR_SIZE_VER5) */

static struct btrace_target_info *
linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
{
  errno = EOPNOTSUPP;
  return NULL;
}

#endif /* !defined (PERF_ATTR_SIZE_VER5) */

/* See linux-btrace.h.  */

struct btrace_target_info *
linux_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
{
  struct btrace_target_info *tinfo;

  tinfo = NULL;
  switch (conf->format)
    {
    case BTRACE_FORMAT_NONE:
      break;

    case BTRACE_FORMAT_BTS:
      tinfo = linux_enable_bts (ptid, &conf->bts);
      break;

    case BTRACE_FORMAT_PT:
      tinfo = linux_enable_pt (ptid, &conf->pt);
      break;
    }

  if (tinfo == NULL)
    error (_("Unknown error."));

  return tinfo;
}

/* Disable BTS tracing.  */

static enum btrace_error
linux_disable_bts (struct btrace_tinfo_bts *tinfo)
{
  munmap ((void *) tinfo->header, tinfo->bts.size + PAGE_SIZE);
  close (tinfo->file);

  return BTRACE_ERR_NONE;
}

/* Disable Intel Processor Trace.  */

static enum btrace_error
linux_disable_pt (struct btrace_tinfo_pt *tinfo)
{
  munmap ((void *) tinfo->pt.mem, tinfo->pt.size);
  munmap ((void *) tinfo->header, PAGE_SIZE);
  close (tinfo->file);

  return BTRACE_ERR_NONE;
}

/* See linux-btrace.h.  */

enum btrace_error
linux_disable_btrace (struct btrace_target_info *tinfo)
{
  enum btrace_error errcode;

  errcode = BTRACE_ERR_NOT_SUPPORTED;
  switch (tinfo->conf.format)
    {
    case BTRACE_FORMAT_NONE:
      break;

    case BTRACE_FORMAT_BTS:
      errcode = linux_disable_bts (&tinfo->variant.bts);
      break;

    case BTRACE_FORMAT_PT:
      errcode = linux_disable_pt (&tinfo->variant.pt);
      break;
    }

  if (errcode == BTRACE_ERR_NONE)
    xfree (tinfo);

  return errcode;
}

/* Read branch trace data in BTS format for the thread given by TINFO into
   BTRACE using the TYPE reading method.  */

static enum btrace_error
linux_read_bts (struct btrace_data_bts *btrace,
                struct btrace_target_info *tinfo,
                enum btrace_read_type type)
{
  struct perf_event_buffer *pevent;
  const uint8_t *begin, *end, *start;
  size_t buffer_size, size;
  __u64 data_head, data_tail;
  unsigned int retries = 5;

  pevent = &tinfo->variant.bts.bts;

  /* For delta reads, we return at least the partial last block containing
     the current PC.  */
  if (type == BTRACE_READ_NEW && !perf_event_new_data (pevent))
    return BTRACE_ERR_NONE;

  buffer_size = pevent->size;
  data_tail = pevent->last_head;

  /* We may need to retry reading the trace.  See below.  */
  while (retries--)
    {
      data_head = *pevent->data_head;

      /* Delete any leftover trace from the previous iteration.  */
      VEC_free (btrace_block_s, btrace->blocks);

      if (type == BTRACE_READ_DELTA)
        {
          __u64 data_size;

          /* Determine the number of bytes to read and check for buffer
             overflows.  */

          /* Check for data head overflows.  We might be able to recover from
             those but they are very unlikely and it's not really worth the
             effort, I think.  */
          if (data_head < data_tail)
            return BTRACE_ERR_OVERFLOW;

          /* If the buffer is smaller than the trace delta, we overflowed.  */
          data_size = data_head - data_tail;
          if (buffer_size < data_size)
            return BTRACE_ERR_OVERFLOW;

          /* DATA_SIZE <= BUFFER_SIZE and therefore fits into a size_t.  */
          size = (size_t) data_size;
        }
      else
        {
          /* Read the entire buffer.  */
          size = buffer_size;

          /* Adjust the size if the buffer has not overflowed, yet.  */
          if (data_head < size)
            size = (size_t) data_head;
        }

      /* Data_head keeps growing; the buffer itself is circular.  */
      begin = pevent->mem;
      start = begin + data_head % buffer_size;

      if (data_head <= buffer_size)
        end = start;
      else
        end = begin + pevent->size;

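      /* BEGIN and END delimit the valid part of the buffer (END == START
         until the buffer wraps for the first time), and START is where
         the backwards traversal in perf_event_read_bts begins.  */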
      btrace->blocks = perf_event_read_bts (tinfo, begin, end, start, size);

      /* The stopping thread notifies its ptracer before it is scheduled out.
         On multi-core systems, the debugger might therefore run while the
         kernel might be writing the last branch trace records.

         Let's check whether the data head moved while we read the trace.  */
      if (data_head == *pevent->data_head)
        break;
    }

  pevent->last_head = data_head;

  /* Prune the incomplete last block (i.e. the first one of inferior execution)
     if we're not doing a delta read.  There is no way of filling in its zeroed
     BEGIN element.  */
  if (!VEC_empty (btrace_block_s, btrace->blocks)
      && type != BTRACE_READ_DELTA)
    VEC_pop (btrace_block_s, btrace->blocks);

  return BTRACE_ERR_NONE;
}

/* Fill in the Intel Processor Trace configuration information.  */

static void
linux_fill_btrace_pt_config (struct btrace_data_pt_config *conf)
{
  conf->cpu = btrace_this_cpu ();
}

/* Read branch trace data in Intel Processor Trace format for the thread
   given by TINFO into BTRACE using the TYPE reading method.  */

static enum btrace_error
linux_read_pt (struct btrace_data_pt *btrace,
               struct btrace_target_info *tinfo,
               enum btrace_read_type type)
{
  struct perf_event_buffer *pt;

  pt = &tinfo->variant.pt.pt;

  linux_fill_btrace_pt_config (&btrace->config);

  switch (type)
    {
    case BTRACE_READ_DELTA:
      /* We don't support delta reads.  The data head (i.e. aux_head) wraps
         around to stay inside the aux buffer.  */
      return BTRACE_ERR_NOT_SUPPORTED;

    case BTRACE_READ_NEW:
      if (!perf_event_new_data (pt))
        return BTRACE_ERR_NONE;

      /* Fall through.  */
    case BTRACE_READ_ALL:
      perf_event_read_all (pt, &btrace->data, &btrace->size);
      return BTRACE_ERR_NONE;
    }

  internal_error (__FILE__, __LINE__, _("Unknown btrace read type."));
}

/* See linux-btrace.h.  */

enum btrace_error
linux_read_btrace (struct btrace_data *btrace,
                   struct btrace_target_info *tinfo,
                   enum btrace_read_type type)
{
  switch (tinfo->conf.format)
    {
    case BTRACE_FORMAT_NONE:
      return BTRACE_ERR_NOT_SUPPORTED;

    case BTRACE_FORMAT_BTS:
      /* We read btrace in BTS format.  */
      btrace->format = BTRACE_FORMAT_BTS;
      btrace->variant.bts.blocks = NULL;

      return linux_read_bts (&btrace->variant.bts, tinfo, type);

    case BTRACE_FORMAT_PT:
      /* We read btrace in Intel Processor Trace format.  */
      btrace->format = BTRACE_FORMAT_PT;
      btrace->variant.pt.data = NULL;
      btrace->variant.pt.size = 0;

      return linux_read_pt (&btrace->variant.pt, tinfo, type);
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}

/* See linux-btrace.h.  */

const struct btrace_config *
linux_btrace_conf (const struct btrace_target_info *tinfo)
{
  return &tinfo->conf;
}

#else /* !HAVE_LINUX_PERF_EVENT_H */

/* See linux-btrace.h.  */

int
linux_supports_btrace (struct target_ops *ops, enum btrace_format format)
{
  return 0;
}

/* See linux-btrace.h.  */

struct btrace_target_info *
linux_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
{
  return NULL;
}

/* See linux-btrace.h.  */

enum btrace_error
linux_disable_btrace (struct btrace_target_info *tinfo)
{
  return BTRACE_ERR_NOT_SUPPORTED;
}

/* See linux-btrace.h.  */

enum btrace_error
linux_read_btrace (struct btrace_data *btrace,
                   struct btrace_target_info *tinfo,
                   enum btrace_read_type type)
{
  return BTRACE_ERR_NOT_SUPPORTED;
}

/* See linux-btrace.h.  */

const struct btrace_config *
linux_btrace_conf (const struct btrace_target_info *tinfo)
{
  return NULL;
}

#endif /* !HAVE_LINUX_PERF_EVENT_H */