gdb/nat/linux-btrace.c
1 /* Linux-dependent part of branch trace support for GDB, and GDBserver.
2
3 Copyright (C) 2013-2018 Free Software Foundation, Inc.
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "common-defs.h"
23 #include "linux-btrace.h"
24 #include "common-regcache.h"
25 #include "gdb_wait.h"
26 #include "x86-cpuid.h"
27 #include "filestuff.h"
28
29 #include <inttypes.h>
30
31 #include <sys/syscall.h>
32
33 #if HAVE_LINUX_PERF_EVENT_H && defined(SYS_perf_event_open)
34 #include <unistd.h>
35 #include <sys/mman.h>
36 #include <sys/user.h>
37 #include "nat/gdb_ptrace.h"
38 #include <sys/types.h>
39 #include <signal.h>
40
41 /* A branch trace record in perf_event. */
42 struct perf_event_bts
43 {
44 /* The linear address of the branch source. */
45 uint64_t from;
46
47 /* The linear address of the branch destination. */
48 uint64_t to;
49 };
50
51 /* A perf_event branch trace sample. */
52 struct perf_event_sample
53 {
54 /* The perf_event sample header. */
55 struct perf_event_header header;
56
57 /* The perf_event branch tracing payload. */
58 struct perf_event_bts bts;
59 };
60
61 /* Identify the cpu we're running on. */
62 static struct btrace_cpu
63 btrace_this_cpu (void)
64 {
65 struct btrace_cpu cpu;
66 unsigned int eax, ebx, ecx, edx;
67 int ok;
68
69 memset (&cpu, 0, sizeof (cpu));
70
71 ok = x86_cpuid (0, &eax, &ebx, &ecx, &edx);
72 if (ok != 0)
73 {
74 if (ebx == signature_INTEL_ebx && ecx == signature_INTEL_ecx
75 && edx == signature_INTEL_edx)
76 {
77 unsigned int cpuid, ignore;
78
79 ok = x86_cpuid (1, &cpuid, &ignore, &ignore, &ignore);
80 if (ok != 0)
81 {
82 cpu.vendor = CV_INTEL;
83
84 cpu.family = (cpuid >> 8) & 0xf;
85 cpu.model = (cpuid >> 4) & 0xf;
86
87 if (cpu.family == 0x6)
88 cpu.model += (cpuid >> 12) & 0xf0;
89 }
90 }
91 }
92
93 return cpu;
94 }
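/* Worked example (illustrative signature, not taken from this file): a
   CPUID leaf 1 EAX value of 0x000306a9 decodes to family
   (0x306a9 >> 8) & 0xf == 0x6 and model
   ((0x306a9 >> 4) & 0xf) + ((0x306a9 >> 12) & 0xf0) == 0xa + 0x30 == 0x3a,
   i.e. Ivy Bridge in the model table used by intel_supports_bts below.  */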
95
96 /* Return non-zero if there is new data in PEVENT; zero otherwise. */
97
98 static int
99 perf_event_new_data (const struct perf_event_buffer *pev)
100 {
101 return *pev->data_head != pev->last_head;
102 }
103
104 /* Copy the last SIZE bytes from PEV ending at DATA_HEAD and return a pointer
105 to the memory holding the copy.
106 The caller is responsible for freeing the memory. */
107
108 static gdb_byte *
109 perf_event_read (const struct perf_event_buffer *pev, __u64 data_head,
110 size_t size)
111 {
112 const gdb_byte *begin, *end, *start, *stop;
113 gdb_byte *buffer;
114 size_t buffer_size;
115 __u64 data_tail;
116
117 if (size == 0)
118 return NULL;
119
120 /* We should never ask for more data than the buffer can hold. */
121 buffer_size = pev->size;
122 gdb_assert (size <= buffer_size);
123
124 /* If we ask for more data than we seem to have, we wrap around and read
125 data from the end of the buffer. This is already handled by the %
126 BUFFER_SIZE operation, below. Here, we just need to make sure that we
127 don't underflow.
128
129 Note that this is perfectly OK for perf event buffers where data_head
130 doesn't grow indefinitely and instead wraps around to remain within the
131 buffer's boundaries. */
132 if (data_head < size)
133 data_head += buffer_size;
134
135 gdb_assert (size <= data_head);
136 data_tail = data_head - size;
137
138 begin = pev->mem;
139 start = begin + data_tail % buffer_size;
140 stop = begin + data_head % buffer_size;
141
142 buffer = (gdb_byte *) xmalloc (size);
143
144 if (start < stop)
145 memcpy (buffer, start, stop - start);
146 else
147 {
148 end = begin + buffer_size;
149
150 memcpy (buffer, start, end - start);
151 memcpy (buffer + (end - start), begin, stop - begin);
152 }
153
154 return buffer;
155 }
156
157 /* Copy the perf event buffer data from PEV.
158 Store a pointer to the copy into DATA and its size in SIZE. */
159
160 static void
161 perf_event_read_all (struct perf_event_buffer *pev, gdb_byte **data,
162 size_t *psize)
163 {
164 size_t size;
165 __u64 data_head;
166
167 data_head = *pev->data_head;
168 size = pev->size;
169
170 *data = perf_event_read (pev, data_head, size);
171 *psize = size;
172
173 pev->last_head = data_head;
174 }
175
176 /* Determine the event type.
177 Returns zero on success and fills in TYPE; returns -1 otherwise. */
178
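/* Note: the sysfs attribute consulted below holds the kernel's dynamically
   assigned PMU type number for the intel_pt event source as a single
   decimal integer; the value differs between systems, so it has to be read
   at run time rather than hard-coded.  */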
179 static int
180 perf_event_pt_event_type (int *type)
181 {
182 FILE *file;
183 int found;
184
185 file = fopen ("/sys/bus/event_source/devices/intel_pt/type", "r");
186 if (file == NULL)
187 return -1;
188
189 found = fscanf (file, "%d", type);
190
191 fclose (file);
192
193 if (found == 1)
194 return 0;
195 return -1;
196 }
197
198 /* Try to determine the start address of the Linux kernel. */
199
200 static uint64_t
201 linux_determine_kernel_start (void)
202 {
203 static uint64_t kernel_start;
204 static int cached;
205
206 if (cached != 0)
207 return kernel_start;
208
209 cached = 1;
210
211 gdb_file_up file = gdb_fopen_cloexec ("/proc/kallsyms", "r");
212 if (file == NULL)
213 return kernel_start;
214
215 while (!feof (file.get ()))
216 {
217 char buffer[1024], symbol[8], *line;
218 uint64_t addr;
219 int match;
220
221 line = fgets (buffer, sizeof (buffer), file.get ());
222 if (line == NULL)
223 break;
224
225 match = sscanf (line, "%" SCNx64 " %*[tT] %7s", &addr, symbol);
226 if (match != 2)
227 continue;
228
229 if (strcmp (symbol, "_text") == 0)
230 {
231 kernel_start = addr;
232 break;
233 }
234 }
235
236 return kernel_start;
237 }
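/* For reference (illustrative address): a matching /proc/kallsyms line has
   the form "ffffffff81000000 T _text"; the sscanf pattern above accepts
   both the global "T" and the local "t" text symbol type.  */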
238
239 /* Check whether an address is in the kernel. */
240
241 static inline int
242 perf_event_is_kernel_addr (uint64_t addr)
243 {
244 uint64_t kernel_start;
245
246 kernel_start = linux_determine_kernel_start ();
247 if (kernel_start != 0ull)
248 return (addr >= kernel_start);
249
250 /* If we don't know the kernel's start address, let's check the most
251 significant bit. This will work at least for 64-bit kernels. */
252 return ((addr & (1ull << 63)) != 0);
253 }
254
255 /* Check whether a perf event record should be skipped. */
256
257 static inline int
258 perf_event_skip_bts_record (const struct perf_event_bts *bts)
259 {
260 /* The hardware may report branches from kernel into user space. Branches
261 from user into kernel space will be suppressed. We filter the former to
262 provide a consistent branch trace excluding kernel. */
263 return perf_event_is_kernel_addr (bts->from);
264 }
265
266 /* Perform a few consistency checks on a perf event sample record. This is
267 meant to catch cases when we get out of sync with the perf event stream. */
268
269 static inline int
270 perf_event_sample_ok (const struct perf_event_sample *sample)
271 {
272 if (sample->header.type != PERF_RECORD_SAMPLE)
273 return 0;
274
275 if (sample->header.size != sizeof (*sample))
276 return 0;
277
278 return 1;
279 }
280
281 /* Branch trace is collected in a circular buffer [begin; end) as pairs of from
282 and to addresses (plus a header).
283
284 Start points into that buffer at the next sample position.
285 We read the collected samples backwards from start.
286
287 While reading the samples, we convert the information into a list of blocks.
288 For two adjacent samples s1 and s2, we form a block b such that b.begin =
289 s1.to and b.end = s2.from.
290
291 In case the buffer overflows during sampling, one sample may have its lower
292 part at the end and its upper part at the beginning of the buffer. */
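/* Worked example (hypothetical addresses): with the current PC at 0x4005a0
   and the two newest samples (from 0x400470 -> to 0x400530) and
   (from 0x400310 -> to 0x400400), the backwards traversal below yields the
   blocks [0x400530, 0x4005a0] and [0x400400, 0x400470], followed by an
   incomplete block ending at 0x400310 whose begin address is unknown.  */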
293
294 static VEC (btrace_block_s) *
295 perf_event_read_bts (struct btrace_target_info* tinfo, const uint8_t *begin,
296 const uint8_t *end, const uint8_t *start, size_t size)
297 {
298 VEC (btrace_block_s) *btrace = NULL;
299 struct perf_event_sample sample;
300 size_t read = 0;
301 struct btrace_block block = { 0, 0 };
302 struct regcache *regcache;
303
304 gdb_assert (begin <= start);
305 gdb_assert (start <= end);
306
307 /* The first block ends at the current pc. */
308 regcache = get_thread_regcache_for_ptid (tinfo->ptid);
309 block.end = regcache_read_pc (regcache);
310
311 /* The buffer may contain a partial record as its last entry (i.e. when the
312 buffer size is not a multiple of the sample size). */
313 read = sizeof (sample) - 1;
314
315 for (; read < size; read += sizeof (sample))
316 {
317 const struct perf_event_sample *psample;
318
319 /* Find the next perf_event sample in a backwards traversal. */
320 start -= sizeof (sample);
321
322 /* If we're still inside the buffer, we're done. */
323 if (begin <= start)
324 psample = (const struct perf_event_sample *) start;
325 else
326 {
327 int missing;
328
329 /* We're to the left of the ring buffer; we will wrap around and
330 reappear at the very right of the ring buffer. */
331
332 missing = (begin - start);
333 start = (end - missing);
334
335 /* If the entire sample is missing, we're done. */
336 if (missing == sizeof (sample))
337 psample = (const struct perf_event_sample *) start;
338 else
339 {
340 uint8_t *stack;
341
342 /* The sample wrapped around. The lower part is at the end and
343 the upper part is at the beginning of the buffer. */
344 stack = (uint8_t *) &sample;
345
346 /* Copy the two parts so we have a contiguous sample. */
347 memcpy (stack, start, missing);
348 memcpy (stack + missing, begin, sizeof (sample) - missing);
349
350 psample = &sample;
351 }
352 }
353
354 if (!perf_event_sample_ok (psample))
355 {
356 warning (_("Branch trace may be incomplete."));
357 break;
358 }
359
360 if (perf_event_skip_bts_record (&psample->bts))
361 continue;
362
363 /* We found a valid sample, so we can complete the current block. */
364 block.begin = psample->bts.to;
365
366 VEC_safe_push (btrace_block_s, btrace, &block);
367
368 /* Start the next block. */
369 block.end = psample->bts.from;
370 }
371
372 /* Push the last block (i.e. the first one of inferior execution), as well.
373 We don't know where it begins, but we know where it ends. If we're
374 reading delta trace, we can fill in the start address later on.
375 Otherwise we will prune it. */
376 block.begin = 0;
377 VEC_safe_push (btrace_block_s, btrace, &block);
378
379 return btrace;
380 }
381
382 /* Check whether the kernel supports BTS. */
383
384 static int
385 kernel_supports_bts (void)
386 {
387 struct perf_event_attr attr;
388 pid_t child, pid;
389 int status, file;
390
391 errno = 0;
392 child = fork ();
393 switch (child)
394 {
395 case -1:
396 warning (_("test bts: cannot fork: %s."), safe_strerror (errno));
397 return 0;
398
399 case 0:
400 status = ptrace (PTRACE_TRACEME, 0, NULL, NULL);
401 if (status != 0)
402 {
403 warning (_("test bts: cannot PTRACE_TRACEME: %s."),
404 safe_strerror (errno));
405 _exit (1);
406 }
407
408 status = raise (SIGTRAP);
409 if (status != 0)
410 {
411 warning (_("test bts: cannot raise SIGTRAP: %s."),
412 safe_strerror (errno));
413 _exit (1);
414 }
415
416 _exit (1);
417
418 default:
419 pid = waitpid (child, &status, 0);
420 if (pid != child)
421 {
422 warning (_("test bts: bad pid %ld, error: %s."),
423 (long) pid, safe_strerror (errno));
424 return 0;
425 }
426
427 if (!WIFSTOPPED (status))
428 {
429 warning (_("test bts: expected stop. status: %d."),
430 status);
431 return 0;
432 }
433
434 memset (&attr, 0, sizeof (attr));
435
436 attr.type = PERF_TYPE_HARDWARE;
437 attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
438 attr.sample_period = 1;
439 attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR;
440 attr.exclude_kernel = 1;
441 attr.exclude_hv = 1;
442 attr.exclude_idle = 1;
443
444 file = syscall (SYS_perf_event_open, &attr, child, -1, -1, 0);
445 if (file >= 0)
446 close (file);
447
448 kill (child, SIGKILL);
449 ptrace (PTRACE_KILL, child, NULL, NULL);
450
451 pid = waitpid (child, &status, 0);
452 if (pid != child)
453 {
454 warning (_("test bts: bad pid %ld, error: %s."),
455 (long) pid, safe_strerror (errno));
456 if (!WIFSIGNALED (status))
457 warning (_("test bts: expected killed. status: %d."),
458 status);
459 }
460
461 return (file >= 0);
462 }
463 }
464
465 /* Check whether the kernel supports Intel Processor Trace. */
466
467 static int
468 kernel_supports_pt (void)
469 {
470 struct perf_event_attr attr;
471 pid_t child, pid;
472 int status, file, type;
473
474 errno = 0;
475 child = fork ();
476 switch (child)
477 {
478 case -1:
479 warning (_("test pt: cannot fork: %s."), safe_strerror (errno));
480 return 0;
481
482 case 0:
483 status = ptrace (PTRACE_TRACEME, 0, NULL, NULL);
484 if (status != 0)
485 {
486 warning (_("test pt: cannot PTRACE_TRACEME: %s."),
487 safe_strerror (errno));
488 _exit (1);
489 }
490
491 status = raise (SIGTRAP);
492 if (status != 0)
493 {
494 warning (_("test pt: cannot raise SIGTRAP: %s."),
495 safe_strerror (errno));
496 _exit (1);
497 }
498
499 _exit (1);
500
501 default:
502 pid = waitpid (child, &status, 0);
503 if (pid != child)
504 {
505 warning (_("test pt: bad pid %ld, error: %s."),
506 (long) pid, safe_strerror (errno));
507 return 0;
508 }
509
510 if (!WIFSTOPPED (status))
511 {
512 warning (_("test pt: expected stop. status: %d."),
513 status);
514 return 0;
515 }
516
517 status = perf_event_pt_event_type (&type);
518 if (status != 0)
519 file = -1;
520 else
521 {
522 memset (&attr, 0, sizeof (attr));
523
524 attr.size = sizeof (attr);
525 attr.type = type;
526 attr.exclude_kernel = 1;
527 attr.exclude_hv = 1;
528 attr.exclude_idle = 1;
529
530 file = syscall (SYS_perf_event_open, &attr, child, -1, -1, 0);
531 if (file >= 0)
532 close (file);
533 }
534
535 kill (child, SIGKILL);
536 ptrace (PTRACE_KILL, child, NULL, NULL);
537
538 pid = waitpid (child, &status, 0);
539 if (pid != child)
540 {
541 warning (_("test pt: bad pid %ld, error: %s."),
542 (long) pid, safe_strerror (errno));
543 if (!WIFSIGNALED (status))
544 warning (_("test pt: expected killed. status: %d."),
545 status);
546 }
547
548 return (file >= 0);
549 }
550 }
551
552 /* Check whether an Intel cpu supports BTS. */
553
554 static int
555 intel_supports_bts (const struct btrace_cpu *cpu)
556 {
557 switch (cpu->family)
558 {
559 case 0x6:
560 switch (cpu->model)
561 {
562 case 0x1a: /* Nehalem */
563 case 0x1f:
564 case 0x1e:
565 case 0x2e:
566 case 0x25: /* Westmere */
567 case 0x2c:
568 case 0x2f:
569 case 0x2a: /* Sandy Bridge */
570 case 0x2d:
571 case 0x3a: /* Ivy Bridge */
572
573 /* AAJ122: LBR, BTM, or BTS records may have incorrect branch
574 "from" information afer an EIST transition, T-states, C1E, or
575 Adaptive Thermal Throttling. */
576 return 0;
577 }
578 }
579
580 return 1;
581 }
582
583 /* Check whether the cpu supports BTS. */
584
585 static int
586 cpu_supports_bts (void)
587 {
588 struct btrace_cpu cpu;
589
590 cpu = btrace_this_cpu ();
591 switch (cpu.vendor)
592 {
593 default:
594 /* Don't know about others. Let's assume they do. */
595 return 1;
596
597 case CV_INTEL:
598 return intel_supports_bts (&cpu);
599 }
600 }
601
602 /* Check whether the linux target supports BTS. */
603
604 static int
605 linux_supports_bts (void)
606 {
607 static int cached;
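/* CACHED is a tri-state: 0 means the check has not run yet, -1 means BTS
   is not supported, and 1 means it is supported.  */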
608
609 if (cached == 0)
610 {
611 if (!kernel_supports_bts ())
612 cached = -1;
613 else if (!cpu_supports_bts ())
614 cached = -1;
615 else
616 cached = 1;
617 }
618
619 return cached > 0;
620 }
621
622 /* Check whether the linux target supports Intel Processor Trace. */
623
624 static int
625 linux_supports_pt (void)
626 {
627 static int cached;
628
629 if (cached == 0)
630 {
631 if (!kernel_supports_pt ())
632 cached = -1;
633 else
634 cached = 1;
635 }
636
637 return cached > 0;
638 }
639
640 /* See linux-btrace.h. */
641
642 int
643 linux_supports_btrace (struct target_ops *ops, enum btrace_format format)
644 {
645 switch (format)
646 {
647 case BTRACE_FORMAT_NONE:
648 return 0;
649
650 case BTRACE_FORMAT_BTS:
651 return linux_supports_bts ();
652
653 case BTRACE_FORMAT_PT:
654 return linux_supports_pt ();
655 }
656
657 internal_error (__FILE__, __LINE__, _("Unknown branch trace format"));
658 }
659
660 /* Enable branch tracing in BTS format. */
661
662 static struct btrace_target_info *
663 linux_enable_bts (ptid_t ptid, const struct btrace_config_bts *conf)
664 {
665 struct perf_event_mmap_page *header;
666 struct btrace_target_info *tinfo;
667 struct btrace_tinfo_bts *bts;
668 size_t size, pages;
669 __u64 data_offset;
670 int pid, pg;
671
672 tinfo = XCNEW (struct btrace_target_info);
673 tinfo->ptid = ptid;
674
675 tinfo->conf.format = BTRACE_FORMAT_BTS;
676 bts = &tinfo->variant.bts;
677
678 bts->attr.size = sizeof (bts->attr);
679 bts->attr.type = PERF_TYPE_HARDWARE;
680 bts->attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
681 bts->attr.sample_period = 1;
682
683 /* We sample from and to addresses. */
684 bts->attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR;
685
686 bts->attr.exclude_kernel = 1;
687 bts->attr.exclude_hv = 1;
688 bts->attr.exclude_idle = 1;
689
690 pid = ptid_get_lwp (ptid);
691 if (pid == 0)
692 pid = ptid_get_pid (ptid);
693
694 errno = 0;
695 bts->file = syscall (SYS_perf_event_open, &bts->attr, pid, -1, -1, 0);
696 if (bts->file < 0)
697 goto err_out;
698
699 /* Convert the requested size in bytes to pages (rounding up). */
700 pages = ((size_t) conf->size / PAGE_SIZE
701 + ((conf->size % PAGE_SIZE) == 0 ? 0 : 1));
702 /* We need at least one page. */
703 if (pages == 0)
704 pages = 1;
705
706 /* The buffer size can be requested in powers of two pages. Adjust PAGES
707 to the next power of two. */
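/* For example (assumed request size): a request that rounds up to 5 pages
   (binary 101) is bumped to 6 and then to 8, the next power of two.  */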
708 for (pg = 0; pages != ((size_t) 1 << pg); ++pg)
709 if ((pages & ((size_t) 1 << pg)) != 0)
710 pages += ((size_t) 1 << pg);
711
712 /* We try to allocate the requested size.
713 If that fails, try to get as much as we can. */
714 for (; pages > 0; pages >>= 1)
715 {
716 size_t length;
717 __u64 data_size;
718
719 data_size = (__u64) pages * PAGE_SIZE;
720
721 /* Don't ask for more than we can represent in the configuration. */
722 if ((__u64) UINT_MAX < data_size)
723 continue;
724
725 size = (size_t) data_size;
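/* The mapping consists of the perf_event_mmap_page metadata page followed
   by the data pages, hence the extra PAGE_SIZE below.  */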
726 length = size + PAGE_SIZE;
727
728 /* Check for overflows. */
729 if ((__u64) length != data_size + PAGE_SIZE)
730 continue;
731
732 /* The number of pages we request needs to be a power of two. */
733 header = ((struct perf_event_mmap_page *)
734 mmap (NULL, length, PROT_READ, MAP_SHARED, bts->file, 0));
735 if (header != MAP_FAILED)
736 break;
737 }
738
739 if (pages == 0)
740 goto err_file;
741
742 data_offset = PAGE_SIZE;
743
744 #if defined (PERF_ATTR_SIZE_VER5)
745 if (offsetof (struct perf_event_mmap_page, data_size) <= header->size)
746 {
747 __u64 data_size;
748
749 data_offset = header->data_offset;
750 data_size = header->data_size;
751
752 size = (unsigned int) data_size;
753
754 /* Check for overflows. */
755 if ((__u64) size != data_size)
756 {
757 munmap ((void *) header, size + PAGE_SIZE);
758 goto err_file;
759 }
760 }
761 #endif /* defined (PERF_ATTR_SIZE_VER5) */
762
763 bts->header = header;
764 bts->bts.mem = ((const uint8_t *) header) + data_offset;
765 bts->bts.size = size;
766 bts->bts.data_head = &header->data_head;
767 bts->bts.last_head = 0ull;
768
769 tinfo->conf.bts.size = (unsigned int) size;
770 return tinfo;
771
772 err_file:
773 /* We were not able to allocate any buffer. */
774 close (bts->file);
775
776 err_out:
777 xfree (tinfo);
778 return NULL;
779 }
780
781 #if defined (PERF_ATTR_SIZE_VER5)
782
783 /* Enable branch tracing in Intel Processor Trace format. */
784
785 static struct btrace_target_info *
786 linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
787 {
788 struct perf_event_mmap_page *header;
789 struct btrace_target_info *tinfo;
790 struct btrace_tinfo_pt *pt;
791 size_t pages, size;
792 int pid, pg, errcode, type;
793
794 if (conf->size == 0)
795 return NULL;
796
797 errcode = perf_event_pt_event_type (&type);
798 if (errcode != 0)
799 return NULL;
800
801 pid = ptid_get_lwp (ptid);
802 if (pid == 0)
803 pid = ptid_get_pid (ptid);
804
805 tinfo = XCNEW (struct btrace_target_info);
806 tinfo->ptid = ptid;
807
808 tinfo->conf.format = BTRACE_FORMAT_PT;
809 pt = &tinfo->variant.pt;
810
811 pt->attr.size = sizeof (pt->attr);
812 pt->attr.type = type;
813
814 pt->attr.exclude_kernel = 1;
815 pt->attr.exclude_hv = 1;
816 pt->attr.exclude_idle = 1;
817
818 errno = 0;
819 pt->file = syscall (SYS_perf_event_open, &pt->attr, pid, -1, -1, 0);
820 if (pt->file < 0)
821 goto err;
822
823 /* Allocate the configuration page. */
824 header = ((struct perf_event_mmap_page *)
825 mmap (NULL, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
826 pt->file, 0));
827 if (header == MAP_FAILED)
828 goto err_file;
829
830 header->aux_offset = header->data_offset + header->data_size;
831
832 /* Convert the requested size in bytes to pages (rounding up). */
833 pages = ((size_t) conf->size / PAGE_SIZE
834 + ((conf->size % PAGE_SIZE) == 0 ? 0 : 1));
835 /* We need at least one page. */
836 if (pages == 0)
837 pages = 1;
838
839 /* The buffer size can be requested in powers of two pages. Adjust PAGES
840 to the next power of two. */
841 for (pg = 0; pages != ((size_t) 1 << pg); ++pg)
842 if ((pages & ((size_t) 1 << pg)) != 0)
843 pages += ((size_t) 1 << pg);
844
845 /* We try to allocate the requested size.
846 If that fails, try to get as much as we can. */
847 for (; pages > 0; pages >>= 1)
848 {
849 size_t length;
850 __u64 data_size;
851
852 data_size = (__u64) pages * PAGE_SIZE;
853
854 /* Don't ask for more than we can represent in the configuration. */
855 if ((__u64) UINT_MAX < data_size)
856 continue;
857
858 size = (size_t) data_size;
859
860 /* Check for overflows. */
861 if ((__u64) size != data_size)
862 continue;
863
864 header->aux_size = data_size;
865 length = size;
866
867 pt->pt.mem = ((const uint8_t *)
868 mmap (NULL, length, PROT_READ, MAP_SHARED, pt->file,
869 header->aux_offset));
870 if (pt->pt.mem != MAP_FAILED)
871 break;
872 }
873
874 if (pages == 0)
875 goto err_conf;
876
877 pt->header = header;
878 pt->pt.size = size;
879 pt->pt.data_head = &header->aux_head;
880
881 tinfo->conf.pt.size = (unsigned int) size;
882 return tinfo;
883
884 err_conf:
885 munmap ((void *) header, PAGE_SIZE);
886
887 err_file:
888 close (pt->file);
889
890 err:
891 xfree (tinfo);
892 return NULL;
893 }
894
895 #else /* !defined (PERF_ATTR_SIZE_VER5) */
896
897 static struct btrace_target_info *
898 linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
899 {
900 errno = EOPNOTSUPP;
901 return NULL;
902 }
903
904 #endif /* !defined (PERF_ATTR_SIZE_VER5) */
905
906 /* See linux-btrace.h. */
907
908 struct btrace_target_info *
909 linux_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
910 {
911 struct btrace_target_info *tinfo;
912
913 tinfo = NULL;
914 switch (conf->format)
915 {
916 case BTRACE_FORMAT_NONE:
917 break;
918
919 case BTRACE_FORMAT_BTS:
920 tinfo = linux_enable_bts (ptid, &conf->bts);
921 break;
922
923 case BTRACE_FORMAT_PT:
924 tinfo = linux_enable_pt (ptid, &conf->pt);
925 break;
926 }
927
928 return tinfo;
929 }
930
931 /* Disable BTS tracing. */
932
933 static enum btrace_error
934 linux_disable_bts (struct btrace_tinfo_bts *tinfo)
935 {
936 munmap ((void *) tinfo->header, tinfo->bts.size + PAGE_SIZE);
937 close (tinfo->file);
938
939 return BTRACE_ERR_NONE;
940 }
941
942 /* Disable Intel Processor Trace tracing. */
943
944 static enum btrace_error
945 linux_disable_pt (struct btrace_tinfo_pt *tinfo)
946 {
947 munmap ((void *) tinfo->pt.mem, tinfo->pt.size);
948 munmap ((void *) tinfo->header, PAGE_SIZE);
949 close (tinfo->file);
950
951 return BTRACE_ERR_NONE;
952 }
953
954 /* See linux-btrace.h. */
955
956 enum btrace_error
957 linux_disable_btrace (struct btrace_target_info *tinfo)
958 {
959 enum btrace_error errcode;
960
961 errcode = BTRACE_ERR_NOT_SUPPORTED;
962 switch (tinfo->conf.format)
963 {
964 case BTRACE_FORMAT_NONE:
965 break;
966
967 case BTRACE_FORMAT_BTS:
968 errcode = linux_disable_bts (&tinfo->variant.bts);
969 break;
970
971 case BTRACE_FORMAT_PT:
972 errcode = linux_disable_pt (&tinfo->variant.pt);
973 break;
974 }
975
976 if (errcode == BTRACE_ERR_NONE)
977 xfree (tinfo);
978
979 return errcode;
980 }
981
982 /* Read branch trace data in BTS format for the thread given by TINFO into
983 BTRACE using the TYPE reading method. */
984
985 static enum btrace_error
986 linux_read_bts (struct btrace_data_bts *btrace,
987 struct btrace_target_info *tinfo,
988 enum btrace_read_type type)
989 {
990 struct perf_event_buffer *pevent;
991 const uint8_t *begin, *end, *start;
992 size_t buffer_size, size;
993 __u64 data_head, data_tail;
994 unsigned int retries = 5;
995
996 pevent = &tinfo->variant.bts.bts;
997
998 /* For delta reads, we return at least the partial last block containing
999 the current PC. */
1000 if (type == BTRACE_READ_NEW && !perf_event_new_data (pevent))
1001 return BTRACE_ERR_NONE;
1002
1003 buffer_size = pevent->size;
1004 data_tail = pevent->last_head;
1005
1006 /* We may need to retry reading the trace. See below. */
1007 while (retries--)
1008 {
1009 data_head = *pevent->data_head;
1010
1011 /* Delete any leftover trace from the previous iteration. */
1012 VEC_free (btrace_block_s, btrace->blocks);
1013
1014 if (type == BTRACE_READ_DELTA)
1015 {
1016 __u64 data_size;
1017
1018 /* Determine the number of bytes to read and check for buffer
1019 overflows. */
1020
1021 /* Check for data head overflows. We might be able to recover from
1022 those but they are very unlikely and it's not really worth the
1023 effort, I think. */
1024 if (data_head < data_tail)
1025 return BTRACE_ERR_OVERFLOW;
1026
1027 /* If the buffer is smaller than the trace delta, we overflowed. */
1028 data_size = data_head - data_tail;
1029 if (buffer_size < data_size)
1030 return BTRACE_ERR_OVERFLOW;
1031
1032 /* DATA_SIZE <= BUFFER_SIZE and therefore fits into a size_t. */
1033 size = (size_t) data_size;
1034 }
1035 else
1036 {
1037 /* Read the entire buffer. */
1038 size = buffer_size;
1039
1040 /* Adjust the size if the buffer has not overflowed, yet. */
1041 if (data_head < size)
1042 size = (size_t) data_head;
1043 }
1044
1045 /* Data_head keeps growing; the buffer itself is circular. */
1046 begin = pevent->mem;
1047 start = begin + data_head % buffer_size;
1048
1049 if (data_head <= buffer_size)
1050 end = start;
1051 else
1052 end = begin + pevent->size;
1053
1054 btrace->blocks = perf_event_read_bts (tinfo, begin, end, start, size);
1055
1056 /* The stopping thread notifies its ptracer before it is scheduled out.
1057 On multi-core systems, the debugger might therefore run while the
1058 kernel is still writing the last branch trace records.
1059
1060 Let's check whether the data head moved while we read the trace. */
1061 if (data_head == *pevent->data_head)
1062 break;
1063 }
1064
1065 pevent->last_head = data_head;
1066
1067 /* Prune the incomplete last block (i.e. the first one of inferior execution)
1068 if we're not doing a delta read. There is no way of filling in its zeroed
1069 BEGIN element. */
1070 if (!VEC_empty (btrace_block_s, btrace->blocks)
1071 && type != BTRACE_READ_DELTA)
1072 VEC_pop (btrace_block_s, btrace->blocks);
1073
1074 return BTRACE_ERR_NONE;
1075 }
1076
1077 /* Fill in the Intel Processor Trace configuration information. */
1078
1079 static void
1080 linux_fill_btrace_pt_config (struct btrace_data_pt_config *conf)
1081 {
1082 conf->cpu = btrace_this_cpu ();
1083 }
1084
1085 /* Read branch trace data in Intel Processor Trace format for the thread
1086 given by TINFO into BTRACE using the TYPE reading method. */
1087
1088 static enum btrace_error
1089 linux_read_pt (struct btrace_data_pt *btrace,
1090 struct btrace_target_info *tinfo,
1091 enum btrace_read_type type)
1092 {
1093 struct perf_event_buffer *pt;
1094
1095 pt = &tinfo->variant.pt.pt;
1096
1097 linux_fill_btrace_pt_config (&btrace->config);
1098
1099 switch (type)
1100 {
1101 case BTRACE_READ_DELTA:
1102 /* We don't support delta reads. The data head (i.e. aux_head) wraps
1103 around to stay inside the aux buffer. */
1104 return BTRACE_ERR_NOT_SUPPORTED;
1105
1106 case BTRACE_READ_NEW:
1107 if (!perf_event_new_data (pt))
1108 return BTRACE_ERR_NONE;
1109
1110 /* Fall through. */
1111 case BTRACE_READ_ALL:
1112 perf_event_read_all (pt, &btrace->data, &btrace->size);
1113 return BTRACE_ERR_NONE;
1114 }
1115
1116 internal_error (__FILE__, __LINE__, _("Unknown btrace read type."));
1117 }
1118
1119 /* See linux-btrace.h. */
1120
1121 enum btrace_error
1122 linux_read_btrace (struct btrace_data *btrace,
1123 struct btrace_target_info *tinfo,
1124 enum btrace_read_type type)
1125 {
1126 switch (tinfo->conf.format)
1127 {
1128 case BTRACE_FORMAT_NONE:
1129 return BTRACE_ERR_NOT_SUPPORTED;
1130
1131 case BTRACE_FORMAT_BTS:
1132 /* We read btrace in BTS format. */
1133 btrace->format = BTRACE_FORMAT_BTS;
1134 btrace->variant.bts.blocks = NULL;
1135
1136 return linux_read_bts (&btrace->variant.bts, tinfo, type);
1137
1138 case BTRACE_FORMAT_PT:
1139 /* We read btrace in Intel Processor Trace format. */
1140 btrace->format = BTRACE_FORMAT_PT;
1141 btrace->variant.pt.data = NULL;
1142 btrace->variant.pt.size = 0;
1143
1144 return linux_read_pt (&btrace->variant.pt, tinfo, type);
1145 }
1146
1147 internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
1148 }
1149
1150 /* See linux-btrace.h. */
1151
1152 const struct btrace_config *
1153 linux_btrace_conf (const struct btrace_target_info *tinfo)
1154 {
1155 return &tinfo->conf;
1156 }
1157
1158 #else /* !HAVE_LINUX_PERF_EVENT_H */
1159
1160 /* See linux-btrace.h. */
1161
1162 int
1163 linux_supports_btrace (struct target_ops *ops, enum btrace_format format)
1164 {
1165 return 0;
1166 }
1167
1168 /* See linux-btrace.h. */
1169
1170 struct btrace_target_info *
1171 linux_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
1172 {
1173 return NULL;
1174 }
1175
1176 /* See linux-btrace.h. */
1177
1178 enum btrace_error
1179 linux_disable_btrace (struct btrace_target_info *tinfo)
1180 {
1181 return BTRACE_ERR_NOT_SUPPORTED;
1182 }
1183
1184 /* See linux-btrace.h. */
1185
1186 enum btrace_error
1187 linux_read_btrace (struct btrace_data *btrace,
1188 struct btrace_target_info *tinfo,
1189 enum btrace_read_type type)
1190 {
1191 return BTRACE_ERR_NOT_SUPPORTED;
1192 }
1193
1194 /* See linux-btrace.h. */
1195
1196 const struct btrace_config *
1197 linux_btrace_conf (const struct btrace_target_info *tinfo)
1198 {
1199 return NULL;
1200 }
1201
1202 #endif /* !HAVE_LINUX_PERF_EVENT_H */
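
/* Illustrative usage sketch (not part of the original file): how a caller
   might drive the entry points above for BTS tracing.  pid_to_ptid, the
   btrace_config/btrace_data layouts, and the 64 KiB buffer size are
   assumptions taken from the surrounding GDB sources of this era, not from
   this file.  */
#if 0
  pid_t pid = 12345;		/* The inferior's pid (placeholder).  */
  struct btrace_config conf;
  struct btrace_target_info *tinfo;
  struct btrace_data data;

  memset (&conf, 0, sizeof (conf));
  conf.format = BTRACE_FORMAT_BTS;
  conf.bts.size = 64 * 1024;	/* Requested ring buffer size in bytes.  */

  tinfo = linux_enable_btrace (pid_to_ptid (pid), &conf);
  if (tinfo != NULL)
    {
      if (linux_read_btrace (&data, tinfo, BTRACE_READ_ALL) == BTRACE_ERR_NONE)
	{
	  /* data.variant.bts.blocks now holds the decoded branch blocks.  */
	}

      linux_disable_btrace (tinfo);
    }
#endif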