/* Linux-dependent part of branch trace support for GDB, and GDBserver.

   Copyright (C) 2013-2015 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "common-defs.h"
#include "linux-btrace.h"
#include "common-regcache.h"
#include "gdb_wait.h"
#include "x86-cpuid.h"

#ifdef HAVE_SYS_SYSCALL_H
#include <sys/syscall.h>
#endif

#if HAVE_LINUX_PERF_EVENT_H && defined(SYS_perf_event_open)
#include <unistd.h>
#include <sys/mman.h>
#include <sys/user.h>
#include "nat/gdb_ptrace.h"
#include <sys/types.h>
#include <signal.h>
#include <sys/utsname.h>

/* A branch trace record in perf_event.  */
struct perf_event_bts
{
  /* The linear address of the branch source.  */
  uint64_t from;

  /* The linear address of the branch destination.  */
  uint64_t to;
};

/* A perf_event branch trace sample.  */
struct perf_event_sample
{
  /* The perf_event sample header.  */
  struct perf_event_header header;

  /* The perf_event branch tracing payload.  */
  struct perf_event_bts bts;
};

/* Identify the cpu we're running on.  */
static struct btrace_cpu
btrace_this_cpu (void)
{
  struct btrace_cpu cpu;
  unsigned int eax, ebx, ecx, edx;
  int ok;

  memset (&cpu, 0, sizeof (cpu));

  ok = x86_cpuid (0, &eax, &ebx, &ecx, &edx);
  if (ok != 0)
    {
      if (ebx == signature_INTEL_ebx && ecx == signature_INTEL_ecx
          && edx == signature_INTEL_edx)
        {
          unsigned int cpuid, ignore;

          ok = x86_cpuid (1, &cpuid, &ignore, &ignore, &ignore);
          if (ok != 0)
            {
              cpu.vendor = CV_INTEL;

              cpu.family = (cpuid >> 8) & 0xf;
              cpu.model = (cpuid >> 4) & 0xf;

              if (cpu.family == 0x6)
                cpu.model += (cpuid >> 12) & 0xf0;
            }
        }
    }

  return cpu;
}
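
/* A worked example of the decoding above, using an example CPUID leaf 1
   value (shown for illustration only): with EAX = 0x000306a9,

     family = (0x000306a9 >> 8) & 0xf = 0x6
     model  = (0x000306a9 >> 4) & 0xf = 0xa

   and, since family == 0x6, the extended model bits are folded in:

     model += (0x000306a9 >> 12) & 0xf0 = 0x30, giving model 0x3a

   which is an Ivy Bridge cpu (cf. intel_supports_bts below).  */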

/* Return non-zero if there is new data in PEV; zero otherwise.  */

static int
perf_event_new_data (const struct perf_event_buffer *pev)
{
  return *pev->data_head != pev->last_head;
}

/* Copy the last SIZE bytes from PEV ending at DATA_HEAD and return a pointer
   to the memory holding the copy.
   The caller is responsible for freeing the memory.  */

static gdb_byte *
perf_event_read (const struct perf_event_buffer *pev, __u64 data_head,
                 size_t size)
{
  const gdb_byte *begin, *end, *start, *stop;
  gdb_byte *buffer;
  size_t buffer_size;
  __u64 data_tail;

  if (size == 0)
    return NULL;

  gdb_assert (size <= data_head);
  data_tail = data_head - size;

  buffer_size = pev->size;
  begin = pev->mem;
  start = begin + data_tail % buffer_size;
  stop = begin + data_head % buffer_size;

  buffer = xmalloc (size);

  if (start < stop)
    memcpy (buffer, start, stop - start);
  else
    {
      end = begin + buffer_size;

      memcpy (buffer, start, end - start);
      memcpy (buffer + (end - start), begin, stop - begin);
    }

  return buffer;
}
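
/* For illustration (example values chosen for this comment only): with an
   8-byte ring buffer whose DATA_HEAD has grown to 10, reading the last
   SIZE = 4 bytes gives data_tail = 6, start = mem + 6 and stop = mem + 2.
   Since start >= stop, the copy above wraps: the 2 bytes at mem[6..8) are
   copied first, followed by the 2 bytes at mem[0..2).  */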

/* Copy the perf event buffer data from PEV.
   Store a pointer to the copy into DATA and its size in PSIZE.  */

static void
perf_event_read_all (struct perf_event_buffer *pev, gdb_byte **data,
                     size_t *psize)
{
  size_t size;
  __u64 data_head;

  data_head = *pev->data_head;

  size = pev->size;
  if (data_head < size)
    size = (size_t) data_head;

  *data = perf_event_read (pev, data_head, size);
  *psize = size;

  pev->last_head = data_head;
}

/* Determine the event type.
   Returns zero on success and fills in TYPE; returns -1 otherwise.  */

static int
perf_event_pt_event_type (int *type)
{
  FILE *file;
  int found;

  file = fopen ("/sys/bus/event_source/devices/intel_pt/type", "r");
  if (file == NULL)
    return -1;

  found = fscanf (file, "%d", type);

  fclose (file);

  if (found == 1)
    return 0;
  return -1;
}
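
/* The sysfs file read above contains the dynamically assigned perf event
   type for the intel_pt PMU as a single decimal integer, e.g. "8\n" (the
   actual value varies from system to system); a successful fscanf leaves
   that number in *TYPE.  */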

/* Try to determine the size of a pointer in bits for the OS.

   This is the same as the size of a pointer for the inferior process
   except when a 32-bit inferior is running on a 64-bit OS.  */

static int
linux_determine_kernel_ptr_bits (void)
{
  struct utsname utsn;
  int errcode;

  memset (&utsn, 0, sizeof (utsn));

  errcode = uname (&utsn);
  if (errcode < 0)
    return 0;

  /* We only need to handle the 64-bit host case, here.  For 32-bit host,
     the pointer size can be filled in later based on the inferior.  */
  if (strcmp (utsn.machine, "x86_64") == 0)
    return 64;

  return 0;
}

/* Check whether an address is in the kernel.  */

static inline int
perf_event_is_kernel_addr (const struct btrace_target_info *tinfo,
                           uint64_t addr)
{
  uint64_t mask;

  /* If we don't know the size of a pointer, we can't check.  Let's assume it's
     not a kernel address in this case.  */
  if (tinfo->ptr_bits == 0)
    return 0;

  /* A bit mask for the most significant bit in an address.  */
  mask = (uint64_t) 1 << (tinfo->ptr_bits - 1);

  /* Check whether the most significant bit in the address is set.  */
  return (addr & mask) != 0;
}
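
/* To illustrate: with PTR_BITS == 64 the mask is 1ull << 63, so a
   kernel-half address such as 0xffff800000000000 tests non-zero while a
   user-space address like 0x00007f0000000000 tests zero.  (The addresses
   are merely examples of the canonical x86_64 split.)  */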

/* Check whether a perf event record should be skipped.  */

static inline int
perf_event_skip_bts_record (const struct btrace_target_info *tinfo,
                            const struct perf_event_bts *bts)
{
  /* The hardware may report branches from kernel into user space.  Branches
     from user into kernel space will be suppressed.  We filter the former to
     provide a consistent branch trace excluding kernel.  */
  return perf_event_is_kernel_addr (tinfo, bts->from);
}

/* Perform a few consistency checks on a perf event sample record.  This is
   meant to catch cases when we get out of sync with the perf event stream.  */

static inline int
perf_event_sample_ok (const struct perf_event_sample *sample)
{
  if (sample->header.type != PERF_RECORD_SAMPLE)
    return 0;

  if (sample->header.size != sizeof (*sample))
    return 0;

  return 1;
}

/* Branch trace is collected in a circular buffer [begin; end) as pairs of from
   and to addresses (plus a header).

   Start points into that buffer at the next sample position.
   We read the collected samples backwards from start.

   While reading the samples, we convert the information into a list of blocks.
   For two adjacent samples s1 and s2, we form a block b such that b.begin =
   s1.to and b.end = s2.from.

   In case the buffer overflows during sampling, one sample may have its lower
   part at the end and its upper part at the beginning of the buffer.  */
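
/* As a small example of the block formation described above, suppose the
   trace holds two samples, s1 = (from: A, to: B) followed in time by
   s2 = (from: C, to: D).  Reading backwards, the first block ends at the
   current pc and begins at D, the pair (s1, s2) yields the block [B; C],
   and a final block ending at A is pushed with a zeroed begin.  (A, B, C,
   D are placeholder addresses for this comment.)  */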

static VEC (btrace_block_s) *
perf_event_read_bts (struct btrace_target_info *tinfo, const uint8_t *begin,
                     const uint8_t *end, const uint8_t *start, size_t size)
{
  VEC (btrace_block_s) *btrace = NULL;
  struct perf_event_sample sample;
  size_t read = 0;
  struct btrace_block block = { 0, 0 };
  struct regcache *regcache;

  gdb_assert (begin <= start);
  gdb_assert (start <= end);

  /* The first block ends at the current pc.  */
  regcache = get_thread_regcache_for_ptid (tinfo->ptid);
  block.end = regcache_read_pc (regcache);

  /* The buffer may contain a partial record as its last entry (i.e. when the
     buffer size is not a multiple of the sample size).  */
  read = sizeof (sample) - 1;

  for (; read < size; read += sizeof (sample))
    {
      const struct perf_event_sample *psample;

      /* Find the next perf_event sample in a backwards traversal.  */
      start -= sizeof (sample);

      /* If we're still inside the buffer, we're done.  */
      if (begin <= start)
        psample = (const struct perf_event_sample *) start;
      else
        {
          int missing;

          /* We're to the left of the ring buffer, we will wrap around and
             reappear at the very right of the ring buffer.  */

          missing = (begin - start);
          start = (end - missing);

          /* If the entire sample is missing, we're done.  */
          if (missing == sizeof (sample))
            psample = (const struct perf_event_sample *) start;
          else
            {
              uint8_t *stack;

              /* The sample wrapped around.  The lower part is at the end and
                 the upper part is at the beginning of the buffer.  */
              stack = (uint8_t *) &sample;

              /* Copy the two parts so we have a contiguous sample.  */
              memcpy (stack, start, missing);
              memcpy (stack + missing, begin, sizeof (sample) - missing);

              psample = &sample;
            }
        }

      if (!perf_event_sample_ok (psample))
        {
          warning (_("Branch trace may be incomplete."));
          break;
        }

      if (perf_event_skip_bts_record (tinfo, &psample->bts))
        continue;

      /* We found a valid sample, so we can complete the current block.  */
      block.begin = psample->bts.to;

      VEC_safe_push (btrace_block_s, btrace, &block);

      /* Start the next block.  */
      block.end = psample->bts.from;
    }

  /* Push the last block (i.e. the first one of inferior execution), as well.
     We don't know where it ends, but we know where it starts.  If we're
     reading delta trace, we can fill in the start address later on.
     Otherwise we will prune it.  */
  block.begin = 0;
  VEC_safe_push (btrace_block_s, btrace, &block);

  return btrace;
}

/* Check whether the kernel supports BTS.  */

static int
kernel_supports_bts (void)
{
  struct perf_event_attr attr;
  pid_t child, pid;
  int status, file;

  errno = 0;
  child = fork ();
  switch (child)
    {
    case -1:
      warning (_("test bts: cannot fork: %s."), safe_strerror (errno));
      return 0;

    case 0:
      status = ptrace (PTRACE_TRACEME, 0, NULL, NULL);
      if (status != 0)
        {
          warning (_("test bts: cannot PTRACE_TRACEME: %s."),
                   safe_strerror (errno));
          _exit (1);
        }

      status = raise (SIGTRAP);
      if (status != 0)
        {
          warning (_("test bts: cannot raise SIGTRAP: %s."),
                   safe_strerror (errno));
          _exit (1);
        }

      _exit (1);

    default:
      pid = waitpid (child, &status, 0);
      if (pid != child)
        {
          warning (_("test bts: bad pid %ld, error: %s."),
                   (long) pid, safe_strerror (errno));
          return 0;
        }

      if (!WIFSTOPPED (status))
        {
          warning (_("test bts: expected stop. status: %d."),
                   status);
          return 0;
        }

      memset (&attr, 0, sizeof (attr));

      attr.type = PERF_TYPE_HARDWARE;
      attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
      attr.sample_period = 1;
      attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR;
      attr.exclude_kernel = 1;
      attr.exclude_hv = 1;
      attr.exclude_idle = 1;

      file = syscall (SYS_perf_event_open, &attr, child, -1, -1, 0);
      if (file >= 0)
        close (file);

      kill (child, SIGKILL);
      ptrace (PTRACE_KILL, child, NULL, NULL);

      pid = waitpid (child, &status, 0);
      if (pid != child)
        {
          warning (_("test bts: bad pid %ld, error: %s."),
                   (long) pid, safe_strerror (errno));
          if (!WIFSIGNALED (status))
            warning (_("test bts: expected killed. status: %d."),
                     status);
        }

      return (file >= 0);
    }
}

/* Check whether the kernel supports Intel(R) Processor Trace.  */

static int
kernel_supports_pt (void)
{
  struct perf_event_attr attr;
  pid_t child, pid;
  int status, file, type;

  errno = 0;
  child = fork ();
  switch (child)
    {
    case -1:
      warning (_("test pt: cannot fork: %s."), safe_strerror (errno));
      return 0;

    case 0:
      status = ptrace (PTRACE_TRACEME, 0, NULL, NULL);
      if (status != 0)
        {
          warning (_("test pt: cannot PTRACE_TRACEME: %s."),
                   safe_strerror (errno));
          _exit (1);
        }

      status = raise (SIGTRAP);
      if (status != 0)
        {
          warning (_("test pt: cannot raise SIGTRAP: %s."),
                   safe_strerror (errno));
          _exit (1);
        }

      _exit (1);

    default:
      pid = waitpid (child, &status, 0);
      if (pid != child)
        {
          warning (_("test pt: bad pid %ld, error: %s."),
                   (long) pid, safe_strerror (errno));
          return 0;
        }

      if (!WIFSTOPPED (status))
        {
          warning (_("test pt: expected stop. status: %d."),
                   status);
          return 0;
        }

      status = perf_event_pt_event_type (&type);
      if (status != 0)
        file = -1;
      else
        {
          memset (&attr, 0, sizeof (attr));

          attr.size = sizeof (attr);
          attr.type = type;
          attr.exclude_kernel = 1;
          attr.exclude_hv = 1;
          attr.exclude_idle = 1;

          file = syscall (SYS_perf_event_open, &attr, child, -1, -1, 0);
          if (file >= 0)
            close (file);
        }

      kill (child, SIGKILL);
      ptrace (PTRACE_KILL, child, NULL, NULL);

      pid = waitpid (child, &status, 0);
      if (pid != child)
        {
          warning (_("test pt: bad pid %ld, error: %s."),
                   (long) pid, safe_strerror (errno));
          if (!WIFSIGNALED (status))
            warning (_("test pt: expected killed. status: %d."),
                     status);
        }

      return (file >= 0);
    }
}

/* Check whether an Intel cpu supports BTS.  */

static int
intel_supports_bts (const struct btrace_cpu *cpu)
{
  switch (cpu->family)
    {
    case 0x6:
      switch (cpu->model)
        {
        case 0x1a: /* Nehalem */
        case 0x1f:
        case 0x1e:
        case 0x2e:
        case 0x25: /* Westmere */
        case 0x2c:
        case 0x2f:
        case 0x2a: /* Sandy Bridge */
        case 0x2d:
        case 0x3a: /* Ivy Bridge */

          /* AAJ122: LBR, BTM, or BTS records may have incorrect branch
             "from" information after an EIST transition, T-states, C1E, or
             Adaptive Thermal Throttling.  */
          return 0;
        }
    }

  return 1;
}

/* Check whether the cpu supports BTS.  */

static int
cpu_supports_bts (void)
{
  struct btrace_cpu cpu;

  cpu = btrace_this_cpu ();
  switch (cpu.vendor)
    {
    default:
      /* Don't know about others.  Let's assume they do.  */
      return 1;

    case CV_INTEL:
      return intel_supports_bts (&cpu);
    }
}

/* Check whether the linux target supports BTS.  */

static int
linux_supports_bts (void)
{
  static int cached;

  if (cached == 0)
    {
      if (!kernel_supports_bts ())
        cached = -1;
      else if (!cpu_supports_bts ())
        cached = -1;
      else
        cached = 1;
    }

  return cached > 0;
}

/* Check whether the linux target supports Intel(R) Processor Trace.  */

static int
linux_supports_pt (void)
{
  static int cached;

  if (cached == 0)
    {
      if (!kernel_supports_pt ())
        cached = -1;
      else
        cached = 1;
    }

  return cached > 0;
}

/* See linux-btrace.h.  */

int
linux_supports_btrace (struct target_ops *ops, enum btrace_format format)
{
  switch (format)
    {
    case BTRACE_FORMAT_NONE:
      return 0;

    case BTRACE_FORMAT_BTS:
      return linux_supports_bts ();

    case BTRACE_FORMAT_PT:
      return linux_supports_pt ();
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format"));
}

/* Enable branch tracing in BTS format.  */

static struct btrace_target_info *
linux_enable_bts (ptid_t ptid, const struct btrace_config_bts *conf)
{
  struct perf_event_mmap_page *header;
  struct btrace_target_info *tinfo;
  struct btrace_tinfo_bts *bts;
  size_t size, pages;
  __u64 data_offset;
  int pid, pg;

  tinfo = xzalloc (sizeof (*tinfo));
  tinfo->ptid = ptid;
  tinfo->ptr_bits = linux_determine_kernel_ptr_bits ();

  tinfo->conf.format = BTRACE_FORMAT_BTS;
  bts = &tinfo->variant.bts;

  bts->attr.size = sizeof (bts->attr);
  bts->attr.type = PERF_TYPE_HARDWARE;
  bts->attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
  bts->attr.sample_period = 1;

  /* We sample from and to address.  */
  bts->attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR;

  bts->attr.exclude_kernel = 1;
  bts->attr.exclude_hv = 1;
  bts->attr.exclude_idle = 1;

  pid = ptid_get_lwp (ptid);
  if (pid == 0)
    pid = ptid_get_pid (ptid);

  errno = 0;
  bts->file = syscall (SYS_perf_event_open, &bts->attr, pid, -1, -1, 0);
  if (bts->file < 0)
    goto err_out;

  /* Convert the requested size in bytes to pages (rounding up).  */
  pages = ((size_t) conf->size / PAGE_SIZE
           + ((conf->size % PAGE_SIZE) == 0 ? 0 : 1));
  /* We need at least one page.  */
  if (pages == 0)
    pages = 1;

  /* The buffer size can be requested in powers of two pages.  Adjust PAGES
     to the next power of two.  */
  for (pg = 0; pages != ((size_t) 1 << pg); ++pg)
    if ((pages & ((size_t) 1 << pg)) != 0)
      pages += ((size_t) 1 << pg);
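
  /* To see the rounding at work: a request for 3 pages visits pg = 0
     (bit set, PAGES becomes 4), then pg = 1 (bit clear), and stops at
     pg = 2 where PAGES == 4 == 1 << 2.  A request that is already a
     power of two is left unchanged.  */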

  /* We try to allocate the requested size.
     If that fails, try to get as much as we can.  */
  for (; pages > 0; pages >>= 1)
    {
      size_t length;
      __u64 data_size;

      data_size = (__u64) pages * PAGE_SIZE;

      /* Don't ask for more than we can represent in the configuration.  */
      if ((__u64) UINT_MAX < data_size)
        continue;

      size = (size_t) data_size;

      /* The mapping starts with one metadata page followed by the ring
         buffer proper, hence the extra page.  */
      length = size + PAGE_SIZE;

      /* Check for overflows.  */
      if ((__u64) length != data_size + PAGE_SIZE)
        continue;

      /* The number of pages we request needs to be a power of two.  */
      header = mmap (NULL, length, PROT_READ, MAP_SHARED, bts->file, 0);
      if (header != MAP_FAILED)
        break;
    }

  if (pages == 0)
    goto err_file;

  data_offset = PAGE_SIZE;

#if defined (PERF_ATTR_SIZE_VER5)
  if (offsetof (struct perf_event_mmap_page, data_size) <= header->size)
    {
      __u64 data_size;

      data_offset = header->data_offset;
      data_size = header->data_size;

      size = (unsigned int) data_size;

      /* Check for overflows.  */
      if ((__u64) size != data_size)
        {
          munmap ((void *) header, size + PAGE_SIZE);
          goto err_file;
        }
    }
#endif /* defined (PERF_ATTR_SIZE_VER5) */

  bts->header = header;
  bts->bts.mem = ((const uint8_t *) header) + data_offset;
  bts->bts.size = size;
  bts->bts.data_head = &header->data_head;
  bts->bts.last_head = 0ull;

  tinfo->conf.bts.size = (unsigned int) size;
  return tinfo;

 err_file:
  /* We were not able to allocate any buffer.  */
  close (bts->file);

 err_out:
  xfree (tinfo);
  return NULL;
}

#if defined (PERF_ATTR_SIZE_VER5)

/* Enable branch tracing in Intel(R) Processor Trace format.  */

static struct btrace_target_info *
linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
{
  struct perf_event_mmap_page *header;
  struct btrace_target_info *tinfo;
  struct btrace_tinfo_pt *pt;
  size_t pages, size;
  int pid, pg, errcode, type;

  if (conf->size == 0)
    return NULL;

  errcode = perf_event_pt_event_type (&type);
  if (errcode != 0)
    return NULL;

  pid = ptid_get_lwp (ptid);
  if (pid == 0)
    pid = ptid_get_pid (ptid);

  tinfo = xzalloc (sizeof (*tinfo));
  tinfo->ptid = ptid;
  tinfo->ptr_bits = 0;

  tinfo->conf.format = BTRACE_FORMAT_PT;
  pt = &tinfo->variant.pt;

  pt->attr.size = sizeof (pt->attr);
  pt->attr.type = type;

  pt->attr.exclude_kernel = 1;
  pt->attr.exclude_hv = 1;
  pt->attr.exclude_idle = 1;

  errno = 0;
  pt->file = syscall (SYS_perf_event_open, &pt->attr, pid, -1, -1, 0);
  if (pt->file < 0)
    goto err;

  /* Allocate the configuration page.  */
  header = mmap (NULL, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
                 pt->file, 0);
  if (header == MAP_FAILED)
    goto err_file;

  header->aux_offset = header->data_offset + header->data_size;

  /* Convert the requested size in bytes to pages (rounding up).  */
  pages = ((size_t) conf->size / PAGE_SIZE
           + ((conf->size % PAGE_SIZE) == 0 ? 0 : 1));
  /* We need at least one page.  */
  if (pages == 0)
    pages = 1;

  /* The buffer size can be requested in powers of two pages.  Adjust PAGES
     to the next power of two.  */
  for (pg = 0; pages != ((size_t) 1 << pg); ++pg)
    if ((pages & ((size_t) 1 << pg)) != 0)
      pages += ((size_t) 1 << pg);

  /* We try to allocate the requested size.
     If that fails, try to get as much as we can.  */
  for (; pages > 0; pages >>= 1)
    {
      size_t length;
      __u64 data_size;

      data_size = (__u64) pages * PAGE_SIZE;

      /* Don't ask for more than we can represent in the configuration.  */
      if ((__u64) UINT_MAX < data_size)
        continue;

      size = (size_t) data_size;

      /* Check for overflows.  */
      if ((__u64) size != data_size)
        continue;

      header->aux_size = data_size;
      length = size;

      pt->pt.mem = mmap (NULL, length, PROT_READ, MAP_SHARED, pt->file,
                         header->aux_offset);
      if (pt->pt.mem != MAP_FAILED)
        break;
    }

  if (pages == 0)
    goto err_conf;

  pt->header = header;
  pt->pt.size = size;
  pt->pt.data_head = &header->aux_head;

  tinfo->conf.pt.size = (unsigned int) size;
  return tinfo;

 err_conf:
  munmap ((void *) header, PAGE_SIZE);

 err_file:
  close (pt->file);

 err:
  xfree (tinfo);
  return NULL;
}

#else /* !defined (PERF_ATTR_SIZE_VER5) */

static struct btrace_target_info *
linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
{
  errno = EOPNOTSUPP;
  return NULL;
}

#endif /* !defined (PERF_ATTR_SIZE_VER5) */

/* See linux-btrace.h.  */

struct btrace_target_info *
linux_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
{
  struct btrace_target_info *tinfo;

  tinfo = NULL;
  switch (conf->format)
    {
    case BTRACE_FORMAT_NONE:
      break;

    case BTRACE_FORMAT_BTS:
      tinfo = linux_enable_bts (ptid, &conf->bts);
      break;

    case BTRACE_FORMAT_PT:
      tinfo = linux_enable_pt (ptid, &conf->pt);
      break;
    }

  return tinfo;
}
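
/* An illustrative usage sketch of the interface above (configuration
   values made up for this comment; not part of this file's code):

     struct btrace_config conf;
     struct btrace_target_info *tinfo;

     memset (&conf, 0, sizeof (conf));
     conf.format = BTRACE_FORMAT_BTS;
     conf.bts.size = 64 * 1024;

     tinfo = linux_enable_btrace (ptid, &conf);
     if (tinfo != NULL)
       {
         ... read trace via linux_read_btrace ...
         linux_disable_btrace (tinfo);
       }
*/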

/* Disable BTS tracing.  */

static enum btrace_error
linux_disable_bts (struct btrace_tinfo_bts *tinfo)
{
  munmap ((void *) tinfo->header, tinfo->bts.size + PAGE_SIZE);
  close (tinfo->file);

  return BTRACE_ERR_NONE;
}

/* Disable Intel(R) Processor Trace tracing.  */

static enum btrace_error
linux_disable_pt (struct btrace_tinfo_pt *tinfo)
{
  munmap ((void *) tinfo->pt.mem, tinfo->pt.size);
  munmap ((void *) tinfo->header, PAGE_SIZE);
  close (tinfo->file);

  return BTRACE_ERR_NONE;
}

/* See linux-btrace.h.  */

enum btrace_error
linux_disable_btrace (struct btrace_target_info *tinfo)
{
  enum btrace_error errcode;

  errcode = BTRACE_ERR_NOT_SUPPORTED;
  switch (tinfo->conf.format)
    {
    case BTRACE_FORMAT_NONE:
      break;

    case BTRACE_FORMAT_BTS:
      errcode = linux_disable_bts (&tinfo->variant.bts);
      break;

    case BTRACE_FORMAT_PT:
      errcode = linux_disable_pt (&tinfo->variant.pt);
      break;
    }

  if (errcode == BTRACE_ERR_NONE)
    xfree (tinfo);

  return errcode;
}

/* Read branch trace data in BTS format for the thread given by TINFO into
   BTRACE using the TYPE reading method.  */

static enum btrace_error
linux_read_bts (struct btrace_data_bts *btrace,
                struct btrace_target_info *tinfo,
                enum btrace_read_type type)
{
  struct perf_event_buffer *pevent;
  const uint8_t *begin, *end, *start;
  size_t buffer_size, size;
  __u64 data_head, data_tail;
  unsigned int retries = 5;

  pevent = &tinfo->variant.bts.bts;

  /* For delta reads, we return at least the partial last block containing
     the current PC.  */
  if (type == BTRACE_READ_NEW && !perf_event_new_data (pevent))
    return BTRACE_ERR_NONE;

  buffer_size = pevent->size;
  data_tail = pevent->last_head;

  /* We may need to retry reading the trace.  See below.  */
  while (retries--)
    {
      data_head = *pevent->data_head;

      /* Delete any leftover trace from the previous iteration.  */
      VEC_free (btrace_block_s, btrace->blocks);

      if (type == BTRACE_READ_DELTA)
        {
          __u64 data_size;

          /* Determine the number of bytes to read and check for buffer
             overflows.  */

          /* Check for data head overflows.  We might be able to recover from
             those but they are very unlikely and it's not really worth the
             effort, I think.  */
          if (data_head < data_tail)
            return BTRACE_ERR_OVERFLOW;

          /* If the buffer is smaller than the trace delta, we overflowed.  */
          data_size = data_head - data_tail;
          if (buffer_size < data_size)
            return BTRACE_ERR_OVERFLOW;

          /* DATA_SIZE <= BUFFER_SIZE and therefore fits into a size_t.  */
          size = (size_t) data_size;
        }
      else
        {
          /* Read the entire buffer.  */
          size = buffer_size;

          /* Adjust the size if the buffer has not overflowed, yet.  */
          if (data_head < size)
            size = (size_t) data_head;
        }

      /* Data_head keeps growing; the buffer itself is circular.  */
      begin = pevent->mem;
      start = begin + data_head % buffer_size;

      if (data_head <= buffer_size)
        end = start;
      else
        end = begin + pevent->size;

      btrace->blocks = perf_event_read_bts (tinfo, begin, end, start, size);

      /* The stopping thread notifies its ptracer before it is scheduled out.
         On multi-core systems, the debugger might therefore run while the
         kernel might be writing the last branch trace records.

         Let's check whether the data head moved while we read the trace.  */
      if (data_head == *pevent->data_head)
        break;
    }

  pevent->last_head = data_head;

  /* Prune the incomplete last block (i.e. the first one of inferior execution)
     if we're not doing a delta read.  There is no way of filling in its zeroed
     BEGIN element.  */
  if (!VEC_empty (btrace_block_s, btrace->blocks)
      && type != BTRACE_READ_DELTA)
    VEC_pop (btrace_block_s, btrace->blocks);

  return BTRACE_ERR_NONE;
}

/* Fill in the Intel(R) Processor Trace configuration information.  */

static void
linux_fill_btrace_pt_config (struct btrace_data_pt_config *conf)
{
  conf->cpu = btrace_this_cpu ();
}

/* Read branch trace data in Intel(R) Processor Trace format for the thread
   given by TINFO into BTRACE using the TYPE reading method.  */

static enum btrace_error
linux_read_pt (struct btrace_data_pt *btrace,
               struct btrace_target_info *tinfo,
               enum btrace_read_type type)
{
  struct perf_event_buffer *pt;

  pt = &tinfo->variant.pt.pt;

  linux_fill_btrace_pt_config (&btrace->config);

  switch (type)
    {
    case BTRACE_READ_DELTA:
      /* We don't support delta reads.  The data head (i.e. aux_head) wraps
         around to stay inside the aux buffer.  */
      return BTRACE_ERR_NOT_SUPPORTED;

    case BTRACE_READ_NEW:
      if (!perf_event_new_data (pt))
        return BTRACE_ERR_NONE;

      /* Fall through.  */
    case BTRACE_READ_ALL:
      perf_event_read_all (pt, &btrace->data, &btrace->size);
      return BTRACE_ERR_NONE;
    }

  internal_error (__FILE__, __LINE__, _("Unknown btrace read type."));
}

/* See linux-btrace.h.  */

enum btrace_error
linux_read_btrace (struct btrace_data *btrace,
                   struct btrace_target_info *tinfo,
                   enum btrace_read_type type)
{
  switch (tinfo->conf.format)
    {
    case BTRACE_FORMAT_NONE:
      return BTRACE_ERR_NOT_SUPPORTED;

    case BTRACE_FORMAT_BTS:
      /* We read btrace in BTS format.  */
      btrace->format = BTRACE_FORMAT_BTS;
      btrace->variant.bts.blocks = NULL;

      return linux_read_bts (&btrace->variant.bts, tinfo, type);

    case BTRACE_FORMAT_PT:
      /* We read btrace in Intel(R) Processor Trace format.  */
      btrace->format = BTRACE_FORMAT_PT;
      btrace->variant.pt.data = NULL;
      btrace->variant.pt.size = 0;

      return linux_read_pt (&btrace->variant.pt, tinfo, type);
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}

/* See linux-btrace.h.  */

const struct btrace_config *
linux_btrace_conf (const struct btrace_target_info *tinfo)
{
  return &tinfo->conf;
}

#else /* !HAVE_LINUX_PERF_EVENT_H */

/* See linux-btrace.h.  */

int
linux_supports_btrace (struct target_ops *ops, enum btrace_format format)
{
  return 0;
}

/* See linux-btrace.h.  */

struct btrace_target_info *
linux_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
{
  return NULL;
}

/* See linux-btrace.h.  */

enum btrace_error
linux_disable_btrace (struct btrace_target_info *tinfo)
{
  return BTRACE_ERR_NOT_SUPPORTED;
}

/* See linux-btrace.h.  */

enum btrace_error
linux_read_btrace (struct btrace_data *btrace,
                   struct btrace_target_info *tinfo,
                   enum btrace_read_type type)
{
  return BTRACE_ERR_NOT_SUPPORTED;
}

/* See linux-btrace.h.  */

const struct btrace_config *
linux_btrace_conf (const struct btrace_target_info *tinfo)
{
  return NULL;
}

#endif /* !HAVE_LINUX_PERF_EVENT_H */