/* Linux-dependent part of branch trace support for GDB, and GDBserver.

   Copyright (C) 2013-2017 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "common-defs.h"
#include "linux-btrace.h"
#include "common-regcache.h"
#include "gdb_wait.h"
#include "x86-cpuid.h"
#include "filestuff.h"

#include <inttypes.h>

#ifdef HAVE_SYS_SYSCALL_H
#include <sys/syscall.h>
#endif

#if HAVE_LINUX_PERF_EVENT_H && defined(SYS_perf_event_open)
#include <unistd.h>
#include <sys/mman.h>
#include <sys/user.h>
#include "nat/gdb_ptrace.h"
#include <sys/types.h>
#include <signal.h>

/* A branch trace record in perf_event.  */
struct perf_event_bts
{
  /* The linear address of the branch source.  */
  uint64_t from;

  /* The linear address of the branch destination.  */
  uint64_t to;
};

/* A perf_event branch trace sample.  */
struct perf_event_sample
{
  /* The perf_event sample header.  */
  struct perf_event_header header;

  /* The perf_event branch tracing payload.  */
  struct perf_event_bts bts;
};

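/* A note on the sample layout: the events below are opened with
   attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR and a sample
   period of one, so each PERF_RECORD_SAMPLE is expected to carry the
   instruction pointer (the branch source) followed by the address
   payload (the branch destination), matching the struct above.  */
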
/* Identify the cpu we're running on.  */
static struct btrace_cpu
btrace_this_cpu (void)
{
  struct btrace_cpu cpu;
  unsigned int eax, ebx, ecx, edx;
  int ok;

  memset (&cpu, 0, sizeof (cpu));

  ok = x86_cpuid (0, &eax, &ebx, &ecx, &edx);
  if (ok != 0)
    {
      if (ebx == signature_INTEL_ebx && ecx == signature_INTEL_ecx
          && edx == signature_INTEL_edx)
        {
          unsigned int cpuid, ignore;

          ok = x86_cpuid (1, &cpuid, &ignore, &ignore, &ignore);
          if (ok != 0)
            {
              cpu.vendor = CV_INTEL;

              cpu.family = (cpuid >> 8) & 0xf;
              cpu.model = (cpuid >> 4) & 0xf;

              if (cpu.family == 0x6)
                cpu.model += (cpuid >> 12) & 0xf0;
            }
        }
    }

  return cpu;
}

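/* To illustrate the arithmetic above: a CPU reporting 0x000306a9 in cpuid
   leaf 1 yields family (0x306a9 >> 8) & 0xf = 0x6 and base model
   (0x306a9 >> 4) & 0xf = 0xa, extended by (0x306a9 >> 12) & 0xf0 = 0x30
   to model 0x3a (Ivy Bridge).  */
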
/* Return non-zero if there is new data in PEVENT; zero otherwise.  */

static int
perf_event_new_data (const struct perf_event_buffer *pev)
{
  return *pev->data_head != pev->last_head;
}

/* Copy the last SIZE bytes from PEV ending at DATA_HEAD and return a pointer
   to the memory holding the copy.
   The caller is responsible for freeing the memory.  */

static gdb_byte *
perf_event_read (const struct perf_event_buffer *pev, __u64 data_head,
                 size_t size)
{
  const gdb_byte *begin, *end, *start, *stop;
  gdb_byte *buffer;
  size_t buffer_size;
  __u64 data_tail;

  if (size == 0)
    return NULL;

  /* We should never ask for more data than the buffer can hold.  */
  buffer_size = pev->size;
  gdb_assert (size <= buffer_size);

  /* If we ask for more data than we seem to have, we wrap around and read
     data from the end of the buffer.  This is already handled by the %
     BUFFER_SIZE operation, below.  Here, we just need to make sure that we
     don't underflow.

     Note that this is perfectly OK for perf event buffers where data_head
     doesn't grow indefinitely and instead wraps around to remain within the
     buffer's boundaries.  */
  if (data_head < size)
    data_head += buffer_size;

  gdb_assert (size <= data_head);
  data_tail = data_head - size;

  begin = pev->mem;
  start = begin + data_tail % buffer_size;
  stop = begin + data_head % buffer_size;

  buffer = (gdb_byte *) xmalloc (size);

  if (start < stop)
    memcpy (buffer, start, stop - start);
  else
    {
      end = begin + buffer_size;

      memcpy (buffer, start, end - start);
      memcpy (buffer + (end - start), begin, stop - begin);
    }

  return buffer;
}

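/* To illustrate the wrap-around above with small numbers: with
   buffer_size == 8, data_head == 10, and size == 4, we get
   data_tail == 6, start == mem + 6, and stop == mem + 2.  Since start is
   not below stop, we copy bytes 6 and 7 followed by bytes 0 and 1,
   i.e. the last four bytes written before the head position.  */
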
/* Copy the perf event buffer data from PEV.
   Store a pointer to the copy into DATA and its size in SIZE.  */

static void
perf_event_read_all (struct perf_event_buffer *pev, gdb_byte **data,
                     size_t *psize)
{
  size_t size;
  __u64 data_head;

  data_head = *pev->data_head;
  size = pev->size;

  *data = perf_event_read (pev, data_head, size);
  *psize = size;

  pev->last_head = data_head;
}

/* Determine the event type.
   Returns zero on success and fills in TYPE; returns -1 otherwise.  */

static int
perf_event_pt_event_type (int *type)
{
  FILE *file;
  int found;

  file = fopen ("/sys/bus/event_source/devices/intel_pt/type", "r");
  if (file == NULL)
    return -1;

  found = fscanf (file, "%d", type);

  fclose (file);

  if (found != 1)
    return -1;

  return 0;
}

/* Try to determine the start address of the Linux kernel.  */

static uint64_t
linux_determine_kernel_start (void)
{
  static uint64_t kernel_start;
  static int cached;

  if (cached != 0)
    return kernel_start;

  cached = 1;

  gdb_file_up file = gdb_fopen_cloexec ("/proc/kallsyms", "r");
  if (file == NULL)
    return kernel_start;

  while (!feof (file.get ()))
    {
      char buffer[1024], symbol[8], *line;
      uint64_t addr;
      int match;

      line = fgets (buffer, sizeof (buffer), file.get ());
      if (line == NULL)
        break;

      match = sscanf (line, "%" SCNx64 " %*[tT] %7s", &addr, symbol);
      if (match != 2)
        continue;

      if (strcmp (symbol, "_text") == 0)
        {
          kernel_start = addr;
          break;
        }
    }

  return kernel_start;
}

/* Check whether an address is in the kernel.  */

static inline int
perf_event_is_kernel_addr (uint64_t addr)
{
  uint64_t kernel_start;

  kernel_start = linux_determine_kernel_start ();
  if (kernel_start != 0ull)
    return (addr >= kernel_start);

  /* If we don't know the kernel's start address, let's check the most
     significant bit.  This will work at least for 64-bit kernels.  */
  return ((addr & (1ull << 63)) != 0);
}

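/* The most-significant-bit heuristic works because 64-bit kernels live in
   the upper half of the canonical address space; on x86-64, for example,
   kernel text sits around 0xffffffff81000000 while user-space addresses
   stay below the canonical gap and have bit 63 clear.  */
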
/* Check whether a perf event record should be skipped.  */

static inline int
perf_event_skip_bts_record (const struct perf_event_bts *bts)
{
  /* The hardware may report branches from kernel into user space.  Branches
     from user into kernel space will be suppressed.  We filter the former to
     provide a consistent branch trace excluding kernel.  */
  return perf_event_is_kernel_addr (bts->from);
}

/* Perform a few consistency checks on a perf event sample record.  This is
   meant to catch cases when we get out of sync with the perf event stream.  */

static inline int
perf_event_sample_ok (const struct perf_event_sample *sample)
{
  if (sample->header.type != PERF_RECORD_SAMPLE)
    return 0;

  if (sample->header.size != sizeof (*sample))
    return 0;

  return 1;
}

/* Branch trace is collected in a circular buffer [begin; end) as pairs of from
   and to addresses (plus a header).

   Start points into that buffer at the next sample position.
   We read the collected samples backwards from start.

   While reading the samples, we convert the information into a list of blocks.
   For two adjacent samples s1 and s2, we form a block b such that b.begin =
   s1.to and b.end = s2.from.

   In case the buffer overflows during sampling, one sample may have its lower
   part at the end and its upper part at the beginning of the buffer.  */

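/* For example, a sample s1 = { from: 0x1000, to: 0x2000 } followed by
   s2 = { from: 0x2040, to: 0x3000 } yields the block [0x2000; 0x2040]:
   execution entered at s1.to and ran sequentially until it branched again
   at s2.from.  (The addresses are made up for illustration.)  */
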
static VEC (btrace_block_s) *
perf_event_read_bts (struct btrace_target_info *tinfo, const uint8_t *begin,
                     const uint8_t *end, const uint8_t *start, size_t size)
{
  VEC (btrace_block_s) *btrace = NULL;
  struct perf_event_sample sample;
  size_t read;
  struct btrace_block block = { 0, 0 };
  struct regcache *regcache;

  gdb_assert (begin <= start);
  gdb_assert (start <= end);

  /* The first block ends at the current pc.  */
  regcache = get_thread_regcache_for_ptid (tinfo->ptid);
  block.end = regcache_read_pc (regcache);

  /* The buffer may contain a partial record as its last entry (i.e. when the
     buffer size is not a multiple of the sample size).  */
  read = sizeof (sample) - 1;

  for (; read < size; read += sizeof (sample))
    {
      const struct perf_event_sample *psample;

      /* Find the next perf_event sample in a backwards traversal.  */
      start -= sizeof (sample);

      /* If we're still inside the buffer, we're done.  */
      if (begin <= start)
        psample = (const struct perf_event_sample *) start;
      else
        {
          int missing;

          /* We're to the left of the ring buffer, we will wrap around and
             reappear at the very right of the ring buffer.  */
          missing = (begin - start);
          start = (end - missing);

          /* If the entire sample is missing, we're done.  */
          if (missing == sizeof (sample))
            psample = (const struct perf_event_sample *) start;
          else
            {
              uint8_t *stack;

              /* The sample wrapped around.  The lower part is at the end and
                 the upper part is at the beginning of the buffer.  */
              stack = (uint8_t *) &sample;

              /* Copy the two parts so we have a contiguous sample.  */
              memcpy (stack, start, missing);
              memcpy (stack + missing, begin, sizeof (sample) - missing);

              psample = &sample;
            }
        }

      if (!perf_event_sample_ok (psample))
        {
          warning (_("Branch trace may be incomplete."));
          break;
        }

      if (perf_event_skip_bts_record (&psample->bts))
        continue;

      /* We found a valid sample, so we can complete the current block.  */
      block.begin = psample->bts.to;

      VEC_safe_push (btrace_block_s, btrace, &block);

      /* Start the next block.  */
      block.end = psample->bts.from;
    }

  /* Push the last block (i.e. the first one of inferior execution), as well.
     We don't know where it ends, but we know where it starts.  If we're
     reading delta trace, we can fill in the start address later on.
     Otherwise we will prune it.  */
  VEC_safe_push (btrace_block_s, btrace, &block);

  return btrace;
}

/* Check whether the kernel supports BTS.  */

static int
kernel_supports_bts (void)
{
  struct perf_event_attr attr;
  pid_t child, pid;
  int status, file;

  errno = 0;
  child = fork ();
  switch (child)
    {
    case -1:
      warning (_("test bts: cannot fork: %s."), safe_strerror (errno));
      return 0;

    case 0:
      status = ptrace (PTRACE_TRACEME, 0, NULL, NULL);
      if (status != 0)
        {
          warning (_("test bts: cannot PTRACE_TRACEME: %s."),
                   safe_strerror (errno));
          _exit (1);
        }

      status = raise (SIGTRAP);
      if (status != 0)
        {
          warning (_("test bts: cannot raise SIGTRAP: %s."),
                   safe_strerror (errno));
          _exit (1);
        }

      _exit (1);

    default:
      pid = waitpid (child, &status, 0);
      if (pid != child)
        {
          warning (_("test bts: bad pid %ld, error: %s."),
                   (long) pid, safe_strerror (errno));
          return 0;
        }

      if (!WIFSTOPPED (status))
        {
          warning (_("test bts: expected stop. status: %d."),
                   status);
          return 0;
        }

      memset (&attr, 0, sizeof (attr));

      attr.type = PERF_TYPE_HARDWARE;
      attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
      attr.sample_period = 1;
      attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR;
      attr.exclude_kernel = 1;
      attr.exclude_hv = 1;
      attr.exclude_idle = 1;

      file = syscall (SYS_perf_event_open, &attr, child, -1, -1, 0);
      if (file >= 0)
        close (file);

      kill (child, SIGKILL);
      ptrace (PTRACE_KILL, child, NULL, NULL);

      pid = waitpid (child, &status, 0);
      if (pid != child)
        {
          warning (_("test bts: bad pid %ld, error: %s."),
                   (long) pid, safe_strerror (errno));
          if (!WIFSIGNALED (status))
            warning (_("test bts: expected killed. status: %d."),
                     status);
        }

      return (file >= 0);
    }
}

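/* A note on the probing strategy used above (and below for Intel Processor
   Trace): we fork a throw-away child, have it stop itself via
   PTRACE_TRACEME and SIGTRAP, and then attempt a minimal perf_event_open
   for the format in question against that child.  Whether the syscall
   hands back a valid file descriptor is the actual feature test; the
   child is killed and reaped before we return.  */
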
/* Check whether the kernel supports Intel Processor Trace.  */

static int
kernel_supports_pt (void)
{
  struct perf_event_attr attr;
  pid_t child, pid;
  int status, file, type;

  errno = 0;
  child = fork ();
  switch (child)
    {
    case -1:
      warning (_("test pt: cannot fork: %s."), safe_strerror (errno));
      return 0;

    case 0:
      status = ptrace (PTRACE_TRACEME, 0, NULL, NULL);
      if (status != 0)
        {
          warning (_("test pt: cannot PTRACE_TRACEME: %s."),
                   safe_strerror (errno));
          _exit (1);
        }

      status = raise (SIGTRAP);
      if (status != 0)
        {
          warning (_("test pt: cannot raise SIGTRAP: %s."),
                   safe_strerror (errno));
          _exit (1);
        }

      _exit (1);

    default:
      pid = waitpid (child, &status, 0);
      if (pid != child)
        {
          warning (_("test pt: bad pid %ld, error: %s."),
                   (long) pid, safe_strerror (errno));
          return 0;
        }

      if (!WIFSTOPPED (status))
        {
          warning (_("test pt: expected stop. status: %d."),
                   status);
          return 0;
        }

      status = perf_event_pt_event_type (&type);
      if (status != 0)
        file = -1;
      else
        {
          memset (&attr, 0, sizeof (attr));

          attr.size = sizeof (attr);
          attr.type = type;
          attr.exclude_kernel = 1;
          attr.exclude_hv = 1;
          attr.exclude_idle = 1;

          file = syscall (SYS_perf_event_open, &attr, child, -1, -1, 0);
          if (file >= 0)
            close (file);
        }

      kill (child, SIGKILL);
      ptrace (PTRACE_KILL, child, NULL, NULL);

      pid = waitpid (child, &status, 0);
      if (pid != child)
        {
          warning (_("test pt: bad pid %ld, error: %s."),
                   (long) pid, safe_strerror (errno));
          if (!WIFSIGNALED (status))
            warning (_("test pt: expected killed. status: %d."),
                     status);
        }

      return (file >= 0);
    }
}

/* Check whether an Intel cpu supports BTS.  */

static int
intel_supports_bts (const struct btrace_cpu *cpu)
{
  switch (cpu->family)
    {
    case 0x6:
      switch (cpu->model)
        {
        case 0x1a: /* Nehalem */
        case 0x1f:
        case 0x1e:
        case 0x2e:
        case 0x25: /* Westmere */
        case 0x2c:
        case 0x2f:
        case 0x2a: /* Sandy Bridge */
        case 0x2d:
        case 0x3a: /* Ivy Bridge */

          /* AAJ122: LBR, BTM, or BTS records may have incorrect branch
             "from" information after an EIST transition, T-states, C1E, or
             Adaptive Thermal Throttling.  */
          return 0;
        }
    }

  return 1;
}

/* Check whether the cpu supports BTS.  */

static int
cpu_supports_bts (void)
{
  struct btrace_cpu cpu;

  cpu = btrace_this_cpu ();
  switch (cpu.vendor)
    {
    default:
      /* Don't know about others.  Let's assume they do.  */
      return 1;

    case CV_INTEL:
      return intel_supports_bts (&cpu);
    }
}

/* Check whether the linux target supports BTS.  */

static int
linux_supports_bts (void)
{
  static int cached;

  if (cached == 0)
    {
      if (!kernel_supports_bts ())
        cached = -1;
      else if (!cpu_supports_bts ())
        cached = -1;
      else
        cached = 1;
    }

  return cached > 0;
}

/* Check whether the linux target supports Intel Processor Trace.  */

static int
linux_supports_pt (void)
{
  static int cached;

  if (cached == 0)
    {
      if (!kernel_supports_pt ())
        cached = -1;
      else
        cached = 1;
    }

  return cached > 0;
}

/* See linux-btrace.h.  */

int
linux_supports_btrace (struct target_ops *ops, enum btrace_format format)
{
  switch (format)
    {
    case BTRACE_FORMAT_NONE:
      return 0;

    case BTRACE_FORMAT_BTS:
      return linux_supports_bts ();

    case BTRACE_FORMAT_PT:
      return linux_supports_pt ();
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format"));
}

/* Enable branch tracing in BTS format.  */

static struct btrace_target_info *
linux_enable_bts (ptid_t ptid, const struct btrace_config_bts *conf)
{
  struct perf_event_mmap_page *header;
  struct btrace_target_info *tinfo;
  struct btrace_tinfo_bts *bts;
  size_t size, pages;
  __u64 data_offset;
  int pid, pg;

  tinfo = XCNEW (struct btrace_target_info);
  tinfo->ptid = ptid;

  tinfo->conf.format = BTRACE_FORMAT_BTS;
  bts = &tinfo->variant.bts;

  bts->attr.size = sizeof (bts->attr);
  bts->attr.type = PERF_TYPE_HARDWARE;
  bts->attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
  bts->attr.sample_period = 1;

  /* We sample from and to address.  */
  bts->attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR;

  bts->attr.exclude_kernel = 1;
  bts->attr.exclude_hv = 1;
  bts->attr.exclude_idle = 1;

  pid = ptid_get_lwp (ptid);
  if (pid == 0)
    pid = ptid_get_pid (ptid);

  errno = 0;
  bts->file = syscall (SYS_perf_event_open, &bts->attr, pid, -1, -1, 0);
  if (bts->file < 0)
    goto err_out;

  /* Convert the requested size in bytes to pages (rounding up).  */
  pages = ((size_t) conf->size / PAGE_SIZE
           + ((conf->size % PAGE_SIZE) == 0 ? 0 : 1));
  /* We need at least one page.  */
  if (pages == 0)
    pages = 1;

  /* The buffer size can be requested in powers of two pages.  Adjust PAGES
     to the next power of two.  */
  for (pg = 0; pages != ((size_t) 1 << pg); ++pg)
    if ((pages & ((size_t) 1 << pg)) != 0)
      pages += ((size_t) 1 << pg);
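
  /* For instance, a request of five pages (binary 101) becomes six after
     bit 0 is folded in, then eight after bit 1, at which point PAGES
     equals 1 << 3 and the loop stops: five pages are rounded up to
     eight.  */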

  /* We try to allocate the requested size.
     If that fails, try to get as much as we can.  */
  for (; pages > 0; pages >>= 1)
    {
      size_t length;
      __u64 data_size;

      data_size = (__u64) pages * PAGE_SIZE;

      /* Don't ask for more than we can represent in the configuration.  */
      if ((__u64) UINT_MAX < data_size)
        continue;

      size = (size_t) data_size;
      length = size + PAGE_SIZE;

      /* Check for overflows.  */
      if ((__u64) length != data_size + PAGE_SIZE)
        continue;

      /* The number of pages we request needs to be a power of two.  */
      header = ((struct perf_event_mmap_page *)
                mmap (NULL, length, PROT_READ, MAP_SHARED, bts->file, 0));
      if (header != MAP_FAILED)
        break;
    }

  if (pages == 0)
    goto err_file;

  data_offset = PAGE_SIZE;

#if defined (PERF_ATTR_SIZE_VER5)
  if (offsetof (struct perf_event_mmap_page, data_size) <= header->size)
    {
      __u64 data_size;

      data_offset = header->data_offset;
      data_size = header->data_size;

      size = (unsigned int) data_size;

      /* Check for overflows.  */
      if ((__u64) size != data_size)
        {
          munmap ((void *) header, size + PAGE_SIZE);
          goto err_file;
        }
    }
#endif /* defined (PERF_ATTR_SIZE_VER5) */
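
  /* On kernels that predate PERF_ATTR_SIZE_VER5, the mmap page has no
     data_offset/data_size fields; there, the data section simply starts
     at the fixed offset of one page, which is the fallback chosen
     above.  */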

  bts->header = header;
  bts->bts.mem = ((const uint8_t *) header) + data_offset;
  bts->bts.size = size;
  bts->bts.data_head = &header->data_head;
  bts->bts.last_head = 0ull;

  tinfo->conf.bts.size = (unsigned int) size;
  return tinfo;

 err_file:
  /* We were not able to allocate any buffer.  */
  close (bts->file);

 err_out:
  xfree (tinfo);
  return NULL;
}

#if defined (PERF_ATTR_SIZE_VER5)

/* Enable branch tracing in Intel Processor Trace format.  */

static struct btrace_target_info *
linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
{
  struct perf_event_mmap_page *header;
  struct btrace_target_info *tinfo;
  struct btrace_tinfo_pt *pt;
  size_t pages, size;
  int pid, pg, errcode, type;

  if (conf->size == 0)
    return NULL;

  errcode = perf_event_pt_event_type (&type);
  if (errcode != 0)
    return NULL;

  pid = ptid_get_lwp (ptid);
  if (pid == 0)
    pid = ptid_get_pid (ptid);

  tinfo = XCNEW (struct btrace_target_info);
  tinfo->ptid = ptid;

  tinfo->conf.format = BTRACE_FORMAT_PT;
  pt = &tinfo->variant.pt;

  pt->attr.size = sizeof (pt->attr);
  pt->attr.type = type;

  pt->attr.exclude_kernel = 1;
  pt->attr.exclude_hv = 1;
  pt->attr.exclude_idle = 1;

  errno = 0;
  pt->file = syscall (SYS_perf_event_open, &pt->attr, pid, -1, -1, 0);
  if (pt->file < 0)
    goto err;

  /* Allocate the configuration page.  */
  header = ((struct perf_event_mmap_page *)
            mmap (NULL, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
                  pt->file, 0));
  if (header == MAP_FAILED)
    goto err_file;

  header->aux_offset = header->data_offset + header->data_size;
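
  /* The aux area is where the kernel stores the actual Processor Trace
     data; user space picks its position by writing aux_offset (and, below,
     aux_size) into the configuration page, which is why that page is
     mapped with PROT_WRITE.  We place it directly behind the data section
     and map it separately below.  */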

  /* Convert the requested size in bytes to pages (rounding up).  */
  pages = ((size_t) conf->size / PAGE_SIZE
           + ((conf->size % PAGE_SIZE) == 0 ? 0 : 1));
  /* We need at least one page.  */
  if (pages == 0)
    pages = 1;

  /* The buffer size can be requested in powers of two pages.  Adjust PAGES
     to the next power of two.  */
  for (pg = 0; pages != ((size_t) 1 << pg); ++pg)
    if ((pages & ((size_t) 1 << pg)) != 0)
      pages += ((size_t) 1 << pg);

  /* We try to allocate the requested size.
     If that fails, try to get as much as we can.  */
  for (; pages > 0; pages >>= 1)
    {
      size_t length;
      __u64 data_size;

      data_size = (__u64) pages * PAGE_SIZE;

      /* Don't ask for more than we can represent in the configuration.  */
      if ((__u64) UINT_MAX < data_size)
        continue;

      size = (size_t) data_size;

      /* Check for overflows.  */
      if ((__u64) size != data_size)
        continue;

      header->aux_size = data_size;
      length = size;

      pt->pt.mem = ((const uint8_t *)
                    mmap (NULL, length, PROT_READ, MAP_SHARED, pt->file,
                          header->aux_offset));
      if (pt->pt.mem != MAP_FAILED)
        break;
    }

  if (pages == 0)
    goto err_conf;

  pt->header = header;
  pt->pt.size = size;
  pt->pt.data_head = &header->aux_head;

  tinfo->conf.pt.size = (unsigned int) size;
  return tinfo;

 err_conf:
  munmap ((void *) header, PAGE_SIZE);

 err_file:
  close (pt->file);

 err:
  xfree (tinfo);
  return NULL;
}

#else /* !defined (PERF_ATTR_SIZE_VER5) */

static struct btrace_target_info *
linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
{
  return NULL;
}

#endif /* !defined (PERF_ATTR_SIZE_VER5) */

/* See linux-btrace.h.  */

struct btrace_target_info *
linux_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
{
  struct btrace_target_info *tinfo;

  tinfo = NULL;
  switch (conf->format)
    {
    case BTRACE_FORMAT_NONE:
      break;

    case BTRACE_FORMAT_BTS:
      tinfo = linux_enable_bts (ptid, &conf->bts);
      break;

    case BTRACE_FORMAT_PT:
      tinfo = linux_enable_pt (ptid, &conf->pt);
      break;
    }

  return tinfo;
}

/* Disable BTS tracing.  */

static enum btrace_error
linux_disable_bts (struct btrace_tinfo_bts *tinfo)
{
  munmap ((void *) tinfo->header, tinfo->bts.size + PAGE_SIZE);
  close (tinfo->file);

  return BTRACE_ERR_NONE;
}

/* Disable Intel Processor Trace tracing.  */

static enum btrace_error
linux_disable_pt (struct btrace_tinfo_pt *tinfo)
{
  munmap ((void *) tinfo->pt.mem, tinfo->pt.size);
  munmap ((void *) tinfo->header, PAGE_SIZE);
  close (tinfo->file);

  return BTRACE_ERR_NONE;
}

/* See linux-btrace.h.  */

enum btrace_error
linux_disable_btrace (struct btrace_target_info *tinfo)
{
  enum btrace_error errcode;

  errcode = BTRACE_ERR_NOT_SUPPORTED;
  switch (tinfo->conf.format)
    {
    case BTRACE_FORMAT_NONE:
      break;

    case BTRACE_FORMAT_BTS:
      errcode = linux_disable_bts (&tinfo->variant.bts);
      break;

    case BTRACE_FORMAT_PT:
      errcode = linux_disable_pt (&tinfo->variant.pt);
      break;
    }

  if (errcode == BTRACE_ERR_NONE)
    xfree (tinfo);

  return errcode;
}

/* Read branch trace data in BTS format for the thread given by TINFO into
   BTRACE using the TYPE reading method.  */

static enum btrace_error
linux_read_bts (struct btrace_data_bts *btrace,
                struct btrace_target_info *tinfo,
                enum btrace_read_type type)
{
  struct perf_event_buffer *pevent;
  const uint8_t *begin, *end, *start;
  size_t buffer_size, size;
  __u64 data_head, data_tail;
  unsigned int retries = 5;

  pevent = &tinfo->variant.bts.bts;

  /* For delta reads, we return at least the partial last block containing
     the current PC.  */
  if (type == BTRACE_READ_NEW && !perf_event_new_data (pevent))
    return BTRACE_ERR_NONE;

  buffer_size = pevent->size;
  data_tail = pevent->last_head;

  /* We may need to retry reading the trace.  See below.  */
  while (retries--)
    {
      data_head = *pevent->data_head;

      /* Delete any leftover trace from the previous iteration.  */
      VEC_free (btrace_block_s, btrace->blocks);

      if (type == BTRACE_READ_DELTA)
        {
          __u64 data_size;

          /* Determine the number of bytes to read and check for buffer
             overflows.  */

          /* Check for data head overflows.  We might be able to recover from
             those but they are very unlikely and it's not really worth the
             effort, I think.  */
          if (data_head < data_tail)
            return BTRACE_ERR_OVERFLOW;

          /* If the buffer is smaller than the trace delta, we overflowed.  */
          data_size = data_head - data_tail;
          if (buffer_size < data_size)
            return BTRACE_ERR_OVERFLOW;

          /* DATA_SIZE <= BUFFER_SIZE and therefore fits into a size_t.  */
          size = (size_t) data_size;
        }
      else
        {
          /* Read the entire buffer.  */
          size = buffer_size;

          /* Adjust the size if the buffer has not overflowed, yet.  */
          if (data_head < size)
            size = (size_t) data_head;
        }

      /* Data_head keeps growing; the buffer itself is circular.  */
      begin = pevent->mem;
      start = begin + data_head % buffer_size;

      if (data_head <= buffer_size)
        end = start;
      else
        end = begin + pevent->size;

      btrace->blocks = perf_event_read_bts (tinfo, begin, end, start, size);

      /* The stopping thread notifies its ptracer before it is scheduled out.
         On multi-core systems, the debugger might therefore run while the
         kernel might be writing the last branch trace records.

         Let's check whether the data head moved while we read the trace.  */
      if (data_head == *pevent->data_head)
        break;
    }

  pevent->last_head = data_head;

  /* Prune the incomplete last block (i.e. the first one of inferior execution)
     if we're not doing a delta read.  There is no way of filling in its zeroed
     BEGIN element.  */
  if (!VEC_empty (btrace_block_s, btrace->blocks)
      && type != BTRACE_READ_DELTA)
    VEC_pop (btrace_block_s, btrace->blocks);

  return BTRACE_ERR_NONE;
}

/* Fill in the Intel Processor Trace configuration information.  */

static void
linux_fill_btrace_pt_config (struct btrace_data_pt_config *conf)
{
  conf->cpu = btrace_this_cpu ();
}

/* Read branch trace data in Intel Processor Trace format for the thread
   given by TINFO into BTRACE using the TYPE reading method.  */

static enum btrace_error
linux_read_pt (struct btrace_data_pt *btrace,
               struct btrace_target_info *tinfo,
               enum btrace_read_type type)
{
  struct perf_event_buffer *pt;

  pt = &tinfo->variant.pt.pt;

  linux_fill_btrace_pt_config (&btrace->config);

  switch (type)
    {
    case BTRACE_READ_DELTA:
      /* We don't support delta reads.  The data head (i.e. aux_head) wraps
         around to stay inside the aux buffer.  */
      return BTRACE_ERR_NOT_SUPPORTED;

    case BTRACE_READ_NEW:
      if (!perf_event_new_data (pt))
        return BTRACE_ERR_NONE;

      /* Fall through.  */
    case BTRACE_READ_ALL:
      perf_event_read_all (pt, &btrace->data, &btrace->size);
      return BTRACE_ERR_NONE;
    }

  internal_error (__FILE__, __LINE__, _("Unknown btrace read type."));
}

/* See linux-btrace.h.  */

enum btrace_error
linux_read_btrace (struct btrace_data *btrace,
                   struct btrace_target_info *tinfo,
                   enum btrace_read_type type)
{
  switch (tinfo->conf.format)
    {
    case BTRACE_FORMAT_NONE:
      return BTRACE_ERR_NOT_SUPPORTED;

    case BTRACE_FORMAT_BTS:
      /* We read btrace in BTS format.  */
      btrace->format = BTRACE_FORMAT_BTS;
      btrace->variant.bts.blocks = NULL;

      return linux_read_bts (&btrace->variant.bts, tinfo, type);

    case BTRACE_FORMAT_PT:
      /* We read btrace in Intel Processor Trace format.  */
      btrace->format = BTRACE_FORMAT_PT;
      btrace->variant.pt.data = NULL;
      btrace->variant.pt.size = 0;

      return linux_read_pt (&btrace->variant.pt, tinfo, type);
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}

/* See linux-btrace.h.  */

const struct btrace_config *
linux_btrace_conf (const struct btrace_target_info *tinfo)
{
  return &tinfo->conf;
}

#else /* !HAVE_LINUX_PERF_EVENT_H */

/* See linux-btrace.h.  */

int
linux_supports_btrace (struct target_ops *ops, enum btrace_format format)
{
  return 0;
}

/* See linux-btrace.h.  */

struct btrace_target_info *
linux_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
{
  return NULL;
}

/* See linux-btrace.h.  */

enum btrace_error
linux_disable_btrace (struct btrace_target_info *tinfo)
{
  return BTRACE_ERR_NOT_SUPPORTED;
}

/* See linux-btrace.h.  */

enum btrace_error
linux_read_btrace (struct btrace_data *btrace,
                   struct btrace_target_info *tinfo,
                   enum btrace_read_type type)
{
  return BTRACE_ERR_NOT_SUPPORTED;
}

/* See linux-btrace.h.  */

const struct btrace_config *
linux_btrace_conf (const struct btrace_target_info *tinfo)
{
  return NULL;
}

#endif /* !HAVE_LINUX_PERF_EVENT_H */