/* Linux-dependent part of branch trace support for GDB, and GDBserver.

   Copyright (C) 2013-2016 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "common-defs.h"
#include "linux-btrace.h"
#include "common-regcache.h"
#include "x86-cpuid.h"
#include "filestuff.h"

#ifdef HAVE_SYS_SYSCALL_H
#include <sys/syscall.h>
#endif

#if HAVE_LINUX_PERF_EVENT_H && defined(SYS_perf_event_open)

#include <unistd.h>
#include <sys/mman.h>
#include "nat/gdb_ptrace.h"
#include <sys/types.h>
#include <signal.h>
/* A branch trace record in perf_event.  */
struct perf_event_bts
{
  /* The linear address of the branch source.  */
  uint64_t from;

  /* The linear address of the branch destination.  */
  uint64_t to;
};

/* A perf_event branch trace sample.  */
struct perf_event_sample
{
  /* The perf_event sample header.  */
  struct perf_event_header header;

  /* The perf_event branch tracing payload.  */
  struct perf_event_bts bts;
};
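/* Illustrative note (an assumption, not part of the original comments): with
   a sample_type of PERF_SAMPLE_IP | PERF_SAMPLE_ADDR the kernel emits, per
   branch, a perf_event_header followed by the instruction pointer (the branch
   source) and the sampled address (the branch destination), which is why a
   record can be overlaid directly with the struct above.  */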
/* Identify the cpu we're running on.  */

static struct btrace_cpu
btrace_this_cpu (void)
{
  struct btrace_cpu cpu;
  unsigned int eax, ebx, ecx, edx;
  int ok;

  memset (&cpu, 0, sizeof (cpu));

  ok = x86_cpuid (0, &eax, &ebx, &ecx, &edx);
  if (ok != 0)
    {
      if (ebx == signature_INTEL_ebx && ecx == signature_INTEL_ecx
          && edx == signature_INTEL_edx)
        {
          unsigned int cpuid, ignore;

          ok = x86_cpuid (1, &cpuid, &ignore, &ignore, &ignore);
          if (ok != 0)
            {
              cpu.vendor = CV_INTEL;

              cpu.family = (cpuid >> 8) & 0xf;
              cpu.model = (cpuid >> 4) & 0xf;

              if (cpu.family == 0x6)
                cpu.model += (cpuid >> 12) & 0xf0;
            }
        }
    }

  return cpu;
}
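/* Worked example (illustrative value only): a CPUID.1 EAX of 0x000306a9
   decodes to family (bits 8-11) = 0x6 and model (bits 4-7) = 0xa; since the
   family is 0x6, the extended model (bits 16-19) = 0x3 is folded in as 0x30,
   giving model 0x3a (Ivy Bridge).  */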
/* Return non-zero if there is new data in PEVENT; zero otherwise.  */

static int
perf_event_new_data (const struct perf_event_buffer *pev)
{
  return *pev->data_head != pev->last_head;
}
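/* Illustrative note (based on the "data_head keeps growing" comment in
   linux_read_bts below): DATA_HEAD is a free-running byte count maintained by
   the kernel, not an offset into the ring buffer.  With a 4096-byte buffer,
   after 10000 bytes of trace have been produced *data_head is 10000 while only
   the most recent 4096 bytes are still available; LAST_HEAD remembers the
   count at the previous read.  */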
/* Copy the last SIZE bytes from PEV ending at DATA_HEAD and return a pointer
   to the memory holding the copy.
   The caller is responsible for freeing the memory.  */

static gdb_byte *
perf_event_read (const struct perf_event_buffer *pev, __u64 data_head,
                 size_t size)
{
  const gdb_byte *begin, *end, *start, *stop;
  gdb_byte *buffer;
  size_t buffer_size;
  __u64 data_tail;

  if (size == 0)
    return NULL;

  gdb_assert (size <= data_head);
  data_tail = data_head - size;

  buffer_size = pev->size;
  begin = pev->mem;
  start = begin + data_tail % buffer_size;
  stop = begin + data_head % buffer_size;

  buffer = (gdb_byte *) xmalloc (size);

  if (start < stop)
    memcpy (buffer, start, stop - start);
  else
    {
      end = begin + buffer_size;

      memcpy (buffer, start, end - start);
      memcpy (buffer + (end - start), begin, stop - begin);
    }

  return buffer;
}
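/* Worked example (hypothetical numbers): with BUFFER_SIZE = 8, DATA_HEAD = 10
   and SIZE = 4, DATA_TAIL is 6, START points at offset 6 and STOP at offset 2.
   Since START > STOP the copy is split: 2 bytes from offsets 6-7 followed by
   2 bytes from offsets 0-1, reassembling the 4 most recent bytes in order.  */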
/* Copy the perf event buffer data from PEV.
   Store a pointer to the copy into DATA and its size in SIZE.  */

static void
perf_event_read_all (struct perf_event_buffer *pev, gdb_byte **data,
                     size_t *psize)
{
  size_t size;
  __u64 data_head;

  data_head = *pev->data_head;

  size = pev->size;
  if (data_head < size)
    size = (size_t) data_head;

  *data = perf_event_read (pev, data_head, size);
  *psize = size;

  pev->last_head = data_head;
}
/* Determine the event type.
   Returns zero on success and fills in TYPE; returns -1 otherwise.  */

static int
perf_event_pt_event_type (int *type)
{
  FILE *file;
  int found;

  file = fopen ("/sys/bus/event_source/devices/intel_pt/type", "r");
  if (file == NULL)
    return -1;

  found = fscanf (file, "%d", type);

  fclose (file);

  if (found == 1)
    return 0;
  return -1;
}
/* Try to determine the start address of the Linux kernel.  */

static uint64_t
linux_determine_kernel_start (void)
{
  static uint64_t kernel_start;
  static int cached;
  FILE *file;

  if (cached != 0)
    return kernel_start;

  cached = 1;

  file = gdb_fopen_cloexec ("/proc/kallsyms", "r");
  if (file == NULL)
    return kernel_start;

  while (!feof (file))
    {
      char buffer[1024], symbol[8], *line;
      uint64_t addr;
      int match;

      line = fgets (buffer, sizeof (buffer), file);
      if (line == NULL)
        break;

      match = sscanf (line, "%" SCNx64 " %*[tT] %7s", &addr, symbol);
      if (match != 2)
        continue;

      if (strcmp (symbol, "_text") == 0)
        {
          kernel_start = addr;
          break;
        }
    }

  fclose (file);

  return kernel_start;
}
/* Check whether an address is in the kernel.  */

static inline int
perf_event_is_kernel_addr (uint64_t addr)
{
  uint64_t kernel_start;

  kernel_start = linux_determine_kernel_start ();
  if (kernel_start != 0ull)
    return (addr >= kernel_start);

  /* If we don't know the kernel's start address, let's check the most
     significant bit.  This will work at least for 64-bit kernels.  */
  return ((addr & (1ull << 63)) != 0);
}
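/* Illustrative example (hypothetical address): a typical x86_64 kernel text
   address such as 0xffffffff81000000 has bit 63 set, so the fallback check
   above classifies it as a kernel address even when /proc/kallsyms cannot be
   read; user-space addresses on current 64-bit ABIs have that bit clear.  */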
/* Check whether a perf event record should be skipped.  */

static inline int
perf_event_skip_bts_record (const struct perf_event_bts *bts)
{
  /* The hardware may report branches from kernel into user space.  Branches
     from user into kernel space will be suppressed.  We filter the former to
     provide a consistent branch trace excluding kernel addresses.  */
  return perf_event_is_kernel_addr (bts->from);
}
/* Perform a few consistency checks on a perf event sample record.  This is
   meant to catch cases when we get out of sync with the perf event stream.  */

static inline int
perf_event_sample_ok (const struct perf_event_sample *sample)
{
  if (sample->header.type != PERF_RECORD_SAMPLE)
    return 0;

  if (sample->header.size != sizeof (*sample))
    return 0;

  return 1;
}
/* Branch trace is collected in a circular buffer [begin; end) as pairs of from
   and to addresses (plus a header).

   Start points into that buffer at the next sample position.
   We read the collected samples backwards from start.

   While reading the samples, we convert the information into a list of blocks.
   For two adjacent samples s1 and s2, we form a block b such that b.begin =
   s1.to and b.end = s2.from.

   In case the buffer overflows during sampling, one sample may have its lower
   part at the end and its upper part at the beginning of the buffer.  */
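/* Worked example (hypothetical addresses): if the older sample s1 records a
   branch {from = 0x1000, to = 0x2000} and the newer sample s2 records
   {from = 0x2040, to = 0x3000}, the block formed between them is
   [begin = 0x2000, end = 0x2040]: the code executed sequentially from where
   s1 branched to, up to where s2 branched from.  */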
static VEC (btrace_block_s) *
perf_event_read_bts (struct btrace_target_info *tinfo, const uint8_t *begin,
                     const uint8_t *end, const uint8_t *start, size_t size)
{
  VEC (btrace_block_s) *btrace = NULL;
  struct perf_event_sample sample;
  size_t read = 0;
  struct btrace_block block = { 0, 0 };
  struct regcache *regcache;

  gdb_assert (begin <= start);
  gdb_assert (start <= end);

  /* The first block ends at the current pc.  */
  regcache = get_thread_regcache_for_ptid (tinfo->ptid);
  block.end = regcache_read_pc (regcache);

  /* The buffer may contain a partial record as its last entry (i.e. when the
     buffer size is not a multiple of the sample size).  */
  read = sizeof (sample) - 1;

  for (; read < size; read += sizeof (sample))
    {
      const struct perf_event_sample *psample;

      /* Find the next perf_event sample in a backwards traversal.  */
      start -= sizeof (sample);

      /* If we're still inside the buffer, we're done.  */
      if (begin <= start)
        psample = (const struct perf_event_sample *) start;
      else
        {
          int missing;

          /* We're to the left of the ring buffer, we will wrap around and
             reappear at the very right of the ring buffer.  */
          missing = (begin - start);
          start = (end - missing);

          /* If the entire sample is missing, we're done.  */
          if (missing == sizeof (sample))
            psample = (const struct perf_event_sample *) start;
          else
            {
              uint8_t *stack;

              /* The sample wrapped around.  The lower part is at the end and
                 the upper part is at the beginning of the buffer.  */
              stack = (uint8_t *) &sample;

              /* Copy the two parts so we have a contiguous sample.  */
              memcpy (stack, start, missing);
              memcpy (stack + missing, begin, sizeof (sample) - missing);

              psample = &sample;
            }
        }

      if (!perf_event_sample_ok (psample))
        {
          warning (_("Branch trace may be incomplete."));
          break;
        }

      if (perf_event_skip_bts_record (&psample->bts))
        continue;

      /* We found a valid sample, so we can complete the current block.  */
      block.begin = psample->bts.to;

      VEC_safe_push (btrace_block_s, btrace, &block);

      /* Start the next block.  */
      block.end = psample->bts.from;
    }

  /* Push the last block (i.e. the first one of inferior execution), as well.
     We don't know where it ends, but we know where it starts.  If we're
     reading delta trace, we can fill in the start address later on.
     Otherwise we will prune it.  */
  block.begin = 0;
  VEC_safe_push (btrace_block_s, btrace, &block);

  return btrace;
}
/* Check whether the kernel supports BTS.  */

static int
kernel_supports_bts (void)
{
  struct perf_event_attr attr;
  pid_t child, pid;
  int status, file;

  errno = 0;
  child = fork ();
  switch (child)
    {
    case -1:
      warning (_("test bts: cannot fork: %s."), safe_strerror (errno));
      return 0;

    case 0:
      status = ptrace (PTRACE_TRACEME, 0, NULL, NULL);
      if (status != 0)
        {
          warning (_("test bts: cannot PTRACE_TRACEME: %s."),
                   safe_strerror (errno));
          _exit (1);
        }

      status = raise (SIGTRAP);
      if (status != 0)
        {
          warning (_("test bts: cannot raise SIGTRAP: %s."),
                   safe_strerror (errno));
          _exit (1);
        }

      _exit (1);

    default:
      pid = waitpid (child, &status, 0);
      if (pid != child)
        {
          warning (_("test bts: bad pid %ld, error: %s."),
                   (long) pid, safe_strerror (errno));
          return 0;
        }

      if (!WIFSTOPPED (status))
        {
          warning (_("test bts: expected stop. status: %d."),
                   status);
          return 0;
        }

      memset (&attr, 0, sizeof (attr));

      attr.type = PERF_TYPE_HARDWARE;
      attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
      attr.sample_period = 1;
      attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR;
      attr.exclude_kernel = 1;
      attr.exclude_hv = 1;
      attr.exclude_idle = 1;

      file = syscall (SYS_perf_event_open, &attr, child, -1, -1, 0);
      if (file >= 0)
        close (file);

      kill (child, SIGKILL);
      ptrace (PTRACE_KILL, child, NULL, NULL);

      pid = waitpid (child, &status, 0);
      if (pid != child)
        {
          warning (_("test bts: bad pid %ld, error: %s."),
                   (long) pid, safe_strerror (errno));
          if (!WIFSIGNALED (status))
            warning (_("test bts: expected killed. status: %d."),
                     status);
          return 0;
        }

      return (file >= 0);
    }
}
/* Check whether the kernel supports Intel Processor Trace.  */

static int
kernel_supports_pt (void)
{
  struct perf_event_attr attr;
  pid_t child, pid;
  int status, file, type;

  errno = 0;
  child = fork ();
  switch (child)
    {
    case -1:
      warning (_("test pt: cannot fork: %s."), safe_strerror (errno));
      return 0;

    case 0:
      status = ptrace (PTRACE_TRACEME, 0, NULL, NULL);
      if (status != 0)
        {
          warning (_("test pt: cannot PTRACE_TRACEME: %s."),
                   safe_strerror (errno));
          _exit (1);
        }

      status = raise (SIGTRAP);
      if (status != 0)
        {
          warning (_("test pt: cannot raise SIGTRAP: %s."),
                   safe_strerror (errno));
          _exit (1);
        }

      _exit (1);

    default:
      pid = waitpid (child, &status, 0);
      if (pid != child)
        {
          warning (_("test pt: bad pid %ld, error: %s."),
                   (long) pid, safe_strerror (errno));
          return 0;
        }

      if (!WIFSTOPPED (status))
        {
          warning (_("test pt: expected stop. status: %d."),
                   status);
          return 0;
        }

      status = perf_event_pt_event_type (&type);
      if (status != 0)
        file = -1;
      else
        {
          memset (&attr, 0, sizeof (attr));

          attr.size = sizeof (attr);
          attr.type = type;
          attr.exclude_kernel = 1;
          attr.exclude_hv = 1;
          attr.exclude_idle = 1;

          file = syscall (SYS_perf_event_open, &attr, child, -1, -1, 0);
          if (file >= 0)
            close (file);
        }

      kill (child, SIGKILL);
      ptrace (PTRACE_KILL, child, NULL, NULL);

      pid = waitpid (child, &status, 0);
      if (pid != child)
        {
          warning (_("test pt: bad pid %ld, error: %s."),
                   (long) pid, safe_strerror (errno));
          if (!WIFSIGNALED (status))
            warning (_("test pt: expected killed. status: %d."),
                     status);
          return 0;
        }

      return (file >= 0);
    }
}
/* Check whether an Intel cpu supports BTS.  */

static int
intel_supports_bts (const struct btrace_cpu *cpu)
{
  switch (cpu->family)
    {
    case 0x6:
      switch (cpu->model)
        {
        case 0x1a: /* Nehalem */
        case 0x25: /* Westmere */
        case 0x2a: /* Sandy Bridge */
        case 0x3a: /* Ivy Bridge */
          /* AAJ122: LBR, BTM, or BTS records may have incorrect branch
             "from" information after an EIST transition, T-states, C1E, or
             Adaptive Thermal Throttling.  */
          return 0;
        }
    }

  return 1;
}
/* Check whether the cpu supports BTS.  */

static int
cpu_supports_bts (void)
{
  struct btrace_cpu cpu;

  cpu = btrace_this_cpu ();
  switch (cpu.vendor)
    {
    default:
      /* Don't know about others.  Let's assume they do.  */
      return 1;

    case CV_INTEL:
      return intel_supports_bts (&cpu);
    }
}
/* Check whether the linux target supports BTS.  */

static int
linux_supports_bts (void)
{
  static int cached;

  if (cached == 0)
    {
      if (!kernel_supports_bts ())
        cached = -1;
      else if (!cpu_supports_bts ())
        cached = -1;
      else
        cached = 1;
    }

  return cached > 0;
}

/* Check whether the linux target supports Intel Processor Trace.  */

static int
linux_supports_pt (void)
{
  static int cached;

  if (cached == 0)
    {
      if (!kernel_supports_pt ())
        cached = -1;
      else
        cached = 1;
    }

  return cached > 0;
}
/* See linux-btrace.h.  */

int
linux_supports_btrace (struct target_ops *ops, enum btrace_format format)
{
  switch (format)
    {
    case BTRACE_FORMAT_NONE:
      return 0;

    case BTRACE_FORMAT_BTS:
      return linux_supports_bts ();

    case BTRACE_FORMAT_PT:
      return linux_supports_pt ();
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format"));
}
/* Enable branch tracing in BTS format.  */

static struct btrace_target_info *
linux_enable_bts (ptid_t ptid, const struct btrace_config_bts *conf)
{
  struct perf_event_mmap_page *header;
  struct btrace_target_info *tinfo;
  struct btrace_tinfo_bts *bts;
  size_t size, pages;
  __u64 data_offset;
  int pid, pg;

  tinfo = XCNEW (struct btrace_target_info);
  tinfo->ptid = ptid;

  tinfo->conf.format = BTRACE_FORMAT_BTS;
  bts = &tinfo->variant.bts;

  bts->attr.size = sizeof (bts->attr);
  bts->attr.type = PERF_TYPE_HARDWARE;
  bts->attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
  bts->attr.sample_period = 1;

  /* We sample from and to address.  */
  bts->attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR;

  bts->attr.exclude_kernel = 1;
  bts->attr.exclude_hv = 1;
  bts->attr.exclude_idle = 1;

  pid = ptid_get_lwp (ptid);
  if (pid == 0)
    pid = ptid_get_pid (ptid);

  errno = 0;
  bts->file = syscall (SYS_perf_event_open, &bts->attr, pid, -1, -1, 0);
  if (bts->file < 0)
    goto err_out;

  /* Convert the requested size in bytes to pages (rounding up).  */
  pages = ((size_t) conf->size / PAGE_SIZE
           + ((conf->size % PAGE_SIZE) == 0 ? 0 : 1));
  /* We need at least one page.  */
  if (pages == 0)
    pages = 1;

  /* The buffer size can be requested in powers of two pages.  Adjust PAGES
     to the next power of two.  */
  for (pg = 0; pages != ((size_t) 1 << pg); ++pg)
    if ((pages & ((size_t) 1 << pg)) != 0)
      pages += ((size_t) 1 << pg);
  /* We try to allocate the requested size.
     If that fails, try to get as much as we can.  */
  for (; pages > 0; pages >>= 1)
    {
      size_t length;
      __u64 data_size;

      data_size = (__u64) pages * PAGE_SIZE;

      /* Don't ask for more than we can represent in the configuration.  */
      if ((__u64) UINT_MAX < data_size)
        continue;

      size = (size_t) data_size;
      length = size + PAGE_SIZE;

      /* Check for overflows.  */
      if ((__u64) length != data_size + PAGE_SIZE)
        continue;

      /* The number of pages we request needs to be a power of two.  */
      header = ((struct perf_event_mmap_page *)
                mmap (NULL, length, PROT_READ, MAP_SHARED, bts->file, 0));
      if (header != MAP_FAILED)
        break;
    }

  if (pages == 0)
    goto err_file;

  data_offset = PAGE_SIZE;

#if defined (PERF_ATTR_SIZE_VER5)
  if (offsetof (struct perf_event_mmap_page, data_size) <= header->size)
    {
      __u64 data_size;

      data_offset = header->data_offset;
      data_size = header->data_size;

      size = (unsigned int) data_size;

      /* Check for overflows.  */
      if ((__u64) size != data_size)
        {
          munmap ((void *) header, size + PAGE_SIZE);
          goto err_file;
        }
    }
#endif /* defined (PERF_ATTR_SIZE_VER5) */

  bts->header = header;
  bts->bts.mem = ((const uint8_t *) header) + data_offset;
  bts->bts.size = size;
  bts->bts.data_head = &header->data_head;
  bts->bts.last_head = 0ull;

  tinfo->conf.bts.size = (unsigned int) size;
  return tinfo;

 err_file:
  /* We were not able to allocate any buffer.  */
  close (bts->file);

 err_out:
  xfree (tinfo);
  return NULL;
}
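/* Layout note (illustrative numbers, not from the original comments): a
   successful BTS mapping covers one metadata page followed by 2^n data pages.
   Requesting, say, a 64 KiB buffer on a system with 4 KiB pages maps
   LENGTH = 64 KiB + 4 KiB, with BTS->BTS.MEM pointing DATA_OFFSET bytes past
   the perf_event_mmap_page header.  */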
#if defined (PERF_ATTR_SIZE_VER5)

/* Enable branch tracing in Intel Processor Trace format.  */

static struct btrace_target_info *
linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
{
  struct perf_event_mmap_page *header;
  struct btrace_target_info *tinfo;
  struct btrace_tinfo_pt *pt;
  size_t pages, size;
  int pid, pg, errcode, type;

  errcode = perf_event_pt_event_type (&type);
  if (errcode != 0)
    return NULL;

  pid = ptid_get_lwp (ptid);
  if (pid == 0)
    pid = ptid_get_pid (ptid);

  tinfo = XCNEW (struct btrace_target_info);
  tinfo->ptid = ptid;

  tinfo->conf.format = BTRACE_FORMAT_PT;
  pt = &tinfo->variant.pt;

  pt->attr.size = sizeof (pt->attr);
  pt->attr.type = type;

  pt->attr.exclude_kernel = 1;
  pt->attr.exclude_hv = 1;
  pt->attr.exclude_idle = 1;

  errno = 0;
  pt->file = syscall (SYS_perf_event_open, &pt->attr, pid, -1, -1, 0);
  if (pt->file < 0)
    goto err;

  /* Allocate the configuration page. */
  header = ((struct perf_event_mmap_page *)
            mmap (NULL, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
                  pt->file, 0));
  if (header == MAP_FAILED)
    goto err_file;

  header->aux_offset = header->data_offset + header->data_size;

  /* Convert the requested size in bytes to pages (rounding up).  */
  pages = ((size_t) conf->size / PAGE_SIZE
           + ((conf->size % PAGE_SIZE) == 0 ? 0 : 1));
  /* We need at least one page.  */
  if (pages == 0)
    pages = 1;

  /* The buffer size can be requested in powers of two pages.  Adjust PAGES
     to the next power of two.  */
  for (pg = 0; pages != ((size_t) 1 << pg); ++pg)
    if ((pages & ((size_t) 1 << pg)) != 0)
      pages += ((size_t) 1 << pg);

  /* We try to allocate the requested size.
     If that fails, try to get as much as we can.  */
  for (; pages > 0; pages >>= 1)
    {
      size_t length;
      __u64 data_size;

      data_size = (__u64) pages * PAGE_SIZE;

      /* Don't ask for more than we can represent in the configuration.  */
      if ((__u64) UINT_MAX < data_size)
        continue;

      size = (size_t) data_size;

      /* Check for overflows.  */
      if ((__u64) size != data_size)
        continue;

      header->aux_size = data_size;
      length = size;

      pt->pt.mem = ((const uint8_t *)
                    mmap (NULL, length, PROT_READ, MAP_SHARED, pt->file,
                          header->aux_offset));
      if (pt->pt.mem != MAP_FAILED)
        break;
    }

  if (pages == 0)
    goto err_conf;

  pt->header = header;
  pt->pt.size = size;
  pt->pt.data_head = &header->aux_head;

  tinfo->conf.pt.size = (unsigned int) size;
  return tinfo;

 err_conf:
  munmap((void *) header, PAGE_SIZE);

 err_file:
  close (pt->file);

 err:
  xfree (tinfo);
  return NULL;
}
#else /* !defined (PERF_ATTR_SIZE_VER5) */

static struct btrace_target_info *
linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
{
  return NULL;
}

#endif /* !defined (PERF_ATTR_SIZE_VER5) */
/* See linux-btrace.h.  */

struct btrace_target_info *
linux_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
{
  struct btrace_target_info *tinfo;

  tinfo = NULL;
  switch (conf->format)
    {
    case BTRACE_FORMAT_NONE:
      break;

    case BTRACE_FORMAT_BTS:
      tinfo = linux_enable_bts (ptid, &conf->bts);
      break;

    case BTRACE_FORMAT_PT:
      tinfo = linux_enable_pt (ptid, &conf->pt);
      break;
    }

  return tinfo;
}
/* Disable BTS tracing.  */

static enum btrace_error
linux_disable_bts (struct btrace_tinfo_bts *tinfo)
{
  munmap((void *) tinfo->header, tinfo->bts.size + PAGE_SIZE);
  close (tinfo->file);

  return BTRACE_ERR_NONE;
}

/* Disable Intel Processor Trace tracing.  */

static enum btrace_error
linux_disable_pt (struct btrace_tinfo_pt *tinfo)
{
  munmap((void *) tinfo->pt.mem, tinfo->pt.size);
  munmap((void *) tinfo->header, PAGE_SIZE);
  close (tinfo->file);

  return BTRACE_ERR_NONE;
}
/* See linux-btrace.h.  */

enum btrace_error
linux_disable_btrace (struct btrace_target_info *tinfo)
{
  enum btrace_error errcode;

  errcode = BTRACE_ERR_NOT_SUPPORTED;
  switch (tinfo->conf.format)
    {
    case BTRACE_FORMAT_NONE:
      break;

    case BTRACE_FORMAT_BTS:
      errcode = linux_disable_bts (&tinfo->variant.bts);
      break;

    case BTRACE_FORMAT_PT:
      errcode = linux_disable_pt (&tinfo->variant.pt);
      break;
    }

  if (errcode == BTRACE_ERR_NONE)
    xfree (tinfo);

  return errcode;
}
/* Read branch trace data in BTS format for the thread given by TINFO into
   BTRACE using the TYPE reading method.  */

static enum btrace_error
linux_read_bts (struct btrace_data_bts *btrace,
                struct btrace_target_info *tinfo,
                enum btrace_read_type type)
{
  struct perf_event_buffer *pevent;
  const uint8_t *begin, *end, *start;
  size_t buffer_size, size;
  __u64 data_head, data_tail;
  unsigned int retries = 5;

  pevent = &tinfo->variant.bts.bts;

  /* For delta reads, we return at least the partial last block containing
     the current PC.  */
  if (type == BTRACE_READ_NEW && !perf_event_new_data (pevent))
    return BTRACE_ERR_NONE;

  buffer_size = pevent->size;
  data_tail = pevent->last_head;

  /* We may need to retry reading the trace.  See below.  */
  while (retries--)
    {
      data_head = *pevent->data_head;

      /* Delete any leftover trace from the previous iteration.  */
      VEC_free (btrace_block_s, btrace->blocks);

      if (type == BTRACE_READ_DELTA)
        {
          __u64 data_size;

          /* Determine the number of bytes to read and check for buffer
             overflows.  */

          /* Check for data head overflows.  We might be able to recover from
             those but they are very unlikely and it's not really worth the
             effort, I think.  */
          if (data_head < data_tail)
            return BTRACE_ERR_OVERFLOW;

          /* If the buffer is smaller than the trace delta, we overflowed.  */
          data_size = data_head - data_tail;
          if (buffer_size < data_size)
            return BTRACE_ERR_OVERFLOW;

          /* DATA_SIZE <= BUFFER_SIZE and therefore fits into a size_t.  */
          size = (size_t) data_size;
        }
      else
        {
          /* Read the entire buffer.  */
          size = buffer_size;

          /* Adjust the size if the buffer has not overflowed, yet.  */
          if (data_head < size)
            size = (size_t) data_head;
        }

      /* Data_head keeps growing; the buffer itself is circular.  */
      begin = pevent->mem;
      start = begin + data_head % buffer_size;

      if (data_head <= buffer_size)
        end = start;
      else
        end = begin + pevent->size;

      btrace->blocks = perf_event_read_bts (tinfo, begin, end, start, size);

      /* The stopping thread notifies its ptracer before it is scheduled out.
         On multi-core systems, the debugger might therefore run while the
         kernel might be writing the last branch trace records.

         Let's check whether the data head moved while we read the trace.  */
      if (data_head == *pevent->data_head)
        break;
    }

  pevent->last_head = data_head;

  /* Prune the incomplete last block (i.e. the first one of inferior execution)
     if we're not doing a delta read.  There is no way of filling in its zeroed
     BEGIN element.  */
  if (!VEC_empty (btrace_block_s, btrace->blocks)
      && type != BTRACE_READ_DELTA)
    VEC_pop (btrace_block_s, btrace->blocks);

  return BTRACE_ERR_NONE;
}
/* Fill in the Intel Processor Trace configuration information.  */

static void
linux_fill_btrace_pt_config (struct btrace_data_pt_config *conf)
{
  conf->cpu = btrace_this_cpu ();
}
/* Read branch trace data in Intel Processor Trace format for the thread
   given by TINFO into BTRACE using the TYPE reading method.  */

static enum btrace_error
linux_read_pt (struct btrace_data_pt *btrace,
               struct btrace_target_info *tinfo,
               enum btrace_read_type type)
{
  struct perf_event_buffer *pt;

  pt = &tinfo->variant.pt.pt;

  linux_fill_btrace_pt_config (&btrace->config);

  switch (type)
    {
    case BTRACE_READ_DELTA:
      /* We don't support delta reads.  The data head (i.e. aux_head) wraps
         around to stay inside the aux buffer.  */
      return BTRACE_ERR_NOT_SUPPORTED;

    case BTRACE_READ_NEW:
      if (!perf_event_new_data (pt))
        return BTRACE_ERR_NONE;

      /* Fall through.  */
    case BTRACE_READ_ALL:
      perf_event_read_all (pt, &btrace->data, &btrace->size);
      return BTRACE_ERR_NONE;
    }

  internal_error (__FILE__, __LINE__, _("Unknown btrace read type."));
}
/* See linux-btrace.h.  */

enum btrace_error
linux_read_btrace (struct btrace_data *btrace,
                   struct btrace_target_info *tinfo,
                   enum btrace_read_type type)
{
  switch (tinfo->conf.format)
    {
    case BTRACE_FORMAT_NONE:
      return BTRACE_ERR_NOT_SUPPORTED;

    case BTRACE_FORMAT_BTS:
      /* We read btrace in BTS format.  */
      btrace->format = BTRACE_FORMAT_BTS;
      btrace->variant.bts.blocks = NULL;

      return linux_read_bts (&btrace->variant.bts, tinfo, type);

    case BTRACE_FORMAT_PT:
      /* We read btrace in Intel Processor Trace format.  */
      btrace->format = BTRACE_FORMAT_PT;
      btrace->variant.pt.data = NULL;
      btrace->variant.pt.size = 0;

      return linux_read_pt (&btrace->variant.pt, tinfo, type);
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}
/* See linux-btrace.h.  */

const struct btrace_config *
linux_btrace_conf (const struct btrace_target_info *tinfo)
{
  return &tinfo->conf;
}
#else /* !HAVE_LINUX_PERF_EVENT_H */

/* See linux-btrace.h.  */

int
linux_supports_btrace (struct target_ops *ops, enum btrace_format format)
{
  return 0;
}

/* See linux-btrace.h.  */

struct btrace_target_info *
linux_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
{
  return NULL;
}

/* See linux-btrace.h.  */

enum btrace_error
linux_disable_btrace (struct btrace_target_info *tinfo)
{
  return BTRACE_ERR_NOT_SUPPORTED;
}

/* See linux-btrace.h.  */

enum btrace_error
linux_read_btrace (struct btrace_data *btrace,
                   struct btrace_target_info *tinfo,
                   enum btrace_read_type type)
{
  return BTRACE_ERR_NOT_SUPPORTED;
}

/* See linux-btrace.h.  */

const struct btrace_config *
linux_btrace_conf (const struct btrace_target_info *tinfo)
{
  return NULL;
}

#endif /* !HAVE_LINUX_PERF_EVENT_H */