1 /* Linux-dependent part of branch trace support for GDB, and GDBserver.
3 Copyright (C) 2013-2015 Free Software Foundation, Inc.
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
22 #include "common-defs.h"
23 #include "linux-btrace.h"
24 #include "common-regcache.h"
26 #include "x86-cpuid.h"
28 #ifdef HAVE_SYS_SYSCALL_H
29 #include <sys/syscall.h>
32 #if HAVE_LINUX_PERF_EVENT_H && defined(SYS_perf_event_open)
36 #include <sys/ptrace.h>
37 #include <sys/types.h>
39 #include <sys/utsname.h>
/* A branch trace record in perf_event.  */
struct perf_event_bts
{
  /* The linear address of the branch source.  */
  uint64_t from;

  /* The linear address of the branch destination.  */
  uint64_t to;
};
51 /* A perf_event branch trace sample. */
52 struct perf_event_sample
54 /* The perf_event sample header. */
55 struct perf_event_header header
;
57 /* The perf_event branch tracing payload. */
58 struct perf_event_bts bts
;
61 /* Identify the cpu we're running on. */
62 static struct btrace_cpu
63 btrace_this_cpu (void)
65 struct btrace_cpu cpu
;
66 unsigned int eax
, ebx
, ecx
, edx
;
69 memset (&cpu
, 0, sizeof (cpu
));
71 ok
= x86_cpuid (0, &eax
, &ebx
, &ecx
, &edx
);
74 if (ebx
== signature_INTEL_ebx
&& ecx
== signature_INTEL_ecx
75 && edx
== signature_INTEL_edx
)
77 unsigned int cpuid
, ignore
;
79 ok
= x86_cpuid (1, &cpuid
, &ignore
, &ignore
, &ignore
);
82 cpu
.vendor
= CV_INTEL
;
84 cpu
.family
= (cpuid
>> 8) & 0xf;
85 cpu
.model
= (cpuid
>> 4) & 0xf;
87 if (cpu
.family
== 0x6)
88 cpu
.model
+= (cpuid
>> 12) & 0xf0;
96 /* Return non-zero if there is new data in PEVENT; zero otherwise. */
99 perf_event_new_data (const struct perf_event_buffer
*pev
)
101 return *pev
->data_head
!= pev
->last_head
;
104 /* Try to determine the size of a pointer in bits for the OS.
106 This is the same as the size of a pointer for the inferior process
107 except when a 32-bit inferior is running on a 64-bit OS. */
109 /* Copy the last SIZE bytes from PEV ending at DATA_HEAD and return a pointer
110 to the memory holding the copy.
111 The caller is responsible for freeing the memory. */
114 perf_event_read (const struct perf_event_buffer
*pev
, unsigned long data_head
,
117 const gdb_byte
*begin
, *end
, *start
, *stop
;
119 unsigned long data_tail
, buffer_size
;
124 gdb_assert (size
<= data_head
);
125 data_tail
= data_head
- size
;
127 buffer_size
= pev
->size
;
129 start
= begin
+ data_tail
% buffer_size
;
130 stop
= begin
+ data_head
% buffer_size
;
132 buffer
= xmalloc (size
);
135 memcpy (buffer
, start
, stop
- start
);
138 end
= begin
+ buffer_size
;
140 memcpy (buffer
, start
, end
- start
);
141 memcpy (buffer
+ (end
- start
), begin
, stop
- begin
);
147 /* Copy the perf event buffer data from PEV.
148 Store a pointer to the copy into DATA and its size in SIZE. */
151 perf_event_read_all (struct perf_event_buffer
*pev
, gdb_byte
**data
,
152 unsigned long *psize
)
154 unsigned long data_head
, size
;
156 data_head
= *pev
->data_head
;
159 if (data_head
< size
)
162 *data
= perf_event_read (pev
, data_head
, size
);
165 pev
->last_head
= data_head
;
/* Determine the event type.
   Returns zero on success and fills in TYPE; returns -1 otherwise.  */

static int
perf_event_pt_event_type (int *type)
{
  FILE *file;
  int found;

  /* The intel_pt PMU advertises its perf_event type via sysfs.  */
  file = fopen ("/sys/bus/event_source/devices/intel_pt/type", "r");
  if (file == NULL)
    return -1;

  found = fscanf (file, "%d", type);

  fclose (file);

  return (found == 1) ? 0 : -1;
}
/* Try to determine the size of a pointer in bits for the OS kernel.
   Returns 64 for a 64-bit x86 kernel and zero if we cannot tell.  */

static int
linux_determine_kernel_ptr_bits (void)
{
  struct utsname utsn;
  int errcode;

  memset (&utsn, 0, sizeof (utsn));

  errcode = uname (&utsn);
  if (errcode < 0)
    return 0;

  /* We only need to handle the 64-bit host case, here.  For 32-bit host,
     the pointer size can be filled in later based on the inferior.  */
  if (strcmp (utsn.machine, "x86_64") == 0)
    return 64;

  return 0;
}
210 /* Check whether an address is in the kernel. */
213 perf_event_is_kernel_addr (const struct btrace_target_info
*tinfo
,
218 /* If we don't know the size of a pointer, we can't check. Let's assume it's
219 not a kernel address in this case. */
220 if (tinfo
->ptr_bits
== 0)
223 /* A bit mask for the most significant bit in an address. */
224 mask
= (uint64_t) 1 << (tinfo
->ptr_bits
- 1);
226 /* Check whether the most significant bit in the address is set. */
227 return (addr
& mask
) != 0;
230 /* Check whether a perf event record should be skipped. */
233 perf_event_skip_bts_record (const struct btrace_target_info
*tinfo
,
234 const struct perf_event_bts
*bts
)
236 /* The hardware may report branches from kernel into user space. Branches
237 from user into kernel space will be suppressed. We filter the former to
238 provide a consistent branch trace excluding kernel. */
239 return perf_event_is_kernel_addr (tinfo
, bts
->from
);
242 /* Perform a few consistency checks on a perf event sample record. This is
243 meant to catch cases when we get out of sync with the perf event stream. */
246 perf_event_sample_ok (const struct perf_event_sample
*sample
)
248 if (sample
->header
.type
!= PERF_RECORD_SAMPLE
)
251 if (sample
->header
.size
!= sizeof (*sample
))
257 /* Branch trace is collected in a circular buffer [begin; end) as pairs of from
258 and to addresses (plus a header).
260 Start points into that buffer at the next sample position.
261 We read the collected samples backwards from start.
263 While reading the samples, we convert the information into a list of blocks.
264 For two adjacent samples s1 and s2, we form a block b such that b.begin =
265 s1.to and b.end = s2.from.
267 In case the buffer overflows during sampling, one sample may have its lower
268 part at the end and its upper part at the beginning of the buffer. */
270 static VEC (btrace_block_s
) *
271 perf_event_read_bts (struct btrace_target_info
* tinfo
, const uint8_t *begin
,
272 const uint8_t *end
, const uint8_t *start
,
273 unsigned long long size
)
275 VEC (btrace_block_s
) *btrace
= NULL
;
276 struct perf_event_sample sample
;
277 unsigned long long read
= 0;
278 struct btrace_block block
= { 0, 0 };
279 struct regcache
*regcache
;
281 gdb_assert (begin
<= start
);
282 gdb_assert (start
<= end
);
284 /* The first block ends at the current pc. */
285 regcache
= get_thread_regcache_for_ptid (tinfo
->ptid
);
286 block
.end
= regcache_read_pc (regcache
);
288 /* The buffer may contain a partial record as its last entry (i.e. when the
289 buffer size is not a multiple of the sample size). */
290 read
= sizeof (sample
) - 1;
292 for (; read
< size
; read
+= sizeof (sample
))
294 const struct perf_event_sample
*psample
;
296 /* Find the next perf_event sample in a backwards traversal. */
297 start
-= sizeof (sample
);
299 /* If we're still inside the buffer, we're done. */
301 psample
= (const struct perf_event_sample
*) start
;
306 /* We're to the left of the ring buffer, we will wrap around and
307 reappear at the very right of the ring buffer. */
309 missing
= (begin
- start
);
310 start
= (end
- missing
);
312 /* If the entire sample is missing, we're done. */
313 if (missing
== sizeof (sample
))
314 psample
= (const struct perf_event_sample
*) start
;
319 /* The sample wrapped around. The lower part is at the end and
320 the upper part is at the beginning of the buffer. */
321 stack
= (uint8_t *) &sample
;
323 /* Copy the two parts so we have a contiguous sample. */
324 memcpy (stack
, start
, missing
);
325 memcpy (stack
+ missing
, begin
, sizeof (sample
) - missing
);
331 if (!perf_event_sample_ok (psample
))
333 warning (_("Branch trace may be incomplete."));
337 if (perf_event_skip_bts_record (tinfo
, &psample
->bts
))
340 /* We found a valid sample, so we can complete the current block. */
341 block
.begin
= psample
->bts
.to
;
343 VEC_safe_push (btrace_block_s
, btrace
, &block
);
345 /* Start the next block. */
346 block
.end
= psample
->bts
.from
;
349 /* Push the last block (i.e. the first one of inferior execution), as well.
350 We don't know where it ends, but we know where it starts. If we're
351 reading delta trace, we can fill in the start address later on.
352 Otherwise we will prune it. */
354 VEC_safe_push (btrace_block_s
, btrace
, &block
);
359 /* Check whether the kernel supports BTS. */
362 kernel_supports_bts (void)
364 struct perf_event_attr attr
;
373 warning (_("test bts: cannot fork: %s."), strerror (errno
));
377 status
= ptrace (PTRACE_TRACEME
, 0, NULL
, NULL
);
380 warning (_("test bts: cannot PTRACE_TRACEME: %s."),
385 status
= raise (SIGTRAP
);
388 warning (_("test bts: cannot raise SIGTRAP: %s."),
396 pid
= waitpid (child
, &status
, 0);
399 warning (_("test bts: bad pid %ld, error: %s."),
400 (long) pid
, strerror (errno
));
404 if (!WIFSTOPPED (status
))
406 warning (_("test bts: expected stop. status: %d."),
411 memset (&attr
, 0, sizeof (attr
));
413 attr
.type
= PERF_TYPE_HARDWARE
;
414 attr
.config
= PERF_COUNT_HW_BRANCH_INSTRUCTIONS
;
415 attr
.sample_period
= 1;
416 attr
.sample_type
= PERF_SAMPLE_IP
| PERF_SAMPLE_ADDR
;
417 attr
.exclude_kernel
= 1;
419 attr
.exclude_idle
= 1;
421 file
= syscall (SYS_perf_event_open
, &attr
, child
, -1, -1, 0);
425 kill (child
, SIGKILL
);
426 ptrace (PTRACE_KILL
, child
, NULL
, NULL
);
428 pid
= waitpid (child
, &status
, 0);
431 warning (_("test bts: bad pid %ld, error: %s."),
432 (long) pid
, strerror (errno
));
433 if (!WIFSIGNALED (status
))
434 warning (_("test bts: expected killed. status: %d."),
442 /* Check whether the kernel supports Intel(R) Processor Trace. */
445 kernel_supports_pt (void)
447 struct perf_event_attr attr
;
449 int status
, file
, type
;
456 warning (_("test pt: cannot fork: %s."), strerror (errno
));
460 status
= ptrace (PTRACE_TRACEME
, 0, NULL
, NULL
);
463 warning (_("test pt: cannot PTRACE_TRACEME: %s."),
468 status
= raise (SIGTRAP
);
471 warning (_("test pt: cannot raise SIGTRAP: %s."),
479 pid
= waitpid (child
, &status
, 0);
482 warning (_("test pt: bad pid %ld, error: %s."),
483 (long) pid
, strerror (errno
));
487 if (!WIFSTOPPED (status
))
489 warning (_("test pt: expected stop. status: %d."),
494 status
= perf_event_pt_event_type (&type
);
499 memset (&attr
, 0, sizeof (attr
));
501 attr
.size
= sizeof (attr
);
503 attr
.exclude_kernel
= 1;
505 attr
.exclude_idle
= 1;
507 file
= syscall (SYS_perf_event_open
, &attr
, child
, -1, -1, 0);
512 kill (child
, SIGKILL
);
513 ptrace (PTRACE_KILL
, child
, NULL
, NULL
);
515 pid
= waitpid (child
, &status
, 0);
518 warning (_("test pt: bad pid %ld, error: %s."),
519 (long) pid
, strerror (errno
));
520 if (!WIFSIGNALED (status
))
521 warning (_("test pt: expected killed. status: %d."),
529 /* Check whether an Intel cpu supports BTS. */
532 intel_supports_bts (const struct btrace_cpu
*cpu
)
539 case 0x1a: /* Nehalem */
543 case 0x25: /* Westmere */
546 case 0x2a: /* Sandy Bridge */
548 case 0x3a: /* Ivy Bridge */
550 /* AAJ122: LBR, BTM, or BTS records may have incorrect branch
551 "from" information afer an EIST transition, T-states, C1E, or
552 Adaptive Thermal Throttling. */
560 /* Check whether the cpu supports BTS. */
563 cpu_supports_bts (void)
565 struct btrace_cpu cpu
;
567 cpu
= btrace_this_cpu ();
571 /* Don't know about others. Let's assume they do. */
575 return intel_supports_bts (&cpu
);
/* Check whether the linux target supports BTS.  Caches the (expensive)
   kernel/cpu probe result: 0 = not probed, 1 = supported, -1 = not.  */

static int
linux_supports_bts (void)
{
  static int cached;

  if (cached == 0)
    {
      if (!kernel_supports_bts ())
        cached = -1;
      else if (!cpu_supports_bts ())
        cached = -1;
      else
        cached = 1;
    }

  return cached > 0;
}
/* Check whether the linux target supports Intel(R) Processor Trace.
   Caches the kernel probe result: 0 = not probed, 1 = supported, -1 = not.  */

static int
linux_supports_pt (void)
{
  static int cached;

  if (cached == 0)
    {
      if (!kernel_supports_pt ())
        cached = -1;
      else
        cached = 1;
    }

  return cached > 0;
}
617 /* See linux-btrace.h. */
620 linux_supports_btrace (struct target_ops
*ops
, enum btrace_format format
)
624 case BTRACE_FORMAT_NONE
:
627 case BTRACE_FORMAT_BTS
:
628 return linux_supports_bts ();
630 case BTRACE_FORMAT_PT
:
631 return linux_supports_pt ();
634 internal_error (__FILE__
, __LINE__
, _("Unknown branch trace format"));
637 /* Enable branch tracing in BTS format. */
639 static struct btrace_target_info
*
640 linux_enable_bts (ptid_t ptid
, const struct btrace_config_bts
*conf
)
642 struct perf_event_mmap_page
*header
;
643 struct btrace_target_info
*tinfo
;
644 struct btrace_tinfo_bts
*bts
;
645 unsigned long long size
, pages
, data_offset
, data_size
;
648 tinfo
= xzalloc (sizeof (*tinfo
));
650 tinfo
->ptr_bits
= linux_determine_kernel_ptr_bits ();
652 tinfo
->conf
.format
= BTRACE_FORMAT_BTS
;
653 bts
= &tinfo
->variant
.bts
;
655 bts
->attr
.size
= sizeof (bts
->attr
);
656 bts
->attr
.type
= PERF_TYPE_HARDWARE
;
657 bts
->attr
.config
= PERF_COUNT_HW_BRANCH_INSTRUCTIONS
;
658 bts
->attr
.sample_period
= 1;
660 /* We sample from and to address. */
661 bts
->attr
.sample_type
= PERF_SAMPLE_IP
| PERF_SAMPLE_ADDR
;
663 bts
->attr
.exclude_kernel
= 1;
664 bts
->attr
.exclude_hv
= 1;
665 bts
->attr
.exclude_idle
= 1;
667 pid
= ptid_get_lwp (ptid
);
669 pid
= ptid_get_pid (ptid
);
672 bts
->file
= syscall (SYS_perf_event_open
, &bts
->attr
, pid
, -1, -1, 0);
676 /* Convert the requested size in bytes to pages (rounding up). */
677 pages
= (((unsigned long long) conf
->size
) + PAGE_SIZE
- 1) / PAGE_SIZE
;
678 /* We need at least one page. */
682 /* The buffer size can be requested in powers of two pages. Adjust PAGES
683 to the next power of two. */
684 for (pg
= 0; pages
!= (1u << pg
); ++pg
)
685 if ((pages
& (1u << pg
)) != 0)
688 /* We try to allocate the requested size.
689 If that fails, try to get as much as we can. */
690 for (; pages
> 0; pages
>>= 1)
694 size
= pages
* PAGE_SIZE
;
695 length
= size
+ PAGE_SIZE
;
697 /* Check for overflows. */
698 if ((unsigned long long) length
< size
)
701 /* The number of pages we request needs to be a power of two. */
702 header
= mmap (NULL
, length
, PROT_READ
, MAP_SHARED
, bts
->file
, 0);
703 if (header
!= MAP_FAILED
)
710 data_offset
= PAGE_SIZE
;
713 #if defined (PERF_ATTR_SIZE_VER5)
714 if (offsetof (struct perf_event_mmap_page
, data_size
) <= header
->size
)
716 data_offset
= header
->data_offset
;
717 data_size
= header
->data_size
;
719 #endif /* defined (PERF_ATTR_SIZE_VER5) */
721 bts
->header
= header
;
722 bts
->bts
.mem
= ((const uint8_t *) header
) + data_offset
;
723 bts
->bts
.size
= data_size
;
724 bts
->bts
.data_head
= &header
->data_head
;
725 bts
->bts
.last_head
= 0;
727 tinfo
->conf
.bts
.size
= data_size
;
731 /* We were not able to allocate any buffer. */
739 #if defined (PERF_ATTR_SIZE_VER5)
741 /* Enable branch tracing in Intel(R) Processor Trace format. */
743 static struct btrace_target_info
*
744 linux_enable_pt (ptid_t ptid
, const struct btrace_config_pt
*conf
)
746 struct perf_event_mmap_page
*header
;
747 struct btrace_target_info
*tinfo
;
748 struct btrace_tinfo_pt
*pt
;
749 unsigned long long pages
, size
;
750 int pid
, pg
, errcode
, type
;
755 errcode
= perf_event_pt_event_type (&type
);
759 pid
= ptid_get_lwp (ptid
);
761 pid
= ptid_get_pid (ptid
);
763 tinfo
= xzalloc (sizeof (*tinfo
));
767 tinfo
->conf
.format
= BTRACE_FORMAT_PT
;
768 pt
= &tinfo
->variant
.pt
;
770 pt
->attr
.size
= sizeof (pt
->attr
);
771 pt
->attr
.type
= type
;
773 pt
->attr
.exclude_kernel
= 1;
774 pt
->attr
.exclude_hv
= 1;
775 pt
->attr
.exclude_idle
= 1;
778 pt
->file
= syscall (SYS_perf_event_open
, &pt
->attr
, pid
, -1, -1, 0);
782 /* Allocate the configuration page. */
783 header
= mmap (NULL
, PAGE_SIZE
, PROT_READ
| PROT_WRITE
, MAP_SHARED
,
785 if (header
== MAP_FAILED
)
788 header
->aux_offset
= header
->data_offset
+ header
->data_size
;
790 /* Convert the requested size in bytes to pages (rounding up). */
791 pages
= (((unsigned long long) conf
->size
) + PAGE_SIZE
- 1) / PAGE_SIZE
;
792 /* We need at least one page. */
796 /* The buffer size can be requested in powers of two pages. Adjust PAGES
797 to the next power of two. */
798 for (pg
= 0; pages
!= (1u << pg
); ++pg
)
799 if ((pages
& (1u << pg
)) != 0)
802 /* We try to allocate the requested size.
803 If that fails, try to get as much as we can. */
804 for (; pages
> 0; pages
>>= 1)
808 size
= pages
* PAGE_SIZE
;
811 /* Check for overflows. */
812 if ((unsigned long long) length
< size
)
815 header
->aux_size
= size
;
817 pt
->pt
.mem
= mmap (NULL
, length
, PROT_READ
, MAP_SHARED
, pt
->file
,
819 if (pt
->pt
.mem
!= MAP_FAILED
)
828 pt
->pt
.data_head
= &header
->aux_head
;
830 tinfo
->conf
.pt
.size
= size
;
834 munmap((void *) header
, PAGE_SIZE
);
844 #else /* !defined (PERF_ATTR_SIZE_VER5) */
846 static struct btrace_target_info
*
847 linux_enable_pt (ptid_t ptid
, const struct btrace_config_pt
*conf
)
853 #endif /* !defined (PERF_ATTR_SIZE_VER5) */
855 /* See linux-btrace.h. */
857 struct btrace_target_info
*
858 linux_enable_btrace (ptid_t ptid
, const struct btrace_config
*conf
)
860 struct btrace_target_info
*tinfo
;
863 switch (conf
->format
)
865 case BTRACE_FORMAT_NONE
:
868 case BTRACE_FORMAT_BTS
:
869 tinfo
= linux_enable_bts (ptid
, &conf
->bts
);
872 case BTRACE_FORMAT_PT
:
873 tinfo
= linux_enable_pt (ptid
, &conf
->pt
);
880 /* Disable BTS tracing. */
882 static enum btrace_error
883 linux_disable_bts (struct btrace_tinfo_bts
*tinfo
)
885 munmap((void *) tinfo
->header
, tinfo
->bts
.size
+ PAGE_SIZE
);
888 return BTRACE_ERR_NONE
;
891 /* Disable Intel(R) Processor Trace tracing. */
893 static enum btrace_error
894 linux_disable_pt (struct btrace_tinfo_pt
*tinfo
)
896 munmap((void *) tinfo
->pt
.mem
, tinfo
->pt
.size
);
897 munmap((void *) tinfo
->header
, PAGE_SIZE
);
900 return BTRACE_ERR_NONE
;
903 /* See linux-btrace.h. */
906 linux_disable_btrace (struct btrace_target_info
*tinfo
)
908 enum btrace_error errcode
;
910 errcode
= BTRACE_ERR_NOT_SUPPORTED
;
911 switch (tinfo
->conf
.format
)
913 case BTRACE_FORMAT_NONE
:
916 case BTRACE_FORMAT_BTS
:
917 errcode
= linux_disable_bts (&tinfo
->variant
.bts
);
920 case BTRACE_FORMAT_PT
:
921 errcode
= linux_disable_pt (&tinfo
->variant
.pt
);
925 if (errcode
== BTRACE_ERR_NONE
)
931 /* Read branch trace data in BTS format for the thread given by TINFO into
932 BTRACE using the TYPE reading method. */
934 static enum btrace_error
935 linux_read_bts (struct btrace_data_bts
*btrace
,
936 struct btrace_target_info
*tinfo
,
937 enum btrace_read_type type
)
939 struct perf_event_buffer
*pevent
;
940 const uint8_t *begin
, *end
, *start
;
941 unsigned long long data_head
, data_tail
, buffer_size
, size
;
942 unsigned int retries
= 5;
944 pevent
= &tinfo
->variant
.bts
.bts
;
946 /* For delta reads, we return at least the partial last block containing
948 if (type
== BTRACE_READ_NEW
&& !perf_event_new_data (pevent
))
949 return BTRACE_ERR_NONE
;
951 buffer_size
= pevent
->size
;
952 data_tail
= pevent
->last_head
;
954 /* We may need to retry reading the trace. See below. */
957 data_head
= *pevent
->data_head
;
959 /* Delete any leftover trace from the previous iteration. */
960 VEC_free (btrace_block_s
, btrace
->blocks
);
962 if (type
== BTRACE_READ_DELTA
)
964 /* Determine the number of bytes to read and check for buffer
967 /* Check for data head overflows. We might be able to recover from
968 those but they are very unlikely and it's not really worth the
970 if (data_head
< data_tail
)
971 return BTRACE_ERR_OVERFLOW
;
973 /* If the buffer is smaller than the trace delta, we overflowed. */
974 size
= data_head
- data_tail
;
975 if (buffer_size
< size
)
976 return BTRACE_ERR_OVERFLOW
;
980 /* Read the entire buffer. */
983 /* Adjust the size if the buffer has not overflowed, yet. */
984 if (data_head
< size
)
988 /* Data_head keeps growing; the buffer itself is circular. */
990 start
= begin
+ data_head
% buffer_size
;
992 if (data_head
<= buffer_size
)
995 end
= begin
+ pevent
->size
;
997 btrace
->blocks
= perf_event_read_bts (tinfo
, begin
, end
, start
, size
);
999 /* The stopping thread notifies its ptracer before it is scheduled out.
1000 On multi-core systems, the debugger might therefore run while the
1001 kernel might be writing the last branch trace records.
1003 Let's check whether the data head moved while we read the trace. */
1004 if (data_head
== *pevent
->data_head
)
1008 pevent
->last_head
= data_head
;
1010 /* Prune the incomplete last block (i.e. the first one of inferior execution)
1011 if we're not doing a delta read. There is no way of filling in its zeroed
1013 if (!VEC_empty (btrace_block_s
, btrace
->blocks
)
1014 && type
!= BTRACE_READ_DELTA
)
1015 VEC_pop (btrace_block_s
, btrace
->blocks
);
1017 return BTRACE_ERR_NONE
;
1020 /* Fill in the Intel(R) Processor Trace configuration information. */
1023 linux_fill_btrace_pt_config (struct btrace_data_pt_config
*conf
)
1025 conf
->cpu
= btrace_this_cpu ();
1028 /* Read branch trace data in Intel(R) Processor Trace format for the thread
1029 given by TINFO into BTRACE using the TYPE reading method. */
1031 static enum btrace_error
1032 linux_read_pt (struct btrace_data_pt
*btrace
,
1033 struct btrace_target_info
*tinfo
,
1034 enum btrace_read_type type
)
1036 struct perf_event_buffer
*pt
;
1038 pt
= &tinfo
->variant
.pt
.pt
;
1040 linux_fill_btrace_pt_config (&btrace
->config
);
1044 case BTRACE_READ_DELTA
:
1045 /* We don't support delta reads. The data head (i.e. aux_head) wraps
1046 around to stay inside the aux buffer. */
1047 return BTRACE_ERR_NOT_SUPPORTED
;
1049 case BTRACE_READ_NEW
:
1050 if (!perf_event_new_data (pt
))
1051 return BTRACE_ERR_NONE
;
1054 case BTRACE_READ_ALL
:
1055 perf_event_read_all (pt
, &btrace
->data
, &btrace
->size
);
1056 return BTRACE_ERR_NONE
;
1059 internal_error (__FILE__
, __LINE__
, _("Unkown btrace read type."));
1062 /* See linux-btrace.h. */
1065 linux_read_btrace (struct btrace_data
*btrace
,
1066 struct btrace_target_info
*tinfo
,
1067 enum btrace_read_type type
)
1069 switch (tinfo
->conf
.format
)
1071 case BTRACE_FORMAT_NONE
:
1072 return BTRACE_ERR_NOT_SUPPORTED
;
1074 case BTRACE_FORMAT_BTS
:
1075 /* We read btrace in BTS format. */
1076 btrace
->format
= BTRACE_FORMAT_BTS
;
1077 btrace
->variant
.bts
.blocks
= NULL
;
1079 return linux_read_bts (&btrace
->variant
.bts
, tinfo
, type
);
1081 case BTRACE_FORMAT_PT
:
1082 /* We read btrace in Intel(R) Processor Trace format. */
1083 btrace
->format
= BTRACE_FORMAT_PT
;
1084 btrace
->variant
.pt
.data
= NULL
;
1085 btrace
->variant
.pt
.size
= 0;
1087 return linux_read_pt (&btrace
->variant
.pt
, tinfo
, type
);
1090 internal_error (__FILE__
, __LINE__
, _("Unkown branch trace format."));
1093 /* See linux-btrace.h. */
1095 const struct btrace_config
*
1096 linux_btrace_conf (const struct btrace_target_info
*tinfo
)
1098 return &tinfo
->conf
;
1101 #else /* !HAVE_LINUX_PERF_EVENT_H */
1103 /* See linux-btrace.h. */
1106 linux_supports_btrace (struct target_ops
*ops
, enum btrace_format format
)
1111 /* See linux-btrace.h. */
1113 struct btrace_target_info
*
1114 linux_enable_btrace (ptid_t ptid
, const struct btrace_config
*conf
)
1119 /* See linux-btrace.h. */
1122 linux_disable_btrace (struct btrace_target_info
*tinfo
)
1124 return BTRACE_ERR_NOT_SUPPORTED
;
1127 /* See linux-btrace.h. */
1130 linux_read_btrace (struct btrace_data
*btrace
,
1131 struct btrace_target_info
*tinfo
,
1132 enum btrace_read_type type
)
1134 return BTRACE_ERR_NOT_SUPPORTED
;
1137 /* See linux-btrace.h. */
1139 const struct btrace_config
*
1140 linux_btrace_conf (const struct btrace_target_info
*tinfo
)
1145 #endif /* !HAVE_LINUX_PERF_EVENT_H */