/* Linux-dependent part of branch trace support for GDB, and GDBserver.

   Copyright (C) 2013-2018 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "common-defs.h"
#include "linux-btrace.h"
#include "common-regcache.h"
#include "x86-cpuid.h"
#include "filestuff.h"
#include "common/scoped_fd.h"
#include "common/scoped_mmap.h"

#include <sys/syscall.h>

#if HAVE_LINUX_PERF_EVENT_H && defined(SYS_perf_event_open)

#include <unistd.h>
#include <sys/mman.h>
#include "nat/gdb_ptrace.h"
#include <sys/types.h>
/* A branch trace record in perf_event.  */
struct perf_event_bts
{
  /* The linear address of the branch source.  */
  uint64_t from;

  /* The linear address of the branch destination.  */
  uint64_t to;
};

/* A perf_event branch trace sample.  */
struct perf_event_sample
{
  /* The perf_event sample header.  */
  struct perf_event_header header;

  /* The perf_event branch tracing payload.  */
  struct perf_event_bts bts;
};
/* Identify the cpu we're running on.  */

static struct btrace_cpu
btrace_this_cpu (void)
{
  struct btrace_cpu cpu;
  unsigned int eax, ebx, ecx, edx;
  int ok;

  memset (&cpu, 0, sizeof (cpu));

  ok = x86_cpuid (0, &eax, &ebx, &ecx, &edx);
  if (ok != 0)
    {
      if (ebx == signature_INTEL_ebx && ecx == signature_INTEL_ecx
          && edx == signature_INTEL_edx)
        {
          unsigned int cpuid, ignore;

          ok = x86_cpuid (1, &cpuid, &ignore, &ignore, &ignore);
          if (ok != 0)
            {
              cpu.vendor = CV_INTEL;

              cpu.family = (cpuid >> 8) & 0xf;
              cpu.model = (cpuid >> 4) & 0xf;

              if (cpu.family == 0x6)
                cpu.model += (cpuid >> 12) & 0xf0;
            }
        }
    }

  return cpu;
}
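/* Worked example of the decoding above (illustrative value): for an Intel
   Ivy Bridge part, CPUID leaf 1 returns EAX = 0x000306a9.

     family = (0x000306a9 >> 8) & 0xf    = 0x6
     model  = (0x000306a9 >> 4) & 0xf    = 0xa
     model += (0x000306a9 >> 12) & 0xf0  = 0xa + 0x30 = 0x3a

   Since the family is 0x6, the extended model bits are folded in, giving
   model 0x3a as used by intel_supports_bts below.  */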
/* Return non-zero if there is new data in PEVENT; zero otherwise.  */

static int
perf_event_new_data (const struct perf_event_buffer *pev)
{
  return *pev->data_head != pev->last_head;
}
/* Copy the last SIZE bytes from PEV ending at DATA_HEAD and return a pointer
   to the memory holding the copy.
   The caller is responsible for freeing the memory.  */

static gdb_byte *
perf_event_read (const struct perf_event_buffer *pev, __u64 data_head,
                 size_t size)
{
  const gdb_byte *begin, *end, *start, *stop;
  gdb_byte *buffer;
  size_t buffer_size;
  __u64 data_tail;

  if (size == 0)
    return NULL;

  /* We should never ask for more data than the buffer can hold.  */
  buffer_size = pev->size;
  gdb_assert (size <= buffer_size);

  /* If we ask for more data than we seem to have, we wrap around and read
     data from the end of the buffer.  This is already handled by the %
     BUFFER_SIZE operation, below.  Here, we just need to make sure that we
     don't underflow.

     Note that this is perfectly OK for perf event buffers where data_head
     doesn't grow indefinitely and instead wraps around to remain within the
     buffer's boundaries.  */
  if (data_head < size)
    data_head += buffer_size;

  gdb_assert (size <= data_head);
  data_tail = data_head - size;

  begin = pev->mem;
  start = begin + data_tail % buffer_size;
  stop = begin + data_head % buffer_size;

  buffer = (gdb_byte *) xmalloc (size);

  if (start < stop)
    memcpy (buffer, start, stop - start);
  else
    {
      end = begin + buffer_size;

      memcpy (buffer, start, end - start);
      memcpy (buffer + (end - start), begin, stop - begin);
    }

  return buffer;
}
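/* Worked example of the wrap-around logic above (illustrative numbers):
   with BUFFER_SIZE = 8, DATA_HEAD = 10, and SIZE = 4 we get

     data_tail = 10 - 4 = 6
     start     = begin + (6 % 8)  = begin + 6
     stop      = begin + (10 % 8) = begin + 2

   Since STOP < START, the copy is split: two bytes from BEGIN + 6 up to
   BEGIN + 8, then two bytes from BEGIN up to BEGIN + 2.  */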
/* Copy the perf event buffer data from PEV.
   Store a pointer to the copy into DATA and its size in SIZE.  */

static void
perf_event_read_all (struct perf_event_buffer *pev, gdb_byte **data,
                     size_t *psize)
{
  size_t size;
  __u64 data_head;

  data_head = *pev->data_head;
  size = pev->size;

  *data = perf_event_read (pev, data_head, size);
  *psize = size;

  pev->last_head = data_head;
}
/* Try to determine the start address of the Linux kernel.  */

static uint64_t
linux_determine_kernel_start (void)
{
  static uint64_t kernel_start;
  static int cached;

  if (cached != 0)
    return kernel_start;

  cached = 1;

  gdb_file_up file = gdb_fopen_cloexec ("/proc/kallsyms", "r");
  if (file == NULL)
    return kernel_start;

  while (!feof (file.get ()))
    {
      char buffer[1024], symbol[8], *line;
      uint64_t addr;
      int match;

      line = fgets (buffer, sizeof (buffer), file.get ());
      if (line == NULL)
        break;

      match = sscanf (line, "%" SCNx64 " %*[tT] %7s", &addr, symbol);
      if (match != 2)
        continue;

      if (strcmp (symbol, "_text") == 0)
        {
          kernel_start = addr;
          break;
        }
    }

  return kernel_start;
}
/* Check whether an address is in the kernel.  */

static inline int
perf_event_is_kernel_addr (uint64_t addr)
{
  uint64_t kernel_start;

  kernel_start = linux_determine_kernel_start ();
  if (kernel_start != 0ull)
    return (addr >= kernel_start);

  /* If we don't know the kernel's start address, let's check the most
     significant bit.  This will work at least for 64-bit kernels.  */
  return ((addr & (1ull << 63)) != 0);
}
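/* For example (illustrative addresses): on x86_64 the kernel lives in the
   upper canonical half, so 0xffffffff81000000 has bit 63 set and is
   classified as a kernel address, while a user-space address such as
   0x00007f0000001000 is not.  */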
/* Check whether a perf event record should be skipped.  */

static inline int
perf_event_skip_bts_record (const struct perf_event_bts *bts)
{
  /* The hardware may report branches from kernel into user space.  Branches
     from user into kernel space will be suppressed.  We filter the former to
     provide a consistent branch trace excluding kernel.  */
  return perf_event_is_kernel_addr (bts->from);
}
/* Perform a few consistency checks on a perf event sample record.  This is
   meant to catch cases when we get out of sync with the perf event stream.  */

static inline int
perf_event_sample_ok (const struct perf_event_sample *sample)
{
  if (sample->header.type != PERF_RECORD_SAMPLE)
    return 0;

  if (sample->header.size != sizeof (*sample))
    return 0;

  return 1;
}
/* Branch trace is collected in a circular buffer [begin; end) as pairs of from
   and to addresses (plus a header).

   Start points into that buffer at the next sample position.
   We read the collected samples backwards from start.

   While reading the samples, we convert the information into a list of blocks.
   For two adjacent samples s1 and s2, we form a block b such that b.begin =
   s1.to and b.end = s2.from.

   In case the buffer overflows during sampling, one sample may have its lower
   part at the end and its upper part at the beginning of the buffer.  */

static VEC (btrace_block_s) *
perf_event_read_bts (struct btrace_target_info *tinfo, const uint8_t *begin,
                     const uint8_t *end, const uint8_t *start, size_t size)
{
  VEC (btrace_block_s) *btrace = NULL;
  struct perf_event_sample sample;
  size_t read = 0;
  struct btrace_block block = { 0, 0 };
  struct regcache *regcache;

  gdb_assert (begin <= start);
  gdb_assert (start <= end);

  /* The first block ends at the current pc.  */
  regcache = get_thread_regcache_for_ptid (tinfo->ptid);
  block.end = regcache_read_pc (regcache);

  /* The buffer may contain a partial record as its last entry (i.e. when the
     buffer size is not a multiple of the sample size).  */
  read = sizeof (sample) - 1;

  for (; read < size; read += sizeof (sample))
    {
      const struct perf_event_sample *psample;

      /* Find the next perf_event sample in a backwards traversal.  */
      start -= sizeof (sample);

      /* If we're still inside the buffer, we're done.  */
      if (begin <= start)
        psample = (const struct perf_event_sample *) start;
      else
        {
          int missing;

          /* We're to the left of the ring buffer, we will wrap around and
             reappear at the very right of the ring buffer.  */
          missing = (begin - start);
          start = (end - missing);

          /* If the entire sample is missing, we're done.  */
          if (missing == sizeof (sample))
            psample = (const struct perf_event_sample *) start;
          else
            {
              uint8_t *stack;

              /* The sample wrapped around.  The lower part is at the end and
                 the upper part is at the beginning of the buffer.  */
              stack = (uint8_t *) &sample;

              /* Copy the two parts so we have a contiguous sample.  */
              memcpy (stack, start, missing);
              memcpy (stack + missing, begin, sizeof (sample) - missing);

              psample = &sample;
            }
        }

      if (!perf_event_sample_ok (psample))
        {
          warning (_("Branch trace may be incomplete."));
          break;
        }

      if (perf_event_skip_bts_record (&psample->bts))
        continue;

      /* We found a valid sample, so we can complete the current block.  */
      block.begin = psample->bts.to;

      VEC_safe_push (btrace_block_s, btrace, &block);

      /* Start the next block.  */
      block.end = psample->bts.from;
    }

  /* Push the last block (i.e. the first one of inferior execution), as well.
     We don't know where it ends, but we know where it starts.  If we're
     reading delta trace, we can fill in the start address later on.
     Otherwise we will prune it.  */
  block.begin = 0;
  VEC_safe_push (btrace_block_s, btrace, &block);

  return btrace;
}
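/* Worked example of the block formation above (illustrative addresses):
   assume the current pc is 0x400100 and the buffer holds, newest first,
   the samples

     s2 = { .from = 0x400080, .to = 0x4000f0 }
     s1 = { .from = 0x400010, .to = 0x400060 }

   The backwards traversal yields the blocks

     [0x4000f0, 0x400100]   i.e. [s2.to, pc]
     [0x400060, 0x400080]   i.e. [s1.to, s2.from]
     [0x0,      0x400010]   i.e. [?, s1.from], pruned unless reading delta

   which linux_read_bts consumes below.  */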
/* Check whether an Intel cpu supports BTS.  */

static int
intel_supports_bts (const struct btrace_cpu *cpu)
{
  switch (cpu->family)
    {
    case 0x6:
      switch (cpu->model)
        {
        case 0x1a: /* Nehalem */
        case 0x1f:
        case 0x1e:
        case 0x2e:
        case 0x25: /* Westmere */
        case 0x2c:
        case 0x2f:
        case 0x2a: /* Sandy Bridge */
        case 0x2d:
        case 0x3a: /* Ivy Bridge */

          /* AAJ122: LBR, BTM, or BTS records may have incorrect branch
             "from" information after an EIST transition, T-states, C1E, or
             Adaptive Thermal Throttling.  */
          return 0;
        }
    }

  return 1;
}
/* Check whether the cpu supports BTS.  */

static int
cpu_supports_bts (void)
{
  struct btrace_cpu cpu;

  cpu = btrace_this_cpu ();
  switch (cpu.vendor)
    {
    default:
      /* Don't know about others.  Let's assume they do.  */
      return 1;

    case CV_INTEL:
      return intel_supports_bts (&cpu);
    }
}
/* Enable branch tracing in BTS format.  */

static struct btrace_target_info *
linux_enable_bts (ptid_t ptid, const struct btrace_config_bts *conf)
{
  struct btrace_tinfo_bts *bts;
  size_t size, pages;
  __u64 data_offset;
  int pid, pg;

  if (!cpu_supports_bts ())
    error (_("BTS support has been disabled for the target cpu."));

  gdb::unique_xmalloc_ptr<btrace_target_info> tinfo
    (XCNEW (btrace_target_info));

  tinfo->ptid = ptid;

  tinfo->conf.format = BTRACE_FORMAT_BTS;
  bts = &tinfo->variant.bts;

  bts->attr.size = sizeof (bts->attr);
  bts->attr.type = PERF_TYPE_HARDWARE;
  bts->attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
  bts->attr.sample_period = 1;

  /* We sample from and to address.  */
  bts->attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR;

  bts->attr.exclude_kernel = 1;
  bts->attr.exclude_hv = 1;
  bts->attr.exclude_idle = 1;

  pid = ptid_get_lwp (ptid);
  if (pid == 0)
    pid = ptid_get_pid (ptid);

  errno = 0;
  scoped_fd fd (syscall (SYS_perf_event_open, &bts->attr, pid, -1, -1, 0));
  if (fd.get () < 0)
    return nullptr;

  /* Convert the requested size in bytes to pages (rounding up).  */
  pages = ((size_t) conf->size / PAGE_SIZE
           + ((conf->size % PAGE_SIZE) == 0 ? 0 : 1));
  /* We need at least one page.  */
  if (pages == 0)
    pages = 1;

  /* The buffer size can be requested in powers of two pages.  Adjust PAGES
     to the next power of two.  */
  for (pg = 0; pages != ((size_t) 1 << pg); ++pg)
    if ((pages & ((size_t) 1 << pg)) != 0)
      pages += ((size_t) 1 << pg);
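  /* For example (illustrative): a request of 5 pages rounds up as follows.
     PG 0: bit 0 of 5 (binary 101) is set, so PAGES becomes 6 (110).
     PG 1: bit 1 of 6 is set, so PAGES becomes 8 (1000).
     PG 2: bit 2 of 8 is clear; PAGES stays 8.
     PG 3: 8 == 1 << 3, so the loop stops with PAGES = 8.  */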
  /* We try to allocate the requested size.
     If that fails, try to get as much as we can.  */
  scoped_mmap data;
  for (; pages > 0; pages >>= 1)
    {
      size_t length;
      __u64 data_size;

      data_size = (__u64) pages * PAGE_SIZE;

      /* Don't ask for more than we can represent in the configuration.  */
      if ((__u64) UINT_MAX < data_size)
        continue;

      size = (size_t) data_size;
      length = size + PAGE_SIZE;

      /* Check for overflows.  */
      if ((__u64) length != data_size + PAGE_SIZE)
        continue;

      errno = 0;
      /* The number of pages we request needs to be a power of two.  */
      data.reset (nullptr, length, PROT_READ, MAP_SHARED, fd.get (), 0);
      if (data.get () != MAP_FAILED)
        break;
    }

  if (pages == 0)
    return nullptr;

  struct perf_event_mmap_page *header = (struct perf_event_mmap_page *)
    data.get ();
  data_offset = PAGE_SIZE;

#if defined (PERF_ATTR_SIZE_VER5)
  if (offsetof (struct perf_event_mmap_page, data_size) <= header->size)
    {
      __u64 data_size;

      data_offset = header->data_offset;
      data_size = header->data_size;

      size = (unsigned int) data_size;

      /* Check for overflows.  */
      if ((__u64) size != data_size)
        return nullptr;
    }
#endif /* defined (PERF_ATTR_SIZE_VER5) */

  bts->bts.size = size;
  bts->bts.data_head = &header->data_head;
  bts->bts.mem = (const uint8_t *) data.get () + data_offset;
  bts->bts.last_head = 0ull;
  bts->header = header;
  bts->file = fd.release ();

  data.release ();

  tinfo->conf.bts.size = (unsigned int) size;
  return tinfo.release ();
}
#if defined (PERF_ATTR_SIZE_VER5)

/* Determine the event type.
   Returns zero on success and fills in TYPE; returns -1 otherwise.  */

static int
perf_event_pt_event_type (int *type)
{
  gdb_file_up file
    = gdb_fopen_cloexec ("/sys/bus/event_source/devices/intel_pt/type", "r");
  if (file.get () == nullptr)
    return -1;

  int found = fscanf (file.get (), "%d", type);
  if (found == 1)
    return 0;

  return -1;
}
/* Enable branch tracing in Intel Processor Trace format.  */

static struct btrace_target_info *
linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
{
  struct btrace_tinfo_pt *pt;
  size_t pages;
  int pid, pg, errcode, type;

  if (conf->size == 0)
    return NULL;

  errcode = perf_event_pt_event_type (&type);
  if (errcode != 0)
    return NULL;

  pid = ptid_get_lwp (ptid);
  if (pid == 0)
    pid = ptid_get_pid (ptid);

  gdb::unique_xmalloc_ptr<btrace_target_info> tinfo
    (XCNEW (btrace_target_info));

  tinfo->ptid = ptid;

  tinfo->conf.format = BTRACE_FORMAT_PT;
  pt = &tinfo->variant.pt;

  pt->attr.size = sizeof (pt->attr);
  pt->attr.type = type;

  pt->attr.exclude_kernel = 1;
  pt->attr.exclude_hv = 1;
  pt->attr.exclude_idle = 1;

  errno = 0;
  scoped_fd fd (syscall (SYS_perf_event_open, &pt->attr, pid, -1, -1, 0));
  if (fd.get () < 0)
    return nullptr;

  /* Allocate the configuration page.  */
  scoped_mmap data (nullptr, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
                    fd.get (), 0);
  if (data.get () == MAP_FAILED)
    return nullptr;

  struct perf_event_mmap_page *header = (struct perf_event_mmap_page *)
    data.get ();

  header->aux_offset = header->data_offset + header->data_size;

  /* Convert the requested size in bytes to pages (rounding up).  */
  pages = ((size_t) conf->size / PAGE_SIZE
           + ((conf->size % PAGE_SIZE) == 0 ? 0 : 1));
  /* We need at least one page.  */
  if (pages == 0)
    pages = 1;

  /* The buffer size can be requested in powers of two pages.  Adjust PAGES
     to the next power of two, as in linux_enable_bts above.  */
  for (pg = 0; pages != ((size_t) 1 << pg); ++pg)
    if ((pages & ((size_t) 1 << pg)) != 0)
      pages += ((size_t) 1 << pg);

  /* We try to allocate the requested size.
     If that fails, try to get as much as we can.  */
  scoped_mmap aux;
  for (; pages > 0; pages >>= 1)
    {
      size_t length;
      __u64 data_size;

      data_size = (__u64) pages * PAGE_SIZE;

      /* Don't ask for more than we can represent in the configuration.  */
      if ((__u64) UINT_MAX < data_size)
        continue;

      length = (size_t) data_size;

      /* Check for overflows.  */
      if ((__u64) length != data_size)
        continue;

      header->aux_size = data_size;

      errno = 0;
      aux.reset (nullptr, length, PROT_READ, MAP_SHARED, fd.get (),
                 header->aux_offset);
      if (aux.get () != MAP_FAILED)
        break;
    }

  if (pages == 0)
    return nullptr;

  pt->pt.size = aux.size ();
  pt->pt.mem = (const uint8_t *) aux.release ();
  pt->pt.data_head = &header->aux_head;
  pt->header = (struct perf_event_mmap_page *) data.release ();
  pt->file = fd.release ();

  tinfo->conf.pt.size = (unsigned int) pt->pt.size;
  return tinfo.release ();
}

#else /* !defined (PERF_ATTR_SIZE_VER5) */

static struct btrace_target_info *
linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
{
  return NULL;
}

#endif /* !defined (PERF_ATTR_SIZE_VER5) */
/* See linux-btrace.h.  */

struct btrace_target_info *
linux_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
{
  struct btrace_target_info *tinfo;

  tinfo = NULL;
  switch (conf->format)
    {
    case BTRACE_FORMAT_NONE:
      break;

    case BTRACE_FORMAT_BTS:
      tinfo = linux_enable_bts (ptid, &conf->bts);
      break;

    case BTRACE_FORMAT_PT:
      tinfo = linux_enable_pt (ptid, &conf->pt);
      break;
    }

  if (tinfo == NULL)
    error (_("Unknown error."));

  return tinfo;
}
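/* A minimal usage sketch (illustrative only; the BTS format choice and the
   64 KiB buffer size are hypothetical, and error handling is elided):

     struct btrace_config conf;

     memset (&conf, 0, sizeof (conf));
     conf.format = BTRACE_FORMAT_BTS;
     conf.bts.size = 64 * 1024;

     struct btrace_target_info *tinfo = linux_enable_btrace (ptid, &conf);
     ...
     linux_disable_btrace (tinfo);  */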
/* Disable BTS tracing.  */

static enum btrace_error
linux_disable_bts (struct btrace_tinfo_bts *tinfo)
{
  munmap ((void *) tinfo->header, tinfo->bts.size + PAGE_SIZE);
  close (tinfo->file);

  return BTRACE_ERR_NONE;
}
/* Disable Intel Processor Trace tracing.  */

static enum btrace_error
linux_disable_pt (struct btrace_tinfo_pt *tinfo)
{
  munmap ((void *) tinfo->pt.mem, tinfo->pt.size);
  munmap ((void *) tinfo->header, PAGE_SIZE);
  close (tinfo->file);

  return BTRACE_ERR_NONE;
}
/* See linux-btrace.h.  */

enum btrace_error
linux_disable_btrace (struct btrace_target_info *tinfo)
{
  enum btrace_error errcode;

  errcode = BTRACE_ERR_NOT_SUPPORTED;
  switch (tinfo->conf.format)
    {
    case BTRACE_FORMAT_NONE:
      break;

    case BTRACE_FORMAT_BTS:
      errcode = linux_disable_bts (&tinfo->variant.bts);
      break;

    case BTRACE_FORMAT_PT:
      errcode = linux_disable_pt (&tinfo->variant.pt);
      break;
    }

  if (errcode == BTRACE_ERR_NONE)
    xfree (tinfo);

  return errcode;
}
/* Read branch trace data in BTS format for the thread given by TINFO into
   BTRACE using the TYPE reading method.  */

static enum btrace_error
linux_read_bts (struct btrace_data_bts *btrace,
                struct btrace_target_info *tinfo,
                enum btrace_read_type type)
{
  struct perf_event_buffer *pevent;
  const uint8_t *begin, *end, *start;
  size_t buffer_size, size;
  __u64 data_head, data_tail;
  unsigned int retries = 5;

  pevent = &tinfo->variant.bts.bts;

  /* For delta reads, we return at least the partial last block containing
     the current PC.  */
  if (type == BTRACE_READ_NEW && !perf_event_new_data (pevent))
    return BTRACE_ERR_NONE;

  buffer_size = pevent->size;
  data_tail = pevent->last_head;

  /* We may need to retry reading the trace.  See below.  */
  while (retries--)
    {
      data_head = *pevent->data_head;

      /* Delete any leftover trace from the previous iteration.  */
      VEC_free (btrace_block_s, btrace->blocks);

      if (type == BTRACE_READ_DELTA)
        {
          __u64 data_size;

          /* Determine the number of bytes to read and check for buffer
             overflows.  */

          /* Check for data head overflows.  We might be able to recover from
             those but they are very unlikely and it's not really worth the
             effort, I think.  */
          if (data_head < data_tail)
            return BTRACE_ERR_OVERFLOW;

          /* If the buffer is smaller than the trace delta, we overflowed.  */
          data_size = data_head - data_tail;
          if (buffer_size < data_size)
            return BTRACE_ERR_OVERFLOW;

          /* DATA_SIZE <= BUFFER_SIZE and therefore fits into a size_t.  */
          size = (size_t) data_size;
        }
      else
        {
          /* Read the entire buffer.  */
          size = buffer_size;

          /* Adjust the size if the buffer has not overflowed, yet.  */
          if (data_head < size)
            size = (size_t) data_head;
        }

      /* Data_head keeps growing; the buffer itself is circular.  */
      begin = pevent->mem;
      start = begin + data_head % buffer_size;

      if (data_head <= buffer_size)
        start = begin;

      end = begin + pevent->size;

      btrace->blocks = perf_event_read_bts (tinfo, begin, end, start, size);

      /* The stopping thread notifies its ptracer before it is scheduled out.
         On multi-core systems, the debugger might therefore run while the
         kernel might be writing the last branch trace records.

         Let's check whether the data head moved while we read the trace.  */
      if (data_head == *pevent->data_head)
        break;
    }

  pevent->last_head = data_head;

  /* Prune the incomplete last block (i.e. the first one of inferior execution)
     if we're not doing a delta read.  There is no way of filling in its zeroed
     BEGIN element.  */
  if (!VEC_empty (btrace_block_s, btrace->blocks)
      && type != BTRACE_READ_DELTA)
    VEC_pop (btrace_block_s, btrace->blocks);

  return BTRACE_ERR_NONE;
}
842 linux_fill_btrace_pt_config (struct btrace_data_pt_config
*conf
)
844 conf
->cpu
= btrace_this_cpu ();
/* Read branch trace data in Intel Processor Trace format for the thread
   given by TINFO into BTRACE using the TYPE reading method.  */

static enum btrace_error
linux_read_pt (struct btrace_data_pt *btrace,
               struct btrace_target_info *tinfo,
               enum btrace_read_type type)
{
  struct perf_event_buffer *pt;

  pt = &tinfo->variant.pt.pt;

  linux_fill_btrace_pt_config (&btrace->config);

  switch (type)
    {
    case BTRACE_READ_DELTA:
      /* We don't support delta reads.  The data head (i.e. aux_head) wraps
         around to stay inside the aux buffer.  */
      return BTRACE_ERR_NOT_SUPPORTED;

    case BTRACE_READ_NEW:
      if (!perf_event_new_data (pt))
        return BTRACE_ERR_NONE;

      /* Fall through.  */
    case BTRACE_READ_ALL:
      perf_event_read_all (pt, &btrace->data, &btrace->size);
      return BTRACE_ERR_NONE;
    }

  internal_error (__FILE__, __LINE__, _("Unknown btrace read type."));
}
/* See linux-btrace.h.  */

enum btrace_error
linux_read_btrace (struct btrace_data *btrace,
                   struct btrace_target_info *tinfo,
                   enum btrace_read_type type)
{
  switch (tinfo->conf.format)
    {
    case BTRACE_FORMAT_NONE:
      return BTRACE_ERR_NOT_SUPPORTED;

    case BTRACE_FORMAT_BTS:
      /* We read btrace in BTS format.  */
      btrace->format = BTRACE_FORMAT_BTS;
      btrace->variant.bts.blocks = NULL;

      return linux_read_bts (&btrace->variant.bts, tinfo, type);

    case BTRACE_FORMAT_PT:
      /* We read btrace in Intel Processor Trace format.  */
      btrace->format = BTRACE_FORMAT_PT;
      btrace->variant.pt.data = NULL;
      btrace->variant.pt.size = 0;

      return linux_read_pt (&btrace->variant.pt, tinfo, type);
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}
/* See linux-btrace.h.  */

const struct btrace_config *
linux_btrace_conf (const struct btrace_target_info *tinfo)
{
  return &tinfo->conf;
}
#else /* !HAVE_LINUX_PERF_EVENT_H */

/* See linux-btrace.h.  */

struct btrace_target_info *
linux_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
{
  return NULL;
}

/* See linux-btrace.h.  */

enum btrace_error
linux_disable_btrace (struct btrace_target_info *tinfo)
{
  return BTRACE_ERR_NOT_SUPPORTED;
}

/* See linux-btrace.h.  */

enum btrace_error
linux_read_btrace (struct btrace_data *btrace,
                   struct btrace_target_info *tinfo,
                   enum btrace_read_type type)
{
  return BTRACE_ERR_NOT_SUPPORTED;
}

/* See linux-btrace.h.  */

const struct btrace_config *
linux_btrace_conf (const struct btrace_target_info *tinfo)
{
  return NULL;
}

#endif /* !HAVE_LINUX_PERF_EVENT_H */