1 /* Linux-dependent part of branch trace support for GDB, and GDBserver.
3 Copyright (C) 2013-2020 Free Software Foundation, Inc.
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
22 #include "gdbsupport/common-defs.h"
26 #undef PACKAGE_VERSION
28 #undef PACKAGE_TARNAME
31 #include "linux-btrace.h"
32 #include "gdbsupport/common-regcache.h"
33 #include "gdbsupport/gdb_wait.h"
34 #include "x86-cpuid.h"
35 #include "gdbsupport/filestuff.h"
36 #include "gdbsupport/scoped_fd.h"
37 #include "gdbsupport/scoped_mmap.h"
41 #include <sys/syscall.h>
43 #if HAVE_LINUX_PERF_EVENT_H && defined(SYS_perf_event_open)
47 #include "nat/gdb_ptrace.h"
48 #include <sys/types.h>
/* A branch trace record in perf_event.  */
struct perf_event_bts
{
  /* The linear address of the branch source.  */
  uint64_t from;

  /* The linear address of the branch destination.  */
  uint64_t to;
};
61 /* A perf_event branch trace sample. */
62 struct perf_event_sample
64 /* The perf_event sample header. */
65 struct perf_event_header header
;
67 /* The perf_event branch tracing payload. */
68 struct perf_event_bts bts
;
71 /* Identify the cpu we're running on. */
72 static struct btrace_cpu
73 btrace_this_cpu (void)
75 struct btrace_cpu cpu
;
76 unsigned int eax
, ebx
, ecx
, edx
;
79 memset (&cpu
, 0, sizeof (cpu
));
81 ok
= x86_cpuid (0, &eax
, &ebx
, &ecx
, &edx
);
84 if (ebx
== signature_INTEL_ebx
&& ecx
== signature_INTEL_ecx
85 && edx
== signature_INTEL_edx
)
87 unsigned int cpuid
, ignore
;
89 ok
= x86_cpuid (1, &cpuid
, &ignore
, &ignore
, &ignore
);
92 cpu
.vendor
= CV_INTEL
;
94 cpu
.family
= (cpuid
>> 8) & 0xf;
95 cpu
.model
= (cpuid
>> 4) & 0xf;
97 if (cpu
.family
== 0x6)
98 cpu
.model
+= (cpuid
>> 12) & 0xf0;
106 /* Return non-zero if there is new data in PEVENT; zero otherwise. */
109 perf_event_new_data (const struct perf_event_buffer
*pev
)
111 return *pev
->data_head
!= pev
->last_head
;
114 /* Copy the last SIZE bytes from PEV ending at DATA_HEAD and return a pointer
115 to the memory holding the copy.
116 The caller is responsible for freeing the memory. */
119 perf_event_read (const struct perf_event_buffer
*pev
, __u64 data_head
,
122 const gdb_byte
*begin
, *end
, *start
, *stop
;
130 /* We should never ask for more data than the buffer can hold. */
131 buffer_size
= pev
->size
;
132 gdb_assert (size
<= buffer_size
);
134 /* If we ask for more data than we seem to have, we wrap around and read
135 data from the end of the buffer. This is already handled by the %
136 BUFFER_SIZE operation, below. Here, we just need to make sure that we
139 Note that this is perfectly OK for perf event buffers where data_head
140 doesn'grow indefinitely and instead wraps around to remain within the
141 buffer's boundaries. */
142 if (data_head
< size
)
143 data_head
+= buffer_size
;
145 gdb_assert (size
<= data_head
);
146 data_tail
= data_head
- size
;
149 start
= begin
+ data_tail
% buffer_size
;
150 stop
= begin
+ data_head
% buffer_size
;
152 buffer
= (gdb_byte
*) xmalloc (size
);
155 memcpy (buffer
, start
, stop
- start
);
158 end
= begin
+ buffer_size
;
160 memcpy (buffer
, start
, end
- start
);
161 memcpy (buffer
+ (end
- start
), begin
, stop
- begin
);
167 /* Copy the perf event buffer data from PEV.
168 Store a pointer to the copy into DATA and its size in SIZE. */
171 perf_event_read_all (struct perf_event_buffer
*pev
, gdb_byte
**data
,
177 data_head
= *pev
->data_head
;
180 *data
= perf_event_read (pev
, data_head
, size
);
183 pev
->last_head
= data_head
;
186 /* Try to determine the start address of the Linux kernel. */
189 linux_determine_kernel_start (void)
191 static uint64_t kernel_start
;
199 gdb_file_up file
= gdb_fopen_cloexec ("/proc/kallsyms", "r");
203 while (!feof (file
.get ()))
205 char buffer
[1024], symbol
[8], *line
;
209 line
= fgets (buffer
, sizeof (buffer
), file
.get ());
213 match
= sscanf (line
, "%" SCNx64
" %*[tT] %7s", &addr
, symbol
);
217 if (strcmp (symbol
, "_text") == 0)
227 /* Check whether an address is in the kernel. */
230 perf_event_is_kernel_addr (uint64_t addr
)
232 uint64_t kernel_start
;
234 kernel_start
= linux_determine_kernel_start ();
235 if (kernel_start
!= 0ull)
236 return (addr
>= kernel_start
);
238 /* If we don't know the kernel's start address, let's check the most
239 significant bit. This will work at least for 64-bit kernels. */
240 return ((addr
& (1ull << 63)) != 0);
243 /* Check whether a perf event record should be skipped. */
246 perf_event_skip_bts_record (const struct perf_event_bts
*bts
)
248 /* The hardware may report branches from kernel into user space. Branches
249 from user into kernel space will be suppressed. We filter the former to
250 provide a consistent branch trace excluding kernel. */
251 return perf_event_is_kernel_addr (bts
->from
);
254 /* Perform a few consistency checks on a perf event sample record. This is
255 meant to catch cases when we get out of sync with the perf event stream. */
258 perf_event_sample_ok (const struct perf_event_sample
*sample
)
260 if (sample
->header
.type
!= PERF_RECORD_SAMPLE
)
263 if (sample
->header
.size
!= sizeof (*sample
))
269 /* Branch trace is collected in a circular buffer [begin; end) as pairs of from
270 and to addresses (plus a header).
272 Start points into that buffer at the next sample position.
273 We read the collected samples backwards from start.
275 While reading the samples, we convert the information into a list of blocks.
276 For two adjacent samples s1 and s2, we form a block b such that b.begin =
277 s1.to and b.end = s2.from.
279 In case the buffer overflows during sampling, one sample may have its lower
280 part at the end and its upper part at the beginning of the buffer. */
282 static std::vector
<btrace_block
> *
283 perf_event_read_bts (struct btrace_target_info
* tinfo
, const uint8_t *begin
,
284 const uint8_t *end
, const uint8_t *start
, size_t size
)
286 std::vector
<btrace_block
> *btrace
= new std::vector
<btrace_block
>;
287 struct perf_event_sample sample
;
289 struct btrace_block block
= { 0, 0 };
290 struct regcache
*regcache
;
292 gdb_assert (begin
<= start
);
293 gdb_assert (start
<= end
);
295 /* The first block ends at the current pc. */
296 regcache
= get_thread_regcache_for_ptid (tinfo
->ptid
);
297 block
.end
= regcache_read_pc (regcache
);
299 /* The buffer may contain a partial record as its last entry (i.e. when the
300 buffer size is not a multiple of the sample size). */
301 read
= sizeof (sample
) - 1;
303 for (; read
< size
; read
+= sizeof (sample
))
305 const struct perf_event_sample
*psample
;
307 /* Find the next perf_event sample in a backwards traversal. */
308 start
-= sizeof (sample
);
310 /* If we're still inside the buffer, we're done. */
312 psample
= (const struct perf_event_sample
*) start
;
317 /* We're to the left of the ring buffer, we will wrap around and
318 reappear at the very right of the ring buffer. */
320 missing
= (begin
- start
);
321 start
= (end
- missing
);
323 /* If the entire sample is missing, we're done. */
324 if (missing
== sizeof (sample
))
325 psample
= (const struct perf_event_sample
*) start
;
330 /* The sample wrapped around. The lower part is at the end and
331 the upper part is at the beginning of the buffer. */
332 stack
= (uint8_t *) &sample
;
334 /* Copy the two parts so we have a contiguous sample. */
335 memcpy (stack
, start
, missing
);
336 memcpy (stack
+ missing
, begin
, sizeof (sample
) - missing
);
342 if (!perf_event_sample_ok (psample
))
344 warning (_("Branch trace may be incomplete."));
348 if (perf_event_skip_bts_record (&psample
->bts
))
351 /* We found a valid sample, so we can complete the current block. */
352 block
.begin
= psample
->bts
.to
;
354 btrace
->push_back (block
);
356 /* Start the next block. */
357 block
.end
= psample
->bts
.from
;
360 /* Push the last block (i.e. the first one of inferior execution), as well.
361 We don't know where it ends, but we know where it starts. If we're
362 reading delta trace, we can fill in the start address later on.
363 Otherwise we will prune it. */
365 btrace
->push_back (block
);
370 /* Check whether an Intel cpu supports BTS. */
373 intel_supports_bts (const struct btrace_cpu
*cpu
)
380 case 0x1a: /* Nehalem */
384 case 0x25: /* Westmere */
387 case 0x2a: /* Sandy Bridge */
389 case 0x3a: /* Ivy Bridge */
391 /* AAJ122: LBR, BTM, or BTS records may have incorrect branch
392 "from" information afer an EIST transition, T-states, C1E, or
393 Adaptive Thermal Throttling. */
401 /* Check whether the cpu supports BTS. */
404 cpu_supports_bts (void)
406 struct btrace_cpu cpu
;
408 cpu
= btrace_this_cpu ();
412 /* Don't know about others. Let's assume they do. */
416 return intel_supports_bts (&cpu
);
420 /* The perf_event_open syscall failed. Try to print a helpful error
424 diagnose_perf_event_open_fail ()
431 static const char filename
[] = "/proc/sys/kernel/perf_event_paranoid";
432 gdb_file_up file
= gdb_fopen_cloexec (filename
, "r");
433 if (file
.get () == nullptr)
436 int level
, found
= fscanf (file
.get (), "%d", &level
);
437 if (found
== 1 && level
> 2)
438 error (_("You do not have permission to record the process. "
439 "Try setting %s to 2 or less."), filename
);
445 error (_("Failed to start recording: %s"), safe_strerror (errno
));
448 /* Enable branch tracing in BTS format. */
450 static struct btrace_target_info
*
451 linux_enable_bts (ptid_t ptid
, const struct btrace_config_bts
*conf
)
453 struct btrace_tinfo_bts
*bts
;
458 if (!cpu_supports_bts ())
459 error (_("BTS support has been disabled for the target cpu."));
461 gdb::unique_xmalloc_ptr
<btrace_target_info
> tinfo
462 (XCNEW (btrace_target_info
));
465 tinfo
->conf
.format
= BTRACE_FORMAT_BTS
;
466 bts
= &tinfo
->variant
.bts
;
468 bts
->attr
.size
= sizeof (bts
->attr
);
469 bts
->attr
.type
= PERF_TYPE_HARDWARE
;
470 bts
->attr
.config
= PERF_COUNT_HW_BRANCH_INSTRUCTIONS
;
471 bts
->attr
.sample_period
= 1;
473 /* We sample from and to address. */
474 bts
->attr
.sample_type
= PERF_SAMPLE_IP
| PERF_SAMPLE_ADDR
;
476 bts
->attr
.exclude_kernel
= 1;
477 bts
->attr
.exclude_hv
= 1;
478 bts
->attr
.exclude_idle
= 1;
485 scoped_fd
fd (syscall (SYS_perf_event_open
, &bts
->attr
, pid
, -1, -1, 0));
487 diagnose_perf_event_open_fail ();
489 /* Convert the requested size in bytes to pages (rounding up). */
490 pages
= ((size_t) conf
->size
/ PAGE_SIZE
491 + ((conf
->size
% PAGE_SIZE
) == 0 ? 0 : 1));
492 /* We need at least one page. */
496 /* The buffer size can be requested in powers of two pages. Adjust PAGES
497 to the next power of two. */
498 for (pg
= 0; pages
!= ((size_t) 1 << pg
); ++pg
)
499 if ((pages
& ((size_t) 1 << pg
)) != 0)
500 pages
+= ((size_t) 1 << pg
);
502 /* We try to allocate the requested size.
503 If that fails, try to get as much as we can. */
505 for (; pages
> 0; pages
>>= 1)
510 data_size
= (__u64
) pages
* PAGE_SIZE
;
512 /* Don't ask for more than we can represent in the configuration. */
513 if ((__u64
) UINT_MAX
< data_size
)
516 size
= (size_t) data_size
;
517 length
= size
+ PAGE_SIZE
;
519 /* Check for overflows. */
520 if ((__u64
) length
!= data_size
+ PAGE_SIZE
)
524 /* The number of pages we request needs to be a power of two. */
525 data
.reset (nullptr, length
, PROT_READ
, MAP_SHARED
, fd
.get (), 0);
526 if (data
.get () != MAP_FAILED
)
531 error (_("Failed to map trace buffer: %s."), safe_strerror (errno
));
533 struct perf_event_mmap_page
*header
= (struct perf_event_mmap_page
*)
535 data_offset
= PAGE_SIZE
;
537 #if defined (PERF_ATTR_SIZE_VER5)
538 if (offsetof (struct perf_event_mmap_page
, data_size
) <= header
->size
)
542 data_offset
= header
->data_offset
;
543 data_size
= header
->data_size
;
545 size
= (unsigned int) data_size
;
547 /* Check for overflows. */
548 if ((__u64
) size
!= data_size
)
549 error (_("Failed to determine trace buffer size."));
551 #endif /* defined (PERF_ATTR_SIZE_VER5) */
553 bts
->bts
.size
= size
;
554 bts
->bts
.data_head
= &header
->data_head
;
555 bts
->bts
.mem
= (const uint8_t *) data
.release () + data_offset
;
556 bts
->bts
.last_head
= 0ull;
557 bts
->header
= header
;
558 bts
->file
= fd
.release ();
560 tinfo
->conf
.bts
.size
= (unsigned int) size
;
561 return tinfo
.release ();
564 #if defined (PERF_ATTR_SIZE_VER5)
566 /* Determine the event type. */
569 perf_event_pt_event_type ()
571 static const char filename
[] = "/sys/bus/event_source/devices/intel_pt/type";
574 gdb_file_up file
= gdb_fopen_cloexec (filename
, "r");
575 if (file
.get () == nullptr)
576 error (_("Failed to open %s: %s."), filename
, safe_strerror (errno
));
578 int type
, found
= fscanf (file
.get (), "%d", &type
);
580 error (_("Failed to read the PT event type from %s."), filename
);
585 /* Enable branch tracing in Intel Processor Trace format. */
587 static struct btrace_target_info
*
588 linux_enable_pt (ptid_t ptid
, const struct btrace_config_pt
*conf
)
590 struct btrace_tinfo_pt
*pt
;
598 gdb::unique_xmalloc_ptr
<btrace_target_info
> tinfo
599 (XCNEW (btrace_target_info
));
602 tinfo
->conf
.format
= BTRACE_FORMAT_PT
;
603 pt
= &tinfo
->variant
.pt
;
605 pt
->attr
.size
= sizeof (pt
->attr
);
606 pt
->attr
.type
= perf_event_pt_event_type ();
608 pt
->attr
.exclude_kernel
= 1;
609 pt
->attr
.exclude_hv
= 1;
610 pt
->attr
.exclude_idle
= 1;
613 scoped_fd
fd (syscall (SYS_perf_event_open
, &pt
->attr
, pid
, -1, -1, 0));
615 diagnose_perf_event_open_fail ();
617 /* Allocate the configuration page. */
618 scoped_mmap
data (nullptr, PAGE_SIZE
, PROT_READ
| PROT_WRITE
, MAP_SHARED
,
620 if (data
.get () == MAP_FAILED
)
621 error (_("Failed to map trace user page: %s."), safe_strerror (errno
));
623 struct perf_event_mmap_page
*header
= (struct perf_event_mmap_page
*)
626 header
->aux_offset
= header
->data_offset
+ header
->data_size
;
628 /* Convert the requested size in bytes to pages (rounding up). */
629 pages
= ((size_t) conf
->size
/ PAGE_SIZE
630 + ((conf
->size
% PAGE_SIZE
) == 0 ? 0 : 1));
631 /* We need at least one page. */
635 /* The buffer size can be requested in powers of two pages. Adjust PAGES
636 to the next power of two. */
637 for (pg
= 0; pages
!= ((size_t) 1 << pg
); ++pg
)
638 if ((pages
& ((size_t) 1 << pg
)) != 0)
639 pages
+= ((size_t) 1 << pg
);
641 /* We try to allocate the requested size.
642 If that fails, try to get as much as we can. */
644 for (; pages
> 0; pages
>>= 1)
649 data_size
= (__u64
) pages
* PAGE_SIZE
;
651 /* Don't ask for more than we can represent in the configuration. */
652 if ((__u64
) UINT_MAX
< data_size
)
655 length
= (size_t) data_size
;
657 /* Check for overflows. */
658 if ((__u64
) length
!= data_size
)
661 header
->aux_size
= data_size
;
664 aux
.reset (nullptr, length
, PROT_READ
, MAP_SHARED
, fd
.get (),
666 if (aux
.get () != MAP_FAILED
)
671 error (_("Failed to map trace buffer: %s."), safe_strerror (errno
));
673 pt
->pt
.size
= aux
.size ();
674 pt
->pt
.mem
= (const uint8_t *) aux
.release ();
675 pt
->pt
.data_head
= &header
->aux_head
;
676 pt
->header
= (struct perf_event_mmap_page
*) data
.release ();
677 gdb_assert (pt
->header
== header
);
678 pt
->file
= fd
.release ();
680 tinfo
->conf
.pt
.size
= (unsigned int) pt
->pt
.size
;
681 return tinfo
.release ();
684 #else /* !defined (PERF_ATTR_SIZE_VER5) */
686 static struct btrace_target_info
*
687 linux_enable_pt (ptid_t ptid
, const struct btrace_config_pt
*conf
)
689 error (_("Intel Processor Trace support was disabled at compile time."));
692 #endif /* !defined (PERF_ATTR_SIZE_VER5) */
694 /* See linux-btrace.h. */
696 struct btrace_target_info
*
697 linux_enable_btrace (ptid_t ptid
, const struct btrace_config
*conf
)
699 switch (conf
->format
)
701 case BTRACE_FORMAT_NONE
:
702 error (_("Bad branch trace format."));
705 error (_("Unknown branch trace format."));
707 case BTRACE_FORMAT_BTS
:
708 return linux_enable_bts (ptid
, &conf
->bts
);
710 case BTRACE_FORMAT_PT
:
711 return linux_enable_pt (ptid
, &conf
->pt
);
715 /* Disable BTS tracing. */
717 static enum btrace_error
718 linux_disable_bts (struct btrace_tinfo_bts
*tinfo
)
720 munmap((void *) tinfo
->header
, tinfo
->bts
.size
+ PAGE_SIZE
);
723 return BTRACE_ERR_NONE
;
726 /* Disable Intel Processor Trace tracing. */
728 static enum btrace_error
729 linux_disable_pt (struct btrace_tinfo_pt
*tinfo
)
731 munmap((void *) tinfo
->pt
.mem
, tinfo
->pt
.size
);
732 munmap((void *) tinfo
->header
, PAGE_SIZE
);
735 return BTRACE_ERR_NONE
;
738 /* See linux-btrace.h. */
741 linux_disable_btrace (struct btrace_target_info
*tinfo
)
743 enum btrace_error errcode
;
745 errcode
= BTRACE_ERR_NOT_SUPPORTED
;
746 switch (tinfo
->conf
.format
)
748 case BTRACE_FORMAT_NONE
:
751 case BTRACE_FORMAT_BTS
:
752 errcode
= linux_disable_bts (&tinfo
->variant
.bts
);
755 case BTRACE_FORMAT_PT
:
756 errcode
= linux_disable_pt (&tinfo
->variant
.pt
);
760 if (errcode
== BTRACE_ERR_NONE
)
766 /* Read branch trace data in BTS format for the thread given by TINFO into
767 BTRACE using the TYPE reading method. */
769 static enum btrace_error
770 linux_read_bts (struct btrace_data_bts
*btrace
,
771 struct btrace_target_info
*tinfo
,
772 enum btrace_read_type type
)
774 struct perf_event_buffer
*pevent
;
775 const uint8_t *begin
, *end
, *start
;
776 size_t buffer_size
, size
;
777 __u64 data_head
, data_tail
;
778 unsigned int retries
= 5;
780 pevent
= &tinfo
->variant
.bts
.bts
;
782 /* For delta reads, we return at least the partial last block containing
784 if (type
== BTRACE_READ_NEW
&& !perf_event_new_data (pevent
))
785 return BTRACE_ERR_NONE
;
787 buffer_size
= pevent
->size
;
788 data_tail
= pevent
->last_head
;
790 /* We may need to retry reading the trace. See below. */
793 data_head
= *pevent
->data_head
;
795 /* Delete any leftover trace from the previous iteration. */
796 delete btrace
->blocks
;
797 btrace
->blocks
= nullptr;
799 if (type
== BTRACE_READ_DELTA
)
803 /* Determine the number of bytes to read and check for buffer
806 /* Check for data head overflows. We might be able to recover from
807 those but they are very unlikely and it's not really worth the
809 if (data_head
< data_tail
)
810 return BTRACE_ERR_OVERFLOW
;
812 /* If the buffer is smaller than the trace delta, we overflowed. */
813 data_size
= data_head
- data_tail
;
814 if (buffer_size
< data_size
)
815 return BTRACE_ERR_OVERFLOW
;
817 /* DATA_SIZE <= BUFFER_SIZE and therefore fits into a size_t. */
818 size
= (size_t) data_size
;
822 /* Read the entire buffer. */
825 /* Adjust the size if the buffer has not overflowed, yet. */
826 if (data_head
< size
)
827 size
= (size_t) data_head
;
830 /* Data_head keeps growing; the buffer itself is circular. */
832 start
= begin
+ data_head
% buffer_size
;
834 if (data_head
<= buffer_size
)
837 end
= begin
+ pevent
->size
;
839 btrace
->blocks
= perf_event_read_bts (tinfo
, begin
, end
, start
, size
);
841 /* The stopping thread notifies its ptracer before it is scheduled out.
842 On multi-core systems, the debugger might therefore run while the
843 kernel might be writing the last branch trace records.
845 Let's check whether the data head moved while we read the trace. */
846 if (data_head
== *pevent
->data_head
)
850 pevent
->last_head
= data_head
;
852 /* Prune the incomplete last block (i.e. the first one of inferior execution)
853 if we're not doing a delta read. There is no way of filling in its zeroed
855 if (!btrace
->blocks
->empty () && type
!= BTRACE_READ_DELTA
)
856 btrace
->blocks
->pop_back ();
858 return BTRACE_ERR_NONE
;
861 /* Fill in the Intel Processor Trace configuration information. */
864 linux_fill_btrace_pt_config (struct btrace_data_pt_config
*conf
)
866 conf
->cpu
= btrace_this_cpu ();
869 /* Read branch trace data in Intel Processor Trace format for the thread
870 given by TINFO into BTRACE using the TYPE reading method. */
872 static enum btrace_error
873 linux_read_pt (struct btrace_data_pt
*btrace
,
874 struct btrace_target_info
*tinfo
,
875 enum btrace_read_type type
)
877 struct perf_event_buffer
*pt
;
879 pt
= &tinfo
->variant
.pt
.pt
;
881 linux_fill_btrace_pt_config (&btrace
->config
);
885 case BTRACE_READ_DELTA
:
886 /* We don't support delta reads. The data head (i.e. aux_head) wraps
887 around to stay inside the aux buffer. */
888 return BTRACE_ERR_NOT_SUPPORTED
;
890 case BTRACE_READ_NEW
:
891 if (!perf_event_new_data (pt
))
892 return BTRACE_ERR_NONE
;
895 case BTRACE_READ_ALL
:
896 perf_event_read_all (pt
, &btrace
->data
, &btrace
->size
);
897 return BTRACE_ERR_NONE
;
900 internal_error (__FILE__
, __LINE__
, _("Unkown btrace read type."));
903 /* See linux-btrace.h. */
906 linux_read_btrace (struct btrace_data
*btrace
,
907 struct btrace_target_info
*tinfo
,
908 enum btrace_read_type type
)
910 switch (tinfo
->conf
.format
)
912 case BTRACE_FORMAT_NONE
:
913 return BTRACE_ERR_NOT_SUPPORTED
;
915 case BTRACE_FORMAT_BTS
:
916 /* We read btrace in BTS format. */
917 btrace
->format
= BTRACE_FORMAT_BTS
;
918 btrace
->variant
.bts
.blocks
= NULL
;
920 return linux_read_bts (&btrace
->variant
.bts
, tinfo
, type
);
922 case BTRACE_FORMAT_PT
:
923 /* We read btrace in Intel Processor Trace format. */
924 btrace
->format
= BTRACE_FORMAT_PT
;
925 btrace
->variant
.pt
.data
= NULL
;
926 btrace
->variant
.pt
.size
= 0;
928 return linux_read_pt (&btrace
->variant
.pt
, tinfo
, type
);
931 internal_error (__FILE__
, __LINE__
, _("Unkown branch trace format."));
934 /* See linux-btrace.h. */
936 const struct btrace_config
*
937 linux_btrace_conf (const struct btrace_target_info
*tinfo
)
942 #else /* !HAVE_LINUX_PERF_EVENT_H */
944 /* See linux-btrace.h. */
946 struct btrace_target_info
*
947 linux_enable_btrace (ptid_t ptid
, const struct btrace_config
*conf
)
952 /* See linux-btrace.h. */
955 linux_disable_btrace (struct btrace_target_info
*tinfo
)
957 return BTRACE_ERR_NOT_SUPPORTED
;
960 /* See linux-btrace.h. */
963 linux_read_btrace (struct btrace_data
*btrace
,
964 struct btrace_target_info
*tinfo
,
965 enum btrace_read_type type
)
967 return BTRACE_ERR_NOT_SUPPORTED
;
/* See linux-btrace.h.  Stub used when perf events are unavailable at
   compile time.  */

const struct btrace_config *
linux_btrace_conf (const struct btrace_target_info *tinfo)
{
  return NULL;
}
978 #endif /* !HAVE_LINUX_PERF_EVENT_H */