btrace: work around _dl_runtime_resolve returning to resolved function
gdb/nat/linux-btrace.c

/* Linux-dependent part of branch trace support for GDB, and GDBserver.

   Copyright (C) 2013-2015 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "common-defs.h"
#include "linux-btrace.h"
#include "common-regcache.h"
#include "gdb_wait.h"
#include "x86-cpuid.h"

#ifdef HAVE_SYS_SYSCALL_H
#include <sys/syscall.h>
#endif

#if HAVE_LINUX_PERF_EVENT_H && defined(SYS_perf_event_open)

#include <stdint.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/user.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <signal.h>

/* A branch trace record in perf_event.  */
struct perf_event_bts
{
  /* The linear address of the branch source.  */
  uint64_t from;

  /* The linear address of the branch destination.  */
  uint64_t to;
};

/* A perf_event branch trace sample.  */
struct perf_event_sample
{
  /* The perf_event sample header.  */
  struct perf_event_header header;

  /* The perf_event branch tracing payload.  */
  struct perf_event_bts bts;
};

/* Identify the cpu we're running on.  */
static struct btrace_cpu
btrace_this_cpu (void)
{
  struct btrace_cpu cpu;
  unsigned int eax, ebx, ecx, edx;
  int ok;

  memset (&cpu, 0, sizeof (cpu));

  ok = x86_cpuid (0, &eax, &ebx, &ecx, &edx);
  if (ok != 0)
    {
      if (ebx == signature_INTEL_ebx && ecx == signature_INTEL_ecx
          && edx == signature_INTEL_edx)
        {
          unsigned int cpuid, ignore;

          ok = x86_cpuid (1, &cpuid, &ignore, &ignore, &ignore);
          if (ok != 0)
            {
              cpu.vendor = CV_INTEL;

              cpu.family = (cpuid >> 8) & 0xf;
              cpu.model = (cpuid >> 4) & 0xf;

              if (cpu.family == 0x6)
                cpu.model += (cpuid >> 12) & 0xf0;
            }
        }
    }

  return cpu;
}
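
/* For illustration: the model computation above combines the model and
   extended model fields of CPUID leaf 1.  Taking a Sandy Bridge part that
   reports eax = 0x000206a7 as an assumed sample value:

     family = (0x206a7 >> 8) & 0xf = 0x6
     model  = (0x206a7 >> 4) & 0xf = 0xa
     model += (0x206a7 >> 12) & 0xf0 = 0xa + 0x20 = 0x2a

   which matches the Sandy Bridge model number checked in
   intel_supports_bts below.  */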

/* Return non-zero if there is new data in PEVENT; zero otherwise.  */

static int
perf_event_new_data (const struct perf_event_buffer *pev)
{
  return *pev->data_head != pev->last_head;
}

/* Check whether an address is in the kernel.  */

static inline int
perf_event_is_kernel_addr (const struct btrace_target_info *tinfo,
                           uint64_t addr)
{
  uint64_t mask;

  /* If we don't know the size of a pointer, we can't check.  Let's assume it's
     not a kernel address in this case.  */
  if (tinfo->ptr_bits == 0)
    return 0;

  /* A bit mask for the most significant bit in an address.  */
  mask = (uint64_t) 1 << (tinfo->ptr_bits - 1);

  /* Check whether the most significant bit in the address is set.  */
  return (addr & mask) != 0;
}
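
/* Example (assumed values): on 64-bit Linux, TINFO->ptr_bits is 64, so
   MASK is 1ull << 63.  A kernel text address such as 0xffffffff81000000
   has that bit set and is filtered; a typical user-space address such as
   0x00007f0012345678 does not.  This heuristic relies on the usual Linux
   split of the canonical address space between user and kernel.  */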

/* Check whether a perf event record should be skipped.  */

static inline int
perf_event_skip_bts_record (const struct btrace_target_info *tinfo,
                            const struct perf_event_bts *bts)
{
  /* The hardware may report branches from kernel into user space.  Branches
     from user into kernel space will be suppressed.  We filter the former to
     provide a consistent branch trace excluding kernel.  */
  return perf_event_is_kernel_addr (tinfo, bts->from);
}

/* Perform a few consistency checks on a perf event sample record.  This is
   meant to catch cases when we get out of sync with the perf event stream.  */

static inline int
perf_event_sample_ok (const struct perf_event_sample *sample)
{
  if (sample->header.type != PERF_RECORD_SAMPLE)
    return 0;

  if (sample->header.size != sizeof (*sample))
    return 0;

  return 1;
}

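/* For reference (assuming the usual perf ABI layout): struct
   perf_event_header is 8 bytes (u32 type, u16 misc, u16 size) and struct
   perf_event_bts is 16 bytes, so a well-formed sample has header.size == 24
   and header.type == PERF_RECORD_SAMPLE.  Anything else indicates that we
   lost our position in the event stream.  */
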
/* Branch trace is collected in a circular buffer [begin; end) as pairs of from
   and to addresses (plus a header).

   Start points into that buffer at the next sample position.
   We read the collected samples backwards from start.

   While reading the samples, we convert the information into a list of blocks.
   For two adjacent samples s1 and s2, we form a block b such that b.begin =
   s1.to and b.end = s2.from; see the worked example after this function.

   In case the buffer overflows during sampling, one sample may have its lower
   part at the end and its upper part at the beginning of the buffer.  */

static VEC (btrace_block_s) *
perf_event_read_bts (struct btrace_target_info* tinfo, const uint8_t *begin,
                     const uint8_t *end, const uint8_t *start,
                     unsigned long long size)
{
  VEC (btrace_block_s) *btrace = NULL;
  struct perf_event_sample sample;
  unsigned long long read = 0;
  struct btrace_block block = { 0, 0 };
  struct regcache *regcache;

  gdb_assert (begin <= start);
  gdb_assert (start <= end);

  /* The first block ends at the current pc.  */
  regcache = get_thread_regcache_for_ptid (tinfo->ptid);
  block.end = regcache_read_pc (regcache);

  /* The buffer may contain a partial record as its last entry (i.e. when the
     buffer size is not a multiple of the sample size).  */
  read = sizeof (sample) - 1;

  for (; read < size; read += sizeof (sample))
    {
      const struct perf_event_sample *psample;

      /* Find the next perf_event sample in a backwards traversal.  */
      start -= sizeof (sample);

      /* If we're still inside the buffer, we're done.  */
      if (begin <= start)
        psample = (const struct perf_event_sample *) start;
      else
        {
          int missing;

          /* We're to the left of the ring buffer, we will wrap around and
             reappear at the very right of the ring buffer.  */

          missing = (begin - start);
          start = (end - missing);

          /* If the entire sample is missing, we're done.  */
          if (missing == sizeof (sample))
            psample = (const struct perf_event_sample *) start;
          else
            {
              uint8_t *stack;

              /* The sample wrapped around.  The lower part is at the end and
                 the upper part is at the beginning of the buffer.  */
              stack = (uint8_t *) &sample;

              /* Copy the two parts so we have a contiguous sample.  */
              memcpy (stack, start, missing);
              memcpy (stack + missing, begin, sizeof (sample) - missing);

              psample = &sample;
            }
        }

      if (!perf_event_sample_ok (psample))
        {
          warning (_("Branch trace may be incomplete."));
          break;
        }

      if (perf_event_skip_bts_record (tinfo, &psample->bts))
        continue;

      /* We found a valid sample, so we can complete the current block.  */
      block.begin = psample->bts.to;

      VEC_safe_push (btrace_block_s, btrace, &block);

      /* Start the next block.  */
      block.end = psample->bts.from;
    }

  /* Push the last block (i.e. the first one of inferior execution), as well.
     We don't know where it ends, but we know where it starts.  If we're
     reading delta trace, we can fill in the start address later on.
     Otherwise we will prune it.  */
  block.begin = 0;
  VEC_safe_push (btrace_block_s, btrace, &block);

  return btrace;
}

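/* Worked example of the block formation above (assumed sample values).
   Suppose the two most recent samples, read backwards from START, are

     s1 = { from = 0x401000, to = 0x402000 }   (newest)
     s2 = { from = 0x400800, to = 0x400f00 }

   Processing s1 pushes the block [s1.to; pc] = [0x402000; pc] and starts
   the next block at block.end = s1.from = 0x401000.  Processing s2 then
   pushes [s2.to; s1.from] = [0x400f00; 0x401000]: the inferior executed
   linearly from 0x400f00 up to the branch at 0x401000, which jumped to
   0x402000.  Each sample thus closes one block and opens the next.  */
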
/* Check whether the kernel supports BTS.  */

static int
kernel_supports_bts (void)
{
  struct perf_event_attr attr;
  pid_t child, pid;
  int status, file;

  errno = 0;
  child = fork ();
  switch (child)
    {
    case -1:
      warning (_("test bts: cannot fork: %s."), strerror (errno));
      return 0;

    case 0:
      status = ptrace (PTRACE_TRACEME, 0, NULL, NULL);
      if (status != 0)
        {
          warning (_("test bts: cannot PTRACE_TRACEME: %s."),
                   strerror (errno));
          _exit (1);
        }

      status = raise (SIGTRAP);
      if (status != 0)
        {
          warning (_("test bts: cannot raise SIGTRAP: %s."),
                   strerror (errno));
          _exit (1);
        }

      _exit (1);

    default:
      pid = waitpid (child, &status, 0);
      if (pid != child)
        {
          warning (_("test bts: bad pid %ld, error: %s."),
                   (long) pid, strerror (errno));
          return 0;
        }

      if (!WIFSTOPPED (status))
        {
          warning (_("test bts: expected stop. status: %d."),
                   status);
          return 0;
        }

      memset (&attr, 0, sizeof (attr));

      attr.type = PERF_TYPE_HARDWARE;
      attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
      attr.sample_period = 1;
      attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR;
      attr.exclude_kernel = 1;
      attr.exclude_hv = 1;
      attr.exclude_idle = 1;

      file = syscall (SYS_perf_event_open, &attr, child, -1, -1, 0);
      if (file >= 0)
        close (file);

      kill (child, SIGKILL);
      ptrace (PTRACE_KILL, child, NULL, NULL);

      pid = waitpid (child, &status, 0);
      if (pid != child)
        {
          warning (_("test bts: bad pid %ld, error: %s."),
                   (long) pid, strerror (errno));
          if (!WIFSIGNALED (status))
            warning (_("test bts: expected killed. status: %d."),
                     status);
        }

      return (file >= 0);
    }
}

/* Check whether an Intel cpu supports BTS.  */

static int
intel_supports_bts (const struct btrace_cpu *cpu)
{
  switch (cpu->family)
    {
    case 0x6:
      switch (cpu->model)
        {
        case 0x1a: /* Nehalem */
        case 0x1f:
        case 0x1e:
        case 0x2e:
        case 0x25: /* Westmere */
        case 0x2c:
        case 0x2f:
        case 0x2a: /* Sandy Bridge */
        case 0x2d:
        case 0x3a: /* Ivy Bridge */

          /* AAJ122: LBR, BTM, or BTS records may have incorrect branch
             "from" information after an EIST transition, T-states, C1E, or
             Adaptive Thermal Throttling.  */
          return 0;
        }
    }

  return 1;
}

/* Check whether the cpu supports BTS.  */

static int
cpu_supports_bts (void)
{
  struct btrace_cpu cpu;

  cpu = btrace_this_cpu ();
  switch (cpu.vendor)
    {
    default:
      /* Don't know about others.  Let's assume they do.  */
      return 1;

    case CV_INTEL:
      return intel_supports_bts (&cpu);
    }
}

/* Check whether the linux target supports BTS.  */

static int
linux_supports_bts (void)
{
  static int cached;

  if (cached == 0)
    {
      if (!kernel_supports_bts ())
        cached = -1;
      else if (!cpu_supports_bts ())
        cached = -1;
      else
        cached = 1;
    }

  return cached > 0;
}

/* See linux-btrace.h.  */

int
linux_supports_btrace (struct target_ops *ops, enum btrace_format format)
{
  switch (format)
    {
    case BTRACE_FORMAT_NONE:
      return 0;

    case BTRACE_FORMAT_BTS:
      return linux_supports_bts ();
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format"));
}

/* Enable branch tracing in BTS format.  */

static struct btrace_target_info *
linux_enable_bts (ptid_t ptid, const struct btrace_config_bts *conf)
{
  struct perf_event_mmap_page *header;
  struct btrace_target_info *tinfo;
  struct btrace_tinfo_bts *bts;
  unsigned long long size, pages;
  int pid, pg;

  tinfo = xzalloc (sizeof (*tinfo));
  tinfo->ptid = ptid;
  tinfo->ptr_bits = 0;

  tinfo->conf.format = BTRACE_FORMAT_BTS;
  bts = &tinfo->variant.bts;

  bts->attr.size = sizeof (bts->attr);
  bts->attr.type = PERF_TYPE_HARDWARE;
  bts->attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
  bts->attr.sample_period = 1;

  /* We sample the from and to addresses.  */
  bts->attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR;

  bts->attr.exclude_kernel = 1;
  bts->attr.exclude_hv = 1;
  bts->attr.exclude_idle = 1;

  pid = ptid_get_lwp (ptid);
  if (pid == 0)
    pid = ptid_get_pid (ptid);

  errno = 0;
  bts->file = syscall (SYS_perf_event_open, &bts->attr, pid, -1, -1, 0);
  if (bts->file < 0)
    goto err;

  /* Convert the requested size in bytes to pages (rounding up).  */
  pages = (((unsigned long long) conf->size) + PAGE_SIZE - 1) / PAGE_SIZE;
  /* We need at least one page.  */
  if (pages == 0)
    pages = 1;

  /* The buffer size can be requested in powers of two pages.  Adjust PAGES
     to the next power of two.  */
  for (pg = 0; pages != (1u << pg); ++pg)
    if ((pages & (1u << pg)) != 0)
      pages += (1u << pg);
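
  /* For example (assumed request): with 4 KiB pages, a requested size of
     192 KiB gives PAGES = 48 (0b110000).  The loop adds each set bit it
     finds, carrying upward (48 + 16 = 64), and terminates once PAGES is a
     single power of two, here 64 pages, i.e. 256 KiB.  */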

  /* We try to allocate the requested size.
     If that fails, try to get as much as we can.  */
  for (; pages > 0; pages >>= 1)
    {
      size_t length;

      size = pages * PAGE_SIZE;
      length = size + PAGE_SIZE;

      /* Check for overflows.  */
      if ((unsigned long long) length < size)
        continue;

      /* The number of pages we request needs to be a power of two.  */
      header = mmap (NULL, length, PROT_READ, MAP_SHARED, bts->file, 0);
      if (header != MAP_FAILED)
        break;
    }

  if (header == MAP_FAILED)
    goto err_file;

  bts->header = header;
  bts->bts.mem = ((const uint8_t *) header) + PAGE_SIZE;
  bts->bts.size = size;
  bts->bts.data_head = &header->data_head;
  bts->bts.last_head = 0;

  tinfo->conf.bts.size = size;
  return tinfo;

 err_file:
  /* We were not able to allocate any buffer.  */
  close (bts->file);

 err:
  xfree (tinfo);
  return NULL;
}

/* See linux-btrace.h.  */

struct btrace_target_info *
linux_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
{
  struct btrace_target_info *tinfo;

  tinfo = NULL;
  switch (conf->format)
    {
    case BTRACE_FORMAT_NONE:
      break;

    case BTRACE_FORMAT_BTS:
      tinfo = linux_enable_bts (ptid, &conf->bts);
      break;
    }

  return tinfo;
}

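/* A minimal usage sketch of this interface (hypothetical caller; error
   handling and data initialization omitted; see linux-btrace.h for the
   real declarations):

     struct btrace_config conf;
     struct btrace_target_info *tinfo;
     struct btrace_data data;

     memset (&conf, 0, sizeof (conf));
     conf.format = BTRACE_FORMAT_BTS;
     conf.bts.size = 64 * 1024;

     tinfo = linux_enable_btrace (ptid, &conf);
     ...
     linux_read_btrace (&data, tinfo, BTRACE_READ_NEW);
     ...
     linux_disable_btrace (tinfo);  */
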
/* Disable BTS tracing.  */

static enum btrace_error
linux_disable_bts (struct btrace_tinfo_bts *tinfo)
{
  munmap ((void *) tinfo->header, tinfo->bts.size + PAGE_SIZE);
  close (tinfo->file);

  return BTRACE_ERR_NONE;
}

/* See linux-btrace.h.  */

enum btrace_error
linux_disable_btrace (struct btrace_target_info *tinfo)
{
  enum btrace_error errcode;

  errcode = BTRACE_ERR_NOT_SUPPORTED;
  switch (tinfo->conf.format)
    {
    case BTRACE_FORMAT_NONE:
      break;

    case BTRACE_FORMAT_BTS:
      errcode = linux_disable_bts (&tinfo->variant.bts);
      break;
    }

  if (errcode == BTRACE_ERR_NONE)
    xfree (tinfo);

  return errcode;
}

/* Read branch trace data in BTS format for the thread given by TINFO into
   BTRACE using the TYPE reading method.  */

static enum btrace_error
linux_read_bts (struct btrace_data_bts *btrace,
                struct btrace_target_info *tinfo,
                enum btrace_read_type type)
{
  struct perf_event_buffer *pevent;
  const uint8_t *begin, *end, *start;
  unsigned long long data_head, data_tail, buffer_size, size;
  unsigned int retries = 5;

  pevent = &tinfo->variant.bts.bts;

  /* For delta reads, we return at least the partial last block containing
     the current PC.  */
  if (type == BTRACE_READ_NEW && !perf_event_new_data (pevent))
    return BTRACE_ERR_NONE;

  buffer_size = pevent->size;
  data_tail = pevent->last_head;

  /* We may need to retry reading the trace.  See below.  */
  while (retries--)
    {
      data_head = *pevent->data_head;

      /* Delete any leftover trace from the previous iteration.  */
      VEC_free (btrace_block_s, btrace->blocks);

      if (type == BTRACE_READ_DELTA)
        {
          /* Determine the number of bytes to read and check for buffer
             overflows.  */

          /* Check for data head overflows.  We might be able to recover from
             those but they are very unlikely and it's not really worth the
             effort, I think.  */
          if (data_head < data_tail)
            return BTRACE_ERR_OVERFLOW;

          /* If the buffer is smaller than the trace delta, we overflowed.  */
          size = data_head - data_tail;
          if (buffer_size < size)
            return BTRACE_ERR_OVERFLOW;
        }
      else
        {
          /* Read the entire buffer.  */
          size = buffer_size;

          /* Adjust the size if the buffer has not overflowed, yet.  */
          if (data_head < size)
            size = data_head;
        }

      /* Data_head keeps growing; the buffer itself is circular.  */
      begin = pevent->mem;
      start = begin + data_head % buffer_size;

      if (data_head <= buffer_size)
        end = start;
      else
        end = begin + pevent->size;

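      /* Example (assumed values): with BUFFER_SIZE = 4096 and
         DATA_HEAD = 10000, the kernel has wrapped twice; START points at
         BEGIN + 10000 % 4096 = BEGIN + 1808, and END is BEGIN + 4096.
         With DATA_HEAD = 3000 (no wrap yet), START and END both point at
         BEGIN + 3000, and only the first 3000 bytes are valid trace.  */
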
      btrace->blocks = perf_event_read_bts (tinfo, begin, end, start, size);

      /* The stopping thread notifies its ptracer before it is scheduled out.
         On multi-core systems, the debugger might therefore run while the
         kernel might be writing the last branch trace records.

         Let's check whether the data head moved while we read the trace.  */
      if (data_head == *pevent->data_head)
        break;
    }

  pevent->last_head = data_head;

  /* Prune the incomplete last block (i.e. the first one of inferior execution)
     if we're not doing a delta read.  There is no way of filling in its zeroed
     BEGIN element.  */
  if (!VEC_empty (btrace_block_s, btrace->blocks)
      && type != BTRACE_READ_DELTA)
    VEC_pop (btrace_block_s, btrace->blocks);

  return BTRACE_ERR_NONE;
}

/* See linux-btrace.h.  */

enum btrace_error
linux_read_btrace (struct btrace_data *btrace,
                   struct btrace_target_info *tinfo,
                   enum btrace_read_type type)
{
  switch (tinfo->conf.format)
    {
    case BTRACE_FORMAT_NONE:
      return BTRACE_ERR_NOT_SUPPORTED;

    case BTRACE_FORMAT_BTS:
      /* We read btrace in BTS format.  */
      btrace->format = BTRACE_FORMAT_BTS;
      btrace->variant.bts.blocks = NULL;

      return linux_read_bts (&btrace->variant.bts, tinfo, type);
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}

/* See linux-btrace.h.  */

const struct btrace_config *
linux_btrace_conf (const struct btrace_target_info *tinfo)
{
  return &tinfo->conf;
}

#else /* !HAVE_LINUX_PERF_EVENT_H */

/* See linux-btrace.h.  */

int
linux_supports_btrace (struct target_ops *ops, enum btrace_format format)
{
  return 0;
}

/* See linux-btrace.h.  */

struct btrace_target_info *
linux_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
{
  return NULL;
}

/* See linux-btrace.h.  */

enum btrace_error
linux_disable_btrace (struct btrace_target_info *tinfo)
{
  return BTRACE_ERR_NOT_SUPPORTED;
}

/* See linux-btrace.h.  */

enum btrace_error
linux_read_btrace (struct btrace_data *btrace,
                   struct btrace_target_info *tinfo,
                   enum btrace_read_type type)
{
  return BTRACE_ERR_NOT_SUPPORTED;
}

/* See linux-btrace.h.  */

const struct btrace_config *
linux_btrace_conf (const struct btrace_target_info *tinfo)
{
  return NULL;
}

#endif /* !HAVE_LINUX_PERF_EVENT_H */