btrace, gdbserver: remove the to_supports_btrace target method
gdb/nat/linux-btrace.c (deliverable/binutils-gdb.git)
1 /* Linux-dependent part of branch trace support for GDB, and GDBserver.
2
3 Copyright (C) 2013-2018 Free Software Foundation, Inc.
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "common-defs.h"
23 #include "linux-btrace.h"
24 #include "common-regcache.h"
25 #include "gdb_wait.h"
26 #include "x86-cpuid.h"
27 #include "filestuff.h"
28 #include "common/scoped_fd.h"
29 #include "common/scoped_mmap.h"
30
31 #include <inttypes.h>
32
33 #include <sys/syscall.h>
34
35 #if HAVE_LINUX_PERF_EVENT_H && defined(SYS_perf_event_open)
36 #include <unistd.h>
37 #include <sys/mman.h>
38 #include <sys/user.h>
39 #include "nat/gdb_ptrace.h"
40 #include <sys/types.h>
41 #include <signal.h>
42
43 /* A branch trace record in perf_event. */
44 struct perf_event_bts
45 {
46 /* The linear address of the branch source. */
47 uint64_t from;
48
49 /* The linear address of the branch destination. */
50 uint64_t to;
51 };
52
53 /* A perf_event branch trace sample. */
54 struct perf_event_sample
55 {
56 /* The perf_event sample header. */
57 struct perf_event_header header;
58
59 /* The perf_event branch tracing payload. */
60 struct perf_event_bts bts;
61 };
62
63 /* Identify the cpu we're running on. */
64 static struct btrace_cpu
65 btrace_this_cpu (void)
66 {
67 struct btrace_cpu cpu;
68 unsigned int eax, ebx, ecx, edx;
69 int ok;
70
71 memset (&cpu, 0, sizeof (cpu));
72
73 ok = x86_cpuid (0, &eax, &ebx, &ecx, &edx);
74 if (ok != 0)
75 {
76 if (ebx == signature_INTEL_ebx && ecx == signature_INTEL_ecx
77 && edx == signature_INTEL_edx)
78 {
79 unsigned int cpuid, ignore;
80
81 ok = x86_cpuid (1, &cpuid, &ignore, &ignore, &ignore);
82 if (ok != 0)
83 {
84 cpu.vendor = CV_INTEL;
85
86 cpu.family = (cpuid >> 8) & 0xf;
87 cpu.model = (cpuid >> 4) & 0xf;
88
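/* For family 0x6, the extended model bits (CPUID.01H:EAX[19:16]) are part of
   the model number; shifting right by 12 and masking with 0xf0 moves them
   into bits 7..4 so they can simply be added to the 4-bit model above.  */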
89 if (cpu.family == 0x6)
90 cpu.model += (cpuid >> 12) & 0xf0;
91 }
92 }
93 }
94
95 return cpu;
96 }
97
98 /* Return non-zero if there is new data in PEV; zero otherwise. */
99
100 static int
101 perf_event_new_data (const struct perf_event_buffer *pev)
102 {
103 return *pev->data_head != pev->last_head;
104 }
105
106 /* Copy the last SIZE bytes from PEV ending at DATA_HEAD and return a pointer
107 to the memory holding the copy.
108 The caller is responsible for freeing the memory. */
109
110 static gdb_byte *
111 perf_event_read (const struct perf_event_buffer *pev, __u64 data_head,
112 size_t size)
113 {
114 const gdb_byte *begin, *end, *start, *stop;
115 gdb_byte *buffer;
116 size_t buffer_size;
117 __u64 data_tail;
118
119 if (size == 0)
120 return NULL;
121
122 /* We should never ask for more data than the buffer can hold. */
123 buffer_size = pev->size;
124 gdb_assert (size <= buffer_size);
125
126 /* If we ask for more data than we seem to have, we wrap around and read
127 data from the end of the buffer. This is already handled by the %
128 BUFFER_SIZE operation, below. Here, we just need to make sure that we
129 don't underflow.
130
131 Note that this is perfectly OK for perf event buffers where data_head
132 doesn't grow indefinitely and instead wraps around to remain within the
133 buffer's boundaries. */
134 if (data_head < size)
135 data_head += buffer_size;
136
137 gdb_assert (size <= data_head);
138 data_tail = data_head - size;
139
140 begin = pev->mem;
141 start = begin + data_tail % buffer_size;
142 stop = begin + data_head % buffer_size;
143
144 buffer = (gdb_byte *) xmalloc (size);
145
146 if (start < stop)
147 memcpy (buffer, start, stop - start);
148 else
149 {
150 end = begin + buffer_size;
151
152 memcpy (buffer, start, end - start);
153 memcpy (buffer + (end - start), begin, stop - begin);
154 }
155
156 return buffer;
157 }
158
159 /* Copy the perf event buffer data from PEV.
160 Store a pointer to the copy into DATA and its size in SIZE. */
161
162 static void
163 perf_event_read_all (struct perf_event_buffer *pev, gdb_byte **data,
164 size_t *psize)
165 {
166 size_t size;
167 __u64 data_head;
168
169 data_head = *pev->data_head;
170 size = pev->size;
171
172 *data = perf_event_read (pev, data_head, size);
173 *psize = size;
174
175 pev->last_head = data_head;
176 }
177
178 /* Try to determine the start address of the Linux kernel. */
179
180 static uint64_t
181 linux_determine_kernel_start (void)
182 {
183 static uint64_t kernel_start;
184 static int cached;
185
186 if (cached != 0)
187 return kernel_start;
188
189 cached = 1;
190
191 gdb_file_up file = gdb_fopen_cloexec ("/proc/kallsyms", "r");
192 if (file == NULL)
193 return kernel_start;
194
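/* Each kallsyms line has the form "<address> <type> <symbol>", e.g.
   "ffffffff81000000 T _text".  We scan for the "_text" symbol, which marks
   the start of the kernel text section.  With kernel pointer restrictions
   (kptr_restrict) the addresses may be reported as zero, in which case
   perf_event_is_kernel_addr below falls back to checking the address's most
   significant bit.  */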
195 while (!feof (file.get ()))
196 {
197 char buffer[1024], symbol[8], *line;
198 uint64_t addr;
199 int match;
200
201 line = fgets (buffer, sizeof (buffer), file.get ());
202 if (line == NULL)
203 break;
204
205 match = sscanf (line, "%" SCNx64 " %*[tT] %7s", &addr, symbol);
206 if (match != 2)
207 continue;
208
209 if (strcmp (symbol, "_text") == 0)
210 {
211 kernel_start = addr;
212 break;
213 }
214 }
215
216 return kernel_start;
217 }
218
219 /* Check whether an address is in the kernel. */
220
221 static inline int
222 perf_event_is_kernel_addr (uint64_t addr)
223 {
224 uint64_t kernel_start;
225
226 kernel_start = linux_determine_kernel_start ();
227 if (kernel_start != 0ull)
228 return (addr >= kernel_start);
229
230 /* If we don't know the kernel's start address, let's check the most
231 significant bit. This will work at least for 64-bit kernels. */
232 return ((addr & (1ull << 63)) != 0);
233 }
234
235 /* Check whether a perf event record should be skipped. */
236
237 static inline int
238 perf_event_skip_bts_record (const struct perf_event_bts *bts)
239 {
240 /* The hardware may report branches from kernel into user space. Branches
241 from user into kernel space will be suppressed. We filter the former to
242 provide a consistent branch trace excluding the kernel. */
243 return perf_event_is_kernel_addr (bts->from);
244 }
245
246 /* Perform a few consistency checks on a perf event sample record. This is
247 meant to catch cases when we get out of sync with the perf event stream. */
248
249 static inline int
250 perf_event_sample_ok (const struct perf_event_sample *sample)
251 {
252 if (sample->header.type != PERF_RECORD_SAMPLE)
253 return 0;
254
255 if (sample->header.size != sizeof (*sample))
256 return 0;
257
258 return 1;
259 }
260
261 /* Branch trace is collected in a circular buffer [begin; end) as pairs of from
262 and to addresses (plus a header).
263
264 Start points into that buffer at the next sample position.
265 We read the collected samples backwards from start.
266
267 While reading the samples, we convert the information into a list of blocks.
268 For two adjacent samples s1 and s2, we form a block b such that b.begin =
269 s1.to and b.end = s2.from.
270
271 In case the buffer overflows during sampling, one sample may have its lower
272 part at the end and its upper part at the beginning of the buffer. */
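/* For example, with two samples s1 (older) and s2 (newer) and the thread
   stopped at PC, the traversal below pushes the blocks [s2.to; PC],
   [s1.to; s2.from], and finally [0; s1.from] for the oldest block, whose
   start is unknown.  */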
273
274 static VEC (btrace_block_s) *
275 perf_event_read_bts (struct btrace_target_info* tinfo, const uint8_t *begin,
276 const uint8_t *end, const uint8_t *start, size_t size)
277 {
278 VEC (btrace_block_s) *btrace = NULL;
279 struct perf_event_sample sample;
280 size_t read = 0;
281 struct btrace_block block = { 0, 0 };
282 struct regcache *regcache;
283
284 gdb_assert (begin <= start);
285 gdb_assert (start <= end);
286
287 /* The first block ends at the current pc. */
288 regcache = get_thread_regcache_for_ptid (tinfo->ptid);
289 block.end = regcache_read_pc (regcache);
290
291 /* The buffer may contain a partial record as its last entry (i.e. when the
292 buffer size is not a multiple of the sample size). */
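/* Starting READ just below one full sample makes the loop below iterate
   exactly size / sizeof (sample) times (integer division), i.e. it only
   ever reads complete samples.  */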
293 read = sizeof (sample) - 1;
294
295 for (; read < size; read += sizeof (sample))
296 {
297 const struct perf_event_sample *psample;
298
299 /* Find the next perf_event sample in a backwards traversal. */
300 start -= sizeof (sample);
301
302 /* If we're still inside the buffer, we can use the sample in place. */
303 if (begin <= start)
304 psample = (const struct perf_event_sample *) start;
305 else
306 {
307 int missing;
308
309 /* We ran off the beginning of the ring buffer; wrap around and
310 reappear at its end. */
311
312 missing = (begin - start);
313 start = (end - missing);
314
315 /* If the entire sample lies in the wrapped part, it is contiguous at the end. */
316 if (missing == sizeof (sample))
317 psample = (const struct perf_event_sample *) start;
318 else
319 {
320 uint8_t *stack;
321
322 /* The sample wrapped around. The lower part is at the end and
323 the upper part is at the beginning of the buffer. */
324 stack = (uint8_t *) &sample;
325
326 /* Copy the two parts so we have a contiguous sample. */
327 memcpy (stack, start, missing);
328 memcpy (stack + missing, begin, sizeof (sample) - missing);
329
330 psample = &sample;
331 }
332 }
333
334 if (!perf_event_sample_ok (psample))
335 {
336 warning (_("Branch trace may be incomplete."));
337 break;
338 }
339
340 if (perf_event_skip_bts_record (&psample->bts))
341 continue;
342
343 /* We found a valid sample, so we can complete the current block. */
344 block.begin = psample->bts.to;
345
346 VEC_safe_push (btrace_block_s, btrace, &block);
347
348 /* Start the next block. */
349 block.end = psample->bts.from;
350 }
351
352 /* Push the last block (i.e. the first one of inferior execution), as well.
353 We don't know where it starts, but we know where it ends. If we're
354 reading delta trace, we can fill in the start address later on.
355 Otherwise we will prune it. */
356 block.begin = 0;
357 VEC_safe_push (btrace_block_s, btrace, &block);
358
359 return btrace;
360 }
361
362 /* Check whether an Intel cpu supports BTS. */
363
364 static int
365 intel_supports_bts (const struct btrace_cpu *cpu)
366 {
367 switch (cpu->family)
368 {
369 case 0x6:
370 switch (cpu->model)
371 {
372 case 0x1a: /* Nehalem */
373 case 0x1f:
374 case 0x1e:
375 case 0x2e:
376 case 0x25: /* Westmere */
377 case 0x2c:
378 case 0x2f:
379 case 0x2a: /* Sandy Bridge */
380 case 0x2d:
381 case 0x3a: /* Ivy Bridge */
382
383 /* AAJ122: LBR, BTM, or BTS records may have incorrect branch
384 "from" information after an EIST transition, T-states, C1E, or
385 Adaptive Thermal Throttling. */
386 return 0;
387 }
388 }
389
390 return 1;
391 }
392
393 /* Check whether the cpu supports BTS. */
394
395 static int
396 cpu_supports_bts (void)
397 {
398 struct btrace_cpu cpu;
399
400 cpu = btrace_this_cpu ();
401 switch (cpu.vendor)
402 {
403 default:
404 /* Don't know about others. Let's assume they do. */
405 return 1;
406
407 case CV_INTEL:
408 return intel_supports_bts (&cpu);
409 }
410 }
411
412 /* Enable branch tracing in BTS format. */
413
414 static struct btrace_target_info *
415 linux_enable_bts (ptid_t ptid, const struct btrace_config_bts *conf)
416 {
417 struct btrace_tinfo_bts *bts;
418 size_t size, pages;
419 __u64 data_offset;
420 int pid, pg;
421
422 if (!cpu_supports_bts ())
423 error (_("BTS support has been disabled for the target cpu."));
424
425 gdb::unique_xmalloc_ptr<btrace_target_info> tinfo
426 (XCNEW (btrace_target_info));
427 tinfo->ptid = ptid;
428
429 tinfo->conf.format = BTRACE_FORMAT_BTS;
430 bts = &tinfo->variant.bts;
431
432 bts->attr.size = sizeof (bts->attr);
433 bts->attr.type = PERF_TYPE_HARDWARE;
434 bts->attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
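/* A sample period of 1 requests a sample for every branch instruction.  On
   processors with BTS, the kernel is expected to service this via the BTS
   facility rather than by taking an interrupt per branch.  */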
435 bts->attr.sample_period = 1;
436
437 /* We sample the branch source and destination addresses. */
438 bts->attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR;
439
440 bts->attr.exclude_kernel = 1;
441 bts->attr.exclude_hv = 1;
442 bts->attr.exclude_idle = 1;
443
444 pid = ptid_get_lwp (ptid);
445 if (pid == 0)
446 pid = ptid_get_pid (ptid);
447
448 errno = 0;
449 scoped_fd fd (syscall (SYS_perf_event_open, &bts->attr, pid, -1, -1, 0));
450 if (fd.get () < 0)
451 return nullptr;
452
453 /* Convert the requested size in bytes to pages (rounding up). */
454 pages = ((size_t) conf->size / PAGE_SIZE
455 + ((conf->size % PAGE_SIZE) == 0 ? 0 : 1));
456 /* We need at least one page. */
457 if (pages == 0)
458 pages = 1;
459
460 /* The buffer size can be requested in powers of two pages. Adjust PAGES
461 to the next power of two. */
462 for (pg = 0; pages != ((size_t) 1 << pg); ++pg)
463 if ((pages & ((size_t) 1 << pg)) != 0)
464 pages += ((size_t) 1 << pg);
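/* The loop above keeps adding the lowest set bit until a single bit remains,
   e.g. 5 (101b) -> 6 (110b) -> 8 (1000b), i.e. PAGES is rounded up to the
   next power of two.  */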
465
466 /* We try to allocate the requested size.
467 If that fails, try to get as much as we can. */
468 scoped_mmap data;
469 for (; pages > 0; pages >>= 1)
470 {
471 size_t length;
472 __u64 data_size;
473
474 data_size = (__u64) pages * PAGE_SIZE;
475
476 /* Don't ask for more than we can represent in the configuration. */
477 if ((__u64) UINT_MAX < data_size)
478 continue;
479
480 size = (size_t) data_size;
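/* The mapping consists of one perf_event_mmap_page header page followed by
   the data pages, hence the extra PAGE_SIZE below.  */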
481 length = size + PAGE_SIZE;
482
483 /* Check for overflows. */
484 if ((__u64) length != data_size + PAGE_SIZE)
485 continue;
486
487 /* The number of pages we request needs to be a power of two. */
488 data.reset (nullptr, length, PROT_READ, MAP_SHARED, fd.get (), 0);
489 if (data.get () != MAP_FAILED)
490 break;
491 }
492
493 if (pages == 0)
494 return nullptr;
495
496 struct perf_event_mmap_page *header = (struct perf_event_mmap_page *)
497 data.get ();
498 data_offset = PAGE_SIZE;
499
500 #if defined (PERF_ATTR_SIZE_VER5)
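/* Newer kernels report the location and size of the data area in the mmap
   header.  If HEADER is large enough to contain those fields, prefer them
   over the fixed layout assumed above (data starting right after the header
   page).  */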
501 if (offsetof (struct perf_event_mmap_page, data_size) <= header->size)
502 {
503 __u64 data_size;
504
505 data_offset = header->data_offset;
506 data_size = header->data_size;
507
508 size = (unsigned int) data_size;
509
510 /* Check for overflows. */
511 if ((__u64) size != data_size)
512 return nullptr;
513 }
514 #endif /* defined (PERF_ATTR_SIZE_VER5) */
515
516 bts->bts.size = size;
517 bts->bts.data_head = &header->data_head;
518 bts->bts.mem = (const uint8_t *) data.get () + data_offset;
519 bts->bts.last_head = 0ull;
520 bts->header = header;
521 bts->file = fd.release ();
522
523 data.release ();
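/* Ownership of the perf event file descriptor and of the mapping has been
   transferred to TINFO; both are released again in linux_disable_bts.  */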
524
525 tinfo->conf.bts.size = (unsigned int) size;
526 return tinfo.release ();
527 }
528
529 #if defined (PERF_ATTR_SIZE_VER5)
530
531 /* Determine the event type.
532 Returns zero on success and fills in TYPE; returns -1 otherwise. */
533
534 static int
535 perf_event_pt_event_type (int *type)
536 {
537 gdb_file_up file =
538 gdb_fopen_cloexec ("/sys/bus/event_source/devices/intel_pt/type", "r");
539 if (file.get () == nullptr)
540 return -1;
541
542 int found = fscanf (file.get (), "%d", type);
543 if (found == 1)
544 return 0;
545 return -1;
546 }
547
548 /* Enable branch tracing in Intel Processor Trace format. */
549
550 static struct btrace_target_info *
551 linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
552 {
553 struct btrace_tinfo_pt *pt;
554 size_t pages;
555 int pid, pg, errcode, type;
556
557 if (conf->size == 0)
558 return NULL;
559
560 errcode = perf_event_pt_event_type (&type);
561 if (errcode != 0)
562 return NULL;
563
564 pid = ptid_get_lwp (ptid);
565 if (pid == 0)
566 pid = ptid_get_pid (ptid);
567
568 gdb::unique_xmalloc_ptr<btrace_target_info> tinfo
569 (XCNEW (btrace_target_info));
570 tinfo->ptid = ptid;
571
572 tinfo->conf.format = BTRACE_FORMAT_PT;
573 pt = &tinfo->variant.pt;
574
575 pt->attr.size = sizeof (pt->attr);
576 pt->attr.type = type;
577
578 pt->attr.exclude_kernel = 1;
579 pt->attr.exclude_hv = 1;
580 pt->attr.exclude_idle = 1;
581
582 errno = 0;
583 scoped_fd fd (syscall (SYS_perf_event_open, &pt->attr, pid, -1, -1, 0));
584 if (fd.get () < 0)
585 return nullptr;
586
587 /* Allocate the configuration page. */
588 scoped_mmap data (nullptr, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
589 fd.get (), 0);
590 if (data.get () == MAP_FAILED)
591 return nullptr;
592
593 struct perf_event_mmap_page *header = (struct perf_event_mmap_page *)
594 data.get ();
595
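/* Place the AUX area directly behind the data area.  The kernel requires
   aux_offset and aux_size to be set in the header before the AUX buffer can
   be mapped; aux_size is filled in below once we know how much we can map.  */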
596 header->aux_offset = header->data_offset + header->data_size;
597
598 /* Convert the requested size in bytes to pages (rounding up). */
599 pages = ((size_t) conf->size / PAGE_SIZE
600 + ((conf->size % PAGE_SIZE) == 0 ? 0 : 1));
601 /* We need at least one page. */
602 if (pages == 0)
603 pages = 1;
604
605 /* The buffer size can be requested in powers of two pages. Adjust PAGES
606 to the next power of two. */
607 for (pg = 0; pages != ((size_t) 1 << pg); ++pg)
608 if ((pages & ((size_t) 1 << pg)) != 0)
609 pages += ((size_t) 1 << pg);
610
611 /* We try to allocate the requested size.
612 If that fails, try to get as much as we can. */
613 scoped_mmap aux;
614 for (; pages > 0; pages >>= 1)
615 {
616 size_t length;
617 __u64 data_size;
618
619 data_size = (__u64) pages * PAGE_SIZE;
620
621 /* Don't ask for more than we can represent in the configuration. */
622 if ((__u64) UINT_MAX < data_size)
623 continue;
624
625 length = (size_t) data_size;
626
627 /* Check for overflows. */
628 if ((__u64) length != data_size)
629 continue;
630
631 header->aux_size = data_size;
632
633 aux.reset (nullptr, length, PROT_READ, MAP_SHARED, fd.get (),
634 header->aux_offset);
635 if (aux.get () != MAP_FAILED)
636 break;
637 }
638
639 if (pages == 0)
640 return nullptr;
641
642 pt->pt.size = aux.size ();
643 pt->pt.mem = (const uint8_t *) aux.release ();
644 pt->pt.data_head = &header->aux_head;
645 pt->header = header;
646 pt->file = fd.release ();
647
648 data.release ();
649
650 tinfo->conf.pt.size = (unsigned int) pt->pt.size;
651 return tinfo.release ();
652 }
653
654 #else /* !defined (PERF_ATTR_SIZE_VER5) */
655
656 static struct btrace_target_info *
657 linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
658 {
659 errno = EOPNOTSUPP;
660 return NULL;
661 }
662
663 #endif /* !defined (PERF_ATTR_SIZE_VER5) */
664
665 /* See linux-btrace.h. */
666
667 struct btrace_target_info *
668 linux_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
669 {
670 struct btrace_target_info *tinfo;
671
672 tinfo = NULL;
673 switch (conf->format)
674 {
675 case BTRACE_FORMAT_NONE:
676 break;
677
678 case BTRACE_FORMAT_BTS:
679 tinfo = linux_enable_bts (ptid, &conf->bts);
680 break;
681
682 case BTRACE_FORMAT_PT:
683 tinfo = linux_enable_pt (ptid, &conf->pt);
684 break;
685 }
686
687 if (tinfo == NULL)
688 error (_("Unknown error."));
689
690 return tinfo;
691 }
692
693 /* Disable BTS tracing. */
694
695 static enum btrace_error
696 linux_disable_bts (struct btrace_tinfo_bts *tinfo)
697 {
698 munmap((void *) tinfo->header, tinfo->bts.size + PAGE_SIZE);
699 close (tinfo->file);
700
701 return BTRACE_ERR_NONE;
702 }
703
704 /* Disable Intel Processor Trace tracing. */
705
706 static enum btrace_error
707 linux_disable_pt (struct btrace_tinfo_pt *tinfo)
708 {
709 munmap((void *) tinfo->pt.mem, tinfo->pt.size);
710 munmap((void *) tinfo->header, PAGE_SIZE);
711 close (tinfo->file);
712
713 return BTRACE_ERR_NONE;
714 }
715
716 /* See linux-btrace.h. */
717
718 enum btrace_error
719 linux_disable_btrace (struct btrace_target_info *tinfo)
720 {
721 enum btrace_error errcode;
722
723 errcode = BTRACE_ERR_NOT_SUPPORTED;
724 switch (tinfo->conf.format)
725 {
726 case BTRACE_FORMAT_NONE:
727 break;
728
729 case BTRACE_FORMAT_BTS:
730 errcode = linux_disable_bts (&tinfo->variant.bts);
731 break;
732
733 case BTRACE_FORMAT_PT:
734 errcode = linux_disable_pt (&tinfo->variant.pt);
735 break;
736 }
737
738 if (errcode == BTRACE_ERR_NONE)
739 xfree (tinfo);
740
741 return errcode;
742 }
743
744 /* Read branch trace data in BTS format for the thread given by TINFO into
745 BTRACE using the TYPE reading method. */
746
747 static enum btrace_error
748 linux_read_bts (struct btrace_data_bts *btrace,
749 struct btrace_target_info *tinfo,
750 enum btrace_read_type type)
751 {
752 struct perf_event_buffer *pevent;
753 const uint8_t *begin, *end, *start;
754 size_t buffer_size, size;
755 __u64 data_head, data_tail;
756 unsigned int retries = 5;
757
758 pevent = &tinfo->variant.bts.bts;
759
760 /* For delta reads, we return at least the partial last block containing
761 the current PC. */
762 if (type == BTRACE_READ_NEW && !perf_event_new_data (pevent))
763 return BTRACE_ERR_NONE;
764
765 buffer_size = pevent->size;
766 data_tail = pevent->last_head;
767
768 /* We may need to retry reading the trace. See below. */
769 while (retries--)
770 {
771 data_head = *pevent->data_head;
772
773 /* Delete any leftover trace from the previous iteration. */
774 VEC_free (btrace_block_s, btrace->blocks);
775
776 if (type == BTRACE_READ_DELTA)
777 {
778 __u64 data_size;
779
780 /* Determine the number of bytes to read and check for buffer
781 overflows. */
782
783 /* Check for data head overflows. We might be able to recover from
784 those but they are very unlikely and it's not really worth the
785 effort, I think. */
786 if (data_head < data_tail)
787 return BTRACE_ERR_OVERFLOW;
788
789 /* If the buffer is smaller than the trace delta, we overflowed. */
790 data_size = data_head - data_tail;
791 if (buffer_size < data_size)
792 return BTRACE_ERR_OVERFLOW;
793
794 /* DATA_SIZE <= BUFFER_SIZE and therefore fits into a size_t. */
795 size = (size_t) data_size;
796 }
797 else
798 {
799 /* Read the entire buffer. */
800 size = buffer_size;
801
802 /* Adjust the size if the buffer has not overflowed, yet. */
803 if (data_head < size)
804 size = (size_t) data_head;
805 }
806
807 /* Data_head keeps growing; the buffer itself is circular. */
808 begin = pevent->mem;
809 start = begin + data_head % buffer_size;
810
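/* If the buffer has not filled up yet, valid trace only extends up to START;
   otherwise the whole buffer is valid and END marks its upper bound for the
   wrap-around handling in perf_event_read_bts.  */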
811 if (data_head <= buffer_size)
812 end = start;
813 else
814 end = begin + pevent->size;
815
816 btrace->blocks = perf_event_read_bts (tinfo, begin, end, start, size);
817
818 /* The stopping thread notifies its ptracer before it is scheduled out.
819 On multi-core systems, the debugger might therefore run while the
820 kernel might be writing the last branch trace records.
821
822 Let's check whether the data head moved while we read the trace. */
823 if (data_head == *pevent->data_head)
824 break;
825 }
826
827 pevent->last_head = data_head;
828
829 /* Prune the incomplete last block (i.e. the first one of inferior execution)
830 if we're not doing a delta read. There is no way of filling in its zeroed
831 BEGIN element. */
832 if (!VEC_empty (btrace_block_s, btrace->blocks)
833 && type != BTRACE_READ_DELTA)
834 VEC_pop (btrace_block_s, btrace->blocks);
835
836 return BTRACE_ERR_NONE;
837 }
838
839 /* Fill in the Intel Processor Trace configuration information. */
840
841 static void
842 linux_fill_btrace_pt_config (struct btrace_data_pt_config *conf)
843 {
844 conf->cpu = btrace_this_cpu ();
845 }
846
847 /* Read branch trace data in Intel Processor Trace format for the thread
848 given by TINFO into BTRACE using the TYPE reading method. */
849
850 static enum btrace_error
851 linux_read_pt (struct btrace_data_pt *btrace,
852 struct btrace_target_info *tinfo,
853 enum btrace_read_type type)
854 {
855 struct perf_event_buffer *pt;
856
857 pt = &tinfo->variant.pt.pt;
858
859 linux_fill_btrace_pt_config (&btrace->config);
860
861 switch (type)
862 {
863 case BTRACE_READ_DELTA:
864 /* We don't support delta reads. The data head (i.e. aux_head) wraps
865 around to stay inside the aux buffer. */
866 return BTRACE_ERR_NOT_SUPPORTED;
867
868 case BTRACE_READ_NEW:
869 if (!perf_event_new_data (pt))
870 return BTRACE_ERR_NONE;
871
872 /* Fall through. */
873 case BTRACE_READ_ALL:
874 perf_event_read_all (pt, &btrace->data, &btrace->size);
875 return BTRACE_ERR_NONE;
876 }
877
878 internal_error (__FILE__, __LINE__, _("Unknown btrace read type."));
879 }
880
881 /* See linux-btrace.h. */
882
883 enum btrace_error
884 linux_read_btrace (struct btrace_data *btrace,
885 struct btrace_target_info *tinfo,
886 enum btrace_read_type type)
887 {
888 switch (tinfo->conf.format)
889 {
890 case BTRACE_FORMAT_NONE:
891 return BTRACE_ERR_NOT_SUPPORTED;
892
893 case BTRACE_FORMAT_BTS:
894 /* We read btrace in BTS format. */
895 btrace->format = BTRACE_FORMAT_BTS;
896 btrace->variant.bts.blocks = NULL;
897
898 return linux_read_bts (&btrace->variant.bts, tinfo, type);
899
900 case BTRACE_FORMAT_PT:
901 /* We read btrace in Intel Processor Trace format. */
902 btrace->format = BTRACE_FORMAT_PT;
903 btrace->variant.pt.data = NULL;
904 btrace->variant.pt.size = 0;
905
906 return linux_read_pt (&btrace->variant.pt, tinfo, type);
907 }
908
909 internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
910 }
911
912 /* See linux-btrace.h. */
913
914 const struct btrace_config *
915 linux_btrace_conf (const struct btrace_target_info *tinfo)
916 {
917 return &tinfo->conf;
918 }
919
920 #else /* !HAVE_LINUX_PERF_EVENT_H */
921
922 /* See linux-btrace.h. */
923
924 struct btrace_target_info *
925 linux_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
926 {
927 return NULL;
928 }
929
930 /* See linux-btrace.h. */
931
932 enum btrace_error
933 linux_disable_btrace (struct btrace_target_info *tinfo)
934 {
935 return BTRACE_ERR_NOT_SUPPORTED;
936 }
937
938 /* See linux-btrace.h. */
939
940 enum btrace_error
941 linux_read_btrace (struct btrace_data *btrace,
942 struct btrace_target_info *tinfo,
943 enum btrace_read_type type)
944 {
945 return BTRACE_ERR_NOT_SUPPORTED;
946 }
947
948 /* See linux-btrace.h. */
949
950 const struct btrace_config *
951 linux_btrace_conf (const struct btrace_target_info *tinfo)
952 {
953 return NULL;
954 }
955
956 #endif /* !HAVE_LINUX_PERF_EVENT_H */