/* Linux-dependent part of branch trace support for GDB, and GDBserver.

   Copyright (C) 2013-2020 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program. If not, see <http://www.gnu.org/licenses/>. */

#include "gdbsupport/common-defs.h"
#include "linux-btrace.h"
#include "gdbsupport/common-regcache.h"
#include "gdbsupport/gdb_wait.h"
#include "x86-cpuid.h"
#include "gdbsupport/filestuff.h"
#include "gdbsupport/scoped_fd.h"
#include "gdbsupport/scoped_mmap.h"

#include <inttypes.h>

#include <sys/syscall.h>

#if HAVE_LINUX_PERF_EVENT_H && defined(SYS_perf_event_open)
#include <unistd.h>
#include <sys/mman.h>
#include <sys/user.h>
#include "nat/gdb_ptrace.h"
#include <sys/types.h>
#include <signal.h>

/* A branch trace record in perf_event. */
struct perf_event_bts
{
  /* The linear address of the branch source. */
  uint64_t from;

  /* The linear address of the branch destination. */
  uint64_t to;
};

/* A perf_event branch trace sample. */
struct perf_event_sample
{
  /* The perf_event sample header. */
  struct perf_event_header header;

  /* The perf_event branch tracing payload. */
  struct perf_event_bts bts;
};

/* Identify the cpu we're running on. */
static struct btrace_cpu
btrace_this_cpu (void)
{
  struct btrace_cpu cpu;
  unsigned int eax, ebx, ecx, edx;
  int ok;

  memset (&cpu, 0, sizeof (cpu));

  ok = x86_cpuid (0, &eax, &ebx, &ecx, &edx);
  if (ok != 0)
    {
      if (ebx == signature_INTEL_ebx && ecx == signature_INTEL_ecx
          && edx == signature_INTEL_edx)
        {
          unsigned int cpuid, ignore;

          ok = x86_cpuid (1, &cpuid, &ignore, &ignore, &ignore);
          if (ok != 0)
            {
              cpu.vendor = CV_INTEL;

              cpu.family = (cpuid >> 8) & 0xf;
              cpu.model = (cpuid >> 4) & 0xf;

              if (cpu.family == 0x6)
                cpu.model += (cpuid >> 12) & 0xf0;
            }
        }
    }

  return cpu;
}

/* Return non-zero if there is new data in PEVENT; zero otherwise. */

static int
perf_event_new_data (const struct perf_event_buffer *pev)
{
  return *pev->data_head != pev->last_head;
}

/* Copy the last SIZE bytes from PEV ending at DATA_HEAD and return a pointer
   to the memory holding the copy.
   The caller is responsible for freeing the memory. */
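/* For example (illustrative numbers), with BUFFER_SIZE == 4096, DATA_HEAD ==
   4160 and SIZE == 128, DATA_TAIL is 4032, so the copy takes the last 64
   bytes of the buffer followed by its first 64 bytes. */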

static gdb_byte *
perf_event_read (const struct perf_event_buffer *pev, __u64 data_head,
                 size_t size)
{
  const gdb_byte *begin, *end, *start, *stop;
  gdb_byte *buffer;
  size_t buffer_size;
  __u64 data_tail;

  if (size == 0)
    return NULL;

  /* We should never ask for more data than the buffer can hold. */
  buffer_size = pev->size;
  gdb_assert (size <= buffer_size);

  /* If we ask for more data than we seem to have, we wrap around and read
     data from the end of the buffer. This is already handled by the %
     BUFFER_SIZE operation, below. Here, we just need to make sure that we
     don't underflow.

     Note that this is perfectly OK for perf event buffers where data_head
     doesn't grow indefinitely and instead wraps around to remain within the
     buffer's boundaries. */
  if (data_head < size)
    data_head += buffer_size;

  gdb_assert (size <= data_head);
  data_tail = data_head - size;

  begin = pev->mem;
  start = begin + data_tail % buffer_size;
  stop = begin + data_head % buffer_size;

  buffer = (gdb_byte *) xmalloc (size);

  if (start < stop)
    memcpy (buffer, start, stop - start);
  else
    {
      end = begin + buffer_size;

      memcpy (buffer, start, end - start);
      memcpy (buffer + (end - start), begin, stop - begin);
    }

  return buffer;
}

/* Copy the perf event buffer data from PEV.
   Store a pointer to the copy into DATA and its size in SIZE. */

static void
perf_event_read_all (struct perf_event_buffer *pev, gdb_byte **data,
                     size_t *psize)
{
  size_t size;
  __u64 data_head;

  data_head = *pev->data_head;
  size = pev->size;

  *data = perf_event_read (pev, data_head, size);
  *psize = size;

  pev->last_head = data_head;
}

/* Try to determine the start address of the Linux kernel. */
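/* We look for the "_text" symbol in /proc/kallsyms; a matching line has the
   form "<address> <type> <symbol>", e.g. "ffffffff81000000 T _text" (the
   address shown here is only illustrative). */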

static uint64_t
linux_determine_kernel_start (void)
{
  static uint64_t kernel_start;
  static int cached;

  if (cached != 0)
    return kernel_start;

  cached = 1;

  gdb_file_up file = gdb_fopen_cloexec ("/proc/kallsyms", "r");
  if (file == NULL)
    return kernel_start;

  while (!feof (file.get ()))
    {
      char buffer[1024], symbol[8], *line;
      uint64_t addr;
      int match;

      line = fgets (buffer, sizeof (buffer), file.get ());
      if (line == NULL)
        break;

      match = sscanf (line, "%" SCNx64 " %*[tT] %7s", &addr, symbol);
      if (match != 2)
        continue;

      if (strcmp (symbol, "_text") == 0)
        {
          kernel_start = addr;
          break;
        }
    }

  return kernel_start;
}

/* Check whether an address is in the kernel. */

static inline int
perf_event_is_kernel_addr (uint64_t addr)
{
  uint64_t kernel_start;

  kernel_start = linux_determine_kernel_start ();
  if (kernel_start != 0ull)
    return (addr >= kernel_start);

  /* If we don't know the kernel's start address, let's check the most
     significant bit. This will work at least for 64-bit kernels. */
  return ((addr & (1ull << 63)) != 0);
}

/* Check whether a perf event record should be skipped. */

static inline int
perf_event_skip_bts_record (const struct perf_event_bts *bts)
{
  /* The hardware may report branches from kernel into user space. Branches
     from user into kernel space will be suppressed. We filter the former to
     provide a consistent branch trace excluding kernel. */
  return perf_event_is_kernel_addr (bts->from);
}

/* Perform a few consistency checks on a perf event sample record. This is
   meant to catch cases when we get out of sync with the perf event stream. */

static inline int
perf_event_sample_ok (const struct perf_event_sample *sample)
{
  if (sample->header.type != PERF_RECORD_SAMPLE)
    return 0;

  if (sample->header.size != sizeof (*sample))
    return 0;

  return 1;
}

/* Branch trace is collected in a circular buffer [begin; end) as pairs of from
   and to addresses (plus a header).

   Start points into that buffer at the next sample position.
   We read the collected samples backwards from start.

   While reading the samples, we convert the information into a list of blocks.
   For two adjacent samples s1 and s2, we form a block b such that b.begin =
   s1.to and b.end = s2.from.

   In case the buffer overflows during sampling, one sample may have its lower
   part at the end and its upper part at the beginning of the buffer. */
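/* For example (illustrative addresses), adjacent samples
   s1 = { .from = 0x400100, .to = 0x400200 } and
   s2 = { .from = 0x400250, .to = 0x400300 }
   produce the block [0x400200; 0x400250): execution entered at s1.to and
   left with the branch recorded at s2.from. */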

static std::vector<btrace_block> *
perf_event_read_bts (struct btrace_target_info* tinfo, const uint8_t *begin,
                     const uint8_t *end, const uint8_t *start, size_t size)
{
  std::vector<btrace_block> *btrace = new std::vector<btrace_block>;
  struct perf_event_sample sample;
  size_t read = 0;
  struct btrace_block block = { 0, 0 };
  struct regcache *regcache;

  gdb_assert (begin <= start);
  gdb_assert (start <= end);

  /* The first block ends at the current pc. */
  regcache = get_thread_regcache_for_ptid (tinfo->ptid);
  block.end = regcache_read_pc (regcache);

  /* The buffer may contain a partial record as its last entry (i.e. when the
     buffer size is not a multiple of the sample size). */
  read = sizeof (sample) - 1;

  for (; read < size; read += sizeof (sample))
    {
      const struct perf_event_sample *psample;

      /* Find the next perf_event sample in a backwards traversal. */
      start -= sizeof (sample);

      /* If we're still inside the buffer, we're done. */
      if (begin <= start)
        psample = (const struct perf_event_sample *) start;
      else
        {
          int missing;

          /* We're to the left of the ring buffer, we will wrap around and
             reappear at the very right of the ring buffer. */

          missing = (begin - start);
          start = (end - missing);

          /* If the entire sample is missing, we're done. */
          if (missing == sizeof (sample))
            psample = (const struct perf_event_sample *) start;
          else
            {
              uint8_t *stack;

              /* The sample wrapped around. The lower part is at the end and
                 the upper part is at the beginning of the buffer. */
              stack = (uint8_t *) &sample;

              /* Copy the two parts so we have a contiguous sample. */
              memcpy (stack, start, missing);
              memcpy (stack + missing, begin, sizeof (sample) - missing);

              psample = &sample;
            }
        }

      if (!perf_event_sample_ok (psample))
        {
          warning (_("Branch trace may be incomplete."));
          break;
        }

      if (perf_event_skip_bts_record (&psample->bts))
        continue;

      /* We found a valid sample, so we can complete the current block. */
      block.begin = psample->bts.to;

      btrace->push_back (block);

      /* Start the next block. */
      block.end = psample->bts.from;
    }

  /* Push the last block (i.e. the first one of inferior execution), as well.
     We don't know where it ends, but we know where it starts. If we're
     reading delta trace, we can fill in the start address later on.
     Otherwise we will prune it. */
  block.begin = 0;
  btrace->push_back (block);

  return btrace;
}

/* Check whether an Intel cpu supports BTS. */

static int
intel_supports_bts (const struct btrace_cpu *cpu)
{
  switch (cpu->family)
    {
    case 0x6:
      switch (cpu->model)
        {
        case 0x1a: /* Nehalem */
        case 0x1f:
        case 0x1e:
        case 0x2e:
        case 0x25: /* Westmere */
        case 0x2c:
        case 0x2f:
        case 0x2a: /* Sandy Bridge */
        case 0x2d:
        case 0x3a: /* Ivy Bridge */

          /* AAJ122: LBR, BTM, or BTS records may have incorrect branch
             "from" information after an EIST transition, T-states, C1E, or
             Adaptive Thermal Throttling. */
          return 0;
        }
    }

  return 1;
}

/* Check whether the cpu supports BTS. */

static int
cpu_supports_bts (void)
{
  struct btrace_cpu cpu;

  cpu = btrace_this_cpu ();
  switch (cpu.vendor)
    {
    default:
      /* Don't know about others. Let's assume they do. */
      return 1;

    case CV_INTEL:
      return intel_supports_bts (&cpu);
    }
}

/* The perf_event_open syscall failed. Try to print a helpful error
   message. */
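/* As a rough guide (the exact semantics depend on the kernel), increasing
   perf_event_paranoid values restrict unprivileged use of perf_event_open
   further; values above 2 typically disallow it entirely, which is the case
   the hint below addresses. */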

static void
diagnose_perf_event_open_fail ()
{
  switch (errno)
    {
    case EPERM:
    case EACCES:
      {
        static const char filename[] = "/proc/sys/kernel/perf_event_paranoid";
        gdb_file_up file = gdb_fopen_cloexec (filename, "r");
        if (file.get () == nullptr)
          break;

        int level, found = fscanf (file.get (), "%d", &level);
        if (found == 1 && level > 2)
          error (_("You do not have permission to record the process. "
                   "Try setting %s to 2 or less."), filename);
      }

      break;
    }

  error (_("Failed to start recording: %s"), safe_strerror (errno));
}

/* Enable branch tracing in BTS format. */

static struct btrace_target_info *
linux_enable_bts (ptid_t ptid, const struct btrace_config_bts *conf)
{
  struct btrace_tinfo_bts *bts;
  size_t size, pages;
  __u64 data_offset;
  int pid, pg;

  if (!cpu_supports_bts ())
    error (_("BTS support has been disabled for the target cpu."));

  gdb::unique_xmalloc_ptr<btrace_target_info> tinfo
    (XCNEW (btrace_target_info));
  tinfo->ptid = ptid;

  tinfo->conf.format = BTRACE_FORMAT_BTS;
  bts = &tinfo->variant.bts;

  bts->attr.size = sizeof (bts->attr);
  bts->attr.type = PERF_TYPE_HARDWARE;
  bts->attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
  bts->attr.sample_period = 1;

  /* We sample from and to addresses. */
  bts->attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR;

  bts->attr.exclude_kernel = 1;
  bts->attr.exclude_hv = 1;
  bts->attr.exclude_idle = 1;

  pid = ptid.lwp ();
  if (pid == 0)
    pid = ptid.pid ();

  errno = 0;
  scoped_fd fd (syscall (SYS_perf_event_open, &bts->attr, pid, -1, -1, 0));
  if (fd.get () < 0)
    diagnose_perf_event_open_fail ();

  /* Convert the requested size in bytes to pages (rounding up). */
  pages = ((size_t) conf->size / PAGE_SIZE
           + ((conf->size % PAGE_SIZE) == 0 ? 0 : 1));
  /* We need at least one page. */
  if (pages == 0)
    pages = 1;

  /* The buffer size can be requested in powers of two pages. Adjust PAGES
     to the next power of two. */
  for (pg = 0; pages != ((size_t) 1 << pg); ++pg)
    if ((pages & ((size_t) 1 << pg)) != 0)
      pages += ((size_t) 1 << pg);
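  /* For example (illustrative), a requested size of 40 KiB with 4 KiB pages
     yields 10 pages, which the loop above rounds up to 16. */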

  /* We try to allocate the requested size.
     If that fails, try to get as much as we can. */
  scoped_mmap data;
  for (; pages > 0; pages >>= 1)
    {
      size_t length;
      __u64 data_size;

      data_size = (__u64) pages * PAGE_SIZE;

      /* Don't ask for more than we can represent in the configuration. */
      if ((__u64) UINT_MAX < data_size)
        continue;

      size = (size_t) data_size;
      length = size + PAGE_SIZE;
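      /* The extra page is for the perf_event_mmap_page header; the trace
         data itself starts one page into the mapping (see DATA_OFFSET
         below). */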

      /* Check for overflows. */
      if ((__u64) length != data_size + PAGE_SIZE)
        continue;

      errno = 0;
      /* The number of pages we request needs to be a power of two. */
      data.reset (nullptr, length, PROT_READ, MAP_SHARED, fd.get (), 0);
      if (data.get () != MAP_FAILED)
        break;
    }

  if (pages == 0)
    error (_("Failed to map trace buffer: %s."), safe_strerror (errno));

  struct perf_event_mmap_page *header = (struct perf_event_mmap_page *)
    data.get ();
  data_offset = PAGE_SIZE;

#if defined (PERF_ATTR_SIZE_VER5)
  if (offsetof (struct perf_event_mmap_page, data_size) <= header->size)
    {
      __u64 data_size;

      data_offset = header->data_offset;
      data_size = header->data_size;

      size = (unsigned int) data_size;

      /* Check for overflows. */
      if ((__u64) size != data_size)
        error (_("Failed to determine trace buffer size."));
    }
#endif /* defined (PERF_ATTR_SIZE_VER5) */

  bts->bts.size = size;
  bts->bts.data_head = &header->data_head;
  bts->bts.mem = (const uint8_t *) data.release () + data_offset;
  bts->bts.last_head = 0ull;
  bts->header = header;
  bts->file = fd.release ();

  tinfo->conf.bts.size = (unsigned int) size;
  return tinfo.release ();
}

#if defined (PERF_ATTR_SIZE_VER5)

/* Determine the event type. */
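/* The file contains a single decimal integer, e.g. "8" (the exact value is
   assigned by the kernel and varies between systems). */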

static int
perf_event_pt_event_type ()
{
  static const char filename[] = "/sys/bus/event_source/devices/intel_pt/type";

  errno = 0;
  gdb_file_up file = gdb_fopen_cloexec (filename, "r");
  if (file.get () == nullptr)
    error (_("Failed to open %s: %s."), filename, safe_strerror (errno));

  int type, found = fscanf (file.get (), "%d", &type);
  if (found != 1)
    error (_("Failed to read the PT event type from %s."), filename);

  return type;
}

/* Enable branch tracing in Intel Processor Trace format. */

static struct btrace_target_info *
linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
{
  struct btrace_tinfo_pt *pt;
  size_t pages;
  int pid, pg;

  pid = ptid.lwp ();
  if (pid == 0)
    pid = ptid.pid ();

  gdb::unique_xmalloc_ptr<btrace_target_info> tinfo
    (XCNEW (btrace_target_info));
  tinfo->ptid = ptid;

  tinfo->conf.format = BTRACE_FORMAT_PT;
  pt = &tinfo->variant.pt;

  pt->attr.size = sizeof (pt->attr);
  pt->attr.type = perf_event_pt_event_type ();

  pt->attr.exclude_kernel = 1;
  pt->attr.exclude_hv = 1;
  pt->attr.exclude_idle = 1;

  errno = 0;
  scoped_fd fd (syscall (SYS_perf_event_open, &pt->attr, pid, -1, -1, 0));
  if (fd.get () < 0)
    diagnose_perf_event_open_fail ();

  /* Allocate the configuration page. */
  scoped_mmap data (nullptr, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
                    fd.get (), 0);
  if (data.get () == MAP_FAILED)
    error (_("Failed to map trace user page: %s."), safe_strerror (errno));

  struct perf_event_mmap_page *header = (struct perf_event_mmap_page *)
    data.get ();

  header->aux_offset = header->data_offset + header->data_size;
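  /* The AUX area receives the actual Intel PT trace. We place it directly
     behind the perf data area and map it separately below. */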

  /* Convert the requested size in bytes to pages (rounding up). */
  pages = ((size_t) conf->size / PAGE_SIZE
           + ((conf->size % PAGE_SIZE) == 0 ? 0 : 1));
  /* We need at least one page. */
  if (pages == 0)
    pages = 1;

  /* The buffer size can be requested in powers of two pages. Adjust PAGES
     to the next power of two. */
  for (pg = 0; pages != ((size_t) 1 << pg); ++pg)
    if ((pages & ((size_t) 1 << pg)) != 0)
      pages += ((size_t) 1 << pg);

  /* We try to allocate the requested size.
     If that fails, try to get as much as we can. */
  scoped_mmap aux;
  for (; pages > 0; pages >>= 1)
    {
      size_t length;
      __u64 data_size;

      data_size = (__u64) pages * PAGE_SIZE;

      /* Don't ask for more than we can represent in the configuration. */
      if ((__u64) UINT_MAX < data_size)
        continue;

      length = (size_t) data_size;

      /* Check for overflows. */
      if ((__u64) length != data_size)
        continue;

      header->aux_size = data_size;

      errno = 0;
      aux.reset (nullptr, length, PROT_READ, MAP_SHARED, fd.get (),
                 header->aux_offset);
      if (aux.get () != MAP_FAILED)
        break;
    }

  if (pages == 0)
    error (_("Failed to map trace buffer: %s."), safe_strerror (errno));

  pt->pt.size = aux.size ();
  pt->pt.mem = (const uint8_t *) aux.release ();
  pt->pt.data_head = &header->aux_head;
  pt->header = (struct perf_event_mmap_page *) data.release ();
  gdb_assert (pt->header == header);
  pt->file = fd.release ();

  tinfo->conf.pt.size = (unsigned int) pt->pt.size;
  return tinfo.release ();
}

#else /* !defined (PERF_ATTR_SIZE_VER5) */

static struct btrace_target_info *
linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
{
  error (_("Intel Processor Trace support was disabled at compile time."));
}

#endif /* !defined (PERF_ATTR_SIZE_VER5) */

/* See linux-btrace.h. */

struct btrace_target_info *
linux_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
{
  switch (conf->format)
    {
    case BTRACE_FORMAT_NONE:
      error (_("Bad branch trace format."));

    default:
      error (_("Unknown branch trace format."));

    case BTRACE_FORMAT_BTS:
      return linux_enable_bts (ptid, &conf->bts);

    case BTRACE_FORMAT_PT:
      return linux_enable_pt (ptid, &conf->pt);
    }
}

/* Disable BTS tracing. */

static enum btrace_error
linux_disable_bts (struct btrace_tinfo_bts *tinfo)
{
  munmap ((void *) tinfo->header, tinfo->bts.size + PAGE_SIZE);
  close (tinfo->file);

  return BTRACE_ERR_NONE;
}

/* Disable Intel Processor Trace tracing. */

static enum btrace_error
linux_disable_pt (struct btrace_tinfo_pt *tinfo)
{
  munmap ((void *) tinfo->pt.mem, tinfo->pt.size);
  munmap ((void *) tinfo->header, PAGE_SIZE);
  close (tinfo->file);

  return BTRACE_ERR_NONE;
}

/* See linux-btrace.h. */

enum btrace_error
linux_disable_btrace (struct btrace_target_info *tinfo)
{
  enum btrace_error errcode;

  errcode = BTRACE_ERR_NOT_SUPPORTED;
  switch (tinfo->conf.format)
    {
    case BTRACE_FORMAT_NONE:
      break;

    case BTRACE_FORMAT_BTS:
      errcode = linux_disable_bts (&tinfo->variant.bts);
      break;

    case BTRACE_FORMAT_PT:
      errcode = linux_disable_pt (&tinfo->variant.pt);
      break;
    }

  if (errcode == BTRACE_ERR_NONE)
    xfree (tinfo);

  return errcode;
}

/* Read branch trace data in BTS format for the thread given by TINFO into
   BTRACE using the TYPE reading method. */

static enum btrace_error
linux_read_bts (struct btrace_data_bts *btrace,
                struct btrace_target_info *tinfo,
                enum btrace_read_type type)
{
  struct perf_event_buffer *pevent;
  const uint8_t *begin, *end, *start;
  size_t buffer_size, size;
  __u64 data_head, data_tail;
  unsigned int retries = 5;

  pevent = &tinfo->variant.bts.bts;

  /* For delta reads, we return at least the partial last block containing
     the current PC. */
  if (type == BTRACE_READ_NEW && !perf_event_new_data (pevent))
    return BTRACE_ERR_NONE;

  buffer_size = pevent->size;
  data_tail = pevent->last_head;

  /* We may need to retry reading the trace. See below. */
  while (retries--)
    {
      data_head = *pevent->data_head;

      /* Delete any leftover trace from the previous iteration. */
      delete btrace->blocks;
      btrace->blocks = nullptr;

      if (type == BTRACE_READ_DELTA)
        {
          __u64 data_size;

          /* Determine the number of bytes to read and check for buffer
             overflows. */

          /* Check for data head overflows. We might be able to recover from
             those but they are very unlikely and it's not really worth the
             effort, I think. */
          if (data_head < data_tail)
            return BTRACE_ERR_OVERFLOW;

          /* If the buffer is smaller than the trace delta, we overflowed. */
          data_size = data_head - data_tail;
          if (buffer_size < data_size)
            return BTRACE_ERR_OVERFLOW;

          /* DATA_SIZE <= BUFFER_SIZE and therefore fits into a size_t. */
          size = (size_t) data_size;
        }
      else
        {
          /* Read the entire buffer. */
          size = buffer_size;

          /* Adjust the size if the buffer has not overflowed, yet. */
          if (data_head < size)
            size = (size_t) data_head;
        }

      /* Data_head keeps growing; the buffer itself is circular. */
      begin = pevent->mem;
      start = begin + data_head % buffer_size;

      if (data_head <= buffer_size)
        end = start;
      else
        end = begin + pevent->size;
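      /* For example (illustrative), with BUFFER_SIZE == 4096 and DATA_HEAD ==
         10000, START points 10000 % 4096 == 1808 bytes into the buffer and
         END to its very end; with DATA_HEAD == 3000 the buffer has not
         wrapped yet and END == START. */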

      btrace->blocks = perf_event_read_bts (tinfo, begin, end, start, size);

      /* The stopping thread notifies its ptracer before it is scheduled out.
         On multi-core systems, the debugger might therefore run while the
         kernel is still writing the last branch trace records.

         Let's check whether the data head moved while we read the trace. */
      if (data_head == *pevent->data_head)
        break;
    }

  pevent->last_head = data_head;

  /* Prune the incomplete last block (i.e. the first one of inferior execution)
     if we're not doing a delta read. There is no way of filling in its zeroed
     BEGIN element. */
  if (!btrace->blocks->empty () && type != BTRACE_READ_DELTA)
    btrace->blocks->pop_back ();

  return BTRACE_ERR_NONE;
}

/* Fill in the Intel Processor Trace configuration information. */

static void
linux_fill_btrace_pt_config (struct btrace_data_pt_config *conf)
{
  conf->cpu = btrace_this_cpu ();
}

/* Read branch trace data in Intel Processor Trace format for the thread
   given by TINFO into BTRACE using the TYPE reading method. */

static enum btrace_error
linux_read_pt (struct btrace_data_pt *btrace,
               struct btrace_target_info *tinfo,
               enum btrace_read_type type)
{
  struct perf_event_buffer *pt;

  pt = &tinfo->variant.pt.pt;

  linux_fill_btrace_pt_config (&btrace->config);

  switch (type)
    {
    case BTRACE_READ_DELTA:
      /* We don't support delta reads. The data head (i.e. aux_head) wraps
         around to stay inside the aux buffer. */
      return BTRACE_ERR_NOT_SUPPORTED;

    case BTRACE_READ_NEW:
      if (!perf_event_new_data (pt))
        return BTRACE_ERR_NONE;

      /* Fall through. */
    case BTRACE_READ_ALL:
      perf_event_read_all (pt, &btrace->data, &btrace->size);
      return BTRACE_ERR_NONE;
    }

  internal_error (__FILE__, __LINE__, _("Unknown btrace read type."));
}

/* See linux-btrace.h. */

enum btrace_error
linux_read_btrace (struct btrace_data *btrace,
                   struct btrace_target_info *tinfo,
                   enum btrace_read_type type)
{
  switch (tinfo->conf.format)
    {
    case BTRACE_FORMAT_NONE:
      return BTRACE_ERR_NOT_SUPPORTED;

    case BTRACE_FORMAT_BTS:
      /* We read btrace in BTS format. */
      btrace->format = BTRACE_FORMAT_BTS;
      btrace->variant.bts.blocks = NULL;

      return linux_read_bts (&btrace->variant.bts, tinfo, type);

    case BTRACE_FORMAT_PT:
      /* We read btrace in Intel Processor Trace format. */
      btrace->format = BTRACE_FORMAT_PT;
      btrace->variant.pt.data = NULL;
      btrace->variant.pt.size = 0;

      return linux_read_pt (&btrace->variant.pt, tinfo, type);
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}

/* See linux-btrace.h. */

const struct btrace_config *
linux_btrace_conf (const struct btrace_target_info *tinfo)
{
  return &tinfo->conf;
}

#else /* !HAVE_LINUX_PERF_EVENT_H */

/* See linux-btrace.h. */

struct btrace_target_info *
linux_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
{
  return NULL;
}

/* See linux-btrace.h. */

enum btrace_error
linux_disable_btrace (struct btrace_target_info *tinfo)
{
  return BTRACE_ERR_NOT_SUPPORTED;
}

/* See linux-btrace.h. */

enum btrace_error
linux_read_btrace (struct btrace_data *btrace,
                   struct btrace_target_info *tinfo,
                   enum btrace_read_type type)
{
  return BTRACE_ERR_NOT_SUPPORTED;
}

/* See linux-btrace.h. */

const struct btrace_config *
linux_btrace_conf (const struct btrace_target_info *tinfo)
{
  return NULL;
}

#endif /* !HAVE_LINUX_PERF_EVENT_H */