/* Linux-dependent part of branch trace support for GDB, and GDBserver.

   Copyright (C) 2013-2020 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "gdbsupport/common-defs.h"

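/* The PACKAGE* macros are defined by the config.h that common-defs.h has
   already pulled in; undefine them so that including <config.h> below does
   not provoke macro redefinition warnings.  */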
#undef PACKAGE
#undef PACKAGE_NAME
#undef PACKAGE_VERSION
#undef PACKAGE_STRING
#undef PACKAGE_TARNAME

#include <config.h>
#include "linux-btrace.h"
#include "gdbsupport/common-regcache.h"
#include "gdbsupport/gdb_wait.h"
#include "x86-cpuid.h"
#include "gdbsupport/filestuff.h"
#include "gdbsupport/scoped_fd.h"
#include "gdbsupport/scoped_mmap.h"

#include <inttypes.h>

#include <sys/syscall.h>

#if HAVE_LINUX_PERF_EVENT_H && defined(SYS_perf_event_open)
#include <unistd.h>
#include <sys/mman.h>
#include <sys/user.h>
#include "nat/gdb_ptrace.h"
#include <sys/types.h>
#include <signal.h>

/* A branch trace record in perf_event.  */
struct perf_event_bts
{
  /* The linear address of the branch source.  */
  uint64_t from;

  /* The linear address of the branch destination.  */
  uint64_t to;
};

/* A perf_event branch trace sample.  */
struct perf_event_sample
{
  /* The perf_event sample header.  */
  struct perf_event_header header;

  /* The perf_event branch tracing payload.  */
  struct perf_event_bts bts;
};

/* Identify the cpu we're running on.  */
static struct btrace_cpu
btrace_this_cpu (void)
{
  struct btrace_cpu cpu;
  unsigned int eax, ebx, ecx, edx;
  int ok;

  memset (&cpu, 0, sizeof (cpu));

  ok = x86_cpuid (0, &eax, &ebx, &ecx, &edx);
  if (ok != 0)
    {
      if (ebx == signature_INTEL_ebx && ecx == signature_INTEL_ecx
          && edx == signature_INTEL_edx)
        {
          unsigned int cpuid, ignore;

          ok = x86_cpuid (1, &cpuid, &ignore, &ignore, &ignore);
          if (ok != 0)
            {
              cpu.vendor = CV_INTEL;

              cpu.family = (cpuid >> 8) & 0xf;
              cpu.model = (cpuid >> 4) & 0xf;

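              /* For family 0x6, CPUID leaf 1 also encodes an extended
                 model in EAX bits 19:16; shifting right by 12 and masking
                 with 0xf0 moves those bits into bits 7:4 of the model
                 number.  */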
              if (cpu.family == 0x6)
                cpu.model += (cpuid >> 12) & 0xf0;
            }
        }
    }

  return cpu;
}

/* Return non-zero if there is new data in PEV; zero otherwise.  */

static int
perf_event_new_data (const struct perf_event_buffer *pev)
{
  return *pev->data_head != pev->last_head;
}

/* Copy the last SIZE bytes from PEV ending at DATA_HEAD and return a pointer
   to the memory holding the copy.
   The caller is responsible for freeing the memory.  */

static gdb_byte *
perf_event_read (const struct perf_event_buffer *pev, __u64 data_head,
                 size_t size)
{
  const gdb_byte *begin, *end, *start, *stop;
  gdb_byte *buffer;
  size_t buffer_size;
  __u64 data_tail;

  if (size == 0)
    return NULL;

  /* We should never ask for more data than the buffer can hold.  */
  buffer_size = pev->size;
  gdb_assert (size <= buffer_size);

  /* If we ask for more data than we seem to have, we wrap around and read
     data from the end of the buffer.  This is already handled by the %
     BUFFER_SIZE operation, below.  Here, we just need to make sure that we
     don't underflow.

     Note that this is perfectly OK for perf event buffers where data_head
     doesn't grow indefinitely and instead wraps around to remain within the
     buffer's boundaries.  */
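  /* For example, with BUFFER_SIZE == 8 and SIZE == 6, a DATA_HEAD of 2
     becomes 10, giving DATA_TAIL == 4; the copy then starts at offset 4,
     runs to the end of the buffer, and wraps around to offset 2.  */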
  if (data_head < size)
    data_head += buffer_size;

  gdb_assert (size <= data_head);
  data_tail = data_head - size;

  begin = pev->mem;
  start = begin + data_tail % buffer_size;
  stop = begin + data_head % buffer_size;

  buffer = (gdb_byte *) xmalloc (size);

  if (start < stop)
    memcpy (buffer, start, stop - start);
  else
    {
      end = begin + buffer_size;

      memcpy (buffer, start, end - start);
      memcpy (buffer + (end - start), begin, stop - begin);
    }

  return buffer;
}

/* Copy the perf event buffer data from PEV.
   Store a pointer to the copy into DATA and its size in *PSIZE.  */

static void
perf_event_read_all (struct perf_event_buffer *pev, gdb_byte **data,
                     size_t *psize)
{
  size_t size;
  __u64 data_head;

  data_head = *pev->data_head;
  size = pev->size;

  *data = perf_event_read (pev, data_head, size);
  *psize = size;

  pev->last_head = data_head;
}

/* Try to determine the start address of the Linux kernel.  */

static uint64_t
linux_determine_kernel_start (void)
{
  static uint64_t kernel_start;
  static int cached;

  if (cached != 0)
    return kernel_start;

  cached = 1;

  gdb_file_up file = gdb_fopen_cloexec ("/proc/kallsyms", "r");
  if (file == NULL)
    return kernel_start;

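  /* Each line of /proc/kallsyms has the form
     "<hex address> <type> <symbol>", e.g. "ffffffff81000000 T _text".
     We look for the "_text" symbol, which marks the start of the kernel
     text section.  */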
  while (!feof (file.get ()))
    {
      char buffer[1024], symbol[8], *line;
      uint64_t addr;
      int match;

      line = fgets (buffer, sizeof (buffer), file.get ());
      if (line == NULL)
        break;

      match = sscanf (line, "%" SCNx64 " %*[tT] %7s", &addr, symbol);
      if (match != 2)
        continue;

      if (strcmp (symbol, "_text") == 0)
        {
          kernel_start = addr;
          break;
        }
    }

  return kernel_start;
}

/* Check whether an address is in the kernel.  */

static inline int
perf_event_is_kernel_addr (uint64_t addr)
{
  uint64_t kernel_start;

  kernel_start = linux_determine_kernel_start ();
  if (kernel_start != 0ull)
    return (addr >= kernel_start);

  /* If we don't know the kernel's start address, let's check the most
     significant bit.  This will work at least for 64-bit kernels.  */
  return ((addr & (1ull << 63)) != 0);
}

/* Check whether a perf event record should be skipped.  */

static inline int
perf_event_skip_bts_record (const struct perf_event_bts *bts)
{
  /* The hardware may report branches from kernel into user space.  Branches
     from user into kernel space will be suppressed.  We filter the former to
     provide a consistent branch trace excluding kernel addresses.  */
  return perf_event_is_kernel_addr (bts->from);
}

/* Perform a few consistency checks on a perf event sample record.  This is
   meant to catch cases when we get out of sync with the perf event stream.  */

static inline int
perf_event_sample_ok (const struct perf_event_sample *sample)
{
  if (sample->header.type != PERF_RECORD_SAMPLE)
    return 0;

  if (sample->header.size != sizeof (*sample))
    return 0;

  return 1;
}

/* Branch trace is collected in a circular buffer [begin; end) as pairs of from
   and to addresses (plus a header).

   Start points into that buffer at the next sample position.
   We read the collected samples backwards from start.

   While reading the samples, we convert the information into a list of blocks.
   For two adjacent samples s1 and s2, we form a block b such that b.begin =
   s1.to and b.end = s2.from.

   In case the buffer overflows during sampling, one sample may have its lower
   part at the end and its upper part at the beginning of the buffer.  */
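
/* For example, with two samples s1 (older) and s2 (newer), the traversal
   below produces the blocks [s2.to; pc], [s1.to; s2.from], and
   [0; s1.from]; the zeroed begin address of that last block is filled in
   or pruned by the caller.  */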

static std::vector<btrace_block> *
perf_event_read_bts (struct btrace_target_info *tinfo, const uint8_t *begin,
                     const uint8_t *end, const uint8_t *start, size_t size)
{
  std::vector<btrace_block> *btrace = new std::vector<btrace_block>;
  struct perf_event_sample sample;
  size_t read = 0;
  struct btrace_block block = { 0, 0 };
  struct regcache *regcache;

  gdb_assert (begin <= start);
  gdb_assert (start <= end);

  /* The first block ends at the current pc.  */
  regcache = get_thread_regcache_for_ptid (tinfo->ptid);
  block.end = regcache_read_pc (regcache);

  /* The buffer may contain a partial record as its last entry (i.e. when the
     buffer size is not a multiple of the sample size).  */
  read = sizeof (sample) - 1;

  for (; read < size; read += sizeof (sample))
    {
      const struct perf_event_sample *psample;

      /* Find the next perf_event sample in a backwards traversal.  */
      start -= sizeof (sample);

      /* If we're still inside the buffer, we're done.  */
      if (begin <= start)
        psample = (const struct perf_event_sample *) start;
      else
        {
          int missing;

          /* We're to the left of the ring buffer, we will wrap around and
             reappear at the very right of the ring buffer.  */

          missing = (begin - start);
          start = (end - missing);

          /* If the entire sample is missing, we're done.  */
          if (missing == sizeof (sample))
            psample = (const struct perf_event_sample *) start;
          else
            {
              uint8_t *stack;

              /* The sample wrapped around.  The lower part is at the end and
                 the upper part is at the beginning of the buffer.  */
              stack = (uint8_t *) &sample;

              /* Copy the two parts so we have a contiguous sample.  */
              memcpy (stack, start, missing);
              memcpy (stack + missing, begin, sizeof (sample) - missing);

              psample = &sample;
            }
        }

      if (!perf_event_sample_ok (psample))
        {
          warning (_("Branch trace may be incomplete."));
          break;
        }

      if (perf_event_skip_bts_record (&psample->bts))
        continue;

      /* We found a valid sample, so we can complete the current block.  */
      block.begin = psample->bts.to;

      btrace->push_back (block);

      /* Start the next block.  */
      block.end = psample->bts.from;
    }

  /* Push the last block (i.e. the first one of inferior execution), as well.
     We don't know where it ends, but we know where it starts.  If we're
     reading delta trace, we can fill in the start address later on.
     Otherwise we will prune it.  */
  block.begin = 0;
  btrace->push_back (block);

  return btrace;
}

/* Check whether an Intel cpu supports BTS.  */

static int
intel_supports_bts (const struct btrace_cpu *cpu)
{
  switch (cpu->family)
    {
    case 0x6:
      switch (cpu->model)
        {
        case 0x1a: /* Nehalem */
        case 0x1f:
        case 0x1e:
        case 0x2e:
        case 0x25: /* Westmere */
        case 0x2c:
        case 0x2f:
        case 0x2a: /* Sandy Bridge */
        case 0x2d:
        case 0x3a: /* Ivy Bridge */

          /* AAJ122: LBR, BTM, or BTS records may have incorrect branch
             "from" information after an EIST transition, T-states, C1E, or
             Adaptive Thermal Throttling.  */
          return 0;
        }
    }

  return 1;
}

/* Check whether the cpu supports BTS.  */

static int
cpu_supports_bts (void)
{
  struct btrace_cpu cpu;

  cpu = btrace_this_cpu ();
  switch (cpu.vendor)
    {
    default:
      /* Don't know about others.  Let's assume they do.  */
      return 1;

    case CV_INTEL:
      return intel_supports_bts (&cpu);
    }
}

/* The perf_event_open syscall failed.  Try to print a helpful error
   message.  */

static void
diagnose_perf_event_open_fail ()
{
  switch (errno)
    {
    case EPERM:
    case EACCES:
      {
        static const char filename[] = "/proc/sys/kernel/perf_event_paranoid";
        gdb_file_up file = gdb_fopen_cloexec (filename, "r");
        if (file.get () == nullptr)
          break;

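        /* The perf_event_paranoid setting restricts unprivileged use of
           perf_event_open; a value above 2 (a non-mainline but commonly
           shipped extension) disallows it entirely, which would explain
           the EPERM/EACCES we just got.  */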
        int level, found = fscanf (file.get (), "%d", &level);
        if (found == 1 && level > 2)
          error (_("You do not have permission to record the process. "
                   "Try setting %s to 2 or less."), filename);
      }

      break;
    }

  error (_("Failed to start recording: %s"), safe_strerror (errno));
}

/* Enable branch tracing in BTS format.  */

static struct btrace_target_info *
linux_enable_bts (ptid_t ptid, const struct btrace_config_bts *conf)
{
  struct btrace_tinfo_bts *bts;
  size_t size, pages;
  __u64 data_offset;
  int pid, pg;

  if (!cpu_supports_bts ())
    error (_("BTS support has been disabled for the target cpu."));

  gdb::unique_xmalloc_ptr<btrace_target_info> tinfo
    (XCNEW (btrace_target_info));
  tinfo->ptid = ptid;

  tinfo->conf.format = BTRACE_FORMAT_BTS;
  bts = &tinfo->variant.bts;

  bts->attr.size = sizeof (bts->attr);
  bts->attr.type = PERF_TYPE_HARDWARE;
  bts->attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
  bts->attr.sample_period = 1;

  /* We sample from and to address.  */
  bts->attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR;
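  /* For BTS, PERF_SAMPLE_IP yields the branch source address and
     PERF_SAMPLE_ADDR the branch destination address, matching struct
     perf_event_bts above.  */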

  bts->attr.exclude_kernel = 1;
  bts->attr.exclude_hv = 1;
  bts->attr.exclude_idle = 1;

  pid = ptid.lwp ();
  if (pid == 0)
    pid = ptid.pid ();

  errno = 0;
  scoped_fd fd (syscall (SYS_perf_event_open, &bts->attr, pid, -1, -1, 0));
  if (fd.get () < 0)
    diagnose_perf_event_open_fail ();

  /* Convert the requested size in bytes to pages (rounding up).  */
  pages = ((size_t) conf->size / PAGE_SIZE
           + ((conf->size % PAGE_SIZE) == 0 ? 0 : 1));
  /* We need at least one page.  */
  if (pages == 0)
    pages = 1;

  /* The buffer size can be requested in powers of two pages.  Adjust PAGES
     to the next power of two.  */
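  /* For example, a request of 5 pages (binary 101) is rounded up via
     5 -> 6 -> 8: each iteration adds the lowest set bit, propagating the
     carry until a single bit remains.  */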
  for (pg = 0; pages != ((size_t) 1 << pg); ++pg)
    if ((pages & ((size_t) 1 << pg)) != 0)
      pages += ((size_t) 1 << pg);

  /* We try to allocate the requested size.
     If that fails, try to get as much as we can.  */
  scoped_mmap data;
  for (; pages > 0; pages >>= 1)
    {
      size_t length;
      __u64 data_size;

      data_size = (__u64) pages * PAGE_SIZE;

      /* Don't ask for more than we can represent in the configuration.  */
      if ((__u64) UINT_MAX < data_size)
        continue;

      size = (size_t) data_size;
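
      /* Add one page for the perf_event configuration/header page that
         precedes the data pages in the mapping.  */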
      length = size + PAGE_SIZE;

      /* Check for overflows.  */
      if ((__u64) length != data_size + PAGE_SIZE)
        continue;

      errno = 0;
      /* The number of pages we request needs to be a power of two.  */
      data.reset (nullptr, length, PROT_READ, MAP_SHARED, fd.get (), 0);
      if (data.get () != MAP_FAILED)
        break;
    }

  if (pages == 0)
    error (_("Failed to map trace buffer: %s."), safe_strerror (errno));

  struct perf_event_mmap_page *header = (struct perf_event_mmap_page *)
    data.get ();
  data_offset = PAGE_SIZE;

#if defined (PERF_ATTR_SIZE_VER5)
  if (offsetof (struct perf_event_mmap_page, data_size) <= header->size)
    {
      __u64 data_size;

      data_offset = header->data_offset;
      data_size = header->data_size;

      size = (unsigned int) data_size;

      /* Check for overflows.  */
      if ((__u64) size != data_size)
        error (_("Failed to determine trace buffer size."));
    }
#endif /* defined (PERF_ATTR_SIZE_VER5) */

  bts->bts.size = size;
  bts->bts.data_head = &header->data_head;
  bts->bts.mem = (const uint8_t *) data.release () + data_offset;
  bts->bts.last_head = 0ull;
  bts->header = header;
  bts->file = fd.release ();

  tinfo->conf.bts.size = (unsigned int) size;
  return tinfo.release ();
}

#if defined (PERF_ATTR_SIZE_VER5)

/* Determine the event type.  */

static int
perf_event_pt_event_type ()
{
  static const char filename[] = "/sys/bus/event_source/devices/intel_pt/type";

  errno = 0;
  gdb_file_up file = gdb_fopen_cloexec (filename, "r");
  if (file.get () == nullptr)
    error (_("Failed to open %s: %s."), filename, safe_strerror (errno));

  int type, found = fscanf (file.get (), "%d", &type);
  if (found != 1)
    error (_("Failed to read the PT event type from %s."), filename);

  return type;
}

/* Enable branch tracing in Intel Processor Trace format.  */

static struct btrace_target_info *
linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
{
  struct btrace_tinfo_pt *pt;
  size_t pages;
  int pid, pg;

  pid = ptid.lwp ();
  if (pid == 0)
    pid = ptid.pid ();

  gdb::unique_xmalloc_ptr<btrace_target_info> tinfo
    (XCNEW (btrace_target_info));
  tinfo->ptid = ptid;

  tinfo->conf.format = BTRACE_FORMAT_PT;
  pt = &tinfo->variant.pt;

  pt->attr.size = sizeof (pt->attr);
  pt->attr.type = perf_event_pt_event_type ();

  pt->attr.exclude_kernel = 1;
  pt->attr.exclude_hv = 1;
  pt->attr.exclude_idle = 1;

  errno = 0;
  scoped_fd fd (syscall (SYS_perf_event_open, &pt->attr, pid, -1, -1, 0));
  if (fd.get () < 0)
    diagnose_perf_event_open_fail ();

  /* Allocate the configuration page.  */
  scoped_mmap data (nullptr, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
                    fd.get (), 0);
  if (data.get () == MAP_FAILED)
    error (_("Failed to map trace user page: %s."), safe_strerror (errno));

  struct perf_event_mmap_page *header = (struct perf_event_mmap_page *)
    data.get ();

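  /* The PT trace is collected in a separate "aux" area that follows the
     data area in the perf_event file mapping; publish its offset in the
     header before mapping it.  */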
  header->aux_offset = header->data_offset + header->data_size;

  /* Convert the requested size in bytes to pages (rounding up).  */
  pages = ((size_t) conf->size / PAGE_SIZE
           + ((conf->size % PAGE_SIZE) == 0 ? 0 : 1));
  /* We need at least one page.  */
  if (pages == 0)
    pages = 1;

  /* The buffer size can be requested in powers of two pages.  Adjust PAGES
     to the next power of two.  */
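  /* This rounds up the same way as in linux_enable_bts above, e.g. a
     request of 5 pages becomes 8.  */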
  for (pg = 0; pages != ((size_t) 1 << pg); ++pg)
    if ((pages & ((size_t) 1 << pg)) != 0)
      pages += ((size_t) 1 << pg);

  /* We try to allocate the requested size.
     If that fails, try to get as much as we can.  */
  scoped_mmap aux;
  for (; pages > 0; pages >>= 1)
    {
      size_t length;
      __u64 data_size;

      data_size = (__u64) pages * PAGE_SIZE;

      /* Don't ask for more than we can represent in the configuration.  */
      if ((__u64) UINT_MAX < data_size)
        continue;

      length = (size_t) data_size;

      /* Check for overflows.  */
      if ((__u64) length != data_size)
        continue;

      header->aux_size = data_size;

      errno = 0;
      aux.reset (nullptr, length, PROT_READ, MAP_SHARED, fd.get (),
                 header->aux_offset);
      if (aux.get () != MAP_FAILED)
        break;
    }

  if (pages == 0)
    error (_("Failed to map trace buffer: %s."), safe_strerror (errno));

  pt->pt.size = aux.size ();
  pt->pt.mem = (const uint8_t *) aux.release ();
  pt->pt.data_head = &header->aux_head;
  pt->header = (struct perf_event_mmap_page *) data.release ();
  gdb_assert (pt->header == header);
  pt->file = fd.release ();

  tinfo->conf.pt.size = (unsigned int) pt->pt.size;
  return tinfo.release ();
}

#else /* !defined (PERF_ATTR_SIZE_VER5) */

static struct btrace_target_info *
linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
{
  error (_("Intel Processor Trace support was disabled at compile time."));
}

#endif /* !defined (PERF_ATTR_SIZE_VER5) */

/* See linux-btrace.h.  */

struct btrace_target_info *
linux_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
{
  switch (conf->format)
    {
    case BTRACE_FORMAT_NONE:
      error (_("Bad branch trace format."));

    default:
      error (_("Unknown branch trace format."));

    case BTRACE_FORMAT_BTS:
      return linux_enable_bts (ptid, &conf->bts);

    case BTRACE_FORMAT_PT:
      return linux_enable_pt (ptid, &conf->pt);
    }
}

/* Disable BTS tracing.  */

static enum btrace_error
linux_disable_bts (struct btrace_tinfo_bts *tinfo)
{
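  /* The mapping covers the configuration/header page in front of the
     trace data, hence the extra PAGE_SIZE.  */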
  munmap ((void *) tinfo->header, tinfo->bts.size + PAGE_SIZE);
  close (tinfo->file);

  return BTRACE_ERR_NONE;
}

/* Disable Intel Processor Trace tracing.  */

static enum btrace_error
linux_disable_pt (struct btrace_tinfo_pt *tinfo)
{
  munmap ((void *) tinfo->pt.mem, tinfo->pt.size);
  munmap ((void *) tinfo->header, PAGE_SIZE);
  close (tinfo->file);

  return BTRACE_ERR_NONE;
}

/* See linux-btrace.h.  */

enum btrace_error
linux_disable_btrace (struct btrace_target_info *tinfo)
{
  enum btrace_error errcode;

  errcode = BTRACE_ERR_NOT_SUPPORTED;
  switch (tinfo->conf.format)
    {
    case BTRACE_FORMAT_NONE:
      break;

    case BTRACE_FORMAT_BTS:
      errcode = linux_disable_bts (&tinfo->variant.bts);
      break;

    case BTRACE_FORMAT_PT:
      errcode = linux_disable_pt (&tinfo->variant.pt);
      break;
    }

  if (errcode == BTRACE_ERR_NONE)
    xfree (tinfo);

  return errcode;
}

/* Read branch trace data in BTS format for the thread given by TINFO into
   BTRACE using the TYPE reading method.  */

static enum btrace_error
linux_read_bts (struct btrace_data_bts *btrace,
                struct btrace_target_info *tinfo,
                enum btrace_read_type type)
{
  struct perf_event_buffer *pevent;
  const uint8_t *begin, *end, *start;
  size_t buffer_size, size;
  __u64 data_head, data_tail;
  unsigned int retries = 5;

  pevent = &tinfo->variant.bts.bts;

  /* For delta reads, we return at least the partial last block containing
     the current PC.  */
  if (type == BTRACE_READ_NEW && !perf_event_new_data (pevent))
    return BTRACE_ERR_NONE;

  buffer_size = pevent->size;
  data_tail = pevent->last_head;

  /* We may need to retry reading the trace.  See below.  */
  while (retries--)
    {
      data_head = *pevent->data_head;

      /* Delete any leftover trace from the previous iteration.  */
      delete btrace->blocks;
      btrace->blocks = nullptr;

      if (type == BTRACE_READ_DELTA)
        {
          __u64 data_size;

          /* Determine the number of bytes to read and check for buffer
             overflows.  */

          /* Check for data head overflows.  We might be able to recover from
             those but they are very unlikely and it's not really worth the
             effort, I think.  */
          if (data_head < data_tail)
            return BTRACE_ERR_OVERFLOW;

          /* If the buffer is smaller than the trace delta, we overflowed.  */
          data_size = data_head - data_tail;
          if (buffer_size < data_size)
            return BTRACE_ERR_OVERFLOW;

          /* DATA_SIZE <= BUFFER_SIZE and therefore fits into a size_t.  */
          size = (size_t) data_size;
        }
      else
        {
          /* Read the entire buffer.  */
          size = buffer_size;

          /* Adjust the size if the buffer has not overflowed, yet.  */
          if (data_head < size)
            size = (size_t) data_head;
        }

      /* Data_head keeps growing; the buffer itself is circular.  */
      begin = pevent->mem;
      start = begin + data_head % buffer_size;

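      /* If the buffer has not wrapped around yet, the valid data ends
         exactly at START; once it has, the entire mapped buffer contains
         valid trace data.  */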
      if (data_head <= buffer_size)
        end = start;
      else
        end = begin + pevent->size;

      btrace->blocks = perf_event_read_bts (tinfo, begin, end, start, size);

      /* The stopping thread notifies its ptracer before it is scheduled out.
         On multi-core systems, the debugger might therefore run while the
         kernel might be writing the last branch trace records.

         Let's check whether the data head moved while we read the trace.  */
      if (data_head == *pevent->data_head)
        break;
    }

  pevent->last_head = data_head;

  /* Prune the incomplete last block (i.e. the first one of inferior execution)
     if we're not doing a delta read.  There is no way of filling in its zeroed
     BEGIN element.  */
  if (!btrace->blocks->empty () && type != BTRACE_READ_DELTA)
    btrace->blocks->pop_back ();

  return BTRACE_ERR_NONE;
}

/* Fill in the Intel Processor Trace configuration information.  */

static void
linux_fill_btrace_pt_config (struct btrace_data_pt_config *conf)
{
  conf->cpu = btrace_this_cpu ();
}

/* Read branch trace data in Intel Processor Trace format for the thread
   given by TINFO into BTRACE using the TYPE reading method.  */

static enum btrace_error
linux_read_pt (struct btrace_data_pt *btrace,
               struct btrace_target_info *tinfo,
               enum btrace_read_type type)
{
  struct perf_event_buffer *pt;

  pt = &tinfo->variant.pt.pt;

  linux_fill_btrace_pt_config (&btrace->config);

  switch (type)
    {
    case BTRACE_READ_DELTA:
      /* We don't support delta reads.  The data head (i.e. aux_head) wraps
         around to stay inside the aux buffer.  */
      return BTRACE_ERR_NOT_SUPPORTED;

    case BTRACE_READ_NEW:
      if (!perf_event_new_data (pt))
        return BTRACE_ERR_NONE;

      /* Fall through.  */
    case BTRACE_READ_ALL:
      perf_event_read_all (pt, &btrace->data, &btrace->size);
      return BTRACE_ERR_NONE;
    }

  internal_error (__FILE__, __LINE__, _("Unknown btrace read type."));
}

/* See linux-btrace.h.  */

enum btrace_error
linux_read_btrace (struct btrace_data *btrace,
                   struct btrace_target_info *tinfo,
                   enum btrace_read_type type)
{
  switch (tinfo->conf.format)
    {
    case BTRACE_FORMAT_NONE:
      return BTRACE_ERR_NOT_SUPPORTED;

    case BTRACE_FORMAT_BTS:
      /* We read btrace in BTS format.  */
      btrace->format = BTRACE_FORMAT_BTS;
      btrace->variant.bts.blocks = NULL;

      return linux_read_bts (&btrace->variant.bts, tinfo, type);

    case BTRACE_FORMAT_PT:
      /* We read btrace in Intel Processor Trace format.  */
      btrace->format = BTRACE_FORMAT_PT;
      btrace->variant.pt.data = NULL;
      btrace->variant.pt.size = 0;

      return linux_read_pt (&btrace->variant.pt, tinfo, type);
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}

/* See linux-btrace.h.  */

const struct btrace_config *
linux_btrace_conf (const struct btrace_target_info *tinfo)
{
  return &tinfo->conf;
}

#else /* !HAVE_LINUX_PERF_EVENT_H */

/* See linux-btrace.h.  */

struct btrace_target_info *
linux_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
{
  return NULL;
}

/* See linux-btrace.h.  */

enum btrace_error
linux_disable_btrace (struct btrace_target_info *tinfo)
{
  return BTRACE_ERR_NOT_SUPPORTED;
}

/* See linux-btrace.h.  */

enum btrace_error
linux_read_btrace (struct btrace_data *btrace,
                   struct btrace_target_info *tinfo,
                   enum btrace_read_type type)
{
  return BTRACE_ERR_NOT_SUPPORTED;
}

/* See linux-btrace.h.  */

const struct btrace_config *
linux_btrace_conf (const struct btrace_target_info *tinfo)
{
  return NULL;
}

#endif /* !HAVE_LINUX_PERF_EVENT_H */