+ /* Ask BFD for an upper bound on the size of the core file's
+ program header table. */
+ phdrs_size = bfd_get_elf_phdr_upper_bound (core_bfd);
+ if (phdrs_size == -1)
+ return 0;
+
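+ /* Allocate a buffer on the stack and read the program headers
+ into it. */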
+ phdrs = (Elf_Internal_Phdr *) alloca (phdrs_size);
+ num_phdrs = bfd_get_elf_phdrs (core_bfd, phdrs);
+ if (num_phdrs == -1)
+ return 0;
+
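+ /* Look for a PT_LOAD segment that starts at the requested address;
+ its in-memory size gives the range's length. */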
+ for (i = 0; i < num_phdrs; i++)
+ if (phdrs[i].p_type == PT_LOAD
+ && phdrs[i].p_vaddr == range->start)
+ {
+ range->length = phdrs[i].p_memsz;
+ return 1;
+ }
+
+ return 0;
+ }
+
+ /* We need to know the real target PID to access /proc. */
+ if (current_inferior ()->fake_pid_p)
+ return 0;
+
+ pid = current_inferior ()->pid;
+
+ /* Note that reading /proc/PID/task/PID/maps (1) is much faster than
+ reading /proc/PID/maps (2). The latter identifies thread stacks
+ in the output, which requires scanning every thread in the thread
+ group to check whether a VMA is actually a thread's stack. With
+ Linux 4.4 on an Intel i7-4810MQ @ 2.80GHz, with an inferior with
+ a few thousand threads, (1) takes a few milliseconds, while (2)
+ takes several seconds. Also note that "smaps", which we read for
+ determining core dump mappings, is even slower than "maps". */
+ xsnprintf (filename, sizeof filename, "/proc/%ld/task/%ld/maps", pid, pid);
+ gdb::unique_xmalloc_ptr<char> data
+ = target_fileio_read_stralloc (NULL, filename);
+ if (data != NULL)
+ {
+ char *line;
+ char *saveptr = NULL;
+
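+ /* Each line of "maps" starts with "START-END"; find the line whose
+ START matches the requested start address. */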
+ for (line = strtok_r (data.get (), "\n", &saveptr);
+ line != NULL;
+ line = strtok_r (NULL, "\n", &saveptr))
+ {
+ ULONGEST addr, endaddr;
+ const char *p = line;
+
+ addr = strtoulst (p, &p, 16);
+ if (addr == range->start)
+ {
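+ /* Found it. Skip the '-' separator and parse the end address. */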
+ if (*p == '-')
+ p++;
+ endaddr = strtoulst (p, &p, 16);
+ range->length = endaddr - addr;
+ return 1;
+ }
+ }
+ }
+ else
+ warning (_("unable to open /proc file '%s'"), filename);
+
+ return 0;