/* GNU/Linux/x86-64 specific low level interface, for the in-process
agent library for GDB.
- Copyright (C) 2010 Free Software Foundation, Inc.
+ Copyright (C) 2010-2019 Free Software Foundation, Inc.
This file is part of GDB.
along with this program. If not, see <http://www.gnu.org/licenses/>. */
#include "server.h"
+#include <sys/mman.h>
+#include "tracepoint.h"
+#include "linux-x86-tdesc.h"
+#include "gdbsupport/x86-xstate.h"
/* Defined in auto-generated file amd64-linux.c. */
void init_registers_amd64_linux (void);
+extern const struct target_desc *tdesc_amd64_linux;
/* fast tracepoints collect registers. */
((char *) buf) + x86_64_ft_collect_regmap[i]);
}
-/* This is only needed because reg-i386-linux-lib.o references it. We
- may use it proper at some point. */
-const char *gdbserver_xmltarget;
+ULONGEST
+get_raw_reg (const unsigned char *raw_regs, int regnum)
+{
+ if (regnum >= X86_64_NUM_FT_COLLECT_GREGS)
+ return 0;
+
+ return *(ULONGEST *) (raw_regs + x86_64_ft_collect_regmap[regnum]);
+}
+
+#ifdef HAVE_UST
+
+#include <ust/processor.h>
+
/* "struct registers" is the UST object type holding the registers at
   the time of the static tracepoint marker call.  This doesn't
   contain RIP, but we know what it must have been (the marker
   address).  */

/* Describe where register REG lives inside UST's "struct registers":
   its byte offset and its size in bytes.  */

#define ST_REGENTRY(REG)			\
  {						\
    offsetof (struct registers, REG),		\
    sizeof (((struct registers *) NULL)->REG)	\
  }

/* Table indexed by GDB's amd64 register numbers, giving the location
   of each register inside the UST-collected buffer.  Order matters:
   the index IS the GDB register number passed to supply_register.  */

static struct
{
  int offset;	/* Byte offset in "struct registers", or -1 if the
		   register is not collected by UST.  */
  int size;	/* Size in bytes (8, or 2 for segment registers).  */
} x86_64_st_collect_regmap[] =
  {
    ST_REGENTRY(rax),
    ST_REGENTRY(rbx),
    ST_REGENTRY(rcx),
    ST_REGENTRY(rdx),
    ST_REGENTRY(rsi),
    ST_REGENTRY(rdi),
    ST_REGENTRY(rbp),
    ST_REGENTRY(rsp),
    ST_REGENTRY(r8),
    ST_REGENTRY(r9),
    ST_REGENTRY(r10),
    ST_REGENTRY(r11),
    ST_REGENTRY(r12),
    ST_REGENTRY(r13),
    ST_REGENTRY(r14),
    ST_REGENTRY(r15),
    /* Slot 16 is RIP, which UST does not record; it is supplied
       separately from the marker address in
       supply_static_tracepoint_registers.  */
    { -1, 0 },
    ST_REGENTRY(rflags),
    ST_REGENTRY(cs),
    ST_REGENTRY(ss),
  };

/* Number of entries in x86_64_st_collect_regmap.  */
#define X86_64_NUM_ST_COLLECT_GREGS \
  (sizeof (x86_64_st_collect_regmap) / sizeof (x86_64_st_collect_regmap[0]))

/* GDB's RIP register number.  */
#define AMD64_RIP_REGNUM 16
+
+void
+supply_static_tracepoint_registers (struct regcache *regcache,
+ const unsigned char *buf,
+ CORE_ADDR pc)
+{
+ int i;
+ unsigned long newpc = pc;
+
+ supply_register (regcache, AMD64_RIP_REGNUM, &newpc);
+
+ for (i = 0; i < X86_64_NUM_ST_COLLECT_GREGS; i++)
+ if (x86_64_st_collect_regmap[i].offset != -1)
+ {
+ switch (x86_64_st_collect_regmap[i].size)
+ {
+ case 8:
+ supply_register (regcache, i,
+ ((char *) buf)
+ + x86_64_st_collect_regmap[i].offset);
+ break;
+ case 2:
+ {
+ unsigned long reg
+ = * (short *) (((char *) buf)
+ + x86_64_st_collect_regmap[i].offset);
+ reg &= 0xffff;
+ supply_register (regcache, i, ®);
+ }
+ break;
+ default:
+ internal_error (__FILE__, __LINE__,
+ "unhandled register size: %d",
+ x86_64_st_collect_regmap[i].size);
+ break;
+ }
+ }
+}
+
+#endif /* HAVE_UST */
+
#if !defined __ILP32__
/* Map the tdesc index to xcr0 mask.  NOTE(review): entries must stay
   in the same order as the X86_TDESC_* index values declared in
   linux-x86-tdesc.h (not visible here) — get_ipa_tdesc indexes this
   array directly with that enum; confirm against that header.  */
static uint64_t idx2mask[X86_TDESC_LAST] = {
  X86_XSTATE_X87_MASK,
  X86_XSTATE_SSE_MASK,
  X86_XSTATE_AVX_MASK,
  X86_XSTATE_MPX_MASK,
  X86_XSTATE_AVX_MPX_MASK,
  X86_XSTATE_AVX_AVX512_MASK,
  X86_XSTATE_AVX_MPX_AVX512_PKU_MASK,
};
#endif
+
+/* Return target_desc to use for IPA, given the tdesc index passed by
+ gdbserver. */
+
+const struct target_desc *
+get_ipa_tdesc (int idx)
+{
+ if (idx >= X86_TDESC_LAST)
+ {
+ internal_error (__FILE__, __LINE__,
+ "unknown ipa tdesc index: %d", idx);
+ }
+
+#if defined __ILP32__
+ switch (idx)
+ {
+ case X86_TDESC_SSE:
+ return amd64_linux_read_description (X86_XSTATE_SSE_MASK, true);
+ case X86_TDESC_AVX:
+ return amd64_linux_read_description (X86_XSTATE_AVX_MASK, true);
+ case X86_TDESC_AVX_AVX512:
+ return amd64_linux_read_description (X86_XSTATE_AVX_AVX512_MASK, true);
+ default:
+ break;
+ }
+#else
+ return amd64_linux_read_description (idx2mask[idx], false);
+#endif
+
+ internal_error (__FILE__, __LINE__,
+ "unknown ipa tdesc index: %d", idx);
+}
+
+/* Allocate buffer for the jump pads. The branch instruction has a
+ reach of +/- 31-bit, and the executable is loaded at low addresses.
+
+ 64-bit: Use MAP_32BIT to allocate in the first 2GB. Shared
+ libraries, being allocated at the top, are unfortunately out of
+ luck.
+
+ x32: Since MAP_32BIT is 64-bit only, do the placement manually.
+ Try allocating at '0x80000000 - SIZE' initially, decreasing until
+ we hit a free area. This ensures the executable is fully covered,
+ and is as close as possible to the shared libraries, which are
+ usually mapped at the top of the first 4GB of the address space.
+*/
+
+void *
+alloc_jump_pad_buffer (size_t size)
+{
+#if __ILP32__
+ uintptr_t addr;
+ int pagesize;
+
+ pagesize = sysconf (_SC_PAGE_SIZE);
+ if (pagesize == -1)
+ perror_with_name ("sysconf");
+
+ addr = 0x80000000 - size;
+
+ /* size should already be page-aligned, but this can't hurt. */
+ addr &= ~(pagesize - 1);
+
+ /* Search for a free area. If we hit 0, we're out of luck. */
+ for (; addr; addr -= pagesize)
+ {
+ void *res;
+
+ /* No MAP_FIXED - we don't want to zap someone's mapping. */
+ res = mmap ((void *) addr, size,
+ PROT_READ | PROT_WRITE | PROT_EXEC,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+
+ /* If we got what we wanted, return. */
+ if ((uintptr_t) res == addr)
+ return res;
+
+ /* If we got a mapping, but at a wrong address, undo it. */
+ if (res != MAP_FAILED)
+ munmap (res, size);
+ }
+
+ return NULL;
+#else
+ void *res = mmap (NULL, size, PROT_READ | PROT_WRITE | PROT_EXEC,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_32BIT, -1, 0);
+
+ if (res == MAP_FAILED)
+ return NULL;
+
+ return res;
+#endif
+}
void
initialize_low_tracepoint (void)
{
- init_registers_amd64_linux ();
+#if defined __ILP32__
+ amd64_linux_read_description (X86_XSTATE_SSE_MASK, true);
+ amd64_linux_read_description (X86_XSTATE_AVX_MASK, true);
+ amd64_linux_read_description (X86_XSTATE_AVX_AVX512_MASK, true);
+#else
+ for (auto i = 0; i < X86_TDESC_LAST; i++)
+ amd64_linux_read_description (idx2mask[i], false);
+#endif
}