/* Target-dependent code for AMD64.
- Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
- Free Software Foundation, Inc.
+ Copyright (C) 2001-2015 Free Software Foundation, Inc.
Contributed by Jiri Smid, SuSE Labs.
along with this program. If not, see <http://www.gnu.org/licenses/>. */
#include "defs.h"
+#include "opcode/i386.h"
+#include "dis-asm.h"
#include "arch-utils.h"
#include "block.h"
#include "dummy-frame.h"
#include "frame-base.h"
#include "frame-unwind.h"
#include "inferior.h"
+#include "infrun.h"
#include "gdbcmd.h"
#include "gdbcore.h"
#include "objfiles.h"
#include "regcache.h"
#include "regset.h"
#include "symfile.h"
-
-#include "gdb_assert.h"
-
+#include "disasm.h"
#include "amd64-tdep.h"
#include "i387-tdep.h"
+#include "features/i386/amd64.c"
+#include "features/i386/amd64-avx.c"
+#include "features/i386/amd64-mpx.c"
+#include "features/i386/amd64-avx512.c"
+
+#include "features/i386/x32.c"
+#include "features/i386/x32-avx.c"
+#include "features/i386/x32-avx512.c"
+
+#include "ax.h"
+#include "ax-gdb.h"
+
/* Note that the AMD64 architecture was previously known as x86-64.
The latter is (forever) engraved into the canonical system name as
returned by config.guess, and used as the name for the AMD64 port
"mxcsr",
};
-/* Total number of registers. */
-#define AMD64_NUM_REGS ARRAY_SIZE (amd64_register_names)
+static const char *amd64_ymm_names[] =
+{
+ "ymm0", "ymm1", "ymm2", "ymm3",
+ "ymm4", "ymm5", "ymm6", "ymm7",
+ "ymm8", "ymm9", "ymm10", "ymm11",
+ "ymm12", "ymm13", "ymm14", "ymm15"
+};
-/* Return the name of register REGNUM. */
+static const char *amd64_ymm_avx512_names[] =
+{
+ "ymm16", "ymm17", "ymm18", "ymm19",
+ "ymm20", "ymm21", "ymm22", "ymm23",
+ "ymm24", "ymm25", "ymm26", "ymm27",
+ "ymm28", "ymm29", "ymm30", "ymm31"
+};
-const char *
-amd64_register_name (struct gdbarch *gdbarch, int regnum)
+static const char *amd64_ymmh_names[] =
{
- if (regnum >= 0 && regnum < AMD64_NUM_REGS)
- return amd64_register_names[regnum];
+ "ymm0h", "ymm1h", "ymm2h", "ymm3h",
+ "ymm4h", "ymm5h", "ymm6h", "ymm7h",
+ "ymm8h", "ymm9h", "ymm10h", "ymm11h",
+ "ymm12h", "ymm13h", "ymm14h", "ymm15h"
+};
- return NULL;
-}
+static const char *amd64_ymmh_avx512_names[] =
+{
+ "ymm16h", "ymm17h", "ymm18h", "ymm19h",
+ "ymm20h", "ymm21h", "ymm22h", "ymm23h",
+ "ymm24h", "ymm25h", "ymm26h", "ymm27h",
+ "ymm28h", "ymm29h", "ymm30h", "ymm31h"
+};
+
+static const char *amd64_mpx_names[] =
+{
+ "bnd0raw", "bnd1raw", "bnd2raw", "bnd3raw", "bndcfgu", "bndstatus"
+};
+
+static const char *amd64_k_names[] =
+{
+ "k0", "k1", "k2", "k3",
+ "k4", "k5", "k6", "k7"
+};
-/* Return the GDB type object for the "standard" data type of data in
- register REGNUM. */
+static const char *amd64_zmmh_names[] =
+{
+ "zmm0h", "zmm1h", "zmm2h", "zmm3h",
+ "zmm4h", "zmm5h", "zmm6h", "zmm7h",
+ "zmm8h", "zmm9h", "zmm10h", "zmm11h",
+ "zmm12h", "zmm13h", "zmm14h", "zmm15h",
+ "zmm16h", "zmm17h", "zmm18h", "zmm19h",
+ "zmm20h", "zmm21h", "zmm22h", "zmm23h",
+ "zmm24h", "zmm25h", "zmm26h", "zmm27h",
+ "zmm28h", "zmm29h", "zmm30h", "zmm31h"
+};
-struct type *
-amd64_register_type (struct gdbarch *gdbarch, int regnum)
+static const char *amd64_zmm_names[] =
{
- if (regnum >= AMD64_RAX_REGNUM && regnum <= AMD64_RDI_REGNUM)
- return builtin_type_int64;
- if (regnum == AMD64_RBP_REGNUM || regnum == AMD64_RSP_REGNUM)
- return builtin_type_void_data_ptr;
- if (regnum >= AMD64_R8_REGNUM && regnum <= AMD64_R15_REGNUM)
- return builtin_type_int64;
- if (regnum == AMD64_RIP_REGNUM)
- return builtin_type_void_func_ptr;
- if (regnum == AMD64_EFLAGS_REGNUM)
- return i386_eflags_type;
- if (regnum >= AMD64_CS_REGNUM && regnum <= AMD64_GS_REGNUM)
- return builtin_type_int32;
- if (regnum >= AMD64_ST0_REGNUM && regnum <= AMD64_ST0_REGNUM + 7)
- return builtin_type_i387_ext;
- if (regnum >= AMD64_FCTRL_REGNUM && regnum <= AMD64_FCTRL_REGNUM + 7)
- return builtin_type_int32;
- if (regnum >= AMD64_XMM0_REGNUM && regnum <= AMD64_XMM0_REGNUM + 15)
- return i386_sse_type (gdbarch);
- if (regnum == AMD64_MXCSR_REGNUM)
- return i386_mxcsr_type;
+ "zmm0", "zmm1", "zmm2", "zmm3",
+ "zmm4", "zmm5", "zmm6", "zmm7",
+ "zmm8", "zmm9", "zmm10", "zmm11",
+ "zmm12", "zmm13", "zmm14", "zmm15",
+ "zmm16", "zmm17", "zmm18", "zmm19",
+ "zmm20", "zmm21", "zmm22", "zmm23",
+ "zmm24", "zmm25", "zmm26", "zmm27",
+ "zmm28", "zmm29", "zmm30", "zmm31"
+};
- internal_error (__FILE__, __LINE__, _("invalid regnum"));
-}
+static const char *amd64_xmm_avx512_names[] = {
+ "xmm16", "xmm17", "xmm18", "xmm19",
+ "xmm20", "xmm21", "xmm22", "xmm23",
+ "xmm24", "xmm25", "xmm26", "xmm27",
+ "xmm28", "xmm29", "xmm30", "xmm31"
+};
/* DWARF Register Number Mapping as defined in the System V psABI,
section 3.6. */
AMD64_RSP_REGNUM,
/* Extended Integer Registers 8 - 15. */
- 8, 9, 10, 11, 12, 13, 14, 15,
+ AMD64_R8_REGNUM, /* %r8 */
+ AMD64_R9_REGNUM, /* %r9 */
+ AMD64_R10_REGNUM, /* %r10 */
+ AMD64_R11_REGNUM, /* %r11 */
+ AMD64_R12_REGNUM, /* %r12 */
+ AMD64_R13_REGNUM, /* %r13 */
+ AMD64_R14_REGNUM, /* %r14 */
+ AMD64_R15_REGNUM, /* %r15 */
/* Return Address RA. Mapped to RIP. */
AMD64_RIP_REGNUM,
AMD64_ST0_REGNUM + 2, AMD64_ST0_REGNUM + 3,
AMD64_ST0_REGNUM + 4, AMD64_ST0_REGNUM + 5,
AMD64_ST0_REGNUM + 6, AMD64_ST0_REGNUM + 7,
-
+
+ /* MMX Registers 0 - 7.
+ We have to handle those registers specifically, as their register
+ number within GDB depends on the target (or they may even not be
+ available at all). */
+ -1, -1, -1, -1, -1, -1, -1, -1,
+
/* Control and Status Flags Register. */
AMD64_EFLAGS_REGNUM,
static int
amd64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
{
+ struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
+ int ymm0_regnum = tdep->ymm0_regnum;
int regnum = -1;
if (reg >= 0 && reg < amd64_dwarf_regmap_len)
if (regnum == -1)
warning (_("Unmapped DWARF Register #%d encountered."), reg);
+ else if (ymm0_regnum >= 0
+ && i386_xmm_regnum_p (gdbarch, regnum))
+ regnum += ymm0_regnum - I387_XMM0_REGNUM (tdep);
return regnum;
}
+/* Map architectural register numbers to gdb register numbers. */
+
+static const int amd64_arch_regmap[16] =
+{
+ AMD64_RAX_REGNUM, /* %rax */
+ AMD64_RCX_REGNUM, /* %rcx */
+ AMD64_RDX_REGNUM, /* %rdx */
+ AMD64_RBX_REGNUM, /* %rbx */
+ AMD64_RSP_REGNUM, /* %rsp */
+ AMD64_RBP_REGNUM, /* %rbp */
+ AMD64_RSI_REGNUM, /* %rsi */
+ AMD64_RDI_REGNUM, /* %rdi */
+ AMD64_R8_REGNUM, /* %r8 */
+ AMD64_R9_REGNUM, /* %r9 */
+ AMD64_R10_REGNUM, /* %r10 */
+ AMD64_R11_REGNUM, /* %r11 */
+ AMD64_R12_REGNUM, /* %r12 */
+ AMD64_R13_REGNUM, /* %r13 */
+ AMD64_R14_REGNUM, /* %r14 */
+ AMD64_R15_REGNUM /* %r15 */
+};
+
+static const int amd64_arch_regmap_len =
+ (sizeof (amd64_arch_regmap) / sizeof (amd64_arch_regmap[0]));
+
+/* Convert architectural register number REG to the appropriate register
+ number used by GDB. */
+
+static int
+amd64_arch_reg_to_regnum (int reg)
+{
+ gdb_assert (reg >= 0 && reg < amd64_arch_regmap_len);
+
+ return amd64_arch_regmap[reg];
+}
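+
+/* For example (assuming the usual AMD64_*_REGNUM values): the
+   architectural encoding order is %rax, %rcx, %rdx, %rbx, ..., while
+   GDB orders %rbx before %rcx, so amd64_arch_reg_to_regnum (1) yields
+   AMD64_RCX_REGNUM rather than GDB register number 1 (%rbx).  */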
+
+/* Register names for byte pseudo-registers. */
+
+static const char *amd64_byte_names[] =
+{
+ "al", "bl", "cl", "dl", "sil", "dil", "bpl", "spl",
+ "r8l", "r9l", "r10l", "r11l", "r12l", "r13l", "r14l", "r15l",
+ "ah", "bh", "ch", "dh"
+};
+
+/* Number of lower byte registers. */
+#define AMD64_NUM_LOWER_BYTE_REGS 16
+
+/* Register names for word pseudo-registers. */
+
+static const char *amd64_word_names[] =
+{
+ "ax", "bx", "cx", "dx", "si", "di", "bp", "",
+ "r8w", "r9w", "r10w", "r11w", "r12w", "r13w", "r14w", "r15w"
+};
+
+/* Register names for dword pseudo-registers. */
+
+static const char *amd64_dword_names[] =
+{
+ "eax", "ebx", "ecx", "edx", "esi", "edi", "ebp", "esp",
+ "r8d", "r9d", "r10d", "r11d", "r12d", "r13d", "r14d", "r15d",
+ "eip"
+};
+
+/* Return the name of register REGNUM. */
+
+static const char *
+amd64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
+{
+ struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
+ if (i386_byte_regnum_p (gdbarch, regnum))
+ return amd64_byte_names[regnum - tdep->al_regnum];
+ else if (i386_zmm_regnum_p (gdbarch, regnum))
+ return amd64_zmm_names[regnum - tdep->zmm0_regnum];
+ else if (i386_ymm_regnum_p (gdbarch, regnum))
+ return amd64_ymm_names[regnum - tdep->ymm0_regnum];
+ else if (i386_ymm_avx512_regnum_p (gdbarch, regnum))
+ return amd64_ymm_avx512_names[regnum - tdep->ymm16_regnum];
+ else if (i386_word_regnum_p (gdbarch, regnum))
+ return amd64_word_names[regnum - tdep->ax_regnum];
+ else if (i386_dword_regnum_p (gdbarch, regnum))
+ return amd64_dword_names[regnum - tdep->eax_regnum];
+ else
+ return i386_pseudo_register_name (gdbarch, regnum);
+}
+
+static struct value *
+amd64_pseudo_register_read_value (struct gdbarch *gdbarch,
+ struct regcache *regcache,
+ int regnum)
+{
+ gdb_byte raw_buf[MAX_REGISTER_SIZE];
+ struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
+ enum register_status status;
+ struct value *result_value;
+ gdb_byte *buf;
+
+ result_value = allocate_value (register_type (gdbarch, regnum));
+ VALUE_LVAL (result_value) = lval_register;
+ VALUE_REGNUM (result_value) = regnum;
+ buf = value_contents_raw (result_value);
+
+ if (i386_byte_regnum_p (gdbarch, regnum))
+ {
+ int gpnum = regnum - tdep->al_regnum;
+
+ /* Extract (always little endian). */
+ if (gpnum >= AMD64_NUM_LOWER_BYTE_REGS)
+ {
+ /* Special handling for AH, BH, CH, DH. */
+ status = regcache_raw_read (regcache,
+ gpnum - AMD64_NUM_LOWER_BYTE_REGS,
+ raw_buf);
+ if (status == REG_VALID)
+ memcpy (buf, raw_buf + 1, 1);
+ else
+ mark_value_bytes_unavailable (result_value, 0,
+ TYPE_LENGTH (value_type (result_value)));
+ }
+ else
+ {
+ status = regcache_raw_read (regcache, gpnum, raw_buf);
+ if (status == REG_VALID)
+ memcpy (buf, raw_buf, 1);
+ else
+ mark_value_bytes_unavailable (result_value, 0,
+ TYPE_LENGTH (value_type (result_value)));
+ }
+ }
+ else if (i386_dword_regnum_p (gdbarch, regnum))
+ {
+ int gpnum = regnum - tdep->eax_regnum;
+ /* Extract (always little endian). */
+ status = regcache_raw_read (regcache, gpnum, raw_buf);
+ if (status == REG_VALID)
+ memcpy (buf, raw_buf, 4);
+ else
+ mark_value_bytes_unavailable (result_value, 0,
+ TYPE_LENGTH (value_type (result_value)));
+ }
+ else
+ i386_pseudo_register_read_into_value (gdbarch, regcache, regnum,
+ result_value);
+
+ return result_value;
+}
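+
+/* As a concrete example of the byte-register handling above: reading
+   pseudo register %ah takes the AMD64_NUM_LOWER_BYTE_REGS branch,
+   since its index among the byte registers is 16; raw register 0
+   (%rax) is read, and the byte at offset 1 -- the high byte of %ax
+   on this little-endian target -- is copied into the result.  */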
+
+static void
+amd64_pseudo_register_write (struct gdbarch *gdbarch,
+ struct regcache *regcache,
+ int regnum, const gdb_byte *buf)
+{
+ gdb_byte raw_buf[MAX_REGISTER_SIZE];
+ struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
+
+ if (i386_byte_regnum_p (gdbarch, regnum))
+ {
+ int gpnum = regnum - tdep->al_regnum;
+
+ if (gpnum >= AMD64_NUM_LOWER_BYTE_REGS)
+ {
+ /* Read ... AH, BH, CH, DH. */
+ regcache_raw_read (regcache,
+ gpnum - AMD64_NUM_LOWER_BYTE_REGS, raw_buf);
+ /* ... Modify ... (always little endian). */
+ memcpy (raw_buf + 1, buf, 1);
+ /* ... Write. */
+ regcache_raw_write (regcache,
+ gpnum - AMD64_NUM_LOWER_BYTE_REGS, raw_buf);
+ }
+ else
+ {
+ /* Read ... */
+ regcache_raw_read (regcache, gpnum, raw_buf);
+ /* ... Modify ... (always little endian). */
+ memcpy (raw_buf, buf, 1);
+ /* ... Write. */
+ regcache_raw_write (regcache, gpnum, raw_buf);
+ }
+ }
+ else if (i386_dword_regnum_p (gdbarch, regnum))
+ {
+ int gpnum = regnum - tdep->eax_regnum;
+
+ /* Read ... */
+ regcache_raw_read (regcache, gpnum, raw_buf);
+ /* ... Modify ... (always little endian). */
+ memcpy (raw_buf, buf, 4);
+ /* ... Write. */
+ regcache_raw_write (regcache, gpnum, raw_buf);
+ }
+ else
+ i386_pseudo_register_write (gdbarch, regcache, regnum, buf);
+}
+
\f
/* Register classes as defined in the psABI. */
return AMD64_SSE;
}
-static void amd64_classify (struct type *type, enum amd64_reg_class class[2]);
+static void amd64_classify (struct type *type, enum amd64_reg_class theclass[2]);
/* Return non-zero if TYPE is a non-POD structure or union type. */
arrays) and union types, and store the result in CLASS. */
static void
-amd64_classify_aggregate (struct type *type, enum amd64_reg_class class[2])
+amd64_classify_aggregate (struct type *type, enum amd64_reg_class theclass[2])
{
- int len = TYPE_LENGTH (type);
-
/* 1. If the size of an object is larger than two eightbytes, or in
C++, is a non-POD structure or union type, or contains
unaligned fields, it has class memory. */
- if (len > 16 || amd64_non_pod_p (type))
+ if (TYPE_LENGTH (type) > 16 || amd64_non_pod_p (type))
{
- class[0] = class[1] = AMD64_MEMORY;
+ theclass[0] = theclass[1] = AMD64_MEMORY;
return;
}
/* 2. Both eightbytes get initialized to class NO_CLASS. */
- class[0] = class[1] = AMD64_NO_CLASS;
+ theclass[0] = theclass[1] = AMD64_NO_CLASS;
/* 3. Each field of an object is classified recursively so that
always two fields are considered. The resulting class is
struct type *subtype = check_typedef (TYPE_TARGET_TYPE (type));
/* All fields in an array have the same type. */
- amd64_classify (subtype, class);
- if (len > 8 && class[1] == AMD64_NO_CLASS)
- class[1] = class[0];
+ amd64_classify (subtype, theclass);
+ if (TYPE_LENGTH (type) > 8 && theclass[1] == AMD64_NO_CLASS)
+ theclass[1] = theclass[0];
}
else
{
struct type *subtype = check_typedef (TYPE_FIELD_TYPE (type, i));
int pos = TYPE_FIELD_BITPOS (type, i) / 64;
enum amd64_reg_class subclass[2];
+ int bitsize = TYPE_FIELD_BITSIZE (type, i);
+ int endpos;
+
+ if (bitsize == 0)
+ bitsize = TYPE_LENGTH (subtype) * 8;
+ endpos = (TYPE_FIELD_BITPOS (type, i) + bitsize - 1) / 64;
/* Ignore static fields. */
- if (TYPE_FIELD_STATIC (type, i))
+ if (field_is_static (&TYPE_FIELD (type, i)))
continue;
gdb_assert (pos == 0 || pos == 1);
amd64_classify (subtype, subclass);
- class[pos] = amd64_merge_classes (class[pos], subclass[0]);
+ theclass[pos] = amd64_merge_classes (theclass[pos], subclass[0]);
+ if (bitsize <= 64 && pos == 0 && endpos == 1)
+ /* This is a bit of an odd case: We have a field that would
+ normally fit in one of the two eightbytes, except that
+ it is placed in a way that this field straddles them.
+ This has been seen with a structure containing an array.
+
+ The ABI is a bit unclear in this case, but we assume that
+ this field's class (stored in subclass[0]) must also be merged
+ into class[1]. In other words, our field has a piece stored
+ in the second eight-byte, and thus its class applies to
+ the second eight-byte as well.
+
+ In the case where the field length exceeds 8 bytes,
+ it should not be necessary to merge the field class
+ into class[1]. As LEN > 8, subclass[1] is necessarily
+ different from AMD64_NO_CLASS. If subclass[1] is equal
+ to subclass[0], then the normal class[1]/subclass[1]
+ merging will take care of everything. For subclass[1]
+ to be different from subclass[0], I can only see the case
+ where we have a SSE/SSEUP or X87/X87UP pair, which both
+ use up all 16 bytes of the aggregate, and are already
+ handled just fine (because each portion sits on its own
+ 8-byte). */
+ theclass[1] = amd64_merge_classes (theclass[1], subclass[0]);
if (pos == 0)
- class[1] = amd64_merge_classes (class[1], subclass[1]);
+ theclass[1] = amd64_merge_classes (theclass[1], subclass[1]);
}
}
/* Rule (a): If one of the classes is MEMORY, the whole argument is
passed in memory. */
- if (class[0] == AMD64_MEMORY || class[1] == AMD64_MEMORY)
- class[0] = class[1] = AMD64_MEMORY;
+ if (theclass[0] == AMD64_MEMORY || theclass[1] == AMD64_MEMORY)
+ theclass[0] = theclass[1] = AMD64_MEMORY;
- /* Rule (b): If SSEUP is not preceeded by SSE, it is converted to
+ /* Rule (b): If SSEUP is not preceded by SSE, it is converted to
SSE. */
- if (class[0] == AMD64_SSEUP)
- class[0] = AMD64_SSE;
- if (class[1] == AMD64_SSEUP && class[0] != AMD64_SSE)
- class[1] = AMD64_SSE;
+ if (theclass[0] == AMD64_SSEUP)
+ theclass[0] = AMD64_SSE;
+ if (theclass[1] == AMD64_SSEUP && theclass[0] != AMD64_SSE)
+ theclass[1] = AMD64_SSE;
}
/* Classify TYPE, and store the result in CLASS. */
static void
-amd64_classify (struct type *type, enum amd64_reg_class class[2])
+amd64_classify (struct type *type, enum amd64_reg_class theclass[2])
{
enum type_code code = TYPE_CODE (type);
int len = TYPE_LENGTH (type);
- class[0] = class[1] = AMD64_NO_CLASS;
+ theclass[0] = theclass[1] = AMD64_NO_CLASS;
/* Arguments of types (signed and unsigned) _Bool, char, short, int,
long, long long, and pointers are in the INTEGER class. Similarly,
|| code == TYPE_CODE_CHAR
|| code == TYPE_CODE_PTR || code == TYPE_CODE_REF)
&& (len == 1 || len == 2 || len == 4 || len == 8))
- class[0] = AMD64_INTEGER;
+ theclass[0] = AMD64_INTEGER;
/* Arguments of types float, double, _Decimal32, _Decimal64 and __m64
are in class SSE. */
else if ((code == TYPE_CODE_FLT || code == TYPE_CODE_DECFLOAT)
&& (len == 4 || len == 8))
/* FIXME: __m64 . */
- class[0] = AMD64_SSE;
+ theclass[0] = AMD64_SSE;
/* Arguments of types __float128, _Decimal128 and __m128 are split into
two halves. The least significant ones belong to class SSE, the most
significant one to class SSEUP. */
else if (code == TYPE_CODE_DECFLOAT && len == 16)
/* FIXME: __float128, __m128. */
- class[0] = AMD64_SSE, class[1] = AMD64_SSEUP;
+ theclass[0] = AMD64_SSE, theclass[1] = AMD64_SSEUP;
/* The 64-bit mantissa of arguments of type long double belongs to
class X87, the 16-bit exponent plus 6 bytes of padding belongs to
class X87UP. */
else if (code == TYPE_CODE_FLT && len == 16)
/* Class X87 and X87UP. */
- class[0] = AMD64_X87, class[1] = AMD64_X87UP;
+ theclass[0] = AMD64_X87, theclass[1] = AMD64_X87UP;
+
+ /* Arguments of complex T where T is one of the types float or
+ double get treated as if they are implemented as:
+
+ struct complexT {
+ T real;
+ T imag;
+ };
+
+ */
+ else if (code == TYPE_CODE_COMPLEX && len == 8)
+ theclass[0] = AMD64_SSE;
+ else if (code == TYPE_CODE_COMPLEX && len == 16)
+ theclass[0] = theclass[1] = AMD64_SSE;
+
+ /* A variable of type complex long double is classified as type
+ COMPLEX_X87. */
+ else if (code == TYPE_CODE_COMPLEX && len == 32)
+ theclass[0] = AMD64_COMPLEX_X87;
/* Aggregates. */
else if (code == TYPE_CODE_ARRAY || code == TYPE_CODE_STRUCT
|| code == TYPE_CODE_UNION)
- amd64_classify_aggregate (type, class);
+ amd64_classify_aggregate (type, theclass);
}
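+
+/* A worked example of the classification above: the 16-byte aggregate
+   "struct { long l; double d; }" is split into two eightbytes that
+   classify as INTEGER and SSE respectively, so as an argument it
+   would be passed in the next available integer register and the
+   next available SSE register (e.g. %rdi and %xmm0).  */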
static enum return_value_convention
-amd64_return_value (struct gdbarch *gdbarch, struct type *func_type,
+amd64_return_value (struct gdbarch *gdbarch, struct value *function,
struct type *type, struct regcache *regcache,
gdb_byte *readbuf, const gdb_byte *writebuf)
{
- enum amd64_reg_class class[2];
+ enum amd64_reg_class theclass[2];
int len = TYPE_LENGTH (type);
static int integer_regnum[] = { AMD64_RAX_REGNUM, AMD64_RDX_REGNUM };
static int sse_regnum[] = { AMD64_XMM0_REGNUM, AMD64_XMM1_REGNUM };
gdb_assert (!(readbuf && writebuf));
/* 1. Classify the return type with the classification algorithm. */
- amd64_classify (type, class);
+ amd64_classify (type, theclass);
/* 2. If the type has class MEMORY, then the caller provides space
for the return value and passes the address of this storage in
- %rdi as if it were the first argument to the function. In effect,
+ %rdi as if it were the first argument to the function. In effect,
this address becomes a hidden first argument.
On return %rax will contain the address that has been passed in
by the caller in %rdi. */
- if (class[0] == AMD64_MEMORY)
+ if (theclass[0] == AMD64_MEMORY)
{
/* As indicated by the comment above, the ABI guarantees that we
can always find the return value just after the function has
return RETURN_VALUE_ABI_RETURNS_ADDRESS;
}
- gdb_assert (class[1] != AMD64_MEMORY);
+ /* 8. If the class is COMPLEX_X87, the real part of the value is
+ returned in %st0 and the imaginary part in %st1. */
+ if (theclass[0] == AMD64_COMPLEX_X87)
+ {
+ if (readbuf)
+ {
+ regcache_raw_read (regcache, AMD64_ST0_REGNUM, readbuf);
+ regcache_raw_read (regcache, AMD64_ST1_REGNUM, readbuf + 16);
+ }
+
+ if (writebuf)
+ {
+ i387_return_value (gdbarch, regcache);
+ regcache_raw_write (regcache, AMD64_ST0_REGNUM, writebuf);
+ regcache_raw_write (regcache, AMD64_ST1_REGNUM, writebuf + 16);
+
+ /* Fix up the tag word such that both %st(0) and %st(1) are
+ marked as valid. */
+ regcache_raw_write_unsigned (regcache, AMD64_FTAG_REGNUM, 0xfff);
+ }
+
+ return RETURN_VALUE_REGISTER_CONVENTION;
+ }
+
+ gdb_assert (theclass[1] != AMD64_MEMORY);
gdb_assert (len <= 16);
for (i = 0; len > 0; i++, len -= 8)
int regnum = -1;
int offset = 0;
- switch (class[i])
+ switch (theclass[i])
{
case AMD64_INTEGER:
/* 3. If the class is INTEGER, the next available register
case AMD64_X87UP:
/* 7. If the class is X87UP, the value is returned together
with the previous X87 value in %st0. */
- gdb_assert (i > 0 && class[0] == AMD64_X87);
+ gdb_assert (i > 0 && theclass[0] == AMD64_X87);
regnum = AMD64_ST0_REGNUM;
offset = 8;
len = 2;
AMD64_RSI_REGNUM, /* %rsi */
AMD64_RDX_REGNUM, /* %rdx */
AMD64_RCX_REGNUM, /* %rcx */
- 8, /* %r8 */
- 9 /* %r9 */
+ AMD64_R8_REGNUM, /* %r8 */
+ AMD64_R9_REGNUM /* %r9 */
};
static int sse_regnum[] =
{
{
struct type *type = value_type (args[i]);
int len = TYPE_LENGTH (type);
- enum amd64_reg_class class[2];
+ enum amd64_reg_class theclass[2];
int needed_integer_regs = 0;
int needed_sse_regs = 0;
int j;
/* Classify argument. */
- amd64_classify (type, class);
+ amd64_classify (type, theclass);
/* Calculate the number of integer and SSE registers needed for
this argument. */
for (j = 0; j < 2; j++)
{
- if (class[j] == AMD64_INTEGER)
+ if (theclass[j] == AMD64_INTEGER)
needed_integer_regs++;
- else if (class[j] == AMD64_SSE)
+ else if (theclass[j] == AMD64_SSE)
needed_sse_regs++;
}
int regnum = -1;
int offset = 0;
- switch (class[j])
+ switch (theclass[j])
{
case AMD64_INTEGER:
regnum = integer_regnum[integer_reg++];
int nargs, struct value **args, CORE_ADDR sp,
int struct_return, CORE_ADDR struct_addr)
{
+ enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
gdb_byte buf[8];
/* Pass arguments. */
/* Pass "hidden" argument". */
if (struct_return)
{
- store_unsigned_integer (buf, 8, struct_addr);
+ store_unsigned_integer (buf, 8, byte_order, struct_addr);
regcache_cooked_write (regcache, AMD64_RDI_REGNUM, buf);
}
/* Store return address. */
sp -= 8;
- store_unsigned_integer (buf, 8, bp_addr);
+ store_unsigned_integer (buf, 8, byte_order, bp_addr);
write_memory (sp, buf, 8);
/* Finally, update the stack pointer... */
- store_unsigned_integer (buf, 8, sp);
+ store_unsigned_integer (buf, 8, byte_order, sp);
regcache_cooked_write (regcache, AMD64_RSP_REGNUM, buf);
/* ...and fake a frame pointer. */
return sp + 16;
}
\f
+/* Displaced instruction handling. */
-/* The maximum number of saved registers. This should include %rip. */
-#define AMD64_NUM_SAVED_REGS AMD64_NUM_GREGS
+/* A partially decoded instruction.
+ This contains enough details for displaced stepping purposes. */
-struct amd64_frame_cache
+struct amd64_insn
{
- /* Base address. */
- CORE_ADDR base;
- CORE_ADDR sp_offset;
- CORE_ADDR pc;
-
- /* Saved registers. */
- CORE_ADDR saved_regs[AMD64_NUM_SAVED_REGS];
- CORE_ADDR saved_sp;
-
- /* Do we have a frame? */
- int frameless_p;
+ /* The number of opcode bytes. */
+ int opcode_len;
+ /* The offset of the rex prefix or -1 if not present. */
+ int rex_offset;
+ /* The offset to the first opcode byte. */
+ int opcode_offset;
+ /* The offset to the modrm byte or -1 if not present. */
+ int modrm_offset;
+
+ /* The raw instruction. */
+ gdb_byte *raw_insn;
};
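+
+/* A sketch of how these fields get filled in: for the insn
+   "movq 0x10(%rip), %rax", encoded 48 8b 05 10 00 00 00, we would
+   have rex_offset = 0 (the REX.W prefix), opcode_offset = 1,
+   opcode_len = 1 (opcode 0x8b) and modrm_offset = 2 (ModRM 0x05,
+   i.e. mod=00, r/m=101, selecting %rip-relative addressing).  */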
-/* Initialize a frame cache. */
-
-static void
-amd64_init_frame_cache (struct amd64_frame_cache *cache)
+struct displaced_step_closure
{
- int i;
-
- /* Base address. */
- cache->base = 0;
- cache->sp_offset = -8;
- cache->pc = 0;
-
- /* Saved registers. We initialize these to -1 since zero is a valid
- offset (that's where %rbp is supposed to be stored). */
- for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
- cache->saved_regs[i] = -1;
- cache->saved_sp = 0;
+ /* For rip-relative insns, saved copy of the reg we use instead of %rip. */
+ int tmp_used;
+ int tmp_regno;
+ ULONGEST tmp_save;
- /* Frameless until proven otherwise. */
- cache->frameless_p = 1;
-}
+ /* Details of the instruction. */
+ struct amd64_insn insn_details;
-/* Allocate and initialize a frame cache. */
+ /* Amount of space allocated to insn_buf. */
+ int max_len;
-static struct amd64_frame_cache *
-amd64_alloc_frame_cache (void)
-{
- struct amd64_frame_cache *cache;
+ /* The possibly modified insn.
+ This is a variable-length field. */
+ gdb_byte insn_buf[1];
+};
- cache = FRAME_OBSTACK_ZALLOC (struct amd64_frame_cache);
- amd64_init_frame_cache (cache);
- return cache;
-}
+/* WARNING: Keep onebyte_has_modrm, twobyte_has_modrm in sync with
+ ../opcodes/i386-dis.c (until libopcodes exports them, or an alternative,
+ at which point delete these in favor of libopcodes' versions). */
+
+static const unsigned char onebyte_has_modrm[256] = {
+ /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
+ /* ------------------------------- */
+ /* 00 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 00 */
+ /* 10 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 10 */
+ /* 20 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 20 */
+ /* 30 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 30 */
+ /* 40 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 40 */
+ /* 50 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 50 */
+ /* 60 */ 0,0,1,1,0,0,0,0,0,1,0,1,0,0,0,0, /* 60 */
+ /* 70 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 70 */
+ /* 80 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 80 */
+ /* 90 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 90 */
+ /* a0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* a0 */
+ /* b0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* b0 */
+ /* c0 */ 1,1,0,0,1,1,1,1,0,0,0,0,0,0,0,0, /* c0 */
+ /* d0 */ 1,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1, /* d0 */
+ /* e0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* e0 */
+ /* f0 */ 0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1 /* f0 */
+ /* ------------------------------- */
+ /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
+};
-/* Do a limited analysis of the prologue at PC and update CACHE
- accordingly. Bail out early if CURRENT_PC is reached. Return the
- address where the analysis stopped.
+static const unsigned char twobyte_has_modrm[256] = {
+ /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
+ /* ------------------------------- */
+ /* 00 */ 1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,1, /* 0f */
+ /* 10 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 1f */
+ /* 20 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 2f */
+ /* 30 */ 0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0, /* 3f */
+ /* 40 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 4f */
+ /* 50 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 5f */
+ /* 60 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 6f */
+ /* 70 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 7f */
+ /* 80 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 8f */
+ /* 90 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 9f */
+ /* a0 */ 0,0,0,1,1,1,1,1,0,0,0,1,1,1,1,1, /* af */
+ /* b0 */ 1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1, /* bf */
+ /* c0 */ 1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0, /* cf */
+ /* d0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* df */
+ /* e0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* ef */
+ /* f0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0 /* ff */
+ /* ------------------------------- */
+ /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
+};
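+
+/* Example of consulting these tables: for "movq %rdi, %rax"
+   (48 89 f8) the opcode byte is 0x89 and onebyte_has_modrm[0x89] is
+   1, so the trailing 0xf8 is decoded as a ModRM byte.  For "syscall"
+   (0f 05), the 0x0f escape selects the two-byte table, and
+   twobyte_has_modrm[0x05] is 0, so the insn ends there.  */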
- We will handle only functions beginning with:
+static int amd64_syscall_p (const struct amd64_insn *insn, int *lengthp);
- pushq %rbp 0x55
- movq %rsp, %rbp 0x48 0x89 0xe5
+static int
+rex_prefix_p (gdb_byte pfx)
+{
+ return REX_PREFIX_P (pfx);
+}
- Any function that doesn't start with this sequence will be assumed
- to have no prologue and thus no valid frame pointer in %rbp. */
+/* Skip the legacy instruction prefixes in INSN.
+ We assume INSN is properly sentineled so we don't have to worry
+ about falling off the end of the buffer. */
-static CORE_ADDR
-amd64_analyze_prologue (CORE_ADDR pc, CORE_ADDR current_pc,
- struct amd64_frame_cache *cache)
+static gdb_byte *
+amd64_skip_prefixes (gdb_byte *insn)
{
- static gdb_byte proto[3] = { 0x48, 0x89, 0xe5 }; /* movq %rsp, %rbp */
- gdb_byte buf[3];
- gdb_byte op;
+ while (1)
+ {
+ switch (*insn)
+ {
+ case DATA_PREFIX_OPCODE:
+ case ADDR_PREFIX_OPCODE:
+ case CS_PREFIX_OPCODE:
+ case DS_PREFIX_OPCODE:
+ case ES_PREFIX_OPCODE:
+ case FS_PREFIX_OPCODE:
+ case GS_PREFIX_OPCODE:
+ case SS_PREFIX_OPCODE:
+ case LOCK_PREFIX_OPCODE:
+ case REPE_PREFIX_OPCODE:
+ case REPNE_PREFIX_OPCODE:
+ ++insn;
+ continue;
+ default:
+ break;
+ }
+ break;
+ }
- if (current_pc <= pc)
- return current_pc;
+ return insn;
+}
- op = read_memory_unsigned_integer (pc, 1);
+/* Return an integer register (other than RSP) that is unused as an input
+ operand in INSN.
+ In order to not require adding a rex prefix if the insn doesn't already
+ have one, the result is restricted to RAX ... RDI, sans RSP.
+ The register numbering of the result follows architecture ordering,
+ e.g. RDI = 7. */
- if (op == 0x55) /* pushq %rbp */
+static int
+amd64_get_unused_input_int_reg (const struct amd64_insn *details)
+{
+ /* 1 bit for each reg */
+ int used_regs_mask = 0;
+
+ /* There can be at most 3 int regs used as inputs in an insn, and we have
+ 7 to choose from (RAX ... RDI, sans RSP).
+ This allows us to take a conservative approach and keep things simple.
+ E.g. by avoiding RAX, we don't have to specifically watch for opcodes
+ that implicitly specify RAX. */
+
+ /* Avoid RAX. */
+ used_regs_mask |= 1 << EAX_REG_NUM;
+ /* Similarly avoid RDX, an implicit operand in divides. */
+ used_regs_mask |= 1 << EDX_REG_NUM;
+ /* Avoid RSP. */
+ used_regs_mask |= 1 << ESP_REG_NUM;
+
+ /* If the opcode is one byte long and there's no ModRM byte,
+ assume the opcode specifies a register. */
+ if (details->opcode_len == 1 && details->modrm_offset == -1)
+ used_regs_mask |= 1 << (details->raw_insn[details->opcode_offset] & 7);
+
+ /* Mark used regs in the modrm/sib bytes. */
+ if (details->modrm_offset != -1)
{
- /* Take into account that we've executed the `pushq %rbp' that
- starts this instruction sequence. */
- cache->saved_regs[AMD64_RBP_REGNUM] = 0;
- cache->sp_offset += 8;
-
- /* If that's all, return now. */
- if (current_pc <= pc + 1)
- return current_pc;
+ int modrm = details->raw_insn[details->modrm_offset];
+ int mod = MODRM_MOD_FIELD (modrm);
+ int reg = MODRM_REG_FIELD (modrm);
+ int rm = MODRM_RM_FIELD (modrm);
+ int have_sib = mod != 3 && rm == 4;
- /* Check for `movq %rsp, %rbp'. */
- read_memory (pc + 1, buf, 3);
- if (memcmp (buf, proto, 3) != 0)
- return pc + 1;
+ /* Assume the reg field of the modrm byte specifies a register. */
+ used_regs_mask |= 1 << reg;
- /* OK, we actually have a frame. */
- cache->frameless_p = 0;
- return pc + 4;
+ if (have_sib)
+ {
+ int base = SIB_BASE_FIELD (details->raw_insn[details->modrm_offset + 1]);
+ int idx = SIB_INDEX_FIELD (details->raw_insn[details->modrm_offset + 1]);
+ used_regs_mask |= 1 << base;
+ used_regs_mask |= 1 << idx;
+ }
+ else
+ {
+ used_regs_mask |= 1 << rm;
+ }
}
- return pc;
-}
-
-/* Return PC of first real instruction. */
+ gdb_assert (used_regs_mask < 256);
+ gdb_assert (used_regs_mask != 255);
-static CORE_ADDR
-amd64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR start_pc)
-{
- struct amd64_frame_cache cache;
- CORE_ADDR pc;
+ /* Finally, find a free reg. */
+ {
+ int i;
- amd64_init_frame_cache (&cache);
- pc = amd64_analyze_prologue (start_pc, 0xffffffffffffffffLL, &cache);
- if (cache.frameless_p)
- return start_pc;
+ for (i = 0; i < 8; ++i)
+ {
+ if (! (used_regs_mask & (1 << i)))
+ return i;
+ }
- return pc;
+ /* We shouldn't get here. */
+ internal_error (__FILE__, __LINE__, _("unable to find free reg"));
+ }
}
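+
+/* A worked example: for "addq %rsi, (%rdi)" (48 01 37), ModRM byte
+   0x37 has reg = %rsi and r/m = %rdi, so the used-register mask
+   covers %rax, %rdx, %rsp, %rsi and %rdi, and the first free
+   register found by the loop above is %rcx (architectural number
+   1).  */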
-\f
-/* Normal frames. */
+/* Extract the details of INSN that we need. */
-static struct amd64_frame_cache *
-amd64_frame_cache (struct frame_info *next_frame, void **this_cache)
+static void
+amd64_get_insn_details (gdb_byte *insn, struct amd64_insn *details)
{
- struct amd64_frame_cache *cache;
- gdb_byte buf[8];
- int i;
+ gdb_byte *start = insn;
+ int need_modrm;
- if (*this_cache)
- return *this_cache;
+ details->raw_insn = insn;
- cache = amd64_alloc_frame_cache ();
- *this_cache = cache;
+ details->opcode_len = -1;
+ details->rex_offset = -1;
+ details->opcode_offset = -1;
+ details->modrm_offset = -1;
- cache->pc = frame_func_unwind (next_frame, NORMAL_FRAME);
- if (cache->pc != 0)
- amd64_analyze_prologue (cache->pc, frame_pc_unwind (next_frame), cache);
+ /* Skip legacy instruction prefixes. */
+ insn = amd64_skip_prefixes (insn);
- if (cache->frameless_p)
+ /* Skip REX instruction prefix. */
+ if (rex_prefix_p (*insn))
{
- /* We didn't find a valid frame. If we're at the start of a
- function, or somewhere half-way its prologue, the function's
- frame probably hasn't been fully setup yet. Try to
- reconstruct the base address for the stack frame by looking
- at the stack pointer. For truly "frameless" functions this
- might work too. */
+ details->rex_offset = insn - start;
+ ++insn;
+ }
- frame_unwind_register (next_frame, AMD64_RSP_REGNUM, buf);
- cache->base = extract_unsigned_integer (buf, 8) + cache->sp_offset;
+ details->opcode_offset = insn - start;
+
+ if (*insn == TWO_BYTE_OPCODE_ESCAPE)
+ {
+ /* Two or three-byte opcode. */
+ ++insn;
+ need_modrm = twobyte_has_modrm[*insn];
+
+ /* Check for three-byte opcode. */
+ switch (*insn)
+ {
+ case 0x24:
+ case 0x25:
+ case 0x38:
+ case 0x3a:
+ case 0x7a:
+ case 0x7b:
+ ++insn;
+ details->opcode_len = 3;
+ break;
+ default:
+ details->opcode_len = 2;
+ break;
+ }
}
else
{
- frame_unwind_register (next_frame, AMD64_RBP_REGNUM, buf);
- cache->base = extract_unsigned_integer (buf, 8);
+ /* One-byte opcode. */
+ need_modrm = onebyte_has_modrm[*insn];
+ details->opcode_len = 1;
}
- /* Now that we have the base address for the stack frame we can
- calculate the value of %rsp in the calling frame. */
- cache->saved_sp = cache->base + 16;
+ if (need_modrm)
+ {
+ ++insn;
+ details->modrm_offset = insn - start;
+ }
+}
- /* For normal frames, %rip is stored at 8(%rbp). If we don't have a
- frame we find it at the same offset from the reconstructed base
- address. */
- cache->saved_regs[AMD64_RIP_REGNUM] = 8;
+/* Update %rip-relative addressing in INSN.
- /* Adjust all the saved registers such that they contain addresses
- instead of offsets. */
- for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
- if (cache->saved_regs[i] != -1)
- cache->saved_regs[i] += cache->base;
+ %rip-relative addressing only uses a 32-bit displacement.
+ 32 bits is not enough to be guaranteed to cover the distance between where
+ the real instruction is and where its copy is.
+ Convert the insn to use base+disp addressing.
+ We set base = pc + insn_length so we can leave disp unchanged. */
- return cache;
+static void
+fixup_riprel (struct gdbarch *gdbarch, struct displaced_step_closure *dsc,
+ CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
+{
+ enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
+ const struct amd64_insn *insn_details = &dsc->insn_details;
+ int modrm_offset = insn_details->modrm_offset;
+ gdb_byte *insn = insn_details->raw_insn + modrm_offset;
+ CORE_ADDR rip_base;
+ int32_t disp;
+ int insn_length;
+ int arch_tmp_regno, tmp_regno;
+ ULONGEST orig_value;
+
+ /* %rip+disp32 addressing mode, displacement follows ModRM byte. */
+ ++insn;
+
+ /* Compute the rip-relative address. */
+ disp = extract_signed_integer (insn, sizeof (int32_t), byte_order);
+ insn_length = gdb_buffered_insn_length (gdbarch, dsc->insn_buf,
+ dsc->max_len, from);
+ rip_base = from + insn_length;
+
+ /* We need a register to hold the address.
+ Pick one not used in the insn.
+ NOTE: arch_tmp_regno uses architecture ordering, e.g. RDI = 7. */
+ arch_tmp_regno = amd64_get_unused_input_int_reg (insn_details);
+ tmp_regno = amd64_arch_reg_to_regnum (arch_tmp_regno);
+
+ /* REX.B should be unset as we were using rip-relative addressing,
+ but ensure it's unset anyway; tmp_regno is not r8-r15. */
+ if (insn_details->rex_offset != -1)
+ dsc->insn_buf[insn_details->rex_offset] &= ~REX_B;
+
+ regcache_cooked_read_unsigned (regs, tmp_regno, &orig_value);
+ dsc->tmp_regno = tmp_regno;
+ dsc->tmp_save = orig_value;
+ dsc->tmp_used = 1;
+
+ /* Convert the ModRM field to be base+disp. */
+ dsc->insn_buf[modrm_offset] &= ~0xc7;
+ dsc->insn_buf[modrm_offset] |= 0x80 + arch_tmp_regno;
+
+ regcache_cooked_write_unsigned (regs, tmp_regno, rip_base);
+
+ if (debug_displaced)
+ fprintf_unfiltered (gdb_stdlog, "displaced: %%rip-relative addressing used.\n"
+ "displaced: using temp reg %d, old value %s, new value %s\n",
+ dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save),
+ paddress (gdbarch, rip_base));
}
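+
+/* To make the rewrite above concrete: "movq 0x10(%rip), %rax"
+   (48 8b 05 10 00 00 00), with %rcx picked as the temporary, becomes
+   "movq 0x10(%rcx), %rax" (48 8b 81 10 00 00 00); the ModRM byte
+   goes from mod=00/rm=101 (rip+disp32) to mod=10/rm=001
+   (rcx+disp32), and %rcx is loaded with FROM plus the 7-byte insn
+   length, so the unchanged displacement still reaches the original
+   target.  */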
static void
-amd64_frame_this_id (struct frame_info *next_frame, void **this_cache,
- struct frame_id *this_id)
+fixup_displaced_copy (struct gdbarch *gdbarch,
+ struct displaced_step_closure *dsc,
+ CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
+{
+ const struct amd64_insn *details = &dsc->insn_details;
+
+ if (details->modrm_offset != -1)
+ {
+ gdb_byte modrm = details->raw_insn[details->modrm_offset];
+
+ if ((modrm & 0xc7) == 0x05)
+ {
+ /* The insn uses rip-relative addressing.
+ Deal with it. */
+ fixup_riprel (gdbarch, dsc, from, to, regs);
+ }
+ }
+}
+
+struct displaced_step_closure *
+amd64_displaced_step_copy_insn (struct gdbarch *gdbarch,
+ CORE_ADDR from, CORE_ADDR to,
+ struct regcache *regs)
+{
+ int len = gdbarch_max_insn_length (gdbarch);
+ /* Extra space for sentinels so fixup_{riprel,displaced_copy} don't have to
+ continually watch for running off the end of the buffer. */
+ int fixup_sentinel_space = len;
+ struct displaced_step_closure *dsc =
+ xmalloc (sizeof (*dsc) + len + fixup_sentinel_space);
+ gdb_byte *buf = &dsc->insn_buf[0];
+ struct amd64_insn *details = &dsc->insn_details;
+
+ dsc->tmp_used = 0;
+ dsc->max_len = len + fixup_sentinel_space;
+
+ read_memory (from, buf, len);
+
+ /* Set up the sentinel space so we don't have to worry about running
+ off the end of the buffer. An excessive number of leading prefixes
+ could otherwise cause this. */
+ memset (buf + len, 0, fixup_sentinel_space);
+
+ amd64_get_insn_details (buf, details);
+
+ /* GDB may get control back after the insn after the syscall.
+ Presumably this is a kernel bug.
+ If this is a syscall, make sure there's a nop afterwards. */
+ {
+ int syscall_length;
+
+ if (amd64_syscall_p (details, &syscall_length))
+ buf[details->opcode_offset + syscall_length] = NOP_OPCODE;
+ }
+
+ /* Modify the insn to cope with the address where it will be executed from.
+ In particular, handle any rip-relative addressing. */
+ fixup_displaced_copy (gdbarch, dsc, from, to, regs);
+
+ write_memory (to, buf, len);
+
+ if (debug_displaced)
+ {
+ fprintf_unfiltered (gdb_stdlog, "displaced: copy %s->%s: ",
+ paddress (gdbarch, from), paddress (gdbarch, to));
+ displaced_step_dump_bytes (gdb_stdlog, buf, len);
+ }
+
+ return dsc;
+}
+
+static int
+amd64_absolute_jmp_p (const struct amd64_insn *details)
+{
+ const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
+
+ if (insn[0] == 0xff)
+ {
+ /* jump near, absolute indirect (/4) */
+ if ((insn[1] & 0x38) == 0x20)
+ return 1;
+
+ /* jump far, absolute indirect (/5) */
+ if ((insn[1] & 0x38) == 0x28)
+ return 1;
+ }
+
+ return 0;
+}
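+
+/* The 0x38 masks above extract the ModRM reg field, which for opcode
+   0xff is an opcode extension ("/digit" in Intel notation).  E.g.
+   "jmpq *%rax" is ff e0, and 0xe0 & 0x38 == 0x20 (/4); by contrast
+   "callq *%rax" is ff d0, where 0xd0 & 0x38 == 0x10 (/2), so it is
+   not matched here.  */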
+
+/* Return non-zero if the instruction DETAILS is a jump, zero otherwise. */
+
+static int
+amd64_jmp_p (const struct amd64_insn *details)
+{
+ const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
+
+ /* jump short, relative. */
+ if (insn[0] == 0xeb)
+ return 1;
+
+ /* jump near, relative. */
+ if (insn[0] == 0xe9)
+ return 1;
+
+ return amd64_absolute_jmp_p (details);
+}
+
+static int
+amd64_absolute_call_p (const struct amd64_insn *details)
+{
+ const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
+
+ if (insn[0] == 0xff)
+ {
+ /* Call near, absolute indirect (/2) */
+ if ((insn[1] & 0x38) == 0x10)
+ return 1;
+
+ /* Call far, absolute indirect (/3) */
+ if ((insn[1] & 0x38) == 0x18)
+ return 1;
+ }
+
+ return 0;
+}
+
+static int
+amd64_ret_p (const struct amd64_insn *details)
+{
+ /* NOTE: gcc can emit "repz ; ret". */
+ const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
+
+ switch (insn[0])
+ {
+ case 0xc2: /* ret near, pop N bytes */
+ case 0xc3: /* ret near */
+ case 0xca: /* ret far, pop N bytes */
+ case 0xcb: /* ret far */
+ case 0xcf: /* iret */
+ return 1;
+
+ default:
+ return 0;
+ }
+}
+
+static int
+amd64_call_p (const struct amd64_insn *details)
+{
+ const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
+
+ if (amd64_absolute_call_p (details))
+ return 1;
+
+ /* call near, relative */
+ if (insn[0] == 0xe8)
+ return 1;
+
+ return 0;
+}
+
+/* Return non-zero if INSN is a system call, and set *LENGTHP to its
+ length in bytes. Otherwise, return zero. */
+
+static int
+amd64_syscall_p (const struct amd64_insn *details, int *lengthp)
+{
+ const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
+
+ if (insn[0] == 0x0f && insn[1] == 0x05)
+ {
+ *lengthp = 2;
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Classify the instruction at ADDR using PRED.
+ Throw an error if the memory can't be read. */
+
+static int
+amd64_classify_insn_at (struct gdbarch *gdbarch, CORE_ADDR addr,
+ int (*pred) (const struct amd64_insn *))
+{
+ struct amd64_insn details;
+ gdb_byte *buf;
+ int len, classification;
+
+ len = gdbarch_max_insn_length (gdbarch);
+ buf = alloca (len);
+
+ read_code (addr, buf, len);
+ amd64_get_insn_details (buf, &details);
+
+ classification = pred (&details);
+
+ return classification;
+}
+
+/* The gdbarch insn_is_call method. */
+
+static int
+amd64_insn_is_call (struct gdbarch *gdbarch, CORE_ADDR addr)
+{
+ return amd64_classify_insn_at (gdbarch, addr, amd64_call_p);
+}
+
+/* The gdbarch insn_is_ret method. */
+
+static int
+amd64_insn_is_ret (struct gdbarch *gdbarch, CORE_ADDR addr)
+{
+ return amd64_classify_insn_at (gdbarch, addr, amd64_ret_p);
+}
+
+/* The gdbarch insn_is_jump method. */
+
+static int
+amd64_insn_is_jump (struct gdbarch *gdbarch, CORE_ADDR addr)
+{
+ return amd64_classify_insn_at (gdbarch, addr, amd64_jmp_p);
+}
+
+/* Fix up the state of registers and memory after having single-stepped
+ a displaced instruction. */
+
+void
+amd64_displaced_step_fixup (struct gdbarch *gdbarch,
+ struct displaced_step_closure *dsc,
+ CORE_ADDR from, CORE_ADDR to,
+ struct regcache *regs)
+{
+ enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
+ /* The offset we applied to the instruction's address. */
+ ULONGEST insn_offset = to - from;
+ gdb_byte *insn = dsc->insn_buf;
+ const struct amd64_insn *insn_details = &dsc->insn_details;
+
+ if (debug_displaced)
+ fprintf_unfiltered (gdb_stdlog,
+ "displaced: fixup (%s, %s), "
+ "insn = 0x%02x 0x%02x ...\n",
+ paddress (gdbarch, from), paddress (gdbarch, to),
+ insn[0], insn[1]);
+
+ /* If we used a tmp reg, restore it. */
+
+ if (dsc->tmp_used)
+ {
+ if (debug_displaced)
+ fprintf_unfiltered (gdb_stdlog, "displaced: restoring reg %d to %s\n",
+ dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save));
+ regcache_cooked_write_unsigned (regs, dsc->tmp_regno, dsc->tmp_save);
+ }
+
+ /* The list of issues to contend with here is taken from
+ resume_execution in arch/x86/kernel/kprobes.c, Linux 2.6.28.
+ Yay for Free Software! */
+
+ /* Relocate the %rip back to the program's instruction stream,
+ if necessary. */
+
+ /* Except in the case of absolute or indirect jump or call
+ instructions, or a return instruction, the new rip is relative to
+ the displaced instruction; make it relative to the original insn.
+ Well, signal handler returns don't need relocation either, but we use the
+ value of %rip to recognize those; see below. */
+ if (! amd64_absolute_jmp_p (insn_details)
+ && ! amd64_absolute_call_p (insn_details)
+ && ! amd64_ret_p (insn_details))
+ {
+ ULONGEST orig_rip;
+ int insn_len;
+
+ regcache_cooked_read_unsigned (regs, AMD64_RIP_REGNUM, &orig_rip);
+
+ /* A signal trampoline system call changes the %rip, resuming
+ execution of the main program after the signal handler has
+ returned. That makes them like 'return' instructions; we
+ shouldn't relocate %rip.
+
+ But most system calls don't, and we do need to relocate %rip.
+
+ Our heuristic for distinguishing these cases: if stepping
+ over the system call instruction left control directly after
+ the instruction, then we relocate --- control almost certainly
+ doesn't belong in the displaced copy. Otherwise, we assume
+ the instruction has put control where it belongs, and leave
+ it unrelocated. Goodness help us if there are PC-relative
+ system calls. */
+ if (amd64_syscall_p (insn_details, &insn_len)
+ && orig_rip != to + insn_len
+ /* GDB can get control back after the insn after the syscall.
+ Presumably this is a kernel bug.
+ Fixup ensures it's a nop; we add one to the length for it. */
+ && orig_rip != to + insn_len + 1)
+ {
+ if (debug_displaced)
+ fprintf_unfiltered (gdb_stdlog,
+ "displaced: syscall changed %%rip; "
+ "not relocating\n");
+ }
+ else
+ {
+ ULONGEST rip = orig_rip - insn_offset;
+
+ /* If we just stepped over a breakpoint insn, we don't back up
+ the pc on purpose; this is to match behaviour without
+ stepping. */
+
+ regcache_cooked_write_unsigned (regs, AMD64_RIP_REGNUM, rip);
+
+ if (debug_displaced)
+ fprintf_unfiltered (gdb_stdlog,
+ "displaced: "
+ "relocated %%rip from %s to %s\n",
+ paddress (gdbarch, orig_rip),
+ paddress (gdbarch, rip));
+ }
+ }
+
+ /* If the instruction was PUSHFL, then the TF bit will be set in the
+ pushed value, and should be cleared. We'll leave this for later,
+ since GDB already messes up the TF flag when stepping over a
+ pushfl. */
+
+ /* If the instruction was a call, the return address now atop the
+ stack is the address following the copied instruction. We need
+ to make it the address following the original instruction. */
+ if (amd64_call_p (insn_details))
+ {
+ ULONGEST rsp;
+ ULONGEST retaddr;
+ const ULONGEST retaddr_len = 8;
+
+ regcache_cooked_read_unsigned (regs, AMD64_RSP_REGNUM, &rsp);
+ retaddr = read_memory_unsigned_integer (rsp, retaddr_len, byte_order);
+ retaddr = (retaddr - insn_offset) & 0xffffffffUL;
+ write_memory_unsigned_integer (rsp, retaddr_len, byte_order, retaddr);
+
+ if (debug_displaced)
+ fprintf_unfiltered (gdb_stdlog,
+ "displaced: relocated return addr at %s "
+ "to %s\n",
+ paddress (gdbarch, rsp),
+ paddress (gdbarch, retaddr));
+ }
+}
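+
+/* A sketch of the common case above: an insn copied from
+   FROM = 0x400100 to TO = 0x600100 leaves %rip somewhere just past
+   TO after the single-step; subtracting insn_offset (0x200000) moves
+   it back into the program's real instruction stream.  For a call,
+   the return address pushed on the stack is rewritten by the same
+   amount.  */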
+
+/* If the instruction INSN uses RIP-relative addressing, return the
+ offset into the raw INSN where the displacement to be adjusted is
+ found. Returns 0 if the instruction doesn't use RIP-relative
+ addressing. */
+
+static int
+rip_relative_offset (struct amd64_insn *insn)
+{
+ if (insn->modrm_offset != -1)
+ {
+ gdb_byte modrm = insn->raw_insn[insn->modrm_offset];
+
+ if ((modrm & 0xc7) == 0x05)
+ {
+ /* The displacement is found right after the ModRM byte. */
+ return insn->modrm_offset + 1;
+ }
+ }
+
+ return 0;
+}
+
+static void
+append_insns (CORE_ADDR *to, ULONGEST len, const gdb_byte *buf)
+{
+ target_write_memory (*to, buf, len);
+ *to += len;
+}
+
+static void
+amd64_relocate_instruction (struct gdbarch *gdbarch,
+ CORE_ADDR *to, CORE_ADDR oldloc)
+{
+ enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
+ int len = gdbarch_max_insn_length (gdbarch);
+ /* Extra space for sentinels. */
+ int fixup_sentinel_space = len;
+ gdb_byte *buf = xmalloc (len + fixup_sentinel_space);
+ struct amd64_insn insn_details;
+ int offset = 0;
+ LONGEST rel32, newrel;
+ gdb_byte *insn;
+ int insn_length;
+
+ read_memory (oldloc, buf, len);
+
+ /* Set up the sentinel space so we don't have to worry about running
+ off the end of the buffer. An excessive number of leading prefixes
+ could otherwise cause this. */
+ memset (buf + len, 0, fixup_sentinel_space);
+
+ insn = buf;
+ amd64_get_insn_details (insn, &insn_details);
+
+ insn_length = gdb_buffered_insn_length (gdbarch, insn, len, oldloc);
+
+ /* Skip legacy instruction prefixes. */
+ insn = amd64_skip_prefixes (insn);
+
+ /* Adjust calls with 32-bit relative addresses as push/jump, with
+ the address pushed being the location where the original call in
+ the user program would return to. */
+ if (insn[0] == 0xe8)
+ {
+ gdb_byte push_buf[16];
+ unsigned int ret_addr;
+
+ /* Where "ret" in the original code will return to. */
+ ret_addr = oldloc + insn_length;
+ push_buf[0] = 0x68; /* pushq $... */
+ store_unsigned_integer (&push_buf[1], 4, byte_order, ret_addr);
+ /* Push the push. */
+ append_insns (to, 5, push_buf);
+
+ /* Convert the relative call to a relative jump. */
+ insn[0] = 0xe9;
+
+ /* Adjust the destination offset. */
+ rel32 = extract_signed_integer (insn + 1, 4, byte_order);
+ newrel = (oldloc - *to) + rel32;
+ store_signed_integer (insn + 1, 4, byte_order, newrel);
+
+ if (debug_displaced)
+ fprintf_unfiltered (gdb_stdlog,
+ "Adjusted insn rel32=%s at %s to"
+ " rel32=%s at %s\n",
+ hex_string (rel32), paddress (gdbarch, oldloc),
+ hex_string (newrel), paddress (gdbarch, *to));
+
+ /* Write the adjusted jump into its displaced location. */
+ append_insns (to, 5, insn);
+ return;
+ }
+
+ offset = rip_relative_offset (&insn_details);
+ if (!offset)
+ {
+ /* Adjust jumps with 32-bit relative addresses. Calls are
+ already handled above. */
+ if (insn[0] == 0xe9)
+ offset = 1;
+ /* Adjust conditional jumps. */
+ else if (insn[0] == 0x0f && (insn[1] & 0xf0) == 0x80)
+ offset = 2;
+ }
+
+ if (offset)
+ {
+ rel32 = extract_signed_integer (insn + offset, 4, byte_order);
+ newrel = (oldloc - *to) + rel32;
+ store_signed_integer (insn + offset, 4, byte_order, newrel);
+ if (debug_displaced)
+ fprintf_unfiltered (gdb_stdlog,
+ "Adjusted insn rel32=%s at %s to"
+ " rel32=%s at %s\n",
+ hex_string (rel32), paddress (gdbarch, oldloc),
+ hex_string (newrel), paddress (gdbarch, *to));
+ }
+
+ /* Write the adjusted instruction into its displaced location. */
+ append_insns (to, insn_length, buf);
+}
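+
+/* Illustration of the call conversion above: a "callq" at OLDLOC
+   0x400100, encoded e8 fb 00 00 00 (target 0x400200), relocated to
+   *TO = 0x600000 is emitted as "pushq $0x400105" (the original
+   return address) followed by an e9 jump whose rel32 is
+   (0x400100 - 0x600005) + 0xfb; executed at 0x600005, that jump
+   again lands on 0x400200.  */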
+
+\f
+/* The maximum number of saved registers. This should include %rip. */
+#define AMD64_NUM_SAVED_REGS AMD64_NUM_GREGS
+
+struct amd64_frame_cache
+{
+ /* Base address. */
+ CORE_ADDR base;
+ int base_p;
+ CORE_ADDR sp_offset;
+ CORE_ADDR pc;
+
+ /* Saved registers. */
+ CORE_ADDR saved_regs[AMD64_NUM_SAVED_REGS];
+ CORE_ADDR saved_sp;
+ int saved_sp_reg;
+
+ /* Do we have a frame? */
+ int frameless_p;
+};
+
+/* Initialize a frame cache. */
+
+static void
+amd64_init_frame_cache (struct amd64_frame_cache *cache)
+{
+ int i;
+
+ /* Base address. */
+ cache->base = 0;
+ cache->base_p = 0;
+ cache->sp_offset = -8;
+ cache->pc = 0;
+
+ /* Saved registers. We initialize these to -1 since zero is a valid
+ offset (that's where %rbp is supposed to be stored).
+ The values start out as being offsets, and are later converted to
+ addresses (at which point -1 is interpreted as an address, still meaning
+ "invalid"). */
+ for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
+ cache->saved_regs[i] = -1;
+ cache->saved_sp = 0;
+ cache->saved_sp_reg = -1;
+
+ /* Frameless until proven otherwise. */
+ cache->frameless_p = 1;
+}
+
+/* Allocate and initialize a frame cache. */
+
+static struct amd64_frame_cache *
+amd64_alloc_frame_cache (void)
+{
+ struct amd64_frame_cache *cache;
+
+ cache = FRAME_OBSTACK_ZALLOC (struct amd64_frame_cache);
+ amd64_init_frame_cache (cache);
+ return cache;
+}
+
+/* GCC 4.4 and later can put code in the prologue to realign the
+ stack pointer. Check whether PC points to such code, and update
+ CACHE accordingly. Return the first instruction after the code
+ sequence or CURRENT_PC, whichever is smaller. If we don't
+ recognize the code, return PC. */
+
+static CORE_ADDR
+amd64_analyze_stack_align (CORE_ADDR pc, CORE_ADDR current_pc,
+ struct amd64_frame_cache *cache)
+{
+ /* There are 2 code sequences to re-align stack before the frame
+ gets set up:
+
+ 1. Use a caller-saved register:
+
+ leaq 8(%rsp), %reg
+ andq $-XXX, %rsp
+ pushq -8(%reg)
+
+ 2. Use a callee-saved register:
+
+ pushq %reg
+ leaq 16(%rsp), %reg
+ andq $-XXX, %rsp
+ pushq -8(%reg)
+
+ "andq $-XXX, %rsp" can be either 4 bytes or 7 bytes:
+
+ 0x48 0x83 0xe4 0xf0 andq $-16, %rsp
+ 0x48 0x81 0xe4 0x00 0xff 0xff 0xff andq $-256, %rsp
+ */
+
+ gdb_byte buf[18];
+ int reg, r;
+ int offset, offset_and;
+
+ if (target_read_code (pc, buf, sizeof buf))
+ return pc;
+
+ /* Check caller-saved register. The first instruction has
+ to be "leaq 8(%rsp), %reg". */
+ if ((buf[0] & 0xfb) == 0x48
+ && buf[1] == 0x8d
+ && buf[3] == 0x24
+ && buf[4] == 0x8)
+ {
+ /* MOD must be binary 01 and R/M must be binary 100. */
+ if ((buf[2] & 0xc7) != 0x44)
+ return pc;
+
+ /* REG has register number. */
+ reg = (buf[2] >> 3) & 7;
+
+ /* Check the REX.R bit. */
+ if (buf[0] == 0x4c)
+ reg += 8;
+
+ offset = 5;
+ }
+ else
+ {
+ /* Check callee-saved register. The first instruction
+ has to be "pushq %reg". */
+ reg = 0;
+ if ((buf[0] & 0xf8) == 0x50)
+ offset = 0;
+ else if ((buf[0] & 0xf6) == 0x40
+ && (buf[1] & 0xf8) == 0x50)
+ {
+ /* Check the REX.B bit. */
+ if ((buf[0] & 1) != 0)
+ reg = 8;
+
+ offset = 1;
+ }
+ else
+ return pc;
+
+ /* Get register. */
+ reg += buf[offset] & 0x7;
+
+ offset++;
+
+ /* The next instruction has to be "leaq 16(%rsp), %reg". */
+ if ((buf[offset] & 0xfb) != 0x48
+ || buf[offset + 1] != 0x8d
+ || buf[offset + 3] != 0x24
+ || buf[offset + 4] != 0x10)
+ return pc;
+
+ /* MOD must be binary 01 and R/M must be binary 100. */
+ if ((buf[offset + 2] & 0xc7) != 0x44)
+ return pc;
+
+ /* REG has register number. */
+ r = (buf[offset + 2] >> 3) & 7;
+
+ /* Check the REX.R bit. */
+ if (buf[offset] == 0x4c)
+ r += 8;
+
+ /* Registers in pushq and leaq have to be the same. */
+ if (reg != r)
+ return pc;
+
+ offset += 5;
+ }
+
+ /* Register can't be %rsp or %rbp. */
+ if (reg == 4 || reg == 5)
+ return pc;
+
+ /* The next instruction has to be "andq $-XXX, %rsp". */
+ if (buf[offset] != 0x48
+ || buf[offset + 2] != 0xe4
+ || (buf[offset + 1] != 0x81 && buf[offset + 1] != 0x83))
+ return pc;
+
+ offset_and = offset;
+ offset += buf[offset + 1] == 0x81 ? 7 : 4;
+
+ /* The next instruction has to be "pushq -8(%reg)". */
+ r = 0;
+ if (buf[offset] == 0xff)
+ offset++;
+ else if ((buf[offset] & 0xf6) == 0x40
+ && buf[offset + 1] == 0xff)
+ {
+ /* Check the REX.B bit. */
+ if ((buf[offset] & 0x1) != 0)
+ r = 8;
+ offset += 2;
+ }
+ else
+ return pc;
+
+ /* 8-bit -8 is 0xf8. REG must be binary 110 and MOD must be binary
+ 01. */
+ if (buf[offset + 1] != 0xf8
+ || (buf[offset] & 0xf8) != 0x70)
+ return pc;
+
+ /* R/M has register. */
+ r += buf[offset] & 7;
+
+ /* Registers in leaq and pushq have to be the same. */
+ if (reg != r)
+ return pc;
+
+ if (current_pc > pc + offset_and)
+ cache->saved_sp_reg = amd64_arch_reg_to_regnum (reg);
+
+ return min (pc + offset + 2, current_pc);
+}
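+
+/* For reference, a concrete instance of sequence 1 above, as GCC
+   might emit it (here assuming %r10 as the scratch register):
+
+      4c 8d 54 24 08          leaq   0x8(%rsp), %r10
+      48 83 e4 f0             andq   $-16, %rsp
+      41 ff 72 f8             pushq  -0x8(%r10)
+
+   This matches the caller-saved path above with reg = r = 10.  */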
+
+/* Similar to amd64_analyze_stack_align for x32. */
+
+static CORE_ADDR
+amd64_x32_analyze_stack_align (CORE_ADDR pc, CORE_ADDR current_pc,
+ struct amd64_frame_cache *cache)
+{
+ /* There are 2 code sequences to re-align stack before the frame
+ gets set up:
+
+ 1. Use a caller-saved register:
+
+ leaq 8(%rsp), %reg
+ andq $-XXX, %rsp
+ pushq -8(%reg)
+
+ or
+
+ [addr32] leal 8(%rsp), %reg
+ andl $-XXX, %esp
+ [addr32] pushq -8(%reg)
+
+ 2. Use a callee-saved register:
+
+ pushq %reg
+ leaq 16(%rsp), %reg
+ andq $-XXX, %rsp
+ pushq -8(%reg)
+
+ or
+
+ pushq %reg
+ [addr32] leal 16(%rsp), %reg
+ andl $-XXX, %esp
+ [addr32] pushq -8(%reg)
+
+ "andq $-XXX, %rsp" can be either 4 bytes or 7 bytes:
+
+ 0x48 0x83 0xe4 0xf0 andq $-16, %rsp
+ 0x48 0x81 0xe4 0x00 0xff 0xff 0xff andq $-256, %rsp
+
+ "andl $-XXX, %esp" can be either 3 bytes or 6 bytes:
+
+ 0x83 0xe4 0xf0 andl $-16, %esp
+ 0x81 0xe4 0x00 0xff 0xff 0xff andl $-256, %esp
+ */
+
+ gdb_byte buf[19];
+ int reg, r;
+ int offset, offset_and;
+
+ if (target_read_memory (pc, buf, sizeof buf))
+ return pc;
+
+ /* Skip optional addr32 prefix. */
+ offset = buf[0] == 0x67 ? 1 : 0;
+
+ /* Check caller-saved register. The first instruction has
+ to be "leaq 8(%rsp), %reg" or "leal 8(%rsp), %reg". */
+ if (((buf[offset] & 0xfb) == 0x48 || (buf[offset] & 0xfb) == 0x40)
+ && buf[offset + 1] == 0x8d
+ && buf[offset + 3] == 0x24
+ && buf[offset + 4] == 0x8)
+ {
+ /* MOD must be binary 01 and R/M must be binary 100. */
+ if ((buf[offset + 2] & 0xc7) != 0x44)
+ return pc;
+
+ /* REG has register number. */
+ reg = (buf[offset + 2] >> 3) & 7;
+
+ /* Check the REX.R bit. */
+ if ((buf[offset] & 0x4) != 0)
+ reg += 8;
+
+ offset += 5;
+ }
+ else
+ {
+ /* Check callee-saved register. The first instruction
+ has to be "pushq %reg". */
+ reg = 0;
+ if ((buf[offset] & 0xf6) == 0x40
+ && (buf[offset + 1] & 0xf8) == 0x50)
+ {
+ /* Check the REX.B bit. */
+ if ((buf[offset] & 1) != 0)
+ reg = 8;
+
+ offset += 1;
+ }
+ else if ((buf[offset] & 0xf8) != 0x50)
+ return pc;
+
+ /* Get register. */
+ reg += buf[offset] & 0x7;
+
+ offset++;
+
+ /* Skip optional addr32 prefix. */
+ if (buf[offset] == 0x67)
+ offset++;
+
+ /* The next instruction has to be "leaq 16(%rsp), %reg" or
+ "leal 16(%rsp), %reg". */
+ if (((buf[offset] & 0xfb) != 0x48 && (buf[offset] & 0xfb) != 0x40)
+ || buf[offset + 1] != 0x8d
+ || buf[offset + 3] != 0x24
+ || buf[offset + 4] != 0x10)
+ return pc;
+
+ /* MOD must be binary 01 and R/M must be binary 100. */
+ if ((buf[offset + 2] & 0xc7) != 0x44)
+ return pc;
+
+ /* The REG field holds the register number. */
+ r = (buf[offset + 2] >> 3) & 7;
+
+ /* Check the REX.R bit. */
+ if ((buf[offset] & 0x4) != 0)
+ r += 8;
+
+ /* Registers in pushq and leaq have to be the same. */
+ if (reg != r)
+ return pc;
+
+ offset += 5;
+ }
+
+ /* The register can't be %rsp or %rbp. */
+ if (reg == 4 || reg == 5)
+ return pc;
+
+ /* The next instruction may be "andq $-XXX, %rsp" or
+ "andl $-XXX, %esp". */
+ if (buf[offset] != 0x48)
+ offset--;
+
+ if (buf[offset + 2] != 0xe4
+ || (buf[offset + 1] != 0x81 && buf[offset + 1] != 0x83))
+ return pc;
+
+ offset_and = offset;
+ offset += buf[offset + 1] == 0x81 ? 7 : 4;
+
+ /* Skip optional addr32 prefix. */
+ if (buf[offset] == 0x67)
+ offset++;
+
+ /* The next instruction has to be "pushq -8(%reg)". */
+ r = 0;
+ if (buf[offset] == 0xff)
+ offset++;
+ else if ((buf[offset] & 0xf6) == 0x40
+ && buf[offset + 1] == 0xff)
+ {
+ /* Check the REX.B bit. */
+ if ((buf[offset] & 0x1) != 0)
+ r = 8;
+ offset += 2;
+ }
+ else
+ return pc;
+
+ /* 8-bit -8 is 0xf8. REG must be binary 110 and MOD must be binary
+ 01. */
+ if (buf[offset + 1] != 0xf8
+ || (buf[offset] & 0xf8) != 0x70)
+ return pc;
+
+ /* The R/M field holds the register number. */
+ r += buf[offset] & 7;
+
+ /* Registers in leaq and pushq have to be the same. */
+ if (reg != r)
+ return pc;
+
+ if (current_pc > pc + offset_and)
+ cache->saved_sp_reg = amd64_arch_reg_to_regnum (reg);
+
+ return min (pc + offset + 2, current_pc);
+}
+
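+/* A concrete x32 realignment sequence accepted by the checks above,
+ for illustration only (bytes hand-assembled from the opcode
+ tables, not taken from a real binary):
+
+ 67 44 8d 54 24 08 [addr32] leal 8(%rsp), %r10d
+ 83 e4 f0 andl $-16, %esp
+ 67 41 ff 72 f8 [addr32] pushq -8(%r10)
+
+ Once CURRENT_PC has reached the andl, the analysis records %r10
+ in cache->saved_sp_reg and returns min (PC + 14, CURRENT_PC). */
+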
+/* Do a limited analysis of the prologue at PC and update CACHE
+ accordingly. Bail out early if CURRENT_PC is reached. Return the
+ address where the analysis stopped.
+
+ We will handle only functions beginning with:
+
+ pushq %rbp 0x55
+ movq %rsp, %rbp 0x48 0x89 0xe5 (or 0x48 0x8b 0xec)
+
+ or (for the X32 ABI):
+
+ pushq %rbp 0x55
+ movl %esp, %ebp 0x89 0xe5 (or 0x8b 0xec)
+
+ Any function that doesn't start with one of these sequences will be
+ assumed to have no prologue and thus no valid frame pointer in
+ %rbp. */
+
+static CORE_ADDR
+amd64_analyze_prologue (struct gdbarch *gdbarch,
+ CORE_ADDR pc, CORE_ADDR current_pc,
+ struct amd64_frame_cache *cache)
+{
+ enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
+ /* There are two variations of movq %rsp, %rbp. */
+ static const gdb_byte mov_rsp_rbp_1[3] = { 0x48, 0x89, 0xe5 };
+ static const gdb_byte mov_rsp_rbp_2[3] = { 0x48, 0x8b, 0xec };
+ /* Ditto for movl %esp, %ebp. */
+ static const gdb_byte mov_esp_ebp_1[2] = { 0x89, 0xe5 };
+ static const gdb_byte mov_esp_ebp_2[2] = { 0x8b, 0xec };
+
+ gdb_byte buf[3];
+ gdb_byte op;
+
+ if (current_pc <= pc)
+ return current_pc;
+
+ if (gdbarch_ptr_bit (gdbarch) == 32)
+ pc = amd64_x32_analyze_stack_align (pc, current_pc, cache);
+ else
+ pc = amd64_analyze_stack_align (pc, current_pc, cache);
+
+ op = read_code_unsigned_integer (pc, 1, byte_order);
+
+ if (op == 0x55) /* pushq %rbp */
+ {
+ /* Take into account that we've executed the `pushq %rbp' that
+ starts this instruction sequence. */
+ cache->saved_regs[AMD64_RBP_REGNUM] = 0;
+ cache->sp_offset += 8;
+
+ /* If that's all, return now. */
+ if (current_pc <= pc + 1)
+ return current_pc;
+
+ read_code (pc + 1, buf, 3);
+
+ /* Check for `movq %rsp, %rbp'. */
+ if (memcmp (buf, mov_rsp_rbp_1, 3) == 0
+ || memcmp (buf, mov_rsp_rbp_2, 3) == 0)
+ {
+ /* OK, we actually have a frame. */
+ cache->frameless_p = 0;
+ return pc + 4;
+ }
+
+ /* For X32, also check for `movl %esp, %ebp'. */
+ if (gdbarch_ptr_bit (gdbarch) == 32)
+ {
+ if (memcmp (buf, mov_esp_ebp_1, 2) == 0
+ || memcmp (buf, mov_esp_ebp_2, 2) == 0)
+ {
+ /* OK, we actually have a frame. */
+ cache->frameless_p = 0;
+ return pc + 3;
+ }
+ }
+
+ return pc + 1;
+ }
+
+ return pc;
+}
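+
+/* For illustration (not part of the original sources): a function
+ beginning with the canonical sequence
+
+ 0x55 pushq %rbp
+ 0x48 0x89 0xe5 movq %rsp, %rbp
+
+ analyzed with CURRENT_PC beyond both instructions has %rbp
+ recorded as saved at offset 0, sp_offset advanced by 8 for the
+ push, frameless_p cleared, and PC + 4 returned. */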
+
+/* Work around false termination of prologue - GCC PR debug/48827.
+
+ START_PC is the first instruction of a function; PC is the most
+ advanced address within its prologue that has already been
+ determined. Return PC unchanged if there is nothing to do.
+
+ 84 c0 test %al,%al
+ 74 23 je after
+ <-- here the line table advances by 0 lines - the false prologue end marker.
+ 0f 29 85 70 ff ff ff movaps %xmm0,-0x90(%rbp)
+ 0f 29 4d 80 movaps %xmm1,-0x80(%rbp)
+ 0f 29 55 90 movaps %xmm2,-0x70(%rbp)
+ 0f 29 5d a0 movaps %xmm3,-0x60(%rbp)
+ 0f 29 65 b0 movaps %xmm4,-0x50(%rbp)
+ 0f 29 6d c0 movaps %xmm5,-0x40(%rbp)
+ 0f 29 75 d0 movaps %xmm6,-0x30(%rbp)
+ 0f 29 7d e0 movaps %xmm7,-0x20(%rbp)
+ after: */
+
+static CORE_ADDR
+amd64_skip_xmm_prologue (CORE_ADDR pc, CORE_ADDR start_pc)
+{
+ struct symtab_and_line start_pc_sal, next_sal;
+ gdb_byte buf[4 + 8 * 7];
+ int offset, xmmreg;
+
+ if (pc == start_pc)
+ return pc;
+
+ start_pc_sal = find_pc_sect_line (start_pc, NULL, 0);
+ if (start_pc_sal.symtab == NULL
+ || producer_is_gcc_ge_4 (COMPUNIT_PRODUCER
+ (SYMTAB_COMPUNIT (start_pc_sal.symtab))) < 6
+ || start_pc_sal.pc != start_pc || pc >= start_pc_sal.end)
+ return pc;
+
+ next_sal = find_pc_sect_line (start_pc_sal.end, NULL, 0);
+ if (next_sal.line != start_pc_sal.line)
+ return pc;
+
+ /* START_PC can be from overlaid memory; overlays are ignored here. */
+ if (target_read_code (next_sal.pc - 4, buf, sizeof (buf)) != 0)
+ return pc;
+
+ /* test %al,%al */
+ if (buf[0] != 0x84 || buf[1] != 0xc0)
+ return pc;
+ /* je AFTER */
+ if (buf[2] != 0x74)
+ return pc;
+
+ offset = 4;
+ for (xmmreg = 0; xmmreg < 8; xmmreg++)
+ {
+ /* 0x0f 0x29 0b??xxx101 movaps %xmm<xxx>,-0x??(%rbp),
+ where xxx encodes the XMM register number. */
+ if (buf[offset] != 0x0f || buf[offset + 1] != 0x29
+ || (buf[offset + 2] & 0x3f) != (xmmreg << 3 | 0x5))
+ return pc;
+
+ /* 0b01?????? */
+ if ((buf[offset + 2] & 0xc0) == 0x40)
+ {
+ /* 8-bit displacement. */
+ offset += 4;
+ }
+ /* 0b10?????? */
+ else if ((buf[offset + 2] & 0xc0) == 0x80)
+ {
+ /* 32-bit displacement. */
+ offset += 7;
+ }
+ else
+ return pc;
+ }
+
+ /* je AFTER */
+ if (offset - 4 != buf[3])
+ return pc;
+
+ return next_sal.end;
+}
+
+/* Return PC of first real instruction. */
+
+static CORE_ADDR
+amd64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR start_pc)
+{
+ struct amd64_frame_cache cache;
+ CORE_ADDR pc;
+ CORE_ADDR func_addr;
+
+ if (find_pc_partial_function (start_pc, NULL, &func_addr, NULL))
+ {
+ CORE_ADDR post_prologue_pc
+ = skip_prologue_using_sal (gdbarch, func_addr);
+ struct compunit_symtab *cust = find_pc_compunit_symtab (func_addr);
+
+ /* Clang always emits a line note before the prologue and another
+ one after. We trust clang to emit usable line notes. */
+ if (post_prologue_pc
+ && (cust != NULL
+ && COMPUNIT_PRODUCER (cust) != NULL
+ && strncmp (COMPUNIT_PRODUCER (cust), "clang ",
+ sizeof ("clang ") - 1) == 0))
+ return max (start_pc, post_prologue_pc);
+ }
+
+ amd64_init_frame_cache (&cache);
+ pc = amd64_analyze_prologue (gdbarch, start_pc, 0xffffffffffffffffLL,
+ &cache);
+ if (cache.frameless_p)
+ return start_pc;
+
+ return amd64_skip_xmm_prologue (pc, start_pc);
+}
+\f
+
+/* Normal frames. */
+
+static void
+amd64_frame_cache_1 (struct frame_info *this_frame,
+ struct amd64_frame_cache *cache)
+{
+ struct gdbarch *gdbarch = get_frame_arch (this_frame);
+ enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
+ gdb_byte buf[8];
+ int i;
+
+ cache->pc = get_frame_func (this_frame);
+ if (cache->pc != 0)
+ amd64_analyze_prologue (gdbarch, cache->pc, get_frame_pc (this_frame),
+ cache);
+
+ if (cache->frameless_p)
+ {
+ /* We didn't find a valid frame. If we're at the start of a
+ function, or somewhere halfway through its prologue, the
+ function's frame probably hasn't been fully set up yet. Try
+ to reconstruct the base address for the stack frame by
+ looking at the stack pointer. For truly "frameless"
+ functions this might work too. */
+
+ if (cache->saved_sp_reg != -1)
+ {
+ /* Stack pointer has been saved. */
+ get_frame_register (this_frame, cache->saved_sp_reg, buf);
+ cache->saved_sp = extract_unsigned_integer (buf, 8, byte_order);
+
+ /* We're in the middle of aligning the stack. */
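+ /* In the realignment sequences recognized by the stack-align
+ analysis, %reg holds the entry %rsp plus 8, so saved_sp - 8 is
+ the stack pointer at function entry, i.e. the address of the
+ return address. The -16 mask assumes the usual "andq $-16,
+ %rsp" (the compiler may align more strictly), and the final -8
+ accounts for the return address re-pushed by "pushq -8(%reg)". */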
+ cache->base = ((cache->saved_sp - 8) & 0xfffffffffffffff0LL) - 8;
+ cache->saved_regs[AMD64_RIP_REGNUM] = cache->saved_sp - 8;
+
+ /* This will be added back below. */
+ cache->saved_regs[AMD64_RIP_REGNUM] -= cache->base;
+ }
+ else
+ {
+ get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
+ cache->base = extract_unsigned_integer (buf, 8, byte_order)
+ + cache->sp_offset;
+ }
+ }
+ else
+ {
+ get_frame_register (this_frame, AMD64_RBP_REGNUM, buf);
+ cache->base = extract_unsigned_integer (buf, 8, byte_order);
+ }
+
+ /* Now that we have the base address for the stack frame we can
+ calculate the value of %rsp in the calling frame. */
+ cache->saved_sp = cache->base + 16;
+
+ /* For normal frames, %rip is stored at 8(%rbp). If we don't have a
+ frame we find it at the same offset from the reconstructed base
+ address. If we're in the middle of aligning the stack, %rip is
+ handled differently (see above). */
+ if (!cache->frameless_p || cache->saved_sp_reg == -1)
+ cache->saved_regs[AMD64_RIP_REGNUM] = 8;
+
+ /* Adjust all the saved registers such that they contain addresses
+ instead of offsets. */
+ for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
+ if (cache->saved_regs[i] != -1)
+ cache->saved_regs[i] += cache->base;
+
+ cache->base_p = 1;
+}
+
+static struct amd64_frame_cache *
+amd64_frame_cache (struct frame_info *this_frame, void **this_cache)
+{
+ volatile struct gdb_exception ex;
+ struct amd64_frame_cache *cache;
+
+ if (*this_cache)
+ return *this_cache;
+
+ cache = amd64_alloc_frame_cache ();
+ *this_cache = cache;
+
+ TRY_CATCH (ex, RETURN_MASK_ERROR)
+ {
+ amd64_frame_cache_1 (this_frame, cache);
+ }
+ if (ex.reason < 0 && ex.error != NOT_AVAILABLE_ERROR)
+ throw_exception (ex);
+
+ return cache;
+}
+
+static enum unwind_stop_reason
+amd64_frame_unwind_stop_reason (struct frame_info *this_frame,
+ void **this_cache)
{
struct amd64_frame_cache *cache =
- amd64_frame_cache (next_frame, this_cache);
+ amd64_frame_cache (this_frame, this_cache);
+
+ if (!cache->base_p)
+ return UNWIND_UNAVAILABLE;
/* This marks the outermost frame. */
if (cache->base == 0)
- return;
+ return UNWIND_OUTERMOST;
- (*this_id) = frame_id_build (cache->base + 16, cache->pc);
+ return UNWIND_NO_REASON;
}
static void
-amd64_frame_prev_register (struct frame_info *next_frame, void **this_cache,
- int regnum, int *optimizedp,
- enum lval_type *lvalp, CORE_ADDR *addrp,
- int *realnump, gdb_byte *valuep)
+amd64_frame_this_id (struct frame_info *this_frame, void **this_cache,
+ struct frame_id *this_id)
{
- struct gdbarch *gdbarch = get_frame_arch (next_frame);
struct amd64_frame_cache *cache =
- amd64_frame_cache (next_frame, this_cache);
-
- gdb_assert (regnum >= 0);
+ amd64_frame_cache (this_frame, this_cache);
- if (regnum == gdbarch_sp_regnum (gdbarch) && cache->saved_sp)
+ if (!cache->base_p)
+ (*this_id) = frame_id_build_unavailable_stack (cache->pc);
+ else if (cache->base == 0)
{
- *optimizedp = 0;
- *lvalp = not_lval;
- *addrp = 0;
- *realnump = -1;
- if (valuep)
- {
- /* Store the value. */
- store_unsigned_integer (valuep, 8, cache->saved_sp);
- }
+ /* This marks the outermost frame. */
return;
}
+ else
+ (*this_id) = frame_id_build (cache->base + 16, cache->pc);
+}
+
+static struct value *
+amd64_frame_prev_register (struct frame_info *this_frame, void **this_cache,
+ int regnum)
+{
+ struct gdbarch *gdbarch = get_frame_arch (this_frame);
+ struct amd64_frame_cache *cache =
+ amd64_frame_cache (this_frame, this_cache);
+
+ gdb_assert (regnum >= 0);
+
+ if (regnum == gdbarch_sp_regnum (gdbarch) && cache->saved_sp)
+ return frame_unwind_got_constant (this_frame, regnum, cache->saved_sp);
if (regnum < AMD64_NUM_SAVED_REGS && cache->saved_regs[regnum] != -1)
- {
- *optimizedp = 0;
- *lvalp = lval_memory;
- *addrp = cache->saved_regs[regnum];
- *realnump = -1;
- if (valuep)
- {
- /* Read the value in from memory. */
- read_memory (*addrp, valuep,
- register_size (gdbarch, regnum));
- }
- return;
- }
+ return frame_unwind_got_memory (this_frame, regnum,
+ cache->saved_regs[regnum]);
- *optimizedp = 0;
- *lvalp = lval_register;
- *addrp = 0;
- *realnump = regnum;
- if (valuep)
- frame_unwind_register (next_frame, (*realnump), valuep);
+ return frame_unwind_got_register (this_frame, regnum, regnum);
}
static const struct frame_unwind amd64_frame_unwind =
{
NORMAL_FRAME,
+ amd64_frame_unwind_stop_reason,
amd64_frame_this_id,
- amd64_frame_prev_register
+ amd64_frame_prev_register,
+ NULL,
+ default_frame_sniffer
};
+\f
+/* Generate a bytecode expression to get the value of the saved PC. */
-static const struct frame_unwind *
-amd64_frame_sniffer (struct frame_info *next_frame)
+static void
+amd64_gen_return_address (struct gdbarch *gdbarch,
+ struct agent_expr *ax, struct axs_value *value,
+ CORE_ADDR scope)
{
- return &amd64_frame_unwind;
+ /* The following sequence assumes the traditional use of the base
+ register. */
+ ax_reg (ax, AMD64_RBP_REGNUM);
+ ax_const_l (ax, 8);
+ ax_simple (ax, aop_add);
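+ /* The address left on the expression stack is %rbp + 8, the
+ return-address slot of the canonical "pushq %rbp; movq %rsp,
+ %rbp" frame; axs_lvalue_memory below makes the collector fetch
+ the saved PC from that address. */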
+ value->type = register_type (gdbarch, AMD64_RIP_REGNUM);
+ value->kind = axs_lvalue_memory;
}
\f
on both platforms. */
static struct amd64_frame_cache *
-amd64_sigtramp_frame_cache (struct frame_info *next_frame, void **this_cache)
+amd64_sigtramp_frame_cache (struct frame_info *this_frame, void **this_cache)
{
+ struct gdbarch *gdbarch = get_frame_arch (this_frame);
+ struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
+ enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
+ volatile struct gdb_exception ex;
struct amd64_frame_cache *cache;
- struct gdbarch_tdep *tdep = gdbarch_tdep (get_frame_arch (next_frame));
CORE_ADDR addr;
gdb_byte buf[8];
int i;
cache = amd64_alloc_frame_cache ();
- frame_unwind_register (next_frame, AMD64_RSP_REGNUM, buf);
- cache->base = extract_unsigned_integer (buf, 8) - 8;
+ TRY_CATCH (ex, RETURN_MASK_ERROR)
+ {
+ get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
+ cache->base = extract_unsigned_integer (buf, 8, byte_order) - 8;
+
+ addr = tdep->sigcontext_addr (this_frame);
+ gdb_assert (tdep->sc_reg_offset);
+ gdb_assert (tdep->sc_num_regs <= AMD64_NUM_SAVED_REGS);
+ for (i = 0; i < tdep->sc_num_regs; i++)
+ if (tdep->sc_reg_offset[i] != -1)
+ cache->saved_regs[i] = addr + tdep->sc_reg_offset[i];
- addr = tdep->sigcontext_addr (next_frame);
- gdb_assert (tdep->sc_reg_offset);
- gdb_assert (tdep->sc_num_regs <= AMD64_NUM_SAVED_REGS);
- for (i = 0; i < tdep->sc_num_regs; i++)
- if (tdep->sc_reg_offset[i] != -1)
- cache->saved_regs[i] = addr + tdep->sc_reg_offset[i];
+ cache->base_p = 1;
+ }
+ if (ex.reason < 0 && ex.error != NOT_AVAILABLE_ERROR)
+ throw_exception (ex);
*this_cache = cache;
return cache;
}
-static void
-amd64_sigtramp_frame_this_id (struct frame_info *next_frame,
- void **this_cache, struct frame_id *this_id)
+static enum unwind_stop_reason
+amd64_sigtramp_frame_unwind_stop_reason (struct frame_info *this_frame,
+ void **this_cache)
{
struct amd64_frame_cache *cache =
- amd64_sigtramp_frame_cache (next_frame, this_cache);
+ amd64_sigtramp_frame_cache (this_frame, this_cache);
- (*this_id) = frame_id_build (cache->base + 16, frame_pc_unwind (next_frame));
+ if (!cache->base_p)
+ return UNWIND_UNAVAILABLE;
+
+ return UNWIND_NO_REASON;
}
static void
-amd64_sigtramp_frame_prev_register (struct frame_info *next_frame,
- void **this_cache,
- int regnum, int *optimizedp,
- enum lval_type *lvalp, CORE_ADDR *addrp,
- int *realnump, gdb_byte *valuep)
+amd64_sigtramp_frame_this_id (struct frame_info *this_frame,
+ void **this_cache, struct frame_id *this_id)
{
- /* Make sure we've initialized the cache. */
- amd64_sigtramp_frame_cache (next_frame, this_cache);
+ struct amd64_frame_cache *cache =
+ amd64_sigtramp_frame_cache (this_frame, this_cache);
- amd64_frame_prev_register (next_frame, this_cache, regnum,
- optimizedp, lvalp, addrp, realnump, valuep);
+ if (!cache->base_p)
+ (*this_id) = frame_id_build_unavailable_stack (get_frame_pc (this_frame));
+ else if (cache->base == 0)
+ {
+ /* This marks the outermost frame. */
+ return;
+ }
+ else
+ (*this_id) = frame_id_build (cache->base + 16, get_frame_pc (this_frame));
}
-static const struct frame_unwind amd64_sigtramp_frame_unwind =
+static struct value *
+amd64_sigtramp_frame_prev_register (struct frame_info *this_frame,
+ void **this_cache, int regnum)
{
- SIGTRAMP_FRAME,
- amd64_sigtramp_frame_this_id,
- amd64_sigtramp_frame_prev_register
-};
+ /* Make sure we've initialized the cache. */
+ amd64_sigtramp_frame_cache (this_frame, this_cache);
+
+ return amd64_frame_prev_register (this_frame, this_cache, regnum);
+}
-static const struct frame_unwind *
-amd64_sigtramp_frame_sniffer (struct frame_info *next_frame)
+static int
+amd64_sigtramp_frame_sniffer (const struct frame_unwind *self,
+ struct frame_info *this_frame,
+ void **this_cache)
{
- struct gdbarch_tdep *tdep = gdbarch_tdep (get_frame_arch (next_frame));
+ struct gdbarch_tdep *tdep = gdbarch_tdep (get_frame_arch (this_frame));
/* We shouldn't even bother if we don't have a sigcontext_addr
handler. */
if (tdep->sigcontext_addr == NULL)
- return NULL;
+ return 0;
if (tdep->sigtramp_p != NULL)
{
- if (tdep->sigtramp_p (next_frame))
- return &amd64_sigtramp_frame_unwind;
+ if (tdep->sigtramp_p (this_frame))
+ return 1;
}
if (tdep->sigtramp_start != 0)
{
- CORE_ADDR pc = frame_pc_unwind (next_frame);
+ CORE_ADDR pc = get_frame_pc (this_frame);
gdb_assert (tdep->sigtramp_end != 0);
if (pc >= tdep->sigtramp_start && pc < tdep->sigtramp_end)
- return &amd64_sigtramp_frame_unwind;
+ return 1;
}
- return NULL;
+ return 0;
}
+
+static const struct frame_unwind amd64_sigtramp_frame_unwind =
+{
+ SIGTRAMP_FRAME,
+ amd64_sigtramp_frame_unwind_stop_reason,
+ amd64_sigtramp_frame_this_id,
+ amd64_sigtramp_frame_prev_register,
+ NULL,
+ amd64_sigtramp_frame_sniffer
+};
\f
static CORE_ADDR
-amd64_frame_base_address (struct frame_info *next_frame, void **this_cache)
+amd64_frame_base_address (struct frame_info *this_frame, void **this_cache)
{
struct amd64_frame_cache *cache =
- amd64_frame_cache (next_frame, this_cache);
+ amd64_frame_cache (this_frame, this_cache);
return cache->base;
}
amd64_frame_base_address
};
-static struct frame_id
-amd64_unwind_dummy_id (struct gdbarch *gdbarch, struct frame_info *next_frame)
+/* Normal frames, but in a function epilogue. */
+
+/* The epilogue is defined here as the 'ret' instruction, which will
+ follow any instruction such as 'leave' or 'popq %rbp' that destroys
+ the function's stack frame. */
+
+static int
+amd64_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
+{
+ gdb_byte insn;
+ struct compunit_symtab *cust;
+
+ cust = find_pc_compunit_symtab (pc);
+ if (cust != NULL && COMPUNIT_EPILOGUE_UNWIND_VALID (cust))
+ return 0;
+
+ if (target_read_memory (pc, &insn, 1))
+ return 0; /* Can't read memory at pc. */
+
+ if (insn != 0xc3) /* 'ret' instruction. */
+ return 0;
+
+ return 1;
+}
+
+static int
+amd64_epilogue_frame_sniffer (const struct frame_unwind *self,
+ struct frame_info *this_frame,
+ void **this_prologue_cache)
+{
+ if (frame_relative_level (this_frame) == 0)
+ return amd64_in_function_epilogue_p (get_frame_arch (this_frame),
+ get_frame_pc (this_frame));
+ else
+ return 0;
+}
+
+static struct amd64_frame_cache *
+amd64_epilogue_frame_cache (struct frame_info *this_frame, void **this_cache)
{
+ struct gdbarch *gdbarch = get_frame_arch (this_frame);
+ enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
+ volatile struct gdb_exception ex;
+ struct amd64_frame_cache *cache;
gdb_byte buf[8];
+
+ if (*this_cache)
+ return *this_cache;
+
+ cache = amd64_alloc_frame_cache ();
+ *this_cache = cache;
+
+ TRY_CATCH (ex, RETURN_MASK_ERROR)
+ {
+ /* Cache base will be %rsp plus cache->sp_offset (-8). */
+ get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
+ cache->base = extract_unsigned_integer (buf, 8,
+ byte_order) + cache->sp_offset;
+
+ /* Cache pc will be the frame's current PC. */
+ cache->pc = get_frame_pc (this_frame);
+
+ /* The saved %rsp will be at cache->base plus 16. */
+ cache->saved_sp = cache->base + 16;
+
+ /* The saved %rip will be at cache->base plus 8. */
+ cache->saved_regs[AMD64_RIP_REGNUM] = cache->base + 8;
+
+ cache->base_p = 1;
+ }
+ if (ex.reason < 0 && ex.error != NOT_AVAILABLE_ERROR)
+ throw_exception (ex);
+
+ return cache;
+}
+
+static enum unwind_stop_reason
+amd64_epilogue_frame_unwind_stop_reason (struct frame_info *this_frame,
+ void **this_cache)
+{
+ struct amd64_frame_cache *cache
+ = amd64_epilogue_frame_cache (this_frame, this_cache);
+
+ if (!cache->base_p)
+ return UNWIND_UNAVAILABLE;
+
+ return UNWIND_NO_REASON;
+}
+
+static void
+amd64_epilogue_frame_this_id (struct frame_info *this_frame,
+ void **this_cache,
+ struct frame_id *this_id)
+{
+ struct amd64_frame_cache *cache = amd64_epilogue_frame_cache (this_frame,
+ this_cache);
+
+ if (!cache->base_p)
+ (*this_id) = frame_id_build_unavailable_stack (cache->pc);
+ else
+ (*this_id) = frame_id_build (cache->base + 8, cache->pc);
+}
+
+static const struct frame_unwind amd64_epilogue_frame_unwind =
+{
+ NORMAL_FRAME,
+ amd64_epilogue_frame_unwind_stop_reason,
+ amd64_epilogue_frame_this_id,
+ amd64_frame_prev_register,
+ NULL,
+ amd64_epilogue_frame_sniffer
+};
+
+static struct frame_id
+amd64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
+{
CORE_ADDR fp;
- frame_unwind_register (next_frame, AMD64_RBP_REGNUM, buf);
- fp = extract_unsigned_integer (buf, 8);
+ fp = get_frame_register_unsigned (this_frame, AMD64_RBP_REGNUM);
- return frame_id_build (fp + 16, frame_pc_unwind (next_frame));
+ return frame_id_build (fp + 16, get_frame_pc (this_frame));
}
/* 16 byte align the SP per frame requirements. */
amd64_supply_fpregset (const struct regset *regset, struct regcache *regcache,
int regnum, const void *fpregs, size_t len)
{
- const struct gdbarch_tdep *tdep = gdbarch_tdep (regset->arch);
+ struct gdbarch *gdbarch = get_regcache_arch (regcache);
+ const struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
- gdb_assert (len == tdep->sizeof_fpregset);
+ gdb_assert (len >= tdep->sizeof_fpregset);
amd64_supply_fxsave (regcache, regnum, fpregs);
}
const struct regcache *regcache,
int regnum, void *fpregs, size_t len)
{
- const struct gdbarch_tdep *tdep = gdbarch_tdep (regset->arch);
+ struct gdbarch *gdbarch = get_regcache_arch (regcache);
+ const struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
- gdb_assert (len == tdep->sizeof_fpregset);
+ gdb_assert (len >= tdep->sizeof_fpregset);
amd64_collect_fxsave (regcache, regnum, fpregs);
}
-/* Return the appropriate register set for the core section identified
- by SECT_NAME and SECT_SIZE. */
+const struct regset amd64_fpregset =
+ {
+ NULL, amd64_supply_fpregset, amd64_collect_fpregset
+ };
+\f
+
+/* Figure out where the longjmp will land. Slurp the jmp_buf out of
+ %rdi. We expect its value to be a pointer to the jmp_buf structure
+ from which we extract the address that we will land at. This
+ address is copied into PC. This routine returns non-zero on
+ success. */
-static const struct regset *
-amd64_regset_from_core_section (struct gdbarch *gdbarch,
- const char *sect_name, size_t sect_size)
+static int
+amd64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
{
- struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
+ gdb_byte buf[8];
+ CORE_ADDR jb_addr;
+ struct gdbarch *gdbarch = get_frame_arch (frame);
+ int jb_pc_offset = gdbarch_tdep (gdbarch)->jb_pc_offset;
+ int len = TYPE_LENGTH (builtin_type (gdbarch)->builtin_func_ptr);
- if (strcmp (sect_name, ".reg2") == 0 && sect_size == tdep->sizeof_fpregset)
- {
- if (tdep->fpregset == NULL)
- tdep->fpregset = regset_alloc (gdbarch, amd64_supply_fpregset,
- amd64_collect_fpregset);
+ /* If JB_PC_OFFSET is -1, we have no way to find out where the
+ longjmp will land. */
+ if (jb_pc_offset == -1)
+ return 0;
- return tdep->fpregset;
- }
+ get_frame_register (frame, AMD64_RDI_REGNUM, buf);
+ jb_addr = extract_typed_address
+ (buf, builtin_type (gdbarch)->builtin_data_ptr);
+ if (target_read_memory (jb_addr + jb_pc_offset, buf, len))
+ return 0;
- return i386_regset_from_core_section (gdbarch, sect_name, sect_size);
+ *pc = extract_typed_address (buf, builtin_type (gdbarch)->builtin_func_ptr);
+
+ return 1;
}
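+
+/* Illustrative C reading of the lookup above, assuming (per the
+ System V AMD64 calling convention) that the jmp_buf pointer is
+ still live in %rdi:
+
+ *pc = *(CORE_ADDR *) (jb_addr + jb_pc_offset);
+
+ where jb_addr is the value read from %rdi. */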
-\f
+
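+/* Map from the hardware instruction-encoding register numbers (the
+ ModRM/REX order rax, rcx, rdx, rbx, rsp, rbp, rsi, rdi, r8-r15,
+ followed by rip, eflags and the segment registers) to GDB register
+ numbers, used by the process-record machinery via
+ tdep->record_regmap below. */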
+static const int amd64_record_regmap[] =
+{
+ AMD64_RAX_REGNUM, AMD64_RCX_REGNUM, AMD64_RDX_REGNUM, AMD64_RBX_REGNUM,
+ AMD64_RSP_REGNUM, AMD64_RBP_REGNUM, AMD64_RSI_REGNUM, AMD64_RDI_REGNUM,
+ AMD64_R8_REGNUM, AMD64_R9_REGNUM, AMD64_R10_REGNUM, AMD64_R11_REGNUM,
+ AMD64_R12_REGNUM, AMD64_R13_REGNUM, AMD64_R14_REGNUM, AMD64_R15_REGNUM,
+ AMD64_RIP_REGNUM, AMD64_EFLAGS_REGNUM, AMD64_CS_REGNUM, AMD64_SS_REGNUM,
+ AMD64_DS_REGNUM, AMD64_ES_REGNUM, AMD64_FS_REGNUM, AMD64_GS_REGNUM
+};
void
amd64_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
{
struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
+ const struct target_desc *tdesc = info.target_desc;
+ static const char *const stap_integer_prefixes[] = { "$", NULL };
+ static const char *const stap_register_prefixes[] = { "%", NULL };
+ static const char *const stap_register_indirection_prefixes[] = { "(",
+ NULL };
+ static const char *const stap_register_indirection_suffixes[] = { ")",
+ NULL };
/* AMD64 generally uses `fxsave' instead of `fsave' for saving its
floating-point registers. */
tdep->sizeof_fpregset = I387_SIZEOF_FXSAVE;
+ tdep->fpregset = &amd64_fpregset;
+
+ if (! tdesc_has_registers (tdesc))
+ tdesc = tdesc_amd64;
+ tdep->tdesc = tdesc;
+
+ tdep->num_core_regs = AMD64_NUM_GREGS + I387_NUM_REGS;
+ tdep->register_names = amd64_register_names;
+
+ if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.avx512") != NULL)
+ {
+ tdep->zmmh_register_names = amd64_zmmh_names;
+ tdep->k_register_names = amd64_k_names;
+ tdep->xmm_avx512_register_names = amd64_xmm_avx512_names;
+ tdep->ymm16h_register_names = amd64_ymmh_avx512_names;
+
+ tdep->num_zmm_regs = 32;
+ tdep->num_xmm_avx512_regs = 16;
+ tdep->num_ymm_avx512_regs = 16;
+
+ tdep->zmm0h_regnum = AMD64_ZMM0H_REGNUM;
+ tdep->k0_regnum = AMD64_K0_REGNUM;
+ tdep->xmm16_regnum = AMD64_XMM16_REGNUM;
+ tdep->ymm16h_regnum = AMD64_YMM16H_REGNUM;
+ }
+
+ if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.avx") != NULL)
+ {
+ tdep->ymmh_register_names = amd64_ymmh_names;
+ tdep->num_ymm_regs = 16;
+ tdep->ymm0h_regnum = AMD64_YMM0H_REGNUM;
+ }
+
+ if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.mpx") != NULL)
+ {
+ tdep->mpx_register_names = amd64_mpx_names;
+ tdep->bndcfgu_regnum = AMD64_BNDCFGU_REGNUM;
+ tdep->bnd0r_regnum = AMD64_BND0R_REGNUM;
+ }
+
+ tdep->num_byte_regs = 20;
+ tdep->num_word_regs = 16;
+ tdep->num_dword_regs = 16;
+ /* Avoid wiring in the MMX registers for now. */
+ tdep->num_mmx_regs = 0;
+
+ set_gdbarch_pseudo_register_read_value (gdbarch,
+ amd64_pseudo_register_read_value);
+ set_gdbarch_pseudo_register_write (gdbarch,
+ amd64_pseudo_register_write);
+
+ set_tdesc_pseudo_register_name (gdbarch, amd64_pseudo_register_name);
/* AMD64 has an FPU and 16 SSE registers. */
tdep->st0_regnum = AMD64_ST0_REGNUM;
set_gdbarch_long_double_bit (gdbarch, 128);
set_gdbarch_num_regs (gdbarch, AMD64_NUM_REGS);
- set_gdbarch_register_name (gdbarch, amd64_register_name);
- set_gdbarch_register_type (gdbarch, amd64_register_type);
/* Register numbers of various important registers. */
set_gdbarch_sp_regnum (gdbarch, AMD64_RSP_REGNUM); /* %rsp */
DWARF-1), but we provide the same mapping just in case. This
mapping is also used for stabs, which GCC does support. */
set_gdbarch_stab_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);
- set_gdbarch_dwarf_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);
set_gdbarch_dwarf2_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);
/* We don't override SDB_REG_RO_REGNUM, since COFF doesn't seem to
set_gdbarch_skip_prologue (gdbarch, amd64_skip_prologue);
- /* Avoid wiring in the MMX registers for now. */
- set_gdbarch_num_pseudo_regs (gdbarch, 0);
- tdep->mm0_regnum = -1;
+ tdep->record_regmap = amd64_record_regmap;
+
+ set_gdbarch_dummy_id (gdbarch, amd64_dummy_id);
- set_gdbarch_unwind_dummy_id (gdbarch, amd64_unwind_dummy_id);
+ /* Hook the function epilogue frame unwinder. This unwinder is
+ prepended to the unwinder list, so that it is tried first and
+ supersedes the prologue-based unwinders in function epilogues. */
+ frame_unwind_prepend_unwinder (gdbarch, &amd64_epilogue_frame_unwind);
- frame_unwind_append_sniffer (gdbarch, amd64_sigtramp_frame_sniffer);
- frame_unwind_append_sniffer (gdbarch, amd64_frame_sniffer);
+ /* Hook the prologue-based frame unwinders. */
+ frame_unwind_append_unwinder (gdbarch, &amd64_sigtramp_frame_unwind);
+ frame_unwind_append_unwinder (gdbarch, &amd64_frame_unwind);
frame_base_set_default (gdbarch, &amd64_frame_base);
- /* If we have a register mapping, enable the generic core file support. */
- if (tdep->gregset_reg_offset)
- set_gdbarch_regset_from_core_section (gdbarch,
- amd64_regset_from_core_section);
+ set_gdbarch_get_longjmp_target (gdbarch, amd64_get_longjmp_target);
+
+ set_gdbarch_relocate_instruction (gdbarch, amd64_relocate_instruction);
+
+ set_gdbarch_gen_return_address (gdbarch, amd64_gen_return_address);
+
+ /* SystemTap variables and functions. */
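+ /* For example, probe operands such as "$42" (integer literal),
+ "%rax" (register) and "-8(%rbp)" (register indirection) become
+ parseable with the prefixes and suffixes registered here; the
+ operands are illustrative, not from the original sources. */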
+ set_gdbarch_stap_integer_prefixes (gdbarch, stap_integer_prefixes);
+ set_gdbarch_stap_register_prefixes (gdbarch, stap_register_prefixes);
+ set_gdbarch_stap_register_indirection_prefixes (gdbarch,
+ stap_register_indirection_prefixes);
+ set_gdbarch_stap_register_indirection_suffixes (gdbarch,
+ stap_register_indirection_suffixes);
+ set_gdbarch_stap_is_single_operand (gdbarch,
+ i386_stap_is_single_operand);
+ set_gdbarch_stap_parse_special_token (gdbarch,
+ i386_stap_parse_special_token);
+ set_gdbarch_insn_is_call (gdbarch, amd64_insn_is_call);
+ set_gdbarch_insn_is_ret (gdbarch, amd64_insn_is_ret);
+ set_gdbarch_insn_is_jump (gdbarch, amd64_insn_is_jump);
+}
+\f
+
+static struct type *
+amd64_x32_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
+{
+ struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
+
+ switch (regnum - tdep->eax_regnum)
+ {
+ case AMD64_RBP_REGNUM: /* %ebp */
+ case AMD64_RSP_REGNUM: /* %esp */
+ return builtin_type (gdbarch)->builtin_data_ptr;
+ case AMD64_RIP_REGNUM: /* %eip */
+ return builtin_type (gdbarch)->builtin_func_ptr;
+ }
+
+ return i386_pseudo_register_type (gdbarch, regnum);
+}
+
+void
+amd64_x32_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
+{
+ struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
+ const struct target_desc *tdesc = info.target_desc;
+
+ amd64_init_abi (info, gdbarch);
+
+ if (! tdesc_has_registers (tdesc))
+ tdesc = tdesc_x32;
+ tdep->tdesc = tdesc;
+
+ tdep->num_dword_regs = 17;
+ set_tdesc_pseudo_register_type (gdbarch, amd64_x32_pseudo_register_type);
+
+ set_gdbarch_long_bit (gdbarch, 32);
+ set_gdbarch_ptr_bit (gdbarch, 32);
+}
+
+/* Provide a prototype to silence -Wmissing-prototypes. */
+void _initialize_amd64_tdep (void);
+
+void
+_initialize_amd64_tdep (void)
+{
+ initialize_tdesc_amd64 ();
+ initialize_tdesc_amd64_avx ();
+ initialize_tdesc_amd64_mpx ();
+ initialize_tdesc_amd64_avx512 ();
+
+ initialize_tdesc_x32 ();
+ initialize_tdesc_x32_avx ();
+ initialize_tdesc_x32_avx512 ();
}
\f
i387_supply_fxsave (regcache, regnum, fxsave);
- if (fxsave && gdbarch_ptr_bit (gdbarch) == 64)
+ if (fxsave
+ && gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
{
const gdb_byte *regs = fxsave;
}
}
+/* Similar to amd64_supply_fxsave, but use XSAVE extended state. */
+
+void
+amd64_supply_xsave (struct regcache *regcache, int regnum,
+ const void *xsave)
+{
+ struct gdbarch *gdbarch = get_regcache_arch (regcache);
+ struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
+
+ i387_supply_xsave (regcache, regnum, xsave);
+
+ if (xsave
+ && gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
+ {
+ const gdb_byte *regs = xsave;
+
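+ /* Offsets 12 and 20 in the legacy, FXSAVE-compatible region of
+ the XSAVE block hold the x87 instruction- and operand-pointer
+ segment selectors (FISEG and FOSEG). */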
+ if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
+ regcache_raw_supply (regcache, I387_FISEG_REGNUM (tdep),
+ regs + 12);
+ if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
+ regcache_raw_supply (regcache, I387_FOSEG_REGNUM (tdep),
+ regs + 20);
+ }
+}
+
/* Fill register REGNUM (if it is a floating-point or SSE register) in
*FXSAVE with the value from REGCACHE. If REGNUM is -1, do this for
all registers. This function doesn't touch any of the reserved
i387_collect_fxsave (regcache, regnum, fxsave);
- if (gdbarch_ptr_bit (gdbarch) == 64)
+ if (gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
{
if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
regcache_raw_collect (regcache, I387_FISEG_REGNUM (tdep), regs + 12);
regcache_raw_collect (regcache, I387_FOSEG_REGNUM (tdep), regs + 20);
}
}
+
+/* Similar to amd64_collect_fxsave, but use XSAVE extended state. */
+
+void
+amd64_collect_xsave (const struct regcache *regcache, int regnum,
+ void *xsave, int gcore)
+{
+ struct gdbarch *gdbarch = get_regcache_arch (regcache);
+ struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
+ gdb_byte *regs = xsave;
+
+ i387_collect_xsave (regcache, regnum, xsave, gcore);
+
+ if (gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
+ {
+ if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
+ regcache_raw_collect (regcache, I387_FISEG_REGNUM (tdep),
+ regs + 12);
+ if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
+ regcache_raw_collect (regcache, I387_FOSEG_REGNUM (tdep),
+ regs + 20);
+ }
+}