/* Target-dependent code for AMD64.
- Copyright (C) 2001-2020 Free Software Foundation, Inc.
+ Copyright (C) 2001-2021 Free Software Foundation, Inc.
Contributed by Jiri Smid, SuSE Labs.
#include "gdbsupport/byte-vector.h"
#include "osabi.h"
#include "x86-tdep.h"
+#include "amd64-ravenscar-thread.h"
/* Note that the AMD64 architecture was previously known as x86-64.
The latter is (forever) engraved into the canonical system name as
/* Register information. */
-static const char *amd64_register_names[] =
+static const char * const amd64_register_names[] =
{
"rax", "rbx", "rcx", "rdx", "rsi", "rdi", "rbp", "rsp",
"mxcsr",
};
-static const char *amd64_ymm_names[] =
+static const char * const amd64_ymm_names[] =
{
"ymm0", "ymm1", "ymm2", "ymm3",
"ymm4", "ymm5", "ymm6", "ymm7",
"ymm12", "ymm13", "ymm14", "ymm15"
};
-static const char *amd64_ymm_avx512_names[] =
+static const char * const amd64_ymm_avx512_names[] =
{
"ymm16", "ymm17", "ymm18", "ymm19",
"ymm20", "ymm21", "ymm22", "ymm23",
"ymm28", "ymm29", "ymm30", "ymm31"
};
-static const char *amd64_ymmh_names[] =
+static const char * const amd64_ymmh_names[] =
{
"ymm0h", "ymm1h", "ymm2h", "ymm3h",
"ymm4h", "ymm5h", "ymm6h", "ymm7h",
"ymm12h", "ymm13h", "ymm14h", "ymm15h"
};
-static const char *amd64_ymmh_avx512_names[] =
+static const char * const amd64_ymmh_avx512_names[] =
{
"ymm16h", "ymm17h", "ymm18h", "ymm19h",
"ymm20h", "ymm21h", "ymm22h", "ymm23h",
"ymm28h", "ymm29h", "ymm30h", "ymm31h"
};
-static const char *amd64_mpx_names[] =
+static const char * const amd64_mpx_names[] =
{
"bnd0raw", "bnd1raw", "bnd2raw", "bnd3raw", "bndcfgu", "bndstatus"
};
-static const char *amd64_k_names[] =
+static const char * const amd64_k_names[] =
{
"k0", "k1", "k2", "k3",
"k4", "k5", "k6", "k7"
};
-static const char *amd64_zmmh_names[] =
+static const char * const amd64_zmmh_names[] =
{
"zmm0h", "zmm1h", "zmm2h", "zmm3h",
"zmm4h", "zmm5h", "zmm6h", "zmm7h",
"zmm28h", "zmm29h", "zmm30h", "zmm31h"
};
-static const char *amd64_zmm_names[] =
+static const char * const amd64_zmm_names[] =
{
"zmm0", "zmm1", "zmm2", "zmm3",
"zmm4", "zmm5", "zmm6", "zmm7",
"zmm28", "zmm29", "zmm30", "zmm31"
};
-static const char *amd64_xmm_avx512_names[] = {
+static const char * const amd64_xmm_avx512_names[] = {
"xmm16", "xmm17", "xmm18", "xmm19",
"xmm20", "xmm21", "xmm22", "xmm23",
"xmm24", "xmm25", "xmm26", "xmm27",
"xmm28", "xmm29", "xmm30", "xmm31"
};
-static const char *amd64_pkeys_names[] = {
+static const char * const amd64_pkeys_names[] = {
"pkru"
};
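
The constification running through these tables is not just style: with plain "const char *" only the strings are immutable while the array of pointers itself remains writable, whereas the extra "const" lets the whole table be placed in read-only storage. A minimal standalone sketch of the difference (names invented for illustration):

    /* Strings are immutable, but the pointer array itself is still
       writable, so it cannot live in .rodata.  */
    static const char *writable_table[] = { "rax", "rbx" };

    /* Both the strings and the array of pointers are immutable.  */
    static const char * const readonly_table[] = { "rax", "rbx" };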
/* Register names for byte pseudo-registers. */
-static const char *amd64_byte_names[] =
+static const char * const amd64_byte_names[] =
{
"al", "bl", "cl", "dl", "sil", "dil", "bpl", "spl",
"r8l", "r9l", "r10l", "r11l", "r12l", "r13l", "r14l", "r15l",
/* Register names for word pseudo-registers. */
-static const char *amd64_word_names[] =
+static const char * const amd64_word_names[] =
{
"ax", "bx", "cx", "dx", "si", "di", "bp", "",
"r8w", "r9w", "r10w", "r11w", "r12w", "r13w", "r14w", "r15w"
/* Register names for dword pseudo-registers. */
-static const char *amd64_dword_names[] =
+static const char * const amd64_dword_names[] =
{
"eax", "ebx", "ecx", "edx", "esi", "edi", "ebp", "esp",
"r8d", "r9d", "r10d", "r11d", "r12d", "r13d", "r14d", "r15d",
{
for (int i = 0; i < type->num_fields (); i++)
{
- struct type *subtype = check_typedef (TYPE_FIELD_TYPE (type, i));
+ struct type *subtype = check_typedef (type->field (i).type ());
int bitpos = TYPE_FIELD_BITPOS (type, i);
int align = type_align(subtype);
enum amd64_reg_class theclass[2],
unsigned int bitoffset)
{
- struct type *subtype = check_typedef (TYPE_FIELD_TYPE (type, i));
+ struct type *subtype = check_typedef (type->field (i).type ());
int bitpos = bitoffset + TYPE_FIELD_BITPOS (type, i);
int pos = bitpos / 64;
enum amd64_reg_class subclass[2];
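
The other mechanical change in these hunks is the move from the old TYPE_FIELD_TYPE macro to the struct type method API; both forms fetch the same per-field type. A side-by-side sketch (variable name invented):

    /* Old macro-based accessor.  */
    struct type *ftype = check_typedef (TYPE_FIELD_TYPE (type, i));

    /* New method-based accessor, equivalent result.  */
    struct type *ftype = check_typedef (type->field (i).type ());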
amd64_classify_aggregate (struct type *type, enum amd64_reg_class theclass[2])
{
/* 1. If the size of an object is larger than two eightbytes, or it has
- unaligned fields, it has class memory. */
+ unaligned fields, it has class memory. */
if (TYPE_LENGTH (type) > 16 || amd64_has_unaligned_fields (type))
{
theclass[0] = theclass[1] = AMD64_MEMORY;
theclass[0] = theclass[1] = AMD64_NO_CLASS;
/* 3. Each field of an object is classified recursively so that
- always two fields are considered. The resulting class is
- calculated according to the classes of the fields in the
- eightbyte: */
+ always two fields are considered. The resulting class is
+ calculated according to the classes of the fields in the
+ eightbyte: */
if (type->code () == TYPE_CODE_ARRAY)
{
if (theclass[0] == AMD64_MEMORY)
{
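
As a concrete walk-through of the classification rules quoted above (illustrative only, not part of the patch): a 16-byte struct with a double followed by an int has no unaligned fields, so rule 1 does not apply, and rule 3 classifies each eightbyte from the fields it contains.

    struct example { double d; int i; };   /* 16 bytes, naturally aligned.  */
    /* eightbyte 0 (the double)        -> AMD64_SSE
       eightbyte 1 (the int + padding) -> AMD64_INTEGER  */

Anything larger than two eightbytes, or containing a misaligned field, gets class AMD64_MEMORY and is passed or returned via the stack.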
/* As indicated by the comment above, the ABI guarantees that we
- can always find the return value just after the function has
- returned. */
+ can always find the return value just after the function has
+ returned. */
if (readbuf)
{
}
/* 8. If the class is COMPLEX_X87, the real part of the value is
- returned in %st0 and the imaginary part in %st1. */
+ returned in %st0 and the imaginary part in %st1. */
if (theclass[0] == AMD64_COMPLEX_X87)
{
if (readbuf)
case AMD64_SSE:
/* 4. If the class is SSE, the next available SSE register
- of the sequence %xmm0, %xmm1 is used. */
+ of the sequence %xmm0, %xmm1 is used. */
regnum = sse_regnum[sse_reg++];
break;
case AMD64_X87:
/* 6. If the class is X87, the value is returned on the X87
- stack in %st0 as 80-bit x87 number. */
+ stack in %st0 as 80-bit x87 number. */
regnum = AMD64_ST0_REGNUM;
if (writebuf)
i387_return_value (gdbarch, regcache);
case AMD64_X87UP:
/* 7. If the class is X87UP, the value is returned together
- with the previous X87 value in %st0. */
+ with the previous X87 value in %st0. */
gdb_assert (i > 0 && theclass[0] == AMD64_X87);
regnum = AMD64_ST0_REGNUM;
offset = 8;
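
The numbered rules in this function follow the psABI return-value algorithm. A few concrete cases (illustrative declarations, not from the file):

    double f1 (void);                 /* SSE         -> %xmm0                    */
    long double f2 (void);            /* X87 + X87UP -> 80-bit value in %st0     */
    _Complex long double f3 (void);   /* COMPLEX_X87 -> %st0 (real), %st1 (imag) */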
amd64_classify (type, theclass);
/* Calculate the number of integer and SSE registers needed for
- this argument. */
+ this argument. */
for (j = 0; j < 2; j++)
{
if (theclass[j] == AMD64_INTEGER)
}
/* Check whether enough registers are available, and if the
- argument should be passed in registers at all. */
+ argument should be passed in registers at all. */
if (integer_reg + needed_integer_regs > ARRAY_SIZE (integer_regnum)
|| sse_reg + needed_sse_regs > ARRAY_SIZE (sse_regnum)
|| (needed_integer_regs == 0 && needed_sse_regs == 0))
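
A worked example of the availability check above (invented, for illustration): amd64_push_arguments has six integer argument registers and eight SSE argument registers to hand out, so an argument whose two eightbytes classified as INTEGER and SSE needs one register from each pool, and if either pool cannot cover the whole argument it is passed on the stack instead.

    struct mixed { long l; double d; };   /* eightbytes: INTEGER, SSE.  */
    /* needed_integer_regs = 1, needed_sse_regs = 1.  If all six integer
       registers (or all eight SSE registers) are already taken, the whole
       struct goes to memory even though its class is not MEMORY.  */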
gdb_byte *raw_insn;
};
-struct amd64_displaced_step_closure : public displaced_step_closure
+struct amd64_displaced_step_copy_insn_closure
+ : public displaced_step_copy_insn_closure
{
- amd64_displaced_step_closure (int insn_buf_len)
+ amd64_displaced_step_copy_insn_closure (int insn_buf_len)
: insn_buf (insn_buf_len, 0)
{}
We set base = pc + insn_length so we can leave disp unchanged. */
static void
-fixup_riprel (struct gdbarch *gdbarch, amd64_displaced_step_closure *dsc,
+fixup_riprel (struct gdbarch *gdbarch,
+ amd64_displaced_step_copy_insn_closure *dsc,
CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
{
const struct amd64_insn *insn_details = &dsc->insn_details;
regcache_cooked_write_unsigned (regs, tmp_regno, rip_base);
- if (debug_displaced)
- fprintf_unfiltered (gdb_stdlog, "displaced: %%rip-relative addressing used.\n"
- "displaced: using temp reg %d, old value %s, new value %s\n",
- dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save),
- paddress (gdbarch, rip_base));
+ displaced_debug_printf ("%%rip-relative addressing used.");
+ displaced_debug_printf ("using temp reg %d, old value %s, new value %s",
+ dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save),
+ paddress (gdbarch, rip_base));
}
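
To make the comment at the top of fixup_riprel concrete (addresses and register choice invented for illustration): a %rip-relative operand in the copied instruction would compute the wrong address when executed out of line, so the copy is rewritten to address through a scratch register that holds the value %rip would have had at the original site, and the 32-bit displacement is left untouched.

    /* Original at FROM:  mov 0x200(%rip), %rax   ; effective base = FROM + insn_len
       Copy at TO:        mov 0x200(%rcx), %rax   ; %rcx loaded with FROM + insn_len
       The old %rcx value is saved in tmp_save and restored by the fixup step.  */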
static void
fixup_displaced_copy (struct gdbarch *gdbarch,
- amd64_displaced_step_closure *dsc,
+ amd64_displaced_step_copy_insn_closure *dsc,
CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
{
const struct amd64_insn *details = &dsc->insn_details;
}
}
-displaced_step_closure_up
+displaced_step_copy_insn_closure_up
amd64_displaced_step_copy_insn (struct gdbarch *gdbarch,
CORE_ADDR from, CORE_ADDR to,
struct regcache *regs)
/* Extra space for sentinels so fixup_{riprel,displaced_copy} don't have to
continually watch for running off the end of the buffer. */
int fixup_sentinel_space = len;
- std::unique_ptr<amd64_displaced_step_closure> dsc
- (new amd64_displaced_step_closure (len + fixup_sentinel_space));
+ std::unique_ptr<amd64_displaced_step_copy_insn_closure> dsc
+ (new amd64_displaced_step_copy_insn_closure (len + fixup_sentinel_space));
gdb_byte *buf = &dsc->insn_buf[0];
struct amd64_insn *details = &dsc->insn_details;
write_memory (to, buf, len);
- if (debug_displaced)
- {
- fprintf_unfiltered (gdb_stdlog, "displaced: copy %s->%s: ",
- paddress (gdbarch, from), paddress (gdbarch, to));
- displaced_step_dump_bytes (gdb_stdlog, buf, len);
- }
+ displaced_debug_printf ("copy %s->%s: %s",
+ paddress (gdbarch, from), paddress (gdbarch, to),
+ displaced_step_dump_bytes (buf, len).c_str ());
/* This is a workaround for a problem with g++ 4.8. */
- return displaced_step_closure_up (dsc.release ());
+ return displaced_step_copy_insn_closure_up (dsc.release ());
}
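
All of the logging rewritten in this patch goes through displaced_debug_printf, so it is gated by the existing displaced-stepping debug switch instead of an explicit debug_displaced test at each call site. To see these messages while stepping over a breakpointed instruction:

    (gdb) set debug displaced on
    (gdb) step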
static int
void
amd64_displaced_step_fixup (struct gdbarch *gdbarch,
- struct displaced_step_closure *dsc_,
+ struct displaced_step_copy_insn_closure *dsc_,
CORE_ADDR from, CORE_ADDR to,
struct regcache *regs)
{
- amd64_displaced_step_closure *dsc = (amd64_displaced_step_closure *) dsc_;
+ amd64_displaced_step_copy_insn_closure *dsc
+ = (amd64_displaced_step_copy_insn_closure *) dsc_;
enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
/* The offset we applied to the instruction's address. */
ULONGEST insn_offset = to - from;
gdb_byte *insn = dsc->insn_buf.data ();
const struct amd64_insn *insn_details = &dsc->insn_details;
- if (debug_displaced)
- fprintf_unfiltered (gdb_stdlog,
- "displaced: fixup (%s, %s), "
- "insn = 0x%02x 0x%02x ...\n",
- paddress (gdbarch, from), paddress (gdbarch, to),
- insn[0], insn[1]);
+ displaced_debug_printf ("fixup (%s, %s), insn = 0x%02x 0x%02x ...",
+ paddress (gdbarch, from), paddress (gdbarch, to),
+ insn[0], insn[1]);
/* If we used a tmp reg, restore it. */
if (dsc->tmp_used)
{
- if (debug_displaced)
- fprintf_unfiltered (gdb_stdlog, "displaced: restoring reg %d to %s\n",
- dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save));
+ displaced_debug_printf ("restoring reg %d to %s",
+ dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save));
regcache_cooked_write_unsigned (regs, dsc->tmp_regno, dsc->tmp_save);
}
Presumably this is a kernel bug.
Fixup ensures it's a nop; we add one to the length for it. */
&& orig_rip != to + insn_len + 1)
- {
- if (debug_displaced)
- fprintf_unfiltered (gdb_stdlog,
- "displaced: syscall changed %%rip; "
- "not relocating\n");
- }
+ displaced_debug_printf ("syscall changed %%rip; not relocating");
else
{
ULONGEST rip = orig_rip - insn_offset;
regcache_cooked_write_unsigned (regs, AMD64_RIP_REGNUM, rip);
- if (debug_displaced)
- fprintf_unfiltered (gdb_stdlog,
- "displaced: "
- "relocated %%rip from %s to %s\n",
- paddress (gdbarch, orig_rip),
- paddress (gdbarch, rip));
+ displaced_debug_printf ("relocated %%rip from %s to %s",
+ paddress (gdbarch, orig_rip),
+ paddress (gdbarch, rip));
}
}
retaddr = (retaddr - insn_offset) & 0xffffffffffffffffULL;
write_memory_unsigned_integer (rsp, retaddr_len, byte_order, retaddr);
- if (debug_displaced)
- fprintf_unfiltered (gdb_stdlog,
- "displaced: relocated return addr at %s "
- "to %s\n",
- paddress (gdbarch, rsp),
- paddress (gdbarch, retaddr));
+ displaced_debug_printf ("relocated return addr at %s to %s",
+ paddress (gdbarch, rsp),
+ paddress (gdbarch, retaddr));
}
}
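
For the call case handled just above, a small worked example (addresses invented): if the instruction was copied from 0x400000 to a scratch area at 0x600000, insn_offset is 0x200000; the return address the copied call pushed is too high by exactly that amount, so subtracting insn_offset (with the mask keeping the arithmetic in 64 bits) yields the address just past the original call, where the inferior must really resume.

    /* Illustration: from = 0x400000, to = 0x600000, insn_offset = 0x200000.
       Pushed return address (to + insn_len) is rewritten to (from + insn_len).  */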
newrel = (oldloc - *to) + rel32;
store_signed_integer (insn + 1, 4, byte_order, newrel);
- if (debug_displaced)
- fprintf_unfiltered (gdb_stdlog,
- "Adjusted insn rel32=%s at %s to"
- " rel32=%s at %s\n",
- hex_string (rel32), paddress (gdbarch, oldloc),
- hex_string (newrel), paddress (gdbarch, *to));
+ displaced_debug_printf ("adjusted insn rel32=%s at %s to rel32=%s at %s",
+ hex_string (rel32), paddress (gdbarch, oldloc),
+ hex_string (newrel), paddress (gdbarch, *to));
/* Write the adjusted jump into its displaced location. */
append_insns (to, 5, insn);
rel32 = extract_signed_integer (insn + offset, 4, byte_order);
newrel = (oldloc - *to) + rel32;
store_signed_integer (insn + offset, 4, byte_order, newrel);
- if (debug_displaced)
- fprintf_unfiltered (gdb_stdlog,
- "Adjusted insn rel32=%s at %s to"
- " rel32=%s at %s\n",
- hex_string (rel32), paddress (gdbarch, oldloc),
- hex_string (newrel), paddress (gdbarch, *to));
+ displaced_debug_printf ("adjusted insn rel32=%s at %s to rel32=%s at %s",
+ hex_string (rel32), paddress (gdbarch, oldloc),
+ hex_string (newrel), paddress (gdbarch, *to));
}
/* Write the adjusted instruction into its displaced location. */
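
The rel32 adjustment above keeps the absolute branch target unchanged when the instruction is moved. A worked example with invented numbers:

    /* A 5-byte call at oldloc = 0x1000 with rel32 = 0x50 targets
       0x1000 + 5 + 0x50 = 0x1055.  Relocated to *to = 0x7000:
         newrel = (0x1000 - 0x7000) + 0x50 = -0x5fb0
       and 0x7000 + 5 + (-0x5fb0) = 0x1055, the same target.  */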
if (op == 0x55) /* pushq %rbp */
{
/* Take into account that we've executed the `pushq %rbp' that
- starts this instruction sequence. */
+ starts this instruction sequence. */
cache->saved_regs[AMD64_RBP_REGNUM] = 0;
cache->sp_offset += 8;
/* If that's all, return now. */
if (current_pc <= pc + 1)
- return current_pc;
+ return current_pc;
read_code (pc + 1, buf, 3);
return pc + 4;
}
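
For reference, the byte sequence this prologue analyzer matches in the common 64-bit case (illustrative disassembly):

    /* 55          pushq %rbp
       48 89 e5    movq  %rsp, %rbp
       => a frame is set up; the analysis continues at pc + 4.  */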
- /* For X32, also check for `movq %esp, %ebp'. */
+ /* For X32, also check for `movl %esp, %ebp'. */
if (gdbarch_ptr_bit (gdbarch) == 32)
{
if (memcmp (buf, mov_esp_ebp_1, 2) == 0
{
/* 0x0f 0x29 0b??000101 movaps %xmmreg?,-0x??(%rbp) */
if (buf[offset] != 0x0f || buf[offset + 1] != 0x29
- || (buf[offset + 2] & 0x3f) != (xmmreg << 3 | 0x5))
+ || (buf[offset + 2] & 0x3f) != (xmmreg << 3 | 0x5))
return pc;
/* 0b01?????? */
= skip_prologue_using_sal (gdbarch, func_addr);
struct compunit_symtab *cust = find_pc_compunit_symtab (func_addr);
- /* Clang always emits a line note before the prologue and another
- one after. We trust clang to emit usable line notes. */
+  /* The LLVM backend (Clang/Flang) always emits a line note before the
+     prologue and another one after.  We trust clang and newer Intel
+     compilers to emit usable line notes.  */
if (post_prologue_pc
&& (cust != NULL
&& COMPUNIT_PRODUCER (cust) != NULL
- && startswith (COMPUNIT_PRODUCER (cust), "clang ")))
+ && (producer_is_llvm (COMPUNIT_PRODUCER (cust))
+ || producer_is_icc_ge_19 (COMPUNIT_PRODUCER (cust)))))
return std::max (start_pc, post_prologue_pc);
}
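
As a hedged illustration of what the widened producer check covers: clang's DW_AT_producer begins with "clang version ...", which the old startswith ("clang ") test already matched, while other LLVM-based front ends such as Flang, and Intel compilers from version 19 onward, did not. A minimal sketch of the kind of test involved (an assumption about the helpers, not their actual implementation):

    /* Hedged sketch only; see producer_is_llvm / producer_is_icc_ge_19 for
       the real checks.  */
    static bool
    producer_looks_llvm_based (const char *producer)
    {
      return producer != nullptr
             && (startswith (producer, "clang ")
                 || strstr (producer, "flang") != nullptr);
    }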
set_gdbarch_in_indirect_branch_thunk (gdbarch,
amd64_in_indirect_branch_thunk);
+
+ register_amd64_ravenscar_ops (gdbarch);
}
/* Initialize ARCH for x86-64, no osabi. */