X-Git-Url: http://git.efficios.com/?a=blobdiff_plain;f=gdb%2Famd64-tdep.c;h=f96a9868259ef521d4041b81d2312bdb097f6beb;hb=a50264baf57716993e701096fa6e466fb63e0301;hp=0eac35e536bdf28036f1e860c0252c7b7b092e68;hpb=0b8835861cde41744a08f215b48fccd135815b63;p=deliverable%2Fbinutils-gdb.git diff --git a/gdb/amd64-tdep.c b/gdb/amd64-tdep.c index 0eac35e536..f96a986825 100644 --- a/gdb/amd64-tdep.c +++ b/gdb/amd64-tdep.c @@ -1,6 +1,6 @@ /* Target-dependent code for AMD64. - Copyright (C) 2001-2018 Free Software Foundation, Inc. + Copyright (C) 2001-2020 Free Software Foundation, Inc. Contributed by Jiri Smid, SuSE Labs. @@ -39,14 +39,14 @@ #include "disasm.h" #include "amd64-tdep.h" #include "i387-tdep.h" -#include "x86-xstate.h" +#include "gdbsupport/x86-xstate.h" #include #include "target-descriptions.h" #include "arch/amd64.h" #include "producer.h" #include "ax.h" #include "ax-gdb.h" -#include "common/byte-vector.h" +#include "gdbsupport/byte-vector.h" #include "osabi.h" #include "x86-tdep.h" @@ -352,16 +352,12 @@ amd64_pseudo_register_read_value (struct gdbarch *gdbarch, readable_regcache *regcache, int regnum) { - gdb_byte *raw_buf = (gdb_byte *) alloca (register_size (gdbarch, regnum)); struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch); - enum register_status status; - struct value *result_value; - gdb_byte *buf; - result_value = allocate_value (register_type (gdbarch, regnum)); + value *result_value = allocate_value (register_type (gdbarch, regnum)); VALUE_LVAL (result_value) = lval_register; VALUE_REGNUM (result_value) = regnum; - buf = value_contents_raw (result_value); + gdb_byte *buf = value_contents_raw (result_value); if (i386_byte_regnum_p (gdbarch, regnum)) { @@ -370,9 +366,11 @@ amd64_pseudo_register_read_value (struct gdbarch *gdbarch, /* Extract (always little endian). */ if (gpnum >= AMD64_NUM_LOWER_BYTE_REGS) { + gpnum -= AMD64_NUM_LOWER_BYTE_REGS; + gdb_byte raw_buf[register_size (gdbarch, gpnum)]; + /* Special handling for AH, BH, CH, DH. */ - status = regcache->raw_read (gpnum - AMD64_NUM_LOWER_BYTE_REGS, - raw_buf); + register_status status = regcache->raw_read (gpnum, raw_buf); if (status == REG_VALID) memcpy (buf, raw_buf + 1, 1); else @@ -381,7 +379,8 @@ amd64_pseudo_register_read_value (struct gdbarch *gdbarch, } else { - status = regcache->raw_read (gpnum, raw_buf); + gdb_byte raw_buf[register_size (gdbarch, gpnum)]; + register_status status = regcache->raw_read (gpnum, raw_buf); if (status == REG_VALID) memcpy (buf, raw_buf, 1); else @@ -392,8 +391,9 @@ amd64_pseudo_register_read_value (struct gdbarch *gdbarch, else if (i386_dword_regnum_p (gdbarch, regnum)) { int gpnum = regnum - tdep->eax_regnum; + gdb_byte raw_buf[register_size (gdbarch, gpnum)]; /* Extract (always little endian). */ - status = regcache->raw_read (gpnum, raw_buf); + register_status status = regcache->raw_read (gpnum, raw_buf); if (status == REG_VALID) memcpy (buf, raw_buf, 4); else @@ -412,7 +412,6 @@ amd64_pseudo_register_write (struct gdbarch *gdbarch, struct regcache *regcache, int regnum, const gdb_byte *buf) { - gdb_byte *raw_buf = (gdb_byte *) alloca (register_size (gdbarch, regnum)); struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch); if (i386_byte_regnum_p (gdbarch, regnum)) @@ -421,34 +420,39 @@ amd64_pseudo_register_write (struct gdbarch *gdbarch, if (gpnum >= AMD64_NUM_LOWER_BYTE_REGS) { + gpnum -= AMD64_NUM_LOWER_BYTE_REGS; + gdb_byte raw_buf[register_size (gdbarch, gpnum)]; + /* Read ... AH, BH, CH, DH. 
*/ - regcache->raw_read (gpnum - AMD64_NUM_LOWER_BYTE_REGS, raw_buf); + regcache->raw_read (gpnum, raw_buf); /* ... Modify ... (always little endian). */ memcpy (raw_buf + 1, buf, 1); /* ... Write. */ - regcache_raw_write (regcache, - gpnum - AMD64_NUM_LOWER_BYTE_REGS, raw_buf); + regcache->raw_write (gpnum, raw_buf); } else { + gdb_byte raw_buf[register_size (gdbarch, gpnum)]; + /* Read ... */ regcache->raw_read (gpnum, raw_buf); /* ... Modify ... (always little endian). */ memcpy (raw_buf, buf, 1); /* ... Write. */ - regcache_raw_write (regcache, gpnum, raw_buf); + regcache->raw_write (gpnum, raw_buf); } } else if (i386_dword_regnum_p (gdbarch, regnum)) { int gpnum = regnum - tdep->eax_regnum; + gdb_byte raw_buf[register_size (gdbarch, gpnum)]; /* Read ... */ regcache->raw_read (gpnum, raw_buf); /* ... Modify ... (always little endian). */ memcpy (raw_buf, buf, 4); /* ... Write. */ - regcache_raw_write (regcache, gpnum, raw_buf); + regcache->raw_write (gpnum, raw_buf); } else i386_pseudo_register_write (gdbarch, regcache, regnum, buf); @@ -537,17 +541,108 @@ amd64_merge_classes (enum amd64_reg_class class1, enum amd64_reg_class class2) static void amd64_classify (struct type *type, enum amd64_reg_class theclass[2]); -/* Return non-zero if TYPE is a non-POD structure or union type. */ +/* Return true if TYPE is a structure or union with unaligned fields. */ -static int -amd64_non_pod_p (struct type *type) +static bool +amd64_has_unaligned_fields (struct type *type) { - /* ??? A class with a base class certainly isn't POD, but does this - catch all non-POD structure types? */ - if (TYPE_CODE (type) == TYPE_CODE_STRUCT && TYPE_N_BASECLASSES (type) > 0) - return 1; + if (type->code () == TYPE_CODE_STRUCT + || type->code () == TYPE_CODE_UNION) + { + for (int i = 0; i < type->num_fields (); i++) + { + struct type *subtype = check_typedef (TYPE_FIELD_TYPE (type, i)); + int bitpos = TYPE_FIELD_BITPOS (type, i); + int align = type_align(subtype); + + /* Ignore static fields, empty fields (for example nested + empty structures), and bitfields (these are handled by + the caller). */ + if (field_is_static (&type->field (i)) + || (TYPE_FIELD_BITSIZE (type, i) == 0 + && TYPE_LENGTH (subtype) == 0) + || TYPE_FIELD_PACKED (type, i)) + continue; - return 0; + if (bitpos % 8 != 0) + return true; + + int bytepos = bitpos / 8; + if (bytepos % align != 0) + return true; + + if (amd64_has_unaligned_fields (subtype)) + return true; + } + } + + return false; +} + +/* Classify field I of TYPE starting at BITOFFSET according to the rules for + structures and union types, and store the result in THECLASS. */ + +static void +amd64_classify_aggregate_field (struct type *type, int i, + enum amd64_reg_class theclass[2], + unsigned int bitoffset) +{ + struct type *subtype = check_typedef (TYPE_FIELD_TYPE (type, i)); + int bitpos = bitoffset + TYPE_FIELD_BITPOS (type, i); + int pos = bitpos / 64; + enum amd64_reg_class subclass[2]; + int bitsize = TYPE_FIELD_BITSIZE (type, i); + int endpos; + + if (bitsize == 0) + bitsize = TYPE_LENGTH (subtype) * 8; + endpos = (bitpos + bitsize - 1) / 64; + + /* Ignore static fields, or empty fields, for example nested + empty structures.*/ + if (field_is_static (&type->field (i)) || bitsize == 0) + return; + + if (subtype->code () == TYPE_CODE_STRUCT + || subtype->code () == TYPE_CODE_UNION) + { + /* Each field of an object is classified recursively. 
*/ + int j; + for (j = 0; j < subtype->num_fields (); j++) + amd64_classify_aggregate_field (subtype, j, theclass, bitpos); + return; + } + + gdb_assert (pos == 0 || pos == 1); + + amd64_classify (subtype, subclass); + theclass[pos] = amd64_merge_classes (theclass[pos], subclass[0]); + if (bitsize <= 64 && pos == 0 && endpos == 1) + /* This is a bit of an odd case: We have a field that would + normally fit in one of the two eightbytes, except that + it is placed in a way that this field straddles them. + This has been seen with a structure containing an array. + + The ABI is a bit unclear in this case, but we assume that + this field's class (stored in subclass[0]) must also be merged + into class[1]. In other words, our field has a piece stored + in the second eight-byte, and thus its class applies to + the second eight-byte as well. + + In the case where the field length exceeds 8 bytes, + it should not be necessary to merge the field class + into class[1]. As LEN > 8, subclass[1] is necessarily + different from AMD64_NO_CLASS. If subclass[1] is equal + to subclass[0], then the normal class[1]/subclass[1] + merging will take care of everything. For subclass[1] + to be different from subclass[0], I can only see the case + where we have a SSE/SSEUP or X87/X87UP pair, which both + use up all 16 bytes of the aggregate, and are already + handled just fine (because each portion sits on its own + 8-byte). */ + theclass[1] = amd64_merge_classes (theclass[1], subclass[0]); + if (pos == 0) + theclass[1] = amd64_merge_classes (theclass[1], subclass[1]); } /* Classify TYPE according to the rules for aggregate (structures and @@ -556,10 +651,9 @@ amd64_non_pod_p (struct type *type) static void amd64_classify_aggregate (struct type *type, enum amd64_reg_class theclass[2]) { - /* 1. If the size of an object is larger than two eightbytes, or in - C++, is a non-POD structure or union type, or contains + /* 1. If the size of an object is larger than two eightbytes, or it has unaligned fields, it has class memory. */ - if (TYPE_LENGTH (type) > 16 || amd64_non_pod_p (type)) + if (TYPE_LENGTH (type) > 16 || amd64_has_unaligned_fields (type)) { theclass[0] = theclass[1] = AMD64_MEMORY; return; @@ -573,7 +667,7 @@ amd64_classify_aggregate (struct type *type, enum amd64_reg_class theclass[2]) calculated according to the classes of the fields in the eightbyte: */ - if (TYPE_CODE (type) == TYPE_CODE_ARRAY) + if (type->code () == TYPE_CODE_ARRAY) { struct type *subtype = check_typedef (TYPE_TARGET_TYPE (type)); @@ -587,57 +681,11 @@ amd64_classify_aggregate (struct type *type, enum amd64_reg_class theclass[2]) int i; /* Structure or union. 
*/ - gdb_assert (TYPE_CODE (type) == TYPE_CODE_STRUCT - || TYPE_CODE (type) == TYPE_CODE_UNION); + gdb_assert (type->code () == TYPE_CODE_STRUCT + || type->code () == TYPE_CODE_UNION); - for (i = 0; i < TYPE_NFIELDS (type); i++) - { - struct type *subtype = check_typedef (TYPE_FIELD_TYPE (type, i)); - int pos = TYPE_FIELD_BITPOS (type, i) / 64; - enum amd64_reg_class subclass[2]; - int bitsize = TYPE_FIELD_BITSIZE (type, i); - int endpos; - - if (bitsize == 0) - bitsize = TYPE_LENGTH (subtype) * 8; - endpos = (TYPE_FIELD_BITPOS (type, i) + bitsize - 1) / 64; - - /* Ignore static fields, or empty fields, for example nested - empty structures.*/ - if (field_is_static (&TYPE_FIELD (type, i)) || bitsize == 0) - continue; - - gdb_assert (pos == 0 || pos == 1); - - amd64_classify (subtype, subclass); - theclass[pos] = amd64_merge_classes (theclass[pos], subclass[0]); - if (bitsize <= 64 && pos == 0 && endpos == 1) - /* This is a bit of an odd case: We have a field that would - normally fit in one of the two eightbytes, except that - it is placed in a way that this field straddles them. - This has been seen with a structure containing an array. - - The ABI is a bit unclear in this case, but we assume that - this field's class (stored in subclass[0]) must also be merged - into class[1]. In other words, our field has a piece stored - in the second eight-byte, and thus its class applies to - the second eight-byte as well. - - In the case where the field length exceeds 8 bytes, - it should not be necessary to merge the field class - into class[1]. As LEN > 8, subclass[1] is necessarily - different from AMD64_NO_CLASS. If subclass[1] is equal - to subclass[0], then the normal class[1]/subclass[1] - merging will take care of everything. For subclass[1] - to be different from subclass[0], I can only see the case - where we have a SSE/SSEUP or X87/X87UP pair, which both - use up all 16 bytes of the aggregate, and are already - handled just fine (because each portion sits on its own - 8-byte). */ - theclass[1] = amd64_merge_classes (theclass[1], subclass[0]); - if (pos == 0) - theclass[1] = amd64_merge_classes (theclass[1], subclass[1]); - } + for (i = 0; i < type->num_fields (); i++) + amd64_classify_aggregate_field (type, i, theclass, 0); } /* 4. Then a post merger cleanup is done: */ @@ -660,7 +708,7 @@ amd64_classify_aggregate (struct type *type, enum amd64_reg_class theclass[2]) static void amd64_classify (struct type *type, enum amd64_reg_class theclass[2]) { - enum type_code code = TYPE_CODE (type); + enum type_code code = type->code (); int len = TYPE_LENGTH (type); theclass[0] = theclass[1] = AMD64_NO_CLASS; @@ -777,8 +825,8 @@ amd64_return_value (struct gdbarch *gdbarch, struct value *function, if (writebuf) { i387_return_value (gdbarch, regcache); - regcache_raw_write (regcache, AMD64_ST0_REGNUM, writebuf); - regcache_raw_write (regcache, AMD64_ST1_REGNUM, writebuf + 16); + regcache->raw_write (AMD64_ST0_REGNUM, writebuf); + regcache->raw_write (AMD64_ST1_REGNUM, writebuf + 16); /* Fix up the tag word such that both %st(0) and %st(1) are marked as valid. 
*/ @@ -845,11 +893,11 @@ amd64_return_value (struct gdbarch *gdbarch, struct value *function, gdb_assert (regnum != -1); if (readbuf) - regcache_raw_read_part (regcache, regnum, offset, std::min (len, 8), - readbuf + i * 8); + regcache->raw_read_part (regnum, offset, std::min (len, 8), + readbuf + i * 8); if (writebuf) - regcache_raw_write_part (regcache, regnum, offset, std::min (len, 8), - writebuf + i * 8); + regcache->raw_write_part (regnum, offset, std::min (len, 8), + writebuf + i * 8); } return RETURN_VALUE_REGISTER_CONVENTION; @@ -857,8 +905,8 @@ amd64_return_value (struct gdbarch *gdbarch, struct value *function, static CORE_ADDR -amd64_push_arguments (struct regcache *regcache, int nargs, - struct value **args, CORE_ADDR sp, int struct_return) +amd64_push_arguments (struct regcache *regcache, int nargs, struct value **args, + CORE_ADDR sp, function_call_return_method return_method) { static int integer_regnum[] = { @@ -886,7 +934,7 @@ amd64_push_arguments (struct regcache *regcache, int nargs, int i; /* Reserve a register for the "hidden" argument. */ - if (struct_return) +if (return_method == return_method_struct) integer_reg++; for (i = 0; i < nargs; i++) @@ -950,6 +998,9 @@ amd64_push_arguments (struct regcache *regcache, int nargs, offset = 8; break; + case AMD64_NO_CLASS: + continue; + default: gdb_assert (!"Unexpected register class."); } @@ -957,7 +1008,7 @@ amd64_push_arguments (struct regcache *regcache, int nargs, gdb_assert (regnum != -1); memset (buf, 0, sizeof buf); memcpy (buf, valbuf + j * 8, std::min (len, 8)); - regcache_raw_write_part (regcache, regnum, offset, 8, buf); + regcache->raw_write_part (regnum, offset, 8, buf); } } } @@ -992,7 +1043,8 @@ static CORE_ADDR amd64_push_dummy_call (struct gdbarch *gdbarch, struct value *function, struct regcache *regcache, CORE_ADDR bp_addr, int nargs, struct value **args, CORE_ADDR sp, - int struct_return, CORE_ADDR struct_addr) + function_call_return_method return_method, + CORE_ADDR struct_addr) { enum bfd_endian byte_order = gdbarch_byte_order (gdbarch); gdb_byte buf[8]; @@ -1005,13 +1057,13 @@ amd64_push_dummy_call (struct gdbarch *gdbarch, struct value *function, i387_reset_bnd_regs (gdbarch, regcache); /* Pass arguments. */ - sp = amd64_push_arguments (regcache, nargs, args, sp, struct_return); + sp = amd64_push_arguments (regcache, nargs, args, sp, return_method); /* Pass "hidden" argument". */ - if (struct_return) + if (return_method == return_method_struct) { store_unsigned_integer (buf, 8, byte_order, struct_addr); - regcache_cooked_write (regcache, AMD64_RDI_REGNUM, buf); + regcache->cooked_write (AMD64_RDI_REGNUM, buf); } /* Store return address. */ @@ -1021,10 +1073,10 @@ amd64_push_dummy_call (struct gdbarch *gdbarch, struct value *function, /* Finally, update the stack pointer... */ store_unsigned_integer (buf, 8, byte_order, sp); - regcache_cooked_write (regcache, AMD64_RSP_REGNUM, buf); + regcache->cooked_write (AMD64_RSP_REGNUM, buf); /* ...and fake a frame pointer. 
*/ - regcache_cooked_write (regcache, AMD64_RBP_REGNUM, buf); + regcache->cooked_write (AMD64_RBP_REGNUM, buf); return sp + 16; } @@ -1413,7 +1465,7 @@ fixup_displaced_copy (struct gdbarch *gdbarch, } } -struct displaced_step_closure * +displaced_step_closure_up amd64_displaced_step_copy_insn (struct gdbarch *gdbarch, CORE_ADDR from, CORE_ADDR to, struct regcache *regs) @@ -1422,8 +1474,8 @@ amd64_displaced_step_copy_insn (struct gdbarch *gdbarch, /* Extra space for sentinels so fixup_{riprel,displaced_copy} don't have to continually watch for running off the end of the buffer. */ int fixup_sentinel_space = len; - amd64_displaced_step_closure *dsc - = new amd64_displaced_step_closure (len + fixup_sentinel_space); + std::unique_ptr dsc + (new amd64_displaced_step_closure (len + fixup_sentinel_space)); gdb_byte *buf = &dsc->insn_buf[0]; struct amd64_insn *details = &dsc->insn_details; @@ -1448,7 +1500,7 @@ amd64_displaced_step_copy_insn (struct gdbarch *gdbarch, /* Modify the insn to cope with the address where it will be executed from. In particular, handle any rip-relative addressing. */ - fixup_displaced_copy (gdbarch, dsc, from, to, regs); + fixup_displaced_copy (gdbarch, dsc.get (), from, to, regs); write_memory (to, buf, len); @@ -1459,7 +1511,8 @@ amd64_displaced_step_copy_insn (struct gdbarch *gdbarch, displaced_step_dump_bytes (gdb_stdlog, buf, len); } - return dsc; + /* This is a work around for a problem with g++ 4.8. */ + return displaced_step_closure_up (dsc.release ()); } static int @@ -2309,6 +2362,9 @@ amd64_x32_analyze_stack_align (CORE_ADDR pc, CORE_ADDR current_pc, pushq %rbp 0x55 movl %esp, %ebp 0x89 0xe5 (or 0x8b 0xec) + The `endbr64` instruction can be found before these sequences, and will be + skipped if found. + Any function that doesn't start with one of these sequences will be assumed to have no prologue and thus no valid frame pointer in %rbp. */ @@ -2319,6 +2375,8 @@ amd64_analyze_prologue (struct gdbarch *gdbarch, struct amd64_frame_cache *cache) { enum bfd_endian byte_order = gdbarch_byte_order (gdbarch); + /* The `endbr64` instruction. */ + static const gdb_byte endbr64[4] = { 0xf3, 0x0f, 0x1e, 0xfa }; /* There are two variations of movq %rsp, %rbp. */ static const gdb_byte mov_rsp_rbp_1[3] = { 0x48, 0x89, 0xe5 }; static const gdb_byte mov_rsp_rbp_2[3] = { 0x48, 0x8b, 0xec }; @@ -2339,6 +2397,20 @@ amd64_analyze_prologue (struct gdbarch *gdbarch, op = read_code_unsigned_integer (pc, 1, byte_order); + /* Check for the `endbr64` instruction, skip it if found. 
*/ + if (op == endbr64[0]) + { + read_code (pc + 1, buf, 3); + + if (memcmp (buf, &endbr64[1], 3) == 0) + pc += 4; + + op = read_code_unsigned_integer (pc, 1, byte_order); + } + + if (current_pc <= pc) + return current_pc; + if (op == 0x55) /* pushq %rbp */ { /* Take into account that we've executed the `pushq %rbp' that @@ -2576,16 +2648,15 @@ amd64_frame_cache (struct frame_info *this_frame, void **this_cache) cache = amd64_alloc_frame_cache (); *this_cache = cache; - TRY + try { amd64_frame_cache_1 (this_frame, cache); } - CATCH (ex, RETURN_MASK_ERROR) + catch (const gdb_exception_error &ex) { if (ex.error != NOT_AVAILABLE_ERROR) - throw_exception (ex); + throw; } - END_CATCH return cache; } @@ -2694,7 +2765,7 @@ amd64_sigtramp_frame_cache (struct frame_info *this_frame, void **this_cache) cache = amd64_alloc_frame_cache (); - TRY + try { get_frame_register (this_frame, AMD64_RSP_REGNUM, buf); cache->base = extract_unsigned_integer (buf, 8, byte_order) - 8; @@ -2708,12 +2779,11 @@ amd64_sigtramp_frame_cache (struct frame_info *this_frame, void **this_cache) cache->base_p = 1; } - CATCH (ex, RETURN_MASK_ERROR) + catch (const gdb_exception_error &ex) { if (ex.error != NOT_AVAILABLE_ERROR) - throw_exception (ex); + throw; } - END_CATCH *this_cache = cache; return cache; @@ -2871,7 +2941,7 @@ amd64_epilogue_frame_cache (struct frame_info *this_frame, void **this_cache) cache = amd64_alloc_frame_cache (); *this_cache = cache; - TRY + try { /* Cache base will be %esp plus cache->sp_offset (-8). */ get_frame_register (this_frame, AMD64_RSP_REGNUM, buf); @@ -2889,12 +2959,11 @@ amd64_epilogue_frame_cache (struct frame_info *this_frame, void **this_cache) cache->base_p = 1; } - CATCH (ex, RETURN_MASK_ERROR) + catch (const gdb_exception_error &ex) { if (ex.error != NOT_AVAILABLE_ERROR) - throw_exception (ex); + throw; } - END_CATCH return cache; } @@ -3102,15 +3171,7 @@ amd64_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch, if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.segments") != NULL) { - const struct tdesc_feature *feature = - tdesc_find_feature (tdesc, "org.gnu.gdb.i386.segments"); - struct tdesc_arch_data *tdesc_data_segments = - (struct tdesc_arch_data *) info.tdep_info; - - tdesc_numbered_register (feature, tdesc_data_segments, - AMD64_FSBASE_REGNUM, "fs_base"); - tdesc_numbered_register (feature, tdesc_data_segments, - AMD64_GSBASE_REGNUM, "gs_base"); + tdep->fsbase_regnum = AMD64_FSBASE_REGNUM; } if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.pkeys") != NULL) @@ -3226,7 +3287,8 @@ amd64_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch, static void amd64_none_init_abi (gdbarch_info info, gdbarch *arch) { - amd64_init_abi (info, arch, amd64_target_description (X86_XSTATE_SSE_MASK)); + amd64_init_abi (info, arch, amd64_target_description (X86_XSTATE_SSE_MASK, + true)); } static struct type * @@ -3267,59 +3329,39 @@ static void amd64_x32_none_init_abi (gdbarch_info info, gdbarch *arch) { amd64_x32_init_abi (info, arch, - amd64_target_description (X86_XSTATE_SSE_MASK)); + amd64_target_description (X86_XSTATE_SSE_MASK, true)); } /* Return the target description for a specified XSAVE feature mask. */ const struct target_desc * -amd64_target_description (uint64_t xcr0) +amd64_target_description (uint64_t xcr0, bool segments) { static target_desc *amd64_tdescs \ - [2/*AVX*/][2/*MPX*/][2/*AVX512*/][2/*PKRU*/] = {}; + [2/*AVX*/][2/*MPX*/][2/*AVX512*/][2/*PKRU*/][2/*segments*/] = {}; target_desc **tdesc; tdesc = &amd64_tdescs[(xcr0 & X86_XSTATE_AVX) ? 
1 : 0] [(xcr0 & X86_XSTATE_MPX) ? 1 : 0] [(xcr0 & X86_XSTATE_AVX512) ? 1 : 0] - [(xcr0 & X86_XSTATE_PKRU) ? 1 : 0]; + [(xcr0 & X86_XSTATE_PKRU) ? 1 : 0] + [segments ? 1 : 0]; if (*tdesc == NULL) - *tdesc = amd64_create_target_description (xcr0, false, false); + *tdesc = amd64_create_target_description (xcr0, false, false, + segments); return *tdesc; } +void _initialize_amd64_tdep (); void -_initialize_amd64_tdep (void) +_initialize_amd64_tdep () { gdbarch_register_osabi (bfd_arch_i386, bfd_mach_x86_64, GDB_OSABI_NONE, amd64_none_init_abi); gdbarch_register_osabi (bfd_arch_i386, bfd_mach_x64_32, GDB_OSABI_NONE, amd64_x32_none_init_abi); - -#if GDB_SELF_TEST - struct - { - const char *xml; - uint64_t mask; - } xml_masks[] = { - { "i386/amd64.xml", X86_XSTATE_SSE_MASK }, - { "i386/amd64-avx.xml", X86_XSTATE_AVX_MASK }, - { "i386/amd64-mpx.xml", X86_XSTATE_MPX_MASK }, - { "i386/amd64-avx-mpx.xml", X86_XSTATE_AVX_MPX_MASK }, - { "i386/amd64-avx-avx512.xml", X86_XSTATE_AVX_AVX512_MASK }, - { "i386/amd64-avx-mpx-avx512-pku.xml", - X86_XSTATE_AVX_MPX_AVX512_PKU_MASK }, - }; - - for (auto &a : xml_masks) - { - auto tdesc = amd64_target_description (a.mask); - - selftests::record_xml_tdesc (a.xml, tdesc); - } -#endif /* GDB_SELF_TEST */ } @@ -3350,9 +3392,9 @@ amd64_supply_fxsave (struct regcache *regcache, int regnum, const gdb_byte *regs = (const gdb_byte *) fxsave; if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep)) - regcache_raw_supply (regcache, I387_FISEG_REGNUM (tdep), regs + 12); + regcache->raw_supply (I387_FISEG_REGNUM (tdep), regs + 12); if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep)) - regcache_raw_supply (regcache, I387_FOSEG_REGNUM (tdep), regs + 20); + regcache->raw_supply (I387_FOSEG_REGNUM (tdep), regs + 20); } } @@ -3371,7 +3413,6 @@ amd64_supply_xsave (struct regcache *regcache, int regnum, && gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64) { const gdb_byte *regs = (const gdb_byte *) xsave; - static const gdb_byte zero[I386_MAX_REGISTER_SIZE] = { 0 }; ULONGEST clear_bv; clear_bv = i387_xsave_get_clear_bv (gdbarch, xsave); @@ -3382,11 +3423,9 @@ amd64_supply_xsave (struct regcache *regcache, int regnum, if (!(clear_bv & X86_XSTATE_X87)) { if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep)) - regcache_raw_supply (regcache, I387_FISEG_REGNUM (tdep), - regs + 12); + regcache->raw_supply (I387_FISEG_REGNUM (tdep), regs + 12); if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep)) - regcache_raw_supply (regcache, I387_FOSEG_REGNUM (tdep), - regs + 20); + regcache->raw_supply (I387_FOSEG_REGNUM (tdep), regs + 20); } } } @@ -3409,9 +3448,9 @@ amd64_collect_fxsave (const struct regcache *regcache, int regnum, if (gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64) { if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep)) - regcache_raw_collect (regcache, I387_FISEG_REGNUM (tdep), regs + 12); + regcache->raw_collect (I387_FISEG_REGNUM (tdep), regs + 12); if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep)) - regcache_raw_collect (regcache, I387_FOSEG_REGNUM (tdep), regs + 20); + regcache->raw_collect (I387_FOSEG_REGNUM (tdep), regs + 20); } } @@ -3430,10 +3469,10 @@ amd64_collect_xsave (const struct regcache *regcache, int regnum, if (gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64) { if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep)) - regcache_raw_collect (regcache, I387_FISEG_REGNUM (tdep), + regcache->raw_collect (I387_FISEG_REGNUM (tdep), regs + 12); if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep)) - regcache_raw_collect (regcache, 
I387_FOSEG_REGNUM (tdep), + regcache->raw_collect (I387_FOSEG_REGNUM (tdep), regs + 20); } }
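
Note on the prologue hunk above: amd64_analyze_prologue now skips a leading `endbr64` marker (bytes 0xf3 0x0f 0x1e 0xfa, as listed in the hunk) before matching the usual `pushq %rbp` / `movq %rsp, %rbp` sequence. A minimal standalone sketch of that check, assuming a plain in-memory byte buffer rather than GDB's read_code helpers, could look like this:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Hypothetical helper (not part of the patch): return how many bytes to
   skip at the start of BUF (LEN bytes of instruction stream) when it
   begins with the CET `endbr64' marker, and 0 otherwise.  */
static size_t
skip_endbr64 (const uint8_t *buf, size_t len)
{
  static const uint8_t endbr64[4] = { 0xf3, 0x0f, 0x1e, 0xfa };

  if (len >= sizeof endbr64
      && memcmp (buf, endbr64, sizeof endbr64) == 0)
    return sizeof endbr64;	/* The prologue proper starts 4 bytes later.  */
  return 0;
}

A caller would advance its scan position by the returned count and only then look for the 0x55 (`pushq %rbp`) opcode, which is what the new code in amd64_analyze_prologue does against target memory.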
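
Note on the "odd case" comment in amd64_classify_aggregate_field above (a field whose size fits in one eightbyte but which straddles both): a concrete, hypothetical type that triggers it, not taken from the patch, is:

/* 16-byte aggregate.  Field `a' has bitsize 64 (it would fit in a single
   eightbyte), but it starts at bit offset 32, so pos = 32/64 = 0 while
   endpos = (32 + 64 - 1)/64 = 1: it begins in the first eightbyte and ends
   in the second.  Its class therefore has to be merged into both class[0]
   and class[1], which is exactly what the straddle check does.  */
struct straddle
{
  int  x;	/* bits [0, 32)   */
  char a[8];	/* bits [32, 96)  */
  int  y;	/* bits [96, 128) */
};

Without the extra merge, only class[0] would see `a', even though the tail of `a' (bits [64, 96)) lives in the second eightbyte.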