X-Git-Url: http://git.efficios.com/?a=blobdiff_plain;f=gdb%2Faarch64-tdep.c;h=ae145abce26e39c972796b33a89cebf22ac5c43a;hb=refs%2Fheads%2Fconcurrent-displaced-stepping-2020-04-01;hp=ca0d0023126df35afe7f23e80469ae2cdfe0e129;hpb=d55e5aa6b29906346c51ad00e6a9b112590aa294;p=deliverable%2Fbinutils-gdb.git diff --git a/gdb/aarch64-tdep.c b/gdb/aarch64-tdep.c index ca0d002312..ae145abce2 100644 --- a/gdb/aarch64-tdep.c +++ b/gdb/aarch64-tdep.c @@ -1,6 +1,6 @@ /* Common target dependent code for GDB on AArch64 systems. - Copyright (C) 2009-2019 Free Software Foundation, Inc. + Copyright (C) 2009-2020 Free Software Foundation, Inc. Contributed by ARM Ltd. This file is part of GDB. @@ -20,44 +20,38 @@ #include "defs.h" -/* Standard C++ includes. */ -#include - -/* Local non-gdb includes. */ -#include "aarch64-ravenscar-thread.h" -#include "aarch64-tdep.h" -#include "arch-utils.h" -#include "arch/aarch64-insn.h" -#include "ax-gdb.h" -#include "ax.h" -#include "common/selftest.h" -#include "common/vec.h" -#include "dis-asm.h" -#include "dwarf2-frame.h" -#include "dwarf2.h" -#include "elf-bfd.h" -#include "elf/aarch64.h" -#include "frame-base.h" -#include "frame-unwind.h" #include "frame.h" #include "gdbcmd.h" #include "gdbcore.h" -#include "gdbtypes.h" -#include "infcall.h" -#include "inferior.h" -#include "language.h" -#include "objfiles.h" -#include "opcode/aarch64.h" -#include "osabi.h" -#include "prologue-value.h" -#include "record-full.h" -#include "record.h" +#include "dis-asm.h" #include "regcache.h" #include "reggroups.h" -#include "target-descriptions.h" +#include "value.h" +#include "arch-utils.h" +#include "osabi.h" +#include "frame-unwind.h" +#include "frame-base.h" #include "trad-frame.h" +#include "objfiles.h" +#include "dwarf2.h" +#include "dwarf2/frame.h" +#include "gdbtypes.h" +#include "prologue-value.h" +#include "target-descriptions.h" #include "user-regs.h" -#include "value.h" +#include "ax-gdb.h" +#include "gdbsupport/selftest.h" + +#include "aarch64-tdep.h" +#include "aarch64-ravenscar-thread.h" + +#include "record.h" +#include "record-full.h" +#include "arch/aarch64-insn.h" +#include "gdbarch.h" + +#include "opcode/aarch64.h" +#include #define submask(x) ((1L << ((x) + 1)) - 1) #define bit(obj,st) (((obj) >> (st)) & 1) @@ -247,13 +241,13 @@ class instruction_reader : public abstract_instruction_reader } // namespace -/* If address signing is enabled, mask off the signature bits from ADDR, using - the register values in THIS_FRAME. */ +/* If address signing is enabled, mask off the signature bits from the link + register, which is passed by value in ADDR, using the register values in + THIS_FRAME. */ static CORE_ADDR -aarch64_frame_unmask_address (struct gdbarch_tdep *tdep, - struct frame_info *this_frame, - CORE_ADDR addr) +aarch64_frame_unmask_lr (struct gdbarch_tdep *tdep, + struct frame_info *this_frame, CORE_ADDR addr) { if (tdep->has_pauth () && frame_unwind_register_unsigned (this_frame, @@ -262,11 +256,25 @@ aarch64_frame_unmask_address (struct gdbarch_tdep *tdep, int cmask_num = AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base); CORE_ADDR cmask = frame_unwind_register_unsigned (this_frame, cmask_num); addr = addr & ~cmask; + + /* Record in the frame that the link register required unmasking. */ + set_frame_previous_pc_masked (this_frame); } return addr; } +/* Implement the "get_pc_address_flags" gdbarch method. 
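
   When the unwinder had to strip a pointer authentication signature
   from the saved LR (see aarch64_frame_unmask_lr above), the frame is
   marked and the PC gets annotated with "PAC" in backtraces.  A sketch
   of the unmasking arithmetic, with made-up values and a hypothetical
   CMASK covering the top byte:

     cmask       = 0xff00000000000000
     signed LR   = 0x2a00aaaabbbb0000
     LR & ~cmask = 0x0000aaaabbbb0000   (frame marked, PC shown "PAC")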
*/ + +static std::string +aarch64_get_pc_address_flags (frame_info *frame, CORE_ADDR pc) +{ + if (pc != 0 && get_frame_pc_masked (frame)) + return "PAC"; + + return ""; +} + /* Analyze a prologue, looking for a recognizable stack frame and frame pointer. Scan until we encounter a store that could clobber the stack frame unexpectedly, or an unknown instruction. */ @@ -381,17 +389,16 @@ aarch64_analyze_prologue (struct gdbarch *gdbarch, { unsigned rt = inst.operands[0].reg.regno; unsigned rn = inst.operands[1].addr.base_regno; - int is64 - = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8); + int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier); gdb_assert (aarch64_num_of_operands (inst.opcode) == 2); gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt); gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9); gdb_assert (!inst.operands[1].addr.offset.is_reg); - stack.store (pv_add_constant (regs[rn], - inst.operands[1].addr.offset.imm), - is64 ? 8 : 4, regs[rt]); + stack.store + (pv_add_constant (regs[rn], inst.operands[1].addr.offset.imm), + size, regs[rt]); } else if ((inst.opcode->iclass == ldstpair_off || (inst.opcode->iclass == ldstpair_indexed @@ -403,6 +410,7 @@ aarch64_analyze_prologue (struct gdbarch *gdbarch, unsigned rt2; unsigned rn = inst.operands[2].addr.base_regno; int32_t imm = inst.operands[2].addr.offset.imm; + int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier); gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt || inst.operands[0].type == AARCH64_OPND_Ft); @@ -424,17 +432,12 @@ aarch64_analyze_prologue (struct gdbarch *gdbarch, rt2 = inst.operands[1].reg.regno; if (inst.operands[0].type == AARCH64_OPND_Ft) { - /* Only bottom 64-bit of each V register (D register) need - to be preserved. */ - gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D); rt1 += AARCH64_X_REGISTER_COUNT; rt2 += AARCH64_X_REGISTER_COUNT; } - stack.store (pv_add_constant (regs[rn], imm), 8, - regs[rt1]); - stack.store (pv_add_constant (regs[rn], imm + 8), 8, - regs[rt2]); + stack.store (pv_add_constant (regs[rn], imm), size, regs[rt1]); + stack.store (pv_add_constant (regs[rn], imm + size), size, regs[rt2]); if (inst.operands[2].addr.writeback) regs[rn] = pv_add_constant (regs[rn], imm); @@ -451,21 +454,14 @@ aarch64_analyze_prologue (struct gdbarch *gdbarch, unsigned int rt = inst.operands[0].reg.regno; int32_t imm = inst.operands[1].addr.offset.imm; unsigned int rn = inst.operands[1].addr.base_regno; - bool is64 - = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8); + int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier); gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt || inst.operands[0].type == AARCH64_OPND_Ft); if (inst.operands[0].type == AARCH64_OPND_Ft) - { - /* Only bottom 64-bit of each V register (D register) need - to be preserved. */ - gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D); - rt += AARCH64_X_REGISTER_COUNT; - } + rt += AARCH64_X_REGISTER_COUNT; - stack.store (pv_add_constant (regs[rn], imm), - is64 ? 
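/* (For illustration: a hypothetical "str w1, [sp, #12]" carries
   qualifier esize 4, so the pv model records a 4-byte store of w1 at
   sp + 12, while "str x1, [sp, #16]" has esize 8.  The new code below
   uses the esize directly where the old code computed is64 ? 8 : 4.)  */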
8 : 4, regs[rt]); + stack.store (pv_add_constant (regs[rn], imm), size, regs[rt]); if (inst.operands[1].addr.writeback) regs[rn] = pv_add_constant (regs[rn], imm); } @@ -663,6 +659,7 @@ aarch64_analyze_prologue_test (void) }; instruction_reader_test reader (insns); + trad_frame_reset_saved_regs (gdbarch, cache.saved_regs); CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader); SELF_CHECK (end == 4 * 5); @@ -705,6 +702,7 @@ aarch64_analyze_prologue_test (void) }; instruction_reader_test reader (insns); + trad_frame_reset_saved_regs (gdbarch, cache.saved_regs); CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader); @@ -872,16 +870,15 @@ aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache) cache->saved_regs = trad_frame_alloc_saved_regs (this_frame); *this_cache = cache; - TRY + try { aarch64_make_prologue_cache_1 (this_frame, cache); } - CATCH (ex, RETURN_MASK_ERROR) + catch (const gdb_exception_error &ex) { if (ex.error != NOT_AVAILABLE_ERROR) - throw_exception (ex); + throw; } - END_CATCH return cache; } @@ -948,7 +945,7 @@ aarch64_prologue_prev_register (struct frame_info *this_frame, if (tdep->has_pauth () && trad_frame_value_p (cache->saved_regs, tdep->pauth_ra_state_regnum)) - lr = aarch64_frame_unmask_address (tdep, this_frame, lr); + lr = aarch64_frame_unmask_lr (tdep, this_frame, lr); return frame_unwind_got_constant (this_frame, prev_regnum, lr); } @@ -1004,19 +1001,18 @@ aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache) cache->saved_regs = trad_frame_alloc_saved_regs (this_frame); *this_cache = cache; - TRY + try { cache->prev_sp = get_frame_register_unsigned (this_frame, AARCH64_SP_REGNUM); cache->prev_pc = get_frame_pc (this_frame); cache->available_p = 1; } - CATCH (ex, RETURN_MASK_ERROR) + catch (const gdb_exception_error &ex) { if (ex.error != NOT_AVAILABLE_ERROR) - throw_exception (ex); + throw; } - END_CATCH return cache; } @@ -1116,7 +1112,7 @@ aarch64_dwarf2_prev_register (struct frame_info *this_frame, { case AARCH64_PC_REGNUM: lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM); - lr = aarch64_frame_unmask_address (tdep, this_frame, lr); + lr = aarch64_frame_unmask_lr (tdep, this_frame, lr); return frame_unwind_got_constant (this_frame, regnum, lr); default: @@ -1178,8 +1174,12 @@ aarch64_execute_dwarf_cfa_vendor_op (struct gdbarch *gdbarch, gdb_byte op, struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch); struct dwarf2_frame_state_reg *ra_state; - if (tdep->has_pauth () && op == DW_CFA_AARCH64_negate_ra_state) + if (op == DW_CFA_AARCH64_negate_ra_state) { + /* On systems without pauth, treat as a nop. */ + if (!tdep->has_pauth ()) + return true; + /* Allocate RA_STATE column if it's not allocated yet. */ fs->regs.alloc_regs (AARCH64_DWARF_PAUTH_RA_STATE + 1); @@ -1201,10 +1201,43 @@ aarch64_execute_dwarf_cfa_vendor_op (struct gdbarch *gdbarch, gdb_byte op, return false; } +/* Used for matching BRK instructions for AArch64. */ +static constexpr uint32_t BRK_INSN_MASK = 0xffe0001f; +static constexpr uint32_t BRK_INSN_BASE = 0xd4200000; + +/* Implementation of gdbarch_program_breakpoint_here_p for aarch64. */ + +static bool +aarch64_program_breakpoint_here_p (gdbarch *gdbarch, CORE_ADDR address) +{ + const uint32_t insn_len = 4; + gdb_byte target_mem[4]; + + /* Enable the automatic memory restoration from breakpoints while + we read the memory. Otherwise we may find temporary breakpoints, ones + inserted by GDB, and flag them as permanent breakpoints. 
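
   As a worked example of the check below: "brk #0" encodes to
   0xd4200000 and a hypothetical "brk #0x3e8" to 0xd4207d00, the imm16
   living in bits [20:5]; both satisfy
   (insn & BRK_INSN_MASK) == BRK_INSN_BASE, since the mask clears the
   immediate field.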
*/ + scoped_restore restore_memory + = make_scoped_restore_show_memory_breakpoints (0); + + if (target_read_memory (address, target_mem, insn_len) == 0) + { + uint32_t insn = + (uint32_t) extract_unsigned_integer (target_mem, insn_len, + gdbarch_byte_order_for_code (gdbarch)); + + /* Check if INSN is a BRK instruction pattern. There are multiple choices + of such instructions with different immediate values. Different OS' + may use a different variation, but they have the same outcome. */ + return ((insn & BRK_INSN_MASK) == BRK_INSN_BASE); + } + + return false; +} + /* When arguments must be pushed onto the stack, they go on in reverse order. The code below implements a FILO (stack) to do this. */ -typedef struct +struct stack_item_t { /* Value to pass on stack. It can be NULL if this item is for stack padding. */ @@ -1212,66 +1245,27 @@ typedef struct /* Size in bytes of value to pass on stack. */ int len; -} stack_item_t; - -DEF_VEC_O (stack_item_t); +}; -/* Return the alignment (in bytes) of the given type. */ +/* Implement the gdbarch type alignment method, overrides the generic + alignment algorithm for anything that is aarch64 specific. */ -static int -aarch64_type_align (struct type *t) +static ULONGEST +aarch64_type_align (gdbarch *gdbarch, struct type *t) { - int n; - int align; - int falign; - t = check_typedef (t); - switch (TYPE_CODE (t)) + if (t->code () == TYPE_CODE_ARRAY && TYPE_VECTOR (t)) { - default: - /* Should never happen. */ - internal_error (__FILE__, __LINE__, _("unknown type alignment")); - return 4; - - case TYPE_CODE_PTR: - case TYPE_CODE_ENUM: - case TYPE_CODE_INT: - case TYPE_CODE_FLT: - case TYPE_CODE_SET: - case TYPE_CODE_RANGE: - case TYPE_CODE_BITSTRING: - case TYPE_CODE_REF: - case TYPE_CODE_RVALUE_REF: - case TYPE_CODE_CHAR: - case TYPE_CODE_BOOL: - return TYPE_LENGTH (t); - - case TYPE_CODE_ARRAY: - if (TYPE_VECTOR (t)) - { - /* Use the natural alignment for vector types (the same for - scalar type), but the maximum alignment is 128-bit. */ - if (TYPE_LENGTH (t) > 16) - return 16; - else - return TYPE_LENGTH (t); - } + /* Use the natural alignment for vector types (the same for + scalar type), but the maximum alignment is 128-bit. */ + if (TYPE_LENGTH (t) > 16) + return 16; else - return aarch64_type_align (TYPE_TARGET_TYPE (t)); - case TYPE_CODE_COMPLEX: - return aarch64_type_align (TYPE_TARGET_TYPE (t)); - - case TYPE_CODE_STRUCT: - case TYPE_CODE_UNION: - align = 1; - for (n = 0; n < TYPE_NFIELDS (t); n++) - { - falign = aarch64_type_align (TYPE_FIELD_TYPE (t, n)); - if (falign > align) - align = falign; - } - return align; + return TYPE_LENGTH (t); } + + /* Allow the common code to calculate the alignment. */ + return 0; } /* Worker function for aapcs_is_vfp_call_or_return_candidate. 
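
   As a sketch, a hypothetical type

     struct hfa { float x, y, z; };

   comes back with *fundamental_type set to float and a count of 3 (a
   homogeneous floating-point aggregate), so it can be passed in three
   consecutive V registers; mixing in a double or growing past four
   members makes the classification fail and the value falls back to
   the integer rules.  */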
@@ -1289,7 +1283,7 @@ aapcs_is_vfp_call_or_return_candidate_1 (struct type *type, if (type == nullptr) return -1; - switch (TYPE_CODE (type)) + switch (type->code ()) { case TYPE_CODE_FLT: if (TYPE_LENGTH (type) > 16) @@ -1298,7 +1292,7 @@ aapcs_is_vfp_call_or_return_candidate_1 (struct type *type, if (*fundamental_type == nullptr) *fundamental_type = type; else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type) - || TYPE_CODE (type) != TYPE_CODE (*fundamental_type)) + || type->code () != (*fundamental_type)->code ()) return -1; return 1; @@ -1312,7 +1306,7 @@ aapcs_is_vfp_call_or_return_candidate_1 (struct type *type, if (*fundamental_type == nullptr) *fundamental_type = target_type; else if (TYPE_LENGTH (target_type) != TYPE_LENGTH (*fundamental_type) - || TYPE_CODE (target_type) != TYPE_CODE (*fundamental_type)) + || target_type->code () != (*fundamental_type)->code ()) return -1; return 2; @@ -1328,7 +1322,7 @@ aapcs_is_vfp_call_or_return_candidate_1 (struct type *type, if (*fundamental_type == nullptr) *fundamental_type = type; else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type) - || TYPE_CODE (type) != TYPE_CODE (*fundamental_type)) + || type->code () != (*fundamental_type)->code ()) return -1; return 1; @@ -1352,10 +1346,10 @@ aapcs_is_vfp_call_or_return_candidate_1 (struct type *type, { int count = 0; - for (int i = 0; i < TYPE_NFIELDS (type); i++) + for (int i = 0; i < type->num_fields (); i++) { /* Ignore any static fields. */ - if (field_is_static (&TYPE_FIELD (type, i))) + if (field_is_static (&type->field (i))) continue; struct type *member = check_typedef (TYPE_FIELD_TYPE (type, i)); @@ -1427,26 +1421,26 @@ aapcs_is_vfp_call_or_return_candidate (struct type *type, int *count, struct aarch64_call_info { /* the current argument number. */ - unsigned argnum; + unsigned argnum = 0; /* The next general purpose register number, equivalent to NGRN as described in the AArch64 Procedure Call Standard. */ - unsigned ngrn; + unsigned ngrn = 0; /* The next SIMD and floating point register number, equivalent to NSRN as described in the AArch64 Procedure Call Standard. */ - unsigned nsrn; + unsigned nsrn = 0; /* The next stacked argument address, equivalent to NSAA as described in the AArch64 Procedure Call Standard. */ - unsigned nsaa; + unsigned nsaa = 0; /* Stack item vector. */ - VEC(stack_item_t) *si; + std::vector si; }; /* Pass a value in a sequence of consecutive X registers. The caller - is responsbile for ensuring sufficient registers are available. */ + is responsible for ensuring sufficient registers are available. */ static void pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache, @@ -1455,7 +1449,7 @@ pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache, { enum bfd_endian byte_order = gdbarch_byte_order (gdbarch); int len = TYPE_LENGTH (type); - enum type_code typecode = TYPE_CODE (type); + enum type_code typecode = type->code (); int regnum = AARCH64_X0_REGNUM + info->ngrn; const bfd_byte *buf = value_contents (arg); @@ -1538,7 +1532,7 @@ pass_on_stack (struct aarch64_call_info *info, struct type *type, info->argnum++; - align = aarch64_type_align (type); + align = type_align (type); /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the Natural alignment of the argument's type. 
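
   A worked example with made-up numbers: a 12-byte struct of natural
   alignment 4 uses align 8 here, so after nsaa += 12 the code below
   pushes a 4-byte padding stack_item_t (data == NULL) to bring nsaa
   back up to a multiple of 8.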
*/ @@ -1556,7 +1550,7 @@ pass_on_stack (struct aarch64_call_info *info, struct type *type, item.len = len; item.data = buf; - VEC_safe_push (stack_item_t, info->si, &item); + info->si.push_back (item); info->nsaa += len; if (info->nsaa & (align - 1)) @@ -1567,7 +1561,7 @@ pass_on_stack (struct aarch64_call_info *info, struct type *type, item.len = pad; item.data = NULL; - VEC_safe_push (stack_item_t, info->si, &item); + info->si.push_back (item); info->nsaa += pad; } } @@ -1606,7 +1600,7 @@ pass_in_v_vfp_candidate (struct gdbarch *gdbarch, struct regcache *regcache, struct aarch64_call_info *info, struct type *arg_type, struct value *arg) { - switch (TYPE_CODE (arg_type)) + switch (arg_type->code ()) { case TYPE_CODE_FLT: return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type), @@ -1634,10 +1628,10 @@ pass_in_v_vfp_candidate (struct gdbarch *gdbarch, struct regcache *regcache, case TYPE_CODE_STRUCT: case TYPE_CODE_UNION: - for (int i = 0; i < TYPE_NFIELDS (arg_type); i++) + for (int i = 0; i < arg_type->num_fields (); i++) { /* Don't include static fields. */ - if (field_is_static (&TYPE_FIELD (arg_type, i))) + if (field_is_static (&arg_type->field (i))) continue; struct value *field = value_primitive_field (arg, 0, i, arg_type); @@ -1667,8 +1661,6 @@ aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function, int argnum; struct aarch64_call_info info; - memset (&info, 0, sizeof (info)); - /* We need to know what the type of the called function is in order to determine the number of named/anonymous arguments for the actual argument placement, and the return type in order to handle @@ -1744,7 +1736,7 @@ aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function, continue; } - switch (TYPE_CODE (arg_type)) + switch (arg_type->code ()) { case TYPE_CODE_INT: case TYPE_CODE_BOOL: @@ -1797,18 +1789,16 @@ aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function, if (info.nsaa & 15) sp -= 16 - (info.nsaa & 15); - while (!VEC_empty (stack_item_t, info.si)) + while (!info.si.empty ()) { - stack_item_t *si = VEC_last (stack_item_t, info.si); + const stack_item_t &si = info.si.back (); - sp -= si->len; - if (si->data != NULL) - write_memory (sp, si->data, si->len); - VEC_pop (stack_item_t, info.si); + sp -= si.len; + if (si.data != NULL) + write_memory (sp, si.data, si.len); + info.si.pop_back (); } - VEC_free (stack_item_t, info.si); - /* Finally, update the SP register. */ regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp); @@ -1926,6 +1916,9 @@ aarch64_vnh_type (struct gdbarch *gdbarch) t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh", TYPE_CODE_UNION); + elem = builtin_type (gdbarch)->builtin_half; + append_composite_type_field (t, "f", elem); + elem = builtin_type (gdbarch)->builtin_uint16; append_composite_type_field (t, "u", elem); @@ -1974,7 +1967,7 @@ aarch64_vnv_type (struct gdbarch *gdbarch) if (tdep->vnv_type == NULL) { - /* The other AArch64 psuedo registers (Q,D,H,S,B) refer to a single value + /* The other AArch64 pseudo registers (Q,D,H,S,B) refer to a single value slice from the non-pseudo vector registers. However NEON V registers are always vector registers, and need constructing as such. 
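
   The constructed type is a union of per-lane views of the whole
   128-bit Vn register; as a usage sketch, "print $v0.s.f[2]" reads
   lane 2 of the four single-precision view, and with the "f" field
   added to the h union below, "print $v0.h.f[0]" reads the low
   half-precision lane.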
*/ const struct builtin_type *bt = builtin_type (gdbarch); @@ -2004,6 +1997,8 @@ aarch64_vnv_type (struct gdbarch *gdbarch) sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh", TYPE_CODE_UNION); + append_composite_type_field (sub, "f", + init_vector_type (bt->builtin_half, 8)); append_composite_type_field (sub, "u", init_vector_type (bt->builtin_uint16, 8)); append_composite_type_field (sub, "s", @@ -2125,12 +2120,12 @@ aarch64_extract_return_value (struct type *type, struct regcache *regs, valbuf += len; } } - else if (TYPE_CODE (type) == TYPE_CODE_INT - || TYPE_CODE (type) == TYPE_CODE_CHAR - || TYPE_CODE (type) == TYPE_CODE_BOOL - || TYPE_CODE (type) == TYPE_CODE_PTR + else if (type->code () == TYPE_CODE_INT + || type->code () == TYPE_CODE_CHAR + || type->code () == TYPE_CODE_BOOL + || type->code () == TYPE_CODE_PTR || TYPE_IS_REFERENCE (type) - || TYPE_CODE (type) == TYPE_CODE_ENUM) + || type->code () == TYPE_CODE_ENUM) { /* If the type is a plain integer, then the access is straight-forward. Otherwise we have to play around a bit @@ -2238,12 +2233,12 @@ aarch64_store_return_value (struct type *type, struct regcache *regs, valbuf += len; } } - else if (TYPE_CODE (type) == TYPE_CODE_INT - || TYPE_CODE (type) == TYPE_CODE_CHAR - || TYPE_CODE (type) == TYPE_CODE_BOOL - || TYPE_CODE (type) == TYPE_CODE_PTR + else if (type->code () == TYPE_CODE_INT + || type->code () == TYPE_CODE_CHAR + || type->code () == TYPE_CODE_BOOL + || type->code () == TYPE_CODE_PTR || TYPE_IS_REFERENCE (type) - || TYPE_CODE (type) == TYPE_CODE_ENUM) + || type->code () == TYPE_CODE_ENUM) { if (TYPE_LENGTH (type) <= X_REGISTER_SIZE) { @@ -2299,9 +2294,9 @@ aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value, gdb_byte *readbuf, const gdb_byte *writebuf) { - if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT - || TYPE_CODE (valtype) == TYPE_CODE_UNION - || TYPE_CODE (valtype) == TYPE_CODE_ARRAY) + if (valtype->code () == TYPE_CODE_STRUCT + || valtype->code () == TYPE_CODE_UNION + || valtype->code () == TYPE_CODE_ARRAY) { if (aarch64_return_in_memory (gdbarch, valtype)) { @@ -2769,13 +2764,14 @@ aarch64_software_single_step (struct regcache *regcache) return next_pcs; } -struct aarch64_displaced_step_closure : public displaced_step_closure +struct aarch64_displaced_step_copy_insn_closure : public displaced_step_copy_insn_closure { /* It is true when condition instruction, such as B.CON, TBZ, etc, is being displaced stepping. */ - int cond = 0; + bool cond = false; - /* PC adjustment offset after displaced stepping. */ + /* PC adjustment offset after displaced stepping. If 0, then we don't + write the PC back, assuming the PC is already the right address. */ int32_t pc_adjust = 0; }; @@ -2788,13 +2784,13 @@ struct aarch64_displaced_step_data /* The address where the instruction will be executed at. */ CORE_ADDR new_addr; /* Buffer of instructions to be copied to NEW_ADDR to execute. */ - uint32_t insn_buf[DISPLACED_MODIFIED_INSNS]; + uint32_t insn_buf[AARCH64_DISPLACED_MODIFIED_INSNS]; /* Number of instructions in INSN_BUF. */ unsigned insn_count; /* Registers when doing displaced stepping. */ struct regcache *regs; - aarch64_displaced_step_closure *dsc; + aarch64_displaced_step_copy_insn_closure *dsc; }; /* Implementation of aarch64_insn_visitor method "b". 
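
   Broadly, each visitor below either re-encodes a PC-relative
   instruction so that it still works from the scratch pad at
   NEW_ADDR, or records in dsd->dsc->pc_adjust how to fix the PC
   afterwards.  Sketch with made-up addresses: a "b" at insn_addr
   0x1000 targeting 0x1040, copied to 0x2000, can be re-emitted with
   offset 0x1000 - 0x2000 + 0x40 so that it still reaches 0x1040.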
*/ @@ -2853,7 +2849,7 @@ aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset, */ emit_bcond (dsd->insn_buf, cond, 8); - dsd->dsc->cond = 1; + dsd->dsc->cond = true; dsd->dsc->pc_adjust = offset; dsd->insn_count = 1; } @@ -2888,7 +2884,7 @@ aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz, */ emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8); dsd->insn_count = 1; - dsd->dsc->cond = 1; + dsd->dsc->cond = true; dsd->dsc->pc_adjust = offset; } @@ -2913,7 +2909,7 @@ aarch64_displaced_step_tb (const int32_t offset, int is_tbnz, */ emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8); dsd->insn_count = 1; - dsd->dsc->cond = 1; + dsd->dsc->cond = true; dsd->dsc->pc_adjust = offset; } @@ -3003,7 +2999,7 @@ static const struct aarch64_insn_visitor visitor = /* Implement the "displaced_step_copy_insn" gdbarch method. */ -struct displaced_step_closure * +displaced_step_copy_insn_closure_up aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch, CORE_ADDR from, CORE_ADDR to, struct regcache *regs) @@ -3023,8 +3019,8 @@ aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch, return NULL; } - std::unique_ptr dsc - (new aarch64_displaced_step_closure); + std::unique_ptr dsc + (new aarch64_displaced_step_copy_insn_closure); dsd.base.insn_addr = from; dsd.new_addr = to; dsd.regs = regs; @@ -3032,7 +3028,7 @@ aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch, dsd.insn_count = 0; aarch64_relocate_instruction (insn, &visitor, (struct aarch64_insn_data *) &dsd); - gdb_assert (dsd.insn_count <= DISPLACED_MODIFIED_INSNS); + gdb_assert (dsd.insn_count <= AARCH64_DISPLACED_MODIFIED_INSNS); if (dsd.insn_count != 0) { @@ -3057,24 +3053,34 @@ aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch, dsc = NULL; } - return dsc.release (); + /* This is a work around for a problem with g++ 4.8. */ + return displaced_step_copy_insn_closure_up (dsc.release ()); } /* Implement the "displaced_step_fixup" gdbarch method. */ void aarch64_displaced_step_fixup (struct gdbarch *gdbarch, - struct displaced_step_closure *dsc_, + struct displaced_step_copy_insn_closure *dsc_, CORE_ADDR from, CORE_ADDR to, struct regcache *regs) { - aarch64_displaced_step_closure *dsc = (aarch64_displaced_step_closure *) dsc_; + aarch64_displaced_step_copy_insn_closure *dsc = (aarch64_displaced_step_copy_insn_closure *) dsc_; + + ULONGEST pc; + + regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc); + + if (debug_displaced) + debug_printf ("Displaced: PC after stepping: %s (was %s).\n", + paddress (gdbarch, pc), paddress (gdbarch, to)); if (dsc->cond) { - ULONGEST pc; + if (debug_displaced) + debug_printf ("Displaced: [Conditional] pc_adjust before: %d\n", + dsc->pc_adjust); - regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc); if (pc - to == 8) { /* Condition is true. */ @@ -3086,13 +3092,35 @@ aarch64_displaced_step_fixup (struct gdbarch *gdbarch, } else gdb_assert_not_reached ("Unexpected PC value after displaced stepping"); + + if (debug_displaced) + debug_printf ("Displaced: [Conditional] pc_adjust after: %d\n", + dsc->pc_adjust); } + if (debug_displaced) + debug_printf ("Displaced: %s PC by %d\n", + dsc->pc_adjust? "adjusting" : "not adjusting", + dsc->pc_adjust); + + if (dsc->pc_adjust != 0) { + /* Make sure the previous instruction was executed (that is, the PC + has changed). If the PC didn't change, then discard the adjustment + offset. Otherwise we may skip an instruction before its execution + took place. 
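
   Worked example with made-up addresses: a b.eq at from == 0x1000
   with pc_adjust == 0x40 is staged at to == 0x2000 as "b.eq #8";
   pc == 0x2008 after the step means the condition held, so the PC
   becomes 0x1000 + 0x40, while pc == 0x2004 means it failed and
   pc_adjust gets rewritten to 4.  And if pc == to == 0x2000, the
   copy never ran, which is the case this check guards against.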
*/ + if ((pc - to) == 0) + { + if (debug_displaced) + debug_printf ("Displaced: PC did not move. Discarding PC " + "adjustment.\n"); + dsc->pc_adjust = 0; + } + if (debug_displaced) { - debug_printf ("displaced: fixup: set PC to %s:%d\n", + debug_printf ("Displaced: fixup: set PC to %s:%d\n", paddress (gdbarch, from), dsc->pc_adjust); } regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM, @@ -3104,7 +3132,7 @@ aarch64_displaced_step_fixup (struct gdbarch *gdbarch, int aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch, - struct displaced_step_closure *closure) + struct displaced_step_copy_insn_closure *closure) { return 1; } @@ -3190,36 +3218,53 @@ aarch64_cannot_store_register (struct gdbarch *gdbarch, int regnum) static struct gdbarch * aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches) { - struct gdbarch_tdep *tdep; - struct gdbarch *gdbarch; - struct gdbarch_list *best_arch; - struct tdesc_arch_data *tdesc_data = NULL; - const struct target_desc *tdesc = info.target_desc; - int i; - int valid_p = 1; - const struct tdesc_feature *feature_core; - const struct tdesc_feature *feature_fpu; - const struct tdesc_feature *feature_sve; + const struct tdesc_feature *feature_core, *feature_fpu, *feature_sve; const struct tdesc_feature *feature_pauth; - int num_regs = 0; - int num_pseudo_regs = 0; - int first_pauth_regnum = -1; - int pauth_ra_state_offset = -1; + bool valid_p = true; + int i, num_regs = 0, num_pseudo_regs = 0; + int first_pauth_regnum = -1, pauth_ra_state_offset = -1; + + /* Use the vector length passed via the target info. Here -1 is used for no + SVE, and 0 is unset. If unset then use the vector length from the existing + tdesc. */ + uint64_t vq = 0; + if (info.id == (int *) -1) + vq = 0; + else if (info.id != 0) + vq = (uint64_t) info.id; + else + vq = aarch64_get_tdesc_vq (info.target_desc); - /* Ensure we always have a target description. */ - if (!tdesc_has_registers (tdesc)) - tdesc = aarch64_read_description (0, false); + if (vq > AARCH64_MAX_SVE_VQ) + internal_error (__FILE__, __LINE__, _("VQ out of bounds: %s (max %d)"), + pulongest (vq), AARCH64_MAX_SVE_VQ); + + /* If there is already a candidate, use it. */ + for (gdbarch_list *best_arch = gdbarch_list_lookup_by_info (arches, &info); + best_arch != nullptr; + best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info)) + { + struct gdbarch_tdep *tdep = gdbarch_tdep (best_arch->gdbarch); + if (tdep && tdep->vq == vq) + return best_arch->gdbarch; + } + + /* Ensure we always have a target descriptor, and that it is for the given VQ + value. */ + const struct target_desc *tdesc = info.target_desc; + if (!tdesc_has_registers (tdesc) || vq != aarch64_get_tdesc_vq (tdesc)) + tdesc = aarch64_read_description (vq, false); gdb_assert (tdesc); - feature_core = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core"); + feature_core = tdesc_find_feature (tdesc,"org.gnu.gdb.aarch64.core"); feature_fpu = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu"); feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve"); feature_pauth = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.pauth"); - if (feature_core == NULL) - return NULL; + if (feature_core == nullptr) + return nullptr; - tdesc_data = tdesc_data_alloc (); + struct tdesc_arch_data *tdesc_data = tdesc_data_alloc (); /* Validate the description provides the mandatory core R registers and allocate their numbers. 
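
   (On the VQ handling above: VQ counts the 128-bit quadwords in an
   SVE Z register, so e.g. vq == 4 describes 512-bit Z registers and a
   64-byte vector length, while vq == 0 means "take it from the target
   description".)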
*/ @@ -3231,9 +3276,9 @@ aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches) num_regs = AARCH64_X0_REGNUM + i; /* Add the V registers. */ - if (feature_fpu != NULL) + if (feature_fpu != nullptr) { - if (feature_sve != NULL) + if (feature_sve != nullptr) error (_("Program contains both fpu and SVE features.")); /* Validate the description provides the mandatory V registers @@ -3247,7 +3292,7 @@ aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches) } /* Add the SVE registers. */ - if (feature_sve != NULL) + if (feature_sve != nullptr) { /* Validate the description provides the mandatory SVE registers and allocate their numbers. */ @@ -3260,7 +3305,7 @@ aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches) num_pseudo_regs += 32; /* add the Vn register pseudos. */ } - if (feature_fpu != NULL || feature_sve != NULL) + if (feature_fpu != nullptr || feature_sve != nullptr) { num_pseudo_regs += 32; /* add the Qn scalar register pseudos */ num_pseudo_regs += 32; /* add the Dn scalar register pseudos */ @@ -3288,41 +3333,24 @@ aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches) if (!valid_p) { tdesc_data_cleanup (tdesc_data); - return NULL; + return nullptr; } /* AArch64 code is always little-endian. */ info.byte_order_for_code = BFD_ENDIAN_LITTLE; - /* If there is already a candidate, use it. */ - for (best_arch = gdbarch_list_lookup_by_info (arches, &info); - best_arch != NULL; - best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info)) - { - /* Found a match. */ - break; - } - - if (best_arch != NULL) - { - if (tdesc_data != NULL) - tdesc_data_cleanup (tdesc_data); - return best_arch->gdbarch; - } - - tdep = XCNEW (struct gdbarch_tdep); - gdbarch = gdbarch_alloc (&info, tdep); + struct gdbarch_tdep *tdep = XCNEW (struct gdbarch_tdep); + struct gdbarch *gdbarch = gdbarch_alloc (&info, tdep); /* This should be low enough for everything. */ tdep->lowest_pc = 0x20; tdep->jb_pc = -1; /* Longjump support not enabled by default. */ tdep->jb_elt_size = 8; - tdep->vq = aarch64_get_tdesc_vq (tdesc); + tdep->vq = vq; tdep->pauth_reg_base = first_pauth_regnum; tdep->pauth_ra_state_regnum = (feature_pauth == NULL) ? -1 : pauth_ra_state_offset + num_regs; - set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call); set_gdbarch_frame_align (gdbarch, aarch64_frame_align); @@ -3368,6 +3396,7 @@ aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches) set_gdbarch_float_format (gdbarch, floatformats_ieee_single); set_gdbarch_double_format (gdbarch, floatformats_ieee_double); set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad); + set_gdbarch_type_align (gdbarch, aarch64_type_align); /* Internal <-> external register number maps. */ set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum); @@ -3394,6 +3423,10 @@ aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches) set_gdbarch_execute_dwarf_cfa_vendor_op (gdbarch, aarch64_execute_dwarf_cfa_vendor_op); + /* Permanent/Program breakpoint handling. */ + set_gdbarch_program_breakpoint_here_p (gdbarch, + aarch64_program_breakpoint_here_p); + /* Add some default predicates. 
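
   (A note on the type_align hook installed above: returning 0 from
   aarch64_type_align for anything but vectors defers to the common
   type_align code, so e.g. a hypothetical 32-byte vector still caps
   at 16-byte alignment here while ordinary structs keep the generic
   rules.)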
*/ frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind); dwarf2_append_unwinders (gdbarch); @@ -3409,6 +3442,8 @@ aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches) set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address); + set_gdbarch_get_pc_address_flags (gdbarch, aarch64_get_pc_address_flags); + tdesc_use_registers (gdbarch, tdesc, tdesc_data); /* Add standard register aliases. */ @@ -3441,8 +3476,9 @@ static void aarch64_process_record_test (void); } #endif +void _initialize_aarch64_tdep (); void -_initialize_aarch64_tdep (void) +_initialize_aarch64_tdep () { gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init, aarch64_dump_tdep); @@ -3461,8 +3497,6 @@ When on, AArch64 specific debugging is enabled."), selftests::aarch64_analyze_prologue_test); selftests::register_test ("aarch64-process-record", selftests::aarch64_process_record_test); - selftests::record_xml_tdesc ("aarch64.xml", - aarch64_create_target_description (0, false)); #endif } @@ -3580,7 +3614,7 @@ aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r) } else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06) { - /* CConditional select. */ + /* Conditional select. */ /* Data-processing (2 source). */ /* Data-processing (1 source). */ record_buf[0] = reg_rd;
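      /* (Usage sketch for the selftests registered in
         _initialize_aarch64_tdep above: in a development build,
         "maintenance selftest aarch64" runs the aarch64-analyze-prologue
         and aarch64-process-record checks from the gdb prompt.)  */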