X-Git-Url: http://git.efficios.com/?a=blobdiff_plain;f=gdb%2Faarch64-tdep.c;h=1d5fb2001d38c9ff1965b49882fe1603f1991c4a;hb=228c8f4be0c428369ec6b68e25696863d1e62ed7;hp=cb185ee337fe54ff65722a3fc01f2c60200241e0;hpb=4da037ef9dba6c17089250d228efdbe6f7d830c9;p=deliverable%2Fbinutils-gdb.git

diff --git a/gdb/aarch64-tdep.c b/gdb/aarch64-tdep.c
index cb185ee337..1d5fb2001d 100644
--- a/gdb/aarch64-tdep.c
+++ b/gdb/aarch64-tdep.c
@@ -21,7 +21,6 @@
 #include "defs.h"
 
 #include "frame.h"
-#include "inferior.h"
 #include "gdbcmd.h"
 #include "gdbcore.h"
 #include "dis-asm.h"
@@ -40,23 +39,16 @@
 #include "prologue-value.h"
 #include "target-descriptions.h"
 #include "user-regs.h"
-#include "language.h"
-#include "infcall.h"
-#include "ax.h"
 #include "ax-gdb.h"
-#include "common/selftest.h"
+#include "gdbsupport/selftest.h"
 
 #include "aarch64-tdep.h"
 #include "aarch64-ravenscar-thread.h"
 
-#include "elf-bfd.h"
-#include "elf/aarch64.h"
-
-#include "common/vec.h"
-
 #include "record.h"
 #include "record-full.h"
 #include "arch/aarch64-insn.h"
+#include "gdbarch.h"
 
 #include "opcode/aarch64.h"
 #include <algorithm>
@@ -249,13 +241,13 @@ class instruction_reader : public abstract_instruction_reader
 
 } // namespace
 
-/* If address signing is enabled, mask off the signature bits from ADDR, using
-   the register values in THIS_FRAME.  */
+/* If address signing is enabled, mask off the signature bits from the link
+   register, which is passed by value in ADDR, using the register values in
+   THIS_FRAME.  */
 
 static CORE_ADDR
-aarch64_frame_unmask_address (struct gdbarch_tdep *tdep,
-                              struct frame_info *this_frame,
-                              CORE_ADDR addr)
+aarch64_frame_unmask_lr (struct gdbarch_tdep *tdep,
+                         struct frame_info *this_frame, CORE_ADDR addr)
 {
   if (tdep->has_pauth ()
       && frame_unwind_register_unsigned (this_frame,
@@ -264,11 +256,25 @@ aarch64_frame_unmask_address (struct gdbarch_tdep *tdep,
       int cmask_num = AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base);
       CORE_ADDR cmask = frame_unwind_register_unsigned (this_frame, cmask_num);
       addr = addr & ~cmask;
+
+      /* Record in the frame that the link register required unmasking.  */
+      set_frame_previous_pc_masked (this_frame);
     }
 
   return addr;
 }
 
+/* Implement the "get_pc_address_flags" gdbarch method.  */
+
+static std::string
+aarch64_get_pc_address_flags (frame_info *frame, CORE_ADDR pc)
+{
+  if (pc != 0 && get_frame_pc_masked (frame))
+    return "PAC";
+
+  return "";
+}
+
 /* Analyze a prologue, looking for a recognizable stack frame
    and frame pointer.  Scan until we encounter a store that could
    clobber the stack frame unexpectedly, or an unknown instruction.  */
@@ -383,17 +389,16 @@ aarch64_analyze_prologue (struct gdbarch *gdbarch,
         {
           unsigned rt = inst.operands[0].reg.regno;
           unsigned rn = inst.operands[1].addr.base_regno;
-          int is64
-            = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);
+          int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);
 
           gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
           gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
           gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
           gdb_assert (!inst.operands[1].addr.offset.is_reg);
 
-          stack.store (pv_add_constant (regs[rn],
-                                        inst.operands[1].addr.offset.imm),
-                       is64 ? 8 : 4, regs[rt]);
+          stack.store
+            (pv_add_constant (regs[rn], inst.operands[1].addr.offset.imm),
+             size, regs[rt]);
         }
       else if ((inst.opcode->iclass == ldstpair_off
                 || (inst.opcode->iclass == ldstpair_indexed
@@ -405,6 +410,7 @@ aarch64_analyze_prologue (struct gdbarch *gdbarch,
           unsigned rt2;
           unsigned rn = inst.operands[2].addr.base_regno;
           int32_t imm = inst.operands[2].addr.offset.imm;
+          int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);
 
           gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
                       || inst.operands[0].type == AARCH64_OPND_Ft);
@@ -426,17 +432,12 @@ aarch64_analyze_prologue (struct gdbarch *gdbarch,
           rt2 = inst.operands[1].reg.regno;
           if (inst.operands[0].type == AARCH64_OPND_Ft)
             {
-              /* Only bottom 64-bit of each V register (D register) need
-                 to be preserved.  */
-              gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
               rt1 += AARCH64_X_REGISTER_COUNT;
               rt2 += AARCH64_X_REGISTER_COUNT;
             }
 
-          stack.store (pv_add_constant (regs[rn], imm), 8,
-                       regs[rt1]);
-          stack.store (pv_add_constant (regs[rn], imm + 8), 8,
-                       regs[rt2]);
+          stack.store (pv_add_constant (regs[rn], imm), size, regs[rt1]);
+          stack.store (pv_add_constant (regs[rn], imm + size), size, regs[rt2]);
 
           if (inst.operands[2].addr.writeback)
             regs[rn] = pv_add_constant (regs[rn], imm);
@@ -453,21 +454,14 @@ aarch64_analyze_prologue (struct gdbarch *gdbarch,
           unsigned int rt = inst.operands[0].reg.regno;
           int32_t imm = inst.operands[1].addr.offset.imm;
           unsigned int rn = inst.operands[1].addr.base_regno;
-          bool is64
-            = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);
+          int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);
 
           gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
                       || inst.operands[0].type == AARCH64_OPND_Ft);
 
           if (inst.operands[0].type == AARCH64_OPND_Ft)
-            {
-              /* Only bottom 64-bit of each V register (D register) need
-                 to be preserved.  */
-              gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
-              rt += AARCH64_X_REGISTER_COUNT;
-            }
+            rt += AARCH64_X_REGISTER_COUNT;
 
-          stack.store (pv_add_constant (regs[rn], imm),
-                       is64 ? 8 : 4, regs[rt]);
+          stack.store (pv_add_constant (regs[rn], imm), size, regs[rt]);
 
           if (inst.operands[1].addr.writeback)
             regs[rn] = pv_add_constant (regs[rn], imm);
@@ -951,7 +945,7 @@ aarch64_prologue_prev_register (struct frame_info *this_frame,
       if (tdep->has_pauth ()
           && trad_frame_value_p (cache->saved_regs,
                                  tdep->pauth_ra_state_regnum))
-        lr = aarch64_frame_unmask_address (tdep, this_frame, lr);
+        lr = aarch64_frame_unmask_lr (tdep, this_frame, lr);
 
       return frame_unwind_got_constant (this_frame, prev_regnum, lr);
     }
@@ -1118,7 +1112,7 @@ aarch64_dwarf2_prev_register (struct frame_info *this_frame,
     {
     case AARCH64_PC_REGNUM:
       lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
-      lr = aarch64_frame_unmask_address (tdep, this_frame, lr);
+      lr = aarch64_frame_unmask_lr (tdep, this_frame, lr);
       return frame_unwind_got_constant (this_frame, regnum, lr);
 
     default:
@@ -1180,8 +1174,12 @@ aarch64_execute_dwarf_cfa_vendor_op (struct gdbarch *gdbarch, gdb_byte op,
 {
   struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
   struct dwarf2_frame_state_reg *ra_state;
 
-  if (tdep->has_pauth () && op == DW_CFA_AARCH64_negate_ra_state)
+  if (op == DW_CFA_AARCH64_negate_ra_state)
     {
+      /* On systems without pauth, treat as a nop.  */
+      if (!tdep->has_pauth ())
+        return true;
+
       /* Allocate RA_STATE column if it's not allocated yet.  */
       fs->regs.alloc_regs (AARCH64_DWARF_PAUTH_RA_STATE + 1);
@@ -1206,7 +1204,7 @@ aarch64_execute_dwarf_cfa_vendor_op (struct gdbarch *gdbarch, gdb_byte op,
 /* When arguments must be pushed onto the stack, they go on in reverse
    order.  The code below implements a FILO (stack) to do this.  */
 
-typedef struct
+struct stack_item_t
 {
   /* Value to pass on stack.  It can be NULL if this item is for stack
      padding.  */
@@ -1214,66 +1212,27 @@ typedef struct
   /* Size in bytes of value to pass on stack.  */
   int len;
-} stack_item_t;
-
-DEF_VEC_O (stack_item_t);
+};
 
-/* Return the alignment (in bytes) of the given type.  */
+/* Implement the gdbarch type alignment method, overrides the generic
+   alignment algorithm for anything that is aarch64 specific.  */
 
-static int
-aarch64_type_align (struct type *t)
+static ULONGEST
+aarch64_type_align (gdbarch *gdbarch, struct type *t)
 {
-  int n;
-  int align;
-  int falign;
-
   t = check_typedef (t);
-  switch (TYPE_CODE (t))
+  if (TYPE_CODE (t) == TYPE_CODE_ARRAY && TYPE_VECTOR (t))
     {
-    default:
-      /* Should never happen.  */
-      internal_error (__FILE__, __LINE__, _("unknown type alignment"));
-      return 4;
-
-    case TYPE_CODE_PTR:
-    case TYPE_CODE_ENUM:
-    case TYPE_CODE_INT:
-    case TYPE_CODE_FLT:
-    case TYPE_CODE_SET:
-    case TYPE_CODE_RANGE:
-    case TYPE_CODE_BITSTRING:
-    case TYPE_CODE_REF:
-    case TYPE_CODE_RVALUE_REF:
-    case TYPE_CODE_CHAR:
-    case TYPE_CODE_BOOL:
-      return TYPE_LENGTH (t);
-
-    case TYPE_CODE_ARRAY:
-      if (TYPE_VECTOR (t))
-        {
-          /* Use the natural alignment for vector types (the same for
-             scalar type), but the maximum alignment is 128-bit.  */
-          if (TYPE_LENGTH (t) > 16)
-            return 16;
-          else
-            return TYPE_LENGTH (t);
-        }
+      /* Use the natural alignment for vector types (the same for
         scalar type), but the maximum alignment is 128-bit.  */
+      if (TYPE_LENGTH (t) > 16)
+        return 16;
       else
-        return aarch64_type_align (TYPE_TARGET_TYPE (t));
+        return TYPE_LENGTH (t);
-
-    case TYPE_CODE_COMPLEX:
-      return aarch64_type_align (TYPE_TARGET_TYPE (t));
-
-    case TYPE_CODE_STRUCT:
-    case TYPE_CODE_UNION:
-      align = 1;
-      for (n = 0; n < TYPE_NFIELDS (t); n++)
-        {
-          falign = aarch64_type_align (TYPE_FIELD_TYPE (t, n));
-          if (falign > align)
-            align = falign;
-        }
-      return align;
     }
+
+  /* Allow the common code to calculate the alignment.  */
+  return 0;
 }
 
 /* Worker function for aapcs_is_vfp_call_or_return_candidate.
@@ -1429,26 +1388,26 @@ aapcs_is_vfp_call_or_return_candidate (struct type *type, int *count,
 struct aarch64_call_info
 {
   /* the current argument number.  */
-  unsigned argnum;
+  unsigned argnum = 0;
 
   /* The next general purpose register number, equivalent to NGRN as
      described in the AArch64 Procedure Call Standard.  */
-  unsigned ngrn;
+  unsigned ngrn = 0;
 
   /* The next SIMD and floating point register number, equivalent to
      NSRN as described in the AArch64 Procedure Call Standard.  */
-  unsigned nsrn;
+  unsigned nsrn = 0;
 
   /* The next stacked argument address, equivalent to NSAA as
     described in the AArch64 Procedure Call Standard.  */
-  unsigned nsaa;
+  unsigned nsaa = 0;
 
   /* Stack item vector.  */
-  VEC(stack_item_t) *si;
+  std::vector<stack_item_t> si;
 };
 
 /* Pass a value in a sequence of consecutive X registers.  The caller
-   is responsbile for ensuring sufficient registers are available.  */
+   is responsible for ensuring sufficient registers are available.  */
 
 static void
 pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
@@ -1540,7 +1499,7 @@ pass_on_stack (struct aarch64_call_info *info, struct type *type,
 
   info->argnum++;
 
-  align = aarch64_type_align (type);
+  align = type_align (type);
 
   /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
      Natural alignment of the argument's type.  */
@@ -1558,7 +1517,7 @@ pass_on_stack (struct aarch64_call_info *info, struct type *type,
 
   item.len = len;
   item.data = buf;
-  VEC_safe_push (stack_item_t, info->si, &item);
+  info->si.push_back (item);
 
   info->nsaa += len;
   if (info->nsaa & (align - 1))
@@ -1569,7 +1528,7 @@ pass_on_stack (struct aarch64_call_info *info, struct type *type,
       item.len = pad;
       item.data = NULL;
-      VEC_safe_push (stack_item_t, info->si, &item);
+      info->si.push_back (item);
 
       info->nsaa += pad;
     }
 }
@@ -1669,8 +1628,6 @@ aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
   int argnum;
   struct aarch64_call_info info;
 
-  memset (&info, 0, sizeof (info));
-
   /* We need to know what the type of the called function is in order
      to determine the number of named/anonymous arguments for
     the actual argument placement, and the return type in order to
@@ -1799,18 +1756,16 @@ aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
 
   if (info.nsaa & 15)
     sp -= 16 - (info.nsaa & 15);
 
-  while (!VEC_empty (stack_item_t, info.si))
+  while (!info.si.empty ())
     {
-      stack_item_t *si = VEC_last (stack_item_t, info.si);
+      const stack_item_t &si = info.si.back ();
 
-      sp -= si->len;
-      if (si->data != NULL)
-        write_memory (sp, si->data, si->len);
-      VEC_pop (stack_item_t, info.si);
+      sp -= si.len;
+      if (si.data != NULL)
+        write_memory (sp, si.data, si.len);
+      info.si.pop_back ();
     }
 
-  VEC_free (stack_item_t, info.si);
-
   /* Finally, update the SP register.  */
   regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);
@@ -1928,6 +1883,9 @@ aarch64_vnh_type (struct gdbarch *gdbarch)
       t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
                                TYPE_CODE_UNION);
 
+      elem = builtin_type (gdbarch)->builtin_half;
+      append_composite_type_field (t, "f", elem);
+
       elem = builtin_type (gdbarch)->builtin_uint16;
       append_composite_type_field (t, "u", elem);
 
@@ -2006,6 +1964,8 @@ aarch64_vnv_type (struct gdbarch *gdbarch)
       sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
                                  TYPE_CODE_UNION);
 
+      append_composite_type_field (sub, "f",
+                                   init_vector_type (bt->builtin_half, 8));
       append_composite_type_field (sub, "u",
                                    init_vector_type (bt->builtin_uint16, 8));
       append_composite_type_field (sub, "s",
@@ -2790,7 +2750,7 @@ struct aarch64_displaced_step_data
   /* The address where the instruction will be executed at.  */
   CORE_ADDR new_addr;
   /* Buffer of instructions to be copied to NEW_ADDR to execute.  */
-  uint32_t insn_buf[DISPLACED_MODIFIED_INSNS];
+  uint32_t insn_buf[AARCH64_DISPLACED_MODIFIED_INSNS];
   /* Number of instructions in INSN_BUF.  */
   unsigned insn_count;
   /* Registers when doing displaced stepping.  */
@@ -3034,7 +2994,7 @@ aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
   dsd.insn_count = 0;
   aarch64_relocate_instruction (insn, &visitor,
                                 (struct aarch64_insn_data *) &dsd);
-  gdb_assert (dsd.insn_count <= DISPLACED_MODIFIED_INSNS);
+  gdb_assert (dsd.insn_count <= AARCH64_DISPLACED_MODIFIED_INSNS);
 
   if (dsd.insn_count != 0)
     {
@@ -3210,8 +3170,8 @@ aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
 
   vq = aarch64_get_tdesc_vq (info.target_desc);
   if (vq > AARCH64_MAX_SVE_VQ)
-    internal_error (__FILE__, __LINE__, _("VQ out of bounds: %ld (max %d)"),
-                    vq, AARCH64_MAX_SVE_VQ);
+    internal_error (__FILE__, __LINE__, _("VQ out of bounds: %s (max %d)"),
+                    pulongest (vq), AARCH64_MAX_SVE_VQ);
 
   /* If there is already a candidate, use it.  */
   for (gdbarch_list *best_arch = gdbarch_list_lookup_by_info (arches, &info);
@@ -3370,6 +3330,7 @@ aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
   set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
   set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
   set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);
+  set_gdbarch_type_align (gdbarch, aarch64_type_align);
 
   /* Internal <-> external register number maps.  */
   set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
@@ -3411,6 +3372,8 @@ aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
 
   set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);
 
+  set_gdbarch_get_pc_address_flags (gdbarch, aarch64_get_pc_address_flags);
+
   tdesc_use_registers (gdbarch, tdesc, tdesc_data);
 
   /* Add standard register aliases.  */
@@ -3463,8 +3426,6 @@ When on, AArch64 specific debugging is enabled."),
                            selftests::aarch64_analyze_prologue_test);
   selftests::register_test ("aarch64-process-record",
                             selftests::aarch64_process_record_test);
-  selftests::record_xml_tdesc ("aarch64.xml",
-                               aarch64_create_target_description (0, false));
 #endif
 }
 
@@ -3582,7 +3543,7 @@ aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
     }
   else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
    {
-      /* CConditional select.  */
+      /* Conditional select.  */
      /* Data-processing (2 source).  */
      /* Data-processing (1 source).  */
      record_buf[0] = reg_rd;
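
The two pauth hunks above work together: aarch64_frame_unmask_lr strips the
pointer-authentication signature from a saved link register with plain bit
arithmetic (addr & ~cmask, where cmask is unwound from the CMASK pseudo
register), and the new "get_pc_address_flags" method makes such frames
visible by tagging them "PAC" in backtraces.  The following standalone
sketch shows only that arithmetic; it is not part of the patch, and the
cmask and lr values are made up for illustration (in a live session they
come from the inferior):

#include <inttypes.h>
#include <stdio.h>

int main (void)
{
  /* Hypothetical CMASK: marks bits 48..54 as PAC signature bits.  */
  uint64_t cmask = 0x007f000000000000ULL;
  /* A signed LR value as it might be unwound from a frame.  */
  uint64_t lr = 0x002d0000004005b4ULL;

  /* The operation aarch64_frame_unmask_lr performs: clear every bit
     the mask covers, leaving the plain return address.  */
  uint64_t unmasked = lr & ~cmask;

  /* Prints 0x002d0000004005b4 -> 0x00000000004005b4.  */
  printf ("%#018" PRIx64 " -> %#018" PRIx64 "\n", lr, unmasked);
  return 0;
}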
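
The aarch64_type_align rewrite changes the hook's shape as well as its
body: it now answers only for the one aarch64-specific case (vector types,
capped at 16 bytes) and returns 0 to defer everything else to gdb's generic
type_align.  A minimal sketch of that contract, using stand-in types rather
than gdb's real struct type and gdbarch (all names here are hypothetical):

#include <stdio.h>

/* Stand-in for gdb's type model, just enough for the sketch.  */
struct type { int is_vector; unsigned long long length; };

/* Mirrors the patched aarch64_type_align: a non-zero result is an
   arch-specific override, 0 means "use the generic rule".  */
static unsigned long long
arch_type_align (struct type *t)
{
  if (t->is_vector)
    return t->length > 16 ? 16 : t->length;
  return 0;
}

/* Simplified generic type_align: ask the arch hook first, then fall
   back to a default rule (here, natural alignment).  */
static unsigned long long
type_align (struct type *t)
{
  unsigned long long align = arch_type_align (t);
  return align != 0 ? align : t->length;
}

int main (void)
{
  struct type v32 = { 1, 32 };  /* 32-byte vector: capped at 16.  */
  struct type u64 = { 0, 8 };   /* 8-byte scalar: generic rule, 8.  */

  /* Prints "16 8".  */
  printf ("%llu %llu\n", type_align (&v32), type_align (&u64));
  return 0;
}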