/* Common target dependent code for GDB on AArch64 systems.
- Copyright (C) 2009-2018 Free Software Foundation, Inc.
+ Copyright (C) 2009-2019 Free Software Foundation, Inc.
Contributed by ARM Ltd.
This file is part of GDB.
#include "frame-base.h"
#include "trad-frame.h"
#include "objfiles.h"
+#include "dwarf2.h"
#include "dwarf2-frame.h"
#include "gdbtypes.h"
#include "prologue-value.h"
#include "infcall.h"
#include "ax.h"
#include "ax-gdb.h"
-#include "selftest.h"
+#include "common/selftest.h"
#include "aarch64-tdep.h"
+#include "aarch64-ravenscar-thread.h"
#include "elf-bfd.h"
#include "elf/aarch64.h"
-#include "vec.h"
+#include "common/vec.h"
#include "record.h"
#include "record-full.h"
#define bit(obj,st) (((obj) >> (st)) & 1)
#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
-/* Pseudo register base numbers. */
-#define AARCH64_Q0_REGNUM 0
-#define AARCH64_D0_REGNUM (AARCH64_Q0_REGNUM + AARCH64_D_REGISTER_COUNT)
-#define AARCH64_S0_REGNUM (AARCH64_D0_REGNUM + 32)
-#define AARCH64_H0_REGNUM (AARCH64_S0_REGNUM + 32)
-#define AARCH64_B0_REGNUM (AARCH64_H0_REGNUM + 32)
-#define AARCH64_SVE_V0_REGNUM (AARCH64_B0_REGNUM + 32)
+/* A Homogeneous Floating-Point or Short-Vector Aggregate may have at most
+ four members. */
+#define HA_MAX_NUM_FLDS 4
/* All possible aarch64 target descriptors. */
-struct target_desc *tdesc_aarch64_list[AARCH64_MAX_SVE_VQ + 1];
+struct target_desc *tdesc_aarch64_list[AARCH64_MAX_SVE_VQ + 1][2/*pauth*/];
/* The standard register names, and all the valid aliases for them. */
static const struct
"ffr", "vg"
};
+/* Names of the pointer authentication mask registers. */
+static const char *const aarch64_pauth_register_names[] =
+{
+ /* Authentication mask for data pointer. */
+ "pauth_dmask",
+ /* Authentication mask for code pointer. */
+ "pauth_cmask"
+};
+
/* AArch64 prologue cache structure. */
struct aarch64_prologue_cache
{
} // namespace
+/* If address signing is enabled, mask off the signature bits from ADDR, using
+ the register values in THIS_FRAME. */
+
+static CORE_ADDR
+aarch64_frame_unmask_address (struct gdbarch_tdep *tdep,
+ struct frame_info *this_frame,
+ CORE_ADDR addr)
+{
+ /* Only strip the signature when the RA_STATE register unwinds to a
+ non-zero value, i.e. the return address of this frame was signed
+ (see the paciasp/autiasp handling in the prologue analyzer). */
+ if (tdep->has_pauth ()
+ && frame_unwind_register_unsigned (this_frame,
+ tdep->pauth_ra_state_regnum))
+ {
+ int cmask_num = AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base);
+ CORE_ADDR cmask = frame_unwind_register_unsigned (this_frame, cmask_num);
+ /* Clear every bit covered by the code-pointer mask register. */
+ addr = addr & ~cmask;
+ }
+
+ return addr;
+}
+
/* Analyze a prologue, looking for a recognizable stack frame
and frame pointer. Scan until we encounter a store that could
clobber the stack frame unexpectedly, or an unknown instruction. */
/* Stop analysis on branch. */
break;
}
+ else if (inst.opcode->iclass == ic_system)
+ {
+ struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
+ int ra_state_val = 0;
+
+ if (insn == 0xd503233f /* paciasp. */
+ || insn == 0xd503237f /* pacibsp. */)
+ {
+ /* Return addresses are mangled. */
+ ra_state_val = 1;
+ }
+ else if (insn == 0xd50323bf /* autiasp. */
+ || insn == 0xd50323ff /* autibsp. */)
+ {
+ /* Return addresses are not mangled. */
+ ra_state_val = 0;
+ }
+ else
+ {
+ if (aarch64_debug)
+ debug_printf ("aarch64: prologue analysis gave up addr=%s"
+ " opcode=0x%x (iclass)\n",
+ core_addr_to_string_nz (start), insn);
+ break;
+ }
+
+ if (tdep->has_pauth () && cache != nullptr)
+ trad_frame_set_value (cache->saved_regs,
+ tdep->pauth_ra_state_regnum,
+ ra_state_val);
+ }
else
{
if (aarch64_debug)
struct gdbarch *gdbarch = gdbarch_find_by_info (info);
SELF_CHECK (gdbarch != NULL);
+ struct aarch64_prologue_cache cache;
+ cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);
+
+ struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
+
/* Test the simple prologue in which frame pointer is used. */
{
- struct aarch64_prologue_cache cache;
- cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);
-
static const uint32_t insns[] = {
0xa9af7bfd, /* stp x29, x30, [sp,#-272]! */
0x910003fd, /* mov x29, sp */
/* Test a prologue in which STR is used and frame pointer is not
used. */
{
- struct aarch64_prologue_cache cache;
- cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);
-
static const uint32_t insns[] = {
0xf81d0ff3, /* str x19, [sp, #-48]! */
0xb9002fe0, /* str w0, [sp, #44] */
};
instruction_reader_test reader (insns);
+ trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
SELF_CHECK (end == 4 * 5);
== -1);
}
}
+
+ /* Test a prologue in which there is a return address signing instruction. */
+ if (tdep->has_pauth ())
+ {
+ static const uint32_t insns[] = {
+ 0xd503233f, /* paciasp */
+ 0xa9bd7bfd, /* stp x29, x30, [sp, #-48]! */
+ 0x910003fd, /* mov x29, sp */
+ 0xf801c3f3, /* str x19, [sp, #28] */
+ 0xb9401fa0, /* ldr x19, [x29, #28] */
+ };
+ instruction_reader_test reader (insns);
+
+ trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
+ CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache,
+ reader);
+
+ SELF_CHECK (end == 4 * 4);
+ SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
+ SELF_CHECK (cache.framesize == 48);
+
+ for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
+ {
+ if (i == 19)
+ SELF_CHECK (cache.saved_regs[i].addr == -20);
+ else if (i == AARCH64_FP_REGNUM)
+ SELF_CHECK (cache.saved_regs[i].addr == -48);
+ else if (i == AARCH64_LR_REGNUM)
+ SELF_CHECK (cache.saved_regs[i].addr == -40);
+ else
+ SELF_CHECK (cache.saved_regs[i].addr == -1);
+ }
+
+ if (tdep->has_pauth ())
+ {
+ SELF_CHECK (trad_frame_value_p (cache.saved_regs,
+ tdep->pauth_ra_state_regnum));
+ SELF_CHECK (cache.saved_regs[tdep->pauth_ra_state_regnum].addr == 1);
+ }
+ }
}
} // namespace selftests
#endif /* GDB_SELF_TEST */
cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
*this_cache = cache;
- TRY
+ try
{
aarch64_make_prologue_cache_1 (this_frame, cache);
}
- CATCH (ex, RETURN_MASK_ERROR)
+ catch (const gdb_exception_error &ex)
{
if (ex.error != NOT_AVAILABLE_ERROR)
- throw_exception (ex);
+ throw;
}
- END_CATCH
return cache;
}
if (prev_regnum == AARCH64_PC_REGNUM)
{
CORE_ADDR lr;
+ struct gdbarch *gdbarch = get_frame_arch (this_frame);
+ struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
+
+ if (tdep->has_pauth ()
+ && trad_frame_value_p (cache->saved_regs,
+ tdep->pauth_ra_state_regnum))
+ lr = aarch64_frame_unmask_address (tdep, this_frame, lr);
+
return frame_unwind_got_constant (this_frame, prev_regnum, lr);
}
cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
*this_cache = cache;
- TRY
+ try
{
cache->prev_sp = get_frame_register_unsigned (this_frame,
AARCH64_SP_REGNUM);
cache->prev_pc = get_frame_pc (this_frame);
cache->available_p = 1;
}
- CATCH (ex, RETURN_MASK_ERROR)
+ catch (const gdb_exception_error &ex)
{
if (ex.error != NOT_AVAILABLE_ERROR)
- throw_exception (ex);
+ throw;
}
- END_CATCH
return cache;
}
aarch64_normal_frame_base
};
-/* Assuming THIS_FRAME is a dummy, return the frame ID of that
- dummy frame. The frame ID's base needs to match the TOS value
- saved by save_dummy_frame_tos () and returned from
- aarch64_push_dummy_call, and the PC needs to match the dummy
- frame's breakpoint. */
-
-static struct frame_id
-aarch64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
-{
- return frame_id_build (get_frame_register_unsigned (this_frame,
- AARCH64_SP_REGNUM),
- get_frame_pc (this_frame));
-}
-
-/* Implement the "unwind_pc" gdbarch method. */
-
-static CORE_ADDR
-aarch64_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
-{
- CORE_ADDR pc
- = frame_unwind_register_unsigned (this_frame, AARCH64_PC_REGNUM);
-
- return pc;
-}
-
-/* Implement the "unwind_sp" gdbarch method. */
-
-static CORE_ADDR
-aarch64_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
-{
- return frame_unwind_register_unsigned (this_frame, AARCH64_SP_REGNUM);
-}
-
/* Return the value of the REGNUM register in the previous frame of
*THIS_FRAME. */
aarch64_dwarf2_prev_register (struct frame_info *this_frame,
void **this_cache, int regnum)
{
+ struct gdbarch_tdep *tdep = gdbarch_tdep (get_frame_arch (this_frame));
CORE_ADDR lr;
switch (regnum)
{
case AARCH64_PC_REGNUM:
lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
+ lr = aarch64_frame_unmask_address (tdep, this_frame, lr);
return frame_unwind_got_constant (this_frame, regnum, lr);
default:
}
}
+/* One-byte DWARF expressions used as the saved-value expression for the
+ RA_STATE register.  Comparing loc.exp.start against the address of these
+ objects also tells us which value is currently installed. */
+static const unsigned char op_lit0 = DW_OP_lit0;
+static const unsigned char op_lit1 = DW_OP_lit1;
+
/* Implement the "init_reg" dwarf2_frame_ops method. */
static void
struct dwarf2_frame_state_reg *reg,
struct frame_info *this_frame)
{
+ struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
+
switch (regnum)
{
case AARCH64_PC_REGNUM:
reg->how = DWARF2_FRAME_REG_FN;
reg->loc.fn = aarch64_dwarf2_prev_register;
- break;
+ return;
+
case AARCH64_SP_REGNUM:
reg->how = DWARF2_FRAME_REG_CFA;
- break;
+ return;
+ }
+
+ /* Init pauth registers. */
+ if (tdep->has_pauth ())
+ {
+ if (regnum == tdep->pauth_ra_state_regnum)
+ {
+ /* Initialize RA_STATE to zero. */
+ reg->how = DWARF2_FRAME_REG_SAVED_VAL_EXP;
+ reg->loc.exp.start = &op_lit0;
+ reg->loc.exp.len = 1;
+ return;
+ }
+ else if (regnum == AARCH64_PAUTH_DMASK_REGNUM (tdep->pauth_reg_base)
+ || regnum == AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base))
+ {
+ reg->how = DWARF2_FRAME_REG_SAME_VALUE;
+ return;
+ }
+ }
+}
+
+/* Implement the execute_dwarf_cfa_vendor_op method.  Consume the AArch64
+ vendor CFA operation DW_CFA_AARCH64_negate_ra_state by toggling the
+ unwound RA_STATE value between 0 and 1.  Return true iff OP was handled
+ here; false lets the generic CFI machinery report it. */
+
+static bool
+aarch64_execute_dwarf_cfa_vendor_op (struct gdbarch *gdbarch, gdb_byte op,
+ struct dwarf2_frame_state *fs)
+{
+ struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
+ struct dwarf2_frame_state_reg *ra_state;
+
+ if (tdep->has_pauth () && op == DW_CFA_AARCH64_negate_ra_state)
+ {
+ /* Allocate RA_STATE column if it's not allocated yet. */
+ fs->regs.alloc_regs (AARCH64_DWARF_PAUTH_RA_STATE + 1);
+
+ /* Toggle the status of RA_STATE between 0 and 1. */
+ ra_state = &(fs->regs.reg[AARCH64_DWARF_PAUTH_RA_STATE]);
+ ra_state->how = DWARF2_FRAME_REG_SAVED_VAL_EXP;
+
+ /* A freshly allocated column has a null expression, which counts as
+ state 0 (not signed). */
+ if (ra_state->loc.exp.start == nullptr
+ || ra_state->loc.exp.start == &op_lit0)
+ ra_state->loc.exp.start = &op_lit1;
+ else
+ ra_state->loc.exp.start = &op_lit0;
+
+ ra_state->loc.exp.len = 1;
+
+ return true;
}
+
+ return false;
+}
/* When arguments must be pushed onto the stack, they go on in reverse
}
}
-/* Return 1 if *TY is a homogeneous floating-point aggregate or
- homogeneous short-vector aggregate as defined in the AAPCS64 ABI
- document; otherwise return 0. */
+/* Worker function for aapcs_is_vfp_call_or_return_candidate.
+
+ Return the number of registers required, or -1 on failure.
+
+ When encountering a base element, if FUNDAMENTAL_TYPE is not set then set it
+ to the element, else fail if the type of this element does not match the
+ existing value. */
static int
-is_hfa_or_hva (struct type *ty)
+aapcs_is_vfp_call_or_return_candidate_1 (struct type *type,
+ struct type **fundamental_type)
{
- switch (TYPE_CODE (ty))
+ if (type == nullptr)
+ return -1;
+
+ switch (TYPE_CODE (type))
{
+ case TYPE_CODE_FLT:
+ if (TYPE_LENGTH (type) > 16)
+ return -1;
+
+ if (*fundamental_type == nullptr)
+ *fundamental_type = type;
+ else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
+ || TYPE_CODE (type) != TYPE_CODE (*fundamental_type))
+ return -1;
+
+ return 1;
+
+ case TYPE_CODE_COMPLEX:
+ {
+ /* A complex counts as two base elements of its component type. */
+ struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
+ if (TYPE_LENGTH (target_type) > 16)
+ return -1;
+
+ if (*fundamental_type == nullptr)
+ *fundamental_type = target_type;
+ else if (TYPE_LENGTH (target_type) != TYPE_LENGTH (*fundamental_type)
+ || TYPE_CODE (target_type) != TYPE_CODE (*fundamental_type))
+ return -1;
+
+ return 2;
+ }
+
case TYPE_CODE_ARRAY:
{
- struct type *target_ty = TYPE_TARGET_TYPE (ty);
+ /* A short vector (8 or 16 bytes) is itself a base element. */
+ if (TYPE_VECTOR (type))
+ {
+ if (TYPE_LENGTH (type) != 8 && TYPE_LENGTH (type) != 16)
+ return -1;
- if (TYPE_VECTOR (ty))
- return 0;
+ if (*fundamental_type == nullptr)
+ *fundamental_type = type;
+ else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
+ || TYPE_CODE (type) != TYPE_CODE (*fundamental_type))
+ return -1;
- if (TYPE_LENGTH (ty) <= 4 /* HFA or HVA has at most 4 members. */
- && (TYPE_CODE (target_ty) == TYPE_CODE_FLT /* HFA */
- || (TYPE_CODE (target_ty) == TYPE_CODE_ARRAY /* HVA */
- && TYPE_VECTOR (target_ty))))
- return 1;
- break;
+ return 1;
+ }
+ else
+ {
+ /* A plain array needs one element's worth of registers per
+ array member. */
+ struct type *target_type = TYPE_TARGET_TYPE (type);
+ int count = aapcs_is_vfp_call_or_return_candidate_1
+ (target_type, fundamental_type);
+
+ if (count == -1)
+ return count;
+
+ count *= (TYPE_LENGTH (type) / TYPE_LENGTH (target_type));
+ return count;
+ }
}
- case TYPE_CODE_UNION:
case TYPE_CODE_STRUCT:
+ case TYPE_CODE_UNION:
{
- /* HFA or HVA has at most four members. */
- if (TYPE_NFIELDS (ty) > 0 && TYPE_NFIELDS (ty) <= 4)
+ int count = 0;
+
+ for (int i = 0; i < TYPE_NFIELDS (type); i++)
{
- struct type *member0_type;
-
- member0_type = check_typedef (TYPE_FIELD_TYPE (ty, 0));
- if (TYPE_CODE (member0_type) == TYPE_CODE_FLT
- || (TYPE_CODE (member0_type) == TYPE_CODE_ARRAY
- && TYPE_VECTOR (member0_type)))
- {
- int i;
-
- for (i = 0; i < TYPE_NFIELDS (ty); i++)
- {
- struct type *member1_type;
-
- member1_type = check_typedef (TYPE_FIELD_TYPE (ty, i));
- if (TYPE_CODE (member0_type) != TYPE_CODE (member1_type)
- || (TYPE_LENGTH (member0_type)
- != TYPE_LENGTH (member1_type)))
- return 0;
- }
- return 1;
- }
+ /* Ignore any static fields. */
+ if (field_is_static (&TYPE_FIELD (type, i)))
+ continue;
+
+ struct type *member = check_typedef (TYPE_FIELD_TYPE (type, i));
+
+ int sub_count = aapcs_is_vfp_call_or_return_candidate_1
+ (member, fundamental_type);
+ if (sub_count == -1)
+ return -1;
+ count += sub_count;
}
- return 0;
+
+ /* Ensure there is no padding between the fields (allowing for empty
+ zero length structs). */
+ int ftype_length = (*fundamental_type == nullptr)
+ ? 0 : TYPE_LENGTH (*fundamental_type);
+ if (count * ftype_length != TYPE_LENGTH (type))
+ return -1;
+
+ return count;
}
default:
break;
}
- return 0;
+ return -1;
+}
+
+/* Return true if an argument, whose type is described by TYPE, can be passed or
+ returned in simd/fp registers, providing enough parameter passing registers
+ are available. This is as described in the AAPCS64.
+
+ Upon successful return, *COUNT returns the number of needed registers,
+ *FUNDAMENTAL_TYPE contains the type of those registers.
+
+ Candidate as per the AAPCS64 5.4.2.C is either a:
+ - float.
+ - short-vector.
+ - HFA (Homogeneous Floating-point Aggregate, 4.3.5.1). A Composite type where
+ all the members are floats and has at most 4 members.
+ - HVA (Homogeneous Short-vector Aggregate, 4.3.5.2). A Composite type where
+ all the members are short vectors and has at most 4 members.
+ - Complex (7.1.1)
+
+ Note that HFAs and HVAs can include nested structures and arrays. */
+
+static bool
+aapcs_is_vfp_call_or_return_candidate (struct type *type, int *count,
+ struct type **fundamental_type)
+{
+ if (type == nullptr)
+ return false;
+
+ *fundamental_type = nullptr;
+
+ int ag_count = aapcs_is_vfp_call_or_return_candidate_1 (type,
+ fundamental_type);
+
+ /* An HFA/HVA may hold at most HA_MAX_NUM_FLDS base elements. */
+ if (ag_count > 0 && ag_count <= HA_MAX_NUM_FLDS)
+ {
+ *count = ag_count;
+ return true;
+ }
+ else
+ return false;
+}
/* AArch64 function call information structure. */
if (info->nsrn < 8)
{
int regnum = AARCH64_V0_REGNUM + info->nsrn;
- gdb_byte reg[V_REGISTER_SIZE];
+ /* Enough space for a full vector register. */
+ gdb_byte reg[register_size (gdbarch, regnum)];
+ gdb_assert (len <= sizeof (reg));
info->argnum++;
info->nsrn++;
}
}
-/* Pass a value in a V register, or on the stack if insufficient are
- available. */
-
-static void
-pass_in_v_or_stack (struct gdbarch *gdbarch,
- struct regcache *regcache,
- struct aarch64_call_info *info,
- struct type *type,
- struct value *arg)
+/* Pass a value, which is of type arg_type, in a V register. Assumes the value
+ satisfies aapcs_is_vfp_call_or_return_candidate and there are enough spare V
+ registers. A return value of false is an error state as the value will have
+ been partially passed to the stack. */
+static bool
+pass_in_v_vfp_candidate (struct gdbarch *gdbarch, struct regcache *regcache,
+ struct aarch64_call_info *info, struct type *arg_type,
+ struct value *arg)
{
- if (!pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (type),
- value_contents (arg)))
- pass_on_stack (info, type, arg);
+ switch (TYPE_CODE (arg_type))
+ {
+ case TYPE_CODE_FLT:
+ return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type),
+ value_contents (arg));
+ break;
+
+ case TYPE_CODE_COMPLEX:
+ {
+ /* The two component values go in consecutive V registers, first
+ half of the buffer, then the second. */
+ const bfd_byte *buf = value_contents (arg);
+ struct type *target_type = check_typedef (TYPE_TARGET_TYPE (arg_type));
+
+ if (!pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type),
+ buf))
+ return false;
+
+ return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type),
+ buf + TYPE_LENGTH (target_type));
+ }
+
+ case TYPE_CODE_ARRAY:
+ if (TYPE_VECTOR (arg_type))
+ return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type),
+ value_contents (arg));
+ /* fall through. */
+
+ case TYPE_CODE_STRUCT:
+ case TYPE_CODE_UNION:
+ /* Recurse, passing each non-static member in its own register(s). */
+ for (int i = 0; i < TYPE_NFIELDS (arg_type); i++)
+ {
+ /* Don't include static fields. */
+ if (field_is_static (&TYPE_FIELD (arg_type, i)))
+ continue;
+
+ struct value *field = value_primitive_field (arg, 0, i, arg_type);
+ struct type *field_type = check_typedef (value_type (field));
+
+ if (!pass_in_v_vfp_candidate (gdbarch, regcache, info, field_type,
+ field))
+ return false;
+ }
+ return true;
+
+ default:
+ return false;
+ }
+}
/* Implement the "push_dummy_call" gdbarch method. */
aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
struct regcache *regcache, CORE_ADDR bp_addr,
int nargs,
- struct value **args, CORE_ADDR sp, int struct_return,
+ struct value **args, CORE_ADDR sp,
+ function_call_return_method return_method,
CORE_ADDR struct_addr)
{
int argnum;
struct aarch64_call_info info;
- struct type *func_type;
- struct type *return_type;
- int lang_struct_return;
memset (&info, 0, sizeof (info));
If the language code decides to pass in memory we want to move
the pointer inserted as the initial argument from the argument
list and into X8, the conventional AArch64 struct return pointer
- register.
-
- This is slightly awkward, ideally the flag "lang_struct_return"
- would be passed to the targets implementation of push_dummy_call.
- Rather that change the target interface we call the language code
- directly ourselves. */
-
- func_type = check_typedef (value_type (function));
-
- /* Dereference function pointer types. */
- if (TYPE_CODE (func_type) == TYPE_CODE_PTR)
- func_type = TYPE_TARGET_TYPE (func_type);
-
- gdb_assert (TYPE_CODE (func_type) == TYPE_CODE_FUNC
- || TYPE_CODE (func_type) == TYPE_CODE_METHOD);
-
- /* If language_pass_by_reference () returned true we will have been
- given an additional initial argument, a hidden pointer to the
- return slot in memory. */
- return_type = TYPE_TARGET_TYPE (func_type);
- lang_struct_return = language_pass_by_reference (return_type);
+ register. */
/* Set the return address. For the AArch64, the return breakpoint
is always at BP_ADDR. */
regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);
- /* If we were given an initial argument for the return slot because
- lang_struct_return was true, lose it. */
- if (lang_struct_return)
+ /* If we were given an initial argument for the return slot, lose it. */
+ if (return_method == return_method_hidden_param)
{
args++;
nargs--;
}
/* The struct_return pointer occupies X8. */
- if (struct_return || lang_struct_return)
+ if (return_method != return_method_normal)
{
if (aarch64_debug)
{
for (argnum = 0; argnum < nargs; argnum++)
{
struct value *arg = args[argnum];
- struct type *arg_type;
- int len;
+ struct type *arg_type, *fundamental_type;
+ int len, elements;
arg_type = check_typedef (value_type (arg));
len = TYPE_LENGTH (arg_type);
+ /* If arg can be passed in v registers as per the AAPCS64, then do so
+ if there are enough spare registers. */
+ if (aapcs_is_vfp_call_or_return_candidate (arg_type, &elements,
+ &fundamental_type))
+ {
+ if (info.nsrn + elements <= 8)
+ {
+ /* We know that we have sufficient registers available, therefore
+ this will never need to fall back to the stack. */
+ if (!pass_in_v_vfp_candidate (gdbarch, regcache, &info, arg_type,
+ arg))
+ gdb_assert_not_reached ("Failed to push args");
+ }
+ else
+ {
+ info.nsrn = 8;
+ pass_on_stack (&info, arg_type, arg);
+ }
+ continue;
+ }
+
switch (TYPE_CODE (arg_type))
{
case TYPE_CODE_INT:
pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
break;
- case TYPE_CODE_COMPLEX:
- if (info.nsrn <= 6)
- {
- const bfd_byte *buf = value_contents (arg);
- struct type *target_type =
- check_typedef (TYPE_TARGET_TYPE (arg_type));
-
- pass_in_v (gdbarch, regcache, &info,
- TYPE_LENGTH (target_type), buf);
- pass_in_v (gdbarch, regcache, &info,
- TYPE_LENGTH (target_type),
- buf + TYPE_LENGTH (target_type));
- }
- else
- {
- info.nsrn = 8;
- pass_on_stack (&info, arg_type, arg);
- }
- break;
- case TYPE_CODE_FLT:
- pass_in_v_or_stack (gdbarch, regcache, &info, arg_type, arg);
- break;
-
case TYPE_CODE_STRUCT:
case TYPE_CODE_ARRAY:
case TYPE_CODE_UNION:
- if (is_hfa_or_hva (arg_type))
- {
- int elements = TYPE_NFIELDS (arg_type);
-
- /* Homogeneous Aggregates */
- if (info.nsrn + elements < 8)
- {
- int i;
-
- for (i = 0; i < elements; i++)
- {
- /* We know that we have sufficient registers
- available therefore this will never fallback
- to the stack. */
- struct value *field =
- value_primitive_field (arg, 0, i, arg_type);
- struct type *field_type =
- check_typedef (value_type (field));
-
- pass_in_v_or_stack (gdbarch, regcache, &info,
- field_type, field);
- }
- }
- else
- {
- info.nsrn = 8;
- pass_on_stack (&info, arg_type, arg);
- }
- }
- else if (TYPE_CODE (arg_type) == TYPE_CODE_ARRAY
- && TYPE_VECTOR (arg_type) && (len == 16 || len == 8))
- {
- /* Short vector types are passed in V registers. */
- pass_in_v_or_stack (gdbarch, regcache, &info, arg_type, arg);
- }
- else if (len > 16)
+ if (len > 16)
{
/* PCS B.7 Aggregates larger than 16 bytes are passed by
invisible reference. */
if (tdep->vnv_type == NULL)
{
+ /* The other AArch64 pseudo registers (Q,D,H,S,B) refer to a single value
+ slice from the non-pseudo vector registers. However NEON V registers
+ are always vector registers, and need constructing as such. */
+ const struct builtin_type *bt = builtin_type (gdbarch);
+
struct type *t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnv",
TYPE_CODE_UNION);
- append_composite_type_field (t, "d", aarch64_vnd_type (gdbarch));
- append_composite_type_field (t, "s", aarch64_vns_type (gdbarch));
- append_composite_type_field (t, "h", aarch64_vnh_type (gdbarch));
- append_composite_type_field (t, "b", aarch64_vnb_type (gdbarch));
- append_composite_type_field (t, "q", aarch64_vnq_type (gdbarch));
+ struct type *sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
+ TYPE_CODE_UNION);
+ append_composite_type_field (sub, "f",
+ init_vector_type (bt->builtin_double, 2));
+ append_composite_type_field (sub, "u",
+ init_vector_type (bt->builtin_uint64, 2));
+ append_composite_type_field (sub, "s",
+ init_vector_type (bt->builtin_int64, 2));
+ append_composite_type_field (t, "d", sub);
+
+ sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
+ TYPE_CODE_UNION);
+ append_composite_type_field (sub, "f",
+ init_vector_type (bt->builtin_float, 4));
+ append_composite_type_field (sub, "u",
+ init_vector_type (bt->builtin_uint32, 4));
+ append_composite_type_field (sub, "s",
+ init_vector_type (bt->builtin_int32, 4));
+ append_composite_type_field (t, "s", sub);
+
+ sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
+ TYPE_CODE_UNION);
+ append_composite_type_field (sub, "u",
+ init_vector_type (bt->builtin_uint16, 8));
+ append_composite_type_field (sub, "s",
+ init_vector_type (bt->builtin_int16, 8));
+ append_composite_type_field (t, "h", sub);
+
+ sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
+ TYPE_CODE_UNION);
+ append_composite_type_field (sub, "u",
+ init_vector_type (bt->builtin_uint8, 16));
+ append_composite_type_field (sub, "s",
+ init_vector_type (bt->builtin_int8, 16));
+ append_composite_type_field (t, "b", sub);
+
+ sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
+ TYPE_CODE_UNION);
+ append_composite_type_field (sub, "u",
+ init_vector_type (bt->builtin_uint128, 1));
+ append_composite_type_field (sub, "s",
+ init_vector_type (bt->builtin_int128, 1));
+ append_composite_type_field (t, "q", sub);
tdep->vnv_type = t;
}
static int
aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
{
+ struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
+
if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;
if (reg >= AARCH64_DWARF_SVE_Z0 && reg <= AARCH64_DWARF_SVE_Z0 + 15)
return AARCH64_SVE_Z0_REGNUM + reg - AARCH64_DWARF_SVE_Z0;
+ if (tdep->has_pauth ())
+ {
+ if (reg >= AARCH64_DWARF_PAUTH_DMASK && reg <= AARCH64_DWARF_PAUTH_CMASK)
+ return tdep->pauth_reg_base + reg - AARCH64_DWARF_PAUTH_DMASK;
+
+ if (reg == AARCH64_DWARF_PAUTH_RA_STATE)
+ return tdep->pauth_ra_state_regnum;
+ }
+
return -1;
}
{
struct gdbarch *gdbarch = regs->arch ();
enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
+ int elements;
+ struct type *fundamental_type;
- if (TYPE_CODE (type) == TYPE_CODE_FLT)
+ if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
+ &fundamental_type))
{
- bfd_byte buf[V_REGISTER_SIZE];
- int len = TYPE_LENGTH (type);
+ int len = TYPE_LENGTH (fundamental_type);
+
+ for (int i = 0; i < elements; i++)
+ {
+ int regno = AARCH64_V0_REGNUM + i;
+ /* Enough space for a full vector register. */
+ gdb_byte buf[register_size (gdbarch, regno)];
+ gdb_assert (len <= sizeof (buf));
- regs->cooked_read (AARCH64_V0_REGNUM, buf);
- memcpy (valbuf, buf, len);
+ if (aarch64_debug)
+ {
+ debug_printf ("read HFA or HVA return value element %d from %s\n",
+ i + 1,
+ gdbarch_register_name (gdbarch, regno));
+ }
+ regs->cooked_read (regno, buf);
+
+ memcpy (valbuf, buf, len);
+ valbuf += len;
+ }
}
else if (TYPE_CODE (type) == TYPE_CODE_INT
|| TYPE_CODE (type) == TYPE_CODE_CHAR
|| TYPE_IS_REFERENCE (type)
|| TYPE_CODE (type) == TYPE_CODE_ENUM)
{
- /* If the the type is a plain integer, then the access is
+ /* If the type is a plain integer, then the access is
straight-forward. Otherwise we have to play around a bit
more. */
int len = TYPE_LENGTH (type);
valbuf += X_REGISTER_SIZE;
}
}
- else if (TYPE_CODE (type) == TYPE_CODE_COMPLEX)
- {
- int regno = AARCH64_V0_REGNUM;
- bfd_byte buf[V_REGISTER_SIZE];
- struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
- int len = TYPE_LENGTH (target_type);
-
- regs->cooked_read (regno, buf);
- memcpy (valbuf, buf, len);
- valbuf += len;
- regs->cooked_read (regno + 1, buf);
- memcpy (valbuf, buf, len);
- valbuf += len;
- }
- else if (is_hfa_or_hva (type))
- {
- int elements = TYPE_NFIELDS (type);
- struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
- int len = TYPE_LENGTH (member_type);
- int i;
-
- for (i = 0; i < elements; i++)
- {
- int regno = AARCH64_V0_REGNUM + i;
- bfd_byte buf[V_REGISTER_SIZE];
-
- if (aarch64_debug)
- {
- debug_printf ("read HFA or HVA return value element %d from %s\n",
- i + 1,
- gdbarch_register_name (gdbarch, regno));
- }
- regs->cooked_read (regno, buf);
-
- memcpy (valbuf, buf, len);
- valbuf += len;
- }
- }
- else if (TYPE_CODE (type) == TYPE_CODE_ARRAY && TYPE_VECTOR (type)
- && (TYPE_LENGTH (type) == 16 || TYPE_LENGTH (type) == 8))
- {
- /* Short vector is returned in V register. */
- gdb_byte buf[V_REGISTER_SIZE];
-
- regs->cooked_read (AARCH64_V0_REGNUM, buf);
- memcpy (valbuf, buf, TYPE_LENGTH (type));
- }
else
{
/* For a structure or union the behaviour is as if the value had
aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
{
type = check_typedef (type);
+ int elements;
+ struct type *fundamental_type;
- if (is_hfa_or_hva (type))
+ if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
+ &fundamental_type))
{
/* v0-v7 are used to return values and one register is allocated
for one member. However, HFA or HVA has at most four members. */
{
struct gdbarch *gdbarch = regs->arch ();
enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
+ int elements;
+ struct type *fundamental_type;
- if (TYPE_CODE (type) == TYPE_CODE_FLT)
+ if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
+ &fundamental_type))
{
- bfd_byte buf[V_REGISTER_SIZE];
- int len = TYPE_LENGTH (type);
+ int len = TYPE_LENGTH (fundamental_type);
+
+ for (int i = 0; i < elements; i++)
+ {
+ int regno = AARCH64_V0_REGNUM + i;
+ /* Enough space for a full vector register. */
+ gdb_byte tmpbuf[register_size (gdbarch, regno)];
+ gdb_assert (len <= sizeof (tmpbuf));
- memcpy (buf, valbuf, len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
- regs->cooked_write (AARCH64_V0_REGNUM, buf);
+ if (aarch64_debug)
+ {
+ debug_printf ("write HFA or HVA return value element %d to %s\n",
+ i + 1,
+ gdbarch_register_name (gdbarch, regno));
+ }
+
+ memcpy (tmpbuf, valbuf,
+ len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
+ regs->cooked_write (regno, tmpbuf);
+ valbuf += len;
+ }
}
else if (TYPE_CODE (type) == TYPE_CODE_INT
|| TYPE_CODE (type) == TYPE_CODE_CHAR
}
}
}
- else if (is_hfa_or_hva (type))
- {
- int elements = TYPE_NFIELDS (type);
- struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
- int len = TYPE_LENGTH (member_type);
- int i;
-
- for (i = 0; i < elements; i++)
- {
- int regno = AARCH64_V0_REGNUM + i;
- bfd_byte tmpbuf[V_REGISTER_SIZE];
-
- if (aarch64_debug)
- {
- debug_printf ("write HFA or HVA return value element %d to %s\n",
- i + 1,
- gdbarch_register_name (gdbarch, regno));
- }
-
- memcpy (tmpbuf, valbuf, len);
- regs->cooked_write (regno, tmpbuf);
- valbuf += len;
- }
- }
- else if (TYPE_CODE (type) == TYPE_CODE_ARRAY && TYPE_VECTOR (type)
- && (TYPE_LENGTH (type) == 8 || TYPE_LENGTH (type) == 16))
- {
- /* Short vector. */
- gdb_byte buf[V_REGISTER_SIZE];
-
- memcpy (buf, valbuf, TYPE_LENGTH (type));
- regs->cooked_write (AARCH64_V0_REGNUM, buf);
- }
else
{
/* For a structure or union the behaviour is as if the value had
"b28", "b29", "b30", "b31",
};
- regnum -= gdbarch_num_regs (gdbarch);
+ int p_regnum = regnum - gdbarch_num_regs (gdbarch);
- if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
- return q_name[regnum - AARCH64_Q0_REGNUM];
+ if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
+ return q_name[p_regnum - AARCH64_Q0_REGNUM];
- if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
- return d_name[regnum - AARCH64_D0_REGNUM];
+ if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
+ return d_name[p_regnum - AARCH64_D0_REGNUM];
- if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
- return s_name[regnum - AARCH64_S0_REGNUM];
+ if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
+ return s_name[p_regnum - AARCH64_S0_REGNUM];
- if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
- return h_name[regnum - AARCH64_H0_REGNUM];
+ if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
+ return h_name[p_regnum - AARCH64_H0_REGNUM];
- if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
- return b_name[regnum - AARCH64_B0_REGNUM];
+ if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
+ return b_name[p_regnum - AARCH64_B0_REGNUM];
if (tdep->has_sve ())
{
"v28", "v29", "v30", "v31",
};
- if (regnum >= AARCH64_SVE_V0_REGNUM
- && regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
- return sve_v_name[regnum - AARCH64_SVE_V0_REGNUM];
+ if (p_regnum >= AARCH64_SVE_V0_REGNUM
+ && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
+ return sve_v_name[p_regnum - AARCH64_SVE_V0_REGNUM];
}
+ /* RA_STATE is used for unwinding only. Do not assign it a name - this
+ prevents it from being read by methods such as
+ mi_cmd_trace_frame_collected. */
+ if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
+ return "";
+
internal_error (__FILE__, __LINE__,
_("aarch64_pseudo_register_name: bad register number %d"),
- regnum);
+ p_regnum);
}
/* Implement the "pseudo_register_type" tdesc_arch_data method. */
{
struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
- regnum -= gdbarch_num_regs (gdbarch);
+ int p_regnum = regnum - gdbarch_num_regs (gdbarch);
- if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
+ if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
return aarch64_vnq_type (gdbarch);
- if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
+ if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
return aarch64_vnd_type (gdbarch);
- if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
+ if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
return aarch64_vns_type (gdbarch);
- if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
+ if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
return aarch64_vnh_type (gdbarch);
- if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
+ if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
return aarch64_vnb_type (gdbarch);
- if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
- && regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
+ if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
+ && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
return aarch64_vnv_type (gdbarch);
+ if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
+ return builtin_type (gdbarch)->builtin_uint64;
+
internal_error (__FILE__, __LINE__,
_("aarch64_pseudo_register_type: bad register number %d"),
- regnum);
+ p_regnum);
}
/* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
{
struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
- regnum -= gdbarch_num_regs (gdbarch);
+ int p_regnum = regnum - gdbarch_num_regs (gdbarch);
- if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
+ if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
return group == all_reggroup || group == vector_reggroup;
- else if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
+ else if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
return (group == all_reggroup || group == vector_reggroup
|| group == float_reggroup);
- else if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
+ else if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
return (group == all_reggroup || group == vector_reggroup
|| group == float_reggroup);
- else if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
+ else if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
return group == all_reggroup || group == vector_reggroup;
- else if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
+ else if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
return group == all_reggroup || group == vector_reggroup;
- else if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
- && regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
+ else if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
+ && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
return group == all_reggroup || group == vector_reggroup;
+ /* RA_STATE is used for unwinding only. Do not assign it to any groups. */
+ if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
+ return 0;
return group == all_reggroup;
}
const int insn_size = 4;
const int atomic_sequence_length = 16; /* Instruction sequence length. */
CORE_ADDR pc = regcache_read_pc (regcache);
- CORE_ADDR breaks[2] = { -1, -1 };
+ CORE_ADDR breaks[2] = { CORE_ADDR_MAX, CORE_ADDR_MAX };
CORE_ADDR loc = pc;
CORE_ADDR closing_insn = 0;
uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
(It is not possible to set VQ to zero on an SVE system). */
const target_desc *
-aarch64_read_description (uint64_t vq)
+aarch64_read_description (uint64_t vq, bool pauth_p)
{
if (vq > AARCH64_MAX_SVE_VQ)
error (_("VQ is %" PRIu64 ", maximum supported value is %d"), vq,
AARCH64_MAX_SVE_VQ);
- struct target_desc *tdesc = tdesc_aarch64_list[vq];
+ struct target_desc *tdesc = tdesc_aarch64_list[vq][pauth_p];
if (tdesc == NULL)
{
- tdesc = aarch64_create_target_description (vq);
- tdesc_aarch64_list[vq] = tdesc;
+ tdesc = aarch64_create_target_description (vq, pauth_p);
+ tdesc_aarch64_list[vq][pauth_p] = tdesc;
}
return tdesc;
if (feature_sve == nullptr)
return 0;
- uint64_t vl = tdesc_register_size (feature_sve,
- aarch64_sve_register_names[0]);
+ uint64_t vl = tdesc_register_bitsize (feature_sve,
+ aarch64_sve_register_names[0]) / 8;
return sve_vq_from_vl (vl);
}
+/* Add all the expected register sets into GDBARCH. */
+
+static void
+aarch64_add_reggroups (struct gdbarch *gdbarch)
+{
+ /* Register the standard groups so that commands such as
+    "info registers <group>" can classify AArch64 registers.  */
+ reggroup_add (gdbarch, general_reggroup);
+ reggroup_add (gdbarch, float_reggroup);
+ reggroup_add (gdbarch, system_reggroup);
+ reggroup_add (gdbarch, vector_reggroup);
+ reggroup_add (gdbarch, all_reggroup);
+ /* save/restore groups control which registers are preserved across
+    inferior function calls.  */
+ reggroup_add (gdbarch, save_reggroup);
+ reggroup_add (gdbarch, restore_reggroup);
+}
+
+/* Implement the "cannot_store_register" gdbarch method.  Returns non-zero
+   for registers the user must not be allowed to write. */
+
+static int
+aarch64_cannot_store_register (struct gdbarch *gdbarch, int regnum)
+{
+ struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
+
+ /* Without pointer authentication, every register is writable.  */
+ if (!tdep->has_pauth ())
+ return 0;
+
+ /* Pointer authentication registers are read-only. */
+ return (regnum == AARCH64_PAUTH_DMASK_REGNUM (tdep->pauth_reg_base)
+ || regnum == AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base));
+}
/* Initialize the current architecture based on INFO. If possible,
re-use an architecture from ARCHES, which is a list of
const struct tdesc_feature *feature_core;
const struct tdesc_feature *feature_fpu;
const struct tdesc_feature *feature_sve;
+ const struct tdesc_feature *feature_pauth;
int num_regs = 0;
int num_pseudo_regs = 0;
+ int first_pauth_regnum = -1;
+ int pauth_ra_state_offset = -1;
/* Ensure we always have a target description. */
if (!tdesc_has_registers (tdesc))
- tdesc = aarch64_read_description (0);
+ tdesc = aarch64_read_description (0, false);
gdb_assert (tdesc);
feature_core = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");
feature_fpu = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
+ feature_pauth = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.pauth");
if (feature_core == NULL)
return NULL;
num_pseudo_regs += 32; /* add the Bn scalar register pseudos */
}
+ /* Add the pauth registers. */
+ if (feature_pauth != NULL)
+ {
+ first_pauth_regnum = num_regs;
+ pauth_ra_state_offset = num_pseudo_regs;
+ /* Validate the descriptor provides the mandatory PAUTH registers and
+ allocate their numbers. */
+ for (i = 0; i < ARRAY_SIZE (aarch64_pauth_register_names); i++)
+ valid_p &= tdesc_numbered_register (feature_pauth, tdesc_data,
+ first_pauth_regnum + i,
+ aarch64_pauth_register_names[i]);
+
+ num_regs += i;
+ num_pseudo_regs += 1; /* Count RA_STATE pseudo register. */
+ }
+
if (!valid_p)
{
tdesc_data_cleanup (tdesc_data);
tdep->jb_pc = -1; /* Longjump support not enabled by default. */
tdep->jb_elt_size = 8;
tdep->vq = aarch64_get_tdesc_vq (tdesc);
+ tdep->pauth_reg_base = first_pauth_regnum;
+ tdep->pauth_ra_state_regnum = (feature_pauth == NULL) ? -1
+ : pauth_ra_state_offset + num_regs;
+
set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
- /* Frame handling. */
- set_gdbarch_dummy_id (gdbarch, aarch64_dummy_id);
- set_gdbarch_unwind_pc (gdbarch, aarch64_unwind_pc);
- set_gdbarch_unwind_sp (gdbarch, aarch64_unwind_sp);
-
/* Advance PC across function entry code. */
set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
set_tdesc_pseudo_register_reggroup_p (gdbarch,
aarch64_pseudo_register_reggroup_p);
+ set_gdbarch_cannot_store_register (gdbarch, aarch64_cannot_store_register);
/* ABI */
set_gdbarch_short_bit (gdbarch, 16);
/* Virtual tables. */
set_gdbarch_vbit_in_delta (gdbarch, 1);
+ /* Register architecture. */
+ aarch64_add_reggroups (gdbarch);
+
/* Hook in the ABI-specific overrides, if they have been registered. */
info.target_desc = tdesc;
info.tdesc_data = tdesc_data;
gdbarch_init_osabi (info, gdbarch);
dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
+ /* Register DWARF CFA vendor handler. */
+ set_gdbarch_execute_dwarf_cfa_vendor_op (gdbarch,
+ aarch64_execute_dwarf_cfa_vendor_op);
/* Add some default predicates. */
frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
value_of_aarch64_user_reg,
&aarch64_register_aliases[i].regnum);
+ register_aarch64_ravenscar_ops (gdbarch);
+
return gdbarch;
}
selftests::register_test ("aarch64-process-record",
selftests::aarch64_process_record_test);
selftests::record_xml_tdesc ("aarch64.xml",
- aarch64_create_target_description (0));
+ aarch64_create_target_description (0, false));
#endif
}