/* Target-dependent code for GNU/Linux AArch64.
- Copyright (C) 2009-2018 Free Software Foundation, Inc.
+ Copyright (C) 2009-2019 Free Software Foundation, Inc.
Contributed by ARM Ltd.
This file is part of GDB.
#include "defs.h"
#include "gdbarch.h"
-#include "arch-utils.h"
#include "glibc-tdep.h"
#include "linux-tdep.h"
#include "aarch64-tdep.h"
#include "symtab.h"
#include "tramp-frame.h"
#include "trad-frame.h"
+#include "target/target.h"
-#include "inferior.h"
#include "regcache.h"
#include "regset.h"
-#include "cli/cli-utils.h"
#include "stap-probe.h"
#include "parser-defs.h"
#include "user-regs.h"
#include "record-full.h"
#include "linux-record.h"
-#include "auxv.h"
-#include "elf/common.h"
/* Signal frame handling.
struct ucontext uc;
};
- typedef struct
- {
- ... 128 bytes
- } siginfo_t;
-
The ucontext has the following form:
struct ucontext
{
struct sigcontext uc_mcontext;
};
- typedef struct sigaltstack
- {
- void *ss_sp;
- int ss_flags;
- size_t ss_size;
- } stack_t;
-
struct sigcontext
{
unsigned long fault_address;
__u8 __reserved[4096]
};
+ The reserved space in sigcontext contains additional structures, each starting
+ with an aarch64_ctx, which specifies a unique identifier and the total size of
+ the structure. The final structure in reserved will start with a null
+ aarch64_ctx. The penultimate entry in reserved may be an extra_context which
+ then points to a further block of reserved space.
+
+ struct aarch64_ctx {
+ u32 magic;
+ u32 size;
+ };
+
The restorer stub will always have the form:
d28015a8 movz x8, #0xad
#define AARCH64_RT_SIGFRAME_UCONTEXT_OFFSET 128
#define AARCH64_UCONTEXT_SIGCONTEXT_OFFSET 176
#define AARCH64_SIGCONTEXT_XO_OFFSET 8
+#define AARCH64_SIGCONTEXT_RESERVED_OFFSET 288
+
+#define AARCH64_SIGCONTEXT_RESERVED_SIZE 4096
+
+/* Unique identifiers that may be used for aarch64_ctx.magic. */
+#define AARCH64_EXTRA_MAGIC 0x45585401
+#define AARCH64_FPSIMD_MAGIC 0x46508001
+#define AARCH64_SVE_MAGIC 0x53564501
+
+/* Defines for the extra_context that follows an AARCH64_EXTRA_MAGIC. */
+#define AARCH64_EXTRA_DATAP_OFFSET 8
+
+/* Defines for the fpsimd that follows an AARCH64_FPSIMD_MAGIC. */
+#define AARCH64_FPSIMD_FPSR_OFFSET 8
+#define AARCH64_FPSIMD_FPCR_OFFSET 12
+#define AARCH64_FPSIMD_V0_OFFSET 16
+#define AARCH64_FPSIMD_VREG_SIZE 16
+
+/* Defines for the sve structure that follows an AARCH64_SVE_MAGIC.
+   The macro argument VQ is parenthesized in the expansions so that
+   expression arguments expand correctly.  */
+#define AARCH64_SVE_CONTEXT_VL_OFFSET 8
+#define AARCH64_SVE_CONTEXT_REGS_OFFSET 16
+#define AARCH64_SVE_CONTEXT_P_REGS_OFFSET(vq) (32 * (vq) * 16)
+#define AARCH64_SVE_CONTEXT_FFR_OFFSET(vq) \
+ (AARCH64_SVE_CONTEXT_P_REGS_OFFSET (vq) + (16 * (vq) * 2))
+#define AARCH64_SVE_CONTEXT_SIZE(vq) \
+ (AARCH64_SVE_CONTEXT_FFR_OFFSET (vq) + ((vq) * 2))
+
+
+/* Read an aarch64_ctx, returning the magic value, and setting *SIZE to the
+ size, or return 0 on error. */
+
+static uint32_t
+read_aarch64_ctx (CORE_ADDR ctx_addr, enum bfd_endian byte_order,
+ uint32_t *size)
+{
+ uint32_t magic = 0;
+ gdb_byte buf[4];
+
+ if (target_read_memory (ctx_addr, buf, 4) != 0)
+ return 0;
+ magic = extract_unsigned_integer (buf, 4, byte_order);
+
+ if (target_read_memory (ctx_addr + 4, buf, 4) != 0)
+ return 0;
+ *size = extract_unsigned_integer (buf, 4, byte_order);
+
+ /* Note: a return of 0 is also what the terminating null aarch64_ctx
+ yields, so callers cannot distinguish a read failure from the end of
+ the section list -- both stop the scan.  */
+ return magic;
+}
/* Implement the "init" method of struct tramp_frame. */
struct trad_frame_cache *this_cache,
CORE_ADDR func)
{
+ struct gdbarch *gdbarch = get_frame_arch (this_frame);
+ enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
+ struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
 CORE_ADDR sp = get_frame_register_unsigned (this_frame, AARCH64_SP_REGNUM);
- CORE_ADDR sigcontext_addr =
- sp
- + AARCH64_RT_SIGFRAME_UCONTEXT_OFFSET
- + AARCH64_UCONTEXT_SIGCONTEXT_OFFSET;
- int i;
-
- for (i = 0; i < 31; i++)
+ CORE_ADDR sigcontext_addr = (sp + AARCH64_RT_SIGFRAME_UCONTEXT_OFFSET
+ + AARCH64_UCONTEXT_SIGCONTEXT_OFFSET);
+ CORE_ADDR section = sigcontext_addr + AARCH64_SIGCONTEXT_RESERVED_OFFSET;
+ CORE_ADDR section_end = section + AARCH64_SIGCONTEXT_RESERVED_SIZE;
+ CORE_ADDR fpsimd = 0;
+ CORE_ADDR sve_regs = 0;
+ uint32_t size, magic;
+ bool extra_found = false;
+ int num_regs = gdbarch_num_regs (gdbarch);
+
+ /* Read in the integer registers. */
+
+ for (int i = 0; i < 31; i++)
 {
 trad_frame_set_reg_addr (this_cache,
 AARCH64_X0_REGNUM + i,
 sigcontext_addr + AARCH64_SIGCONTEXT_XO_OFFSET
- + i * AARCH64_SIGCONTEXT_REG_SIZE);
+ + i * AARCH64_SIGCONTEXT_REG_SIZE);
 }
 trad_frame_set_reg_addr (this_cache, AARCH64_SP_REGNUM,
 sigcontext_addr + AARCH64_SIGCONTEXT_XO_OFFSET
 sigcontext_addr + AARCH64_SIGCONTEXT_XO_OFFSET
 + 32 * AARCH64_SIGCONTEXT_REG_SIZE);
+ /* Search for the FP and SVE sections, stopping at null. */
+ while ((magic = read_aarch64_ctx (section, byte_order, &size)) != 0
+ && size != 0)
+ {
+ switch (magic)
+ {
+ case AARCH64_FPSIMD_MAGIC:
+ fpsimd = section;
+ section += size;
+ break;
+
+ case AARCH64_SVE_MAGIC:
+ {
+ /* Check if the section is followed by a full SVE dump, and set
+ sve_regs if it is. */
+ gdb_byte buf[4];
+ uint16_t vq;
+
+ if (!tdep->has_sve ())
+ break;
+
+ if (target_read_memory (section + AARCH64_SVE_CONTEXT_VL_OFFSET,
+ buf, 2) != 0)
+ {
+ section += size;
+ break;
+ }
+ vq = sve_vq_from_vl (extract_unsigned_integer (buf, 2, byte_order));
+
+ if (vq != tdep->vq)
+ error (_("Invalid vector length in signal frame %d vs %s."), vq,
+ pulongest (tdep->vq));
+
+ if (size >= AARCH64_SVE_CONTEXT_SIZE (vq))
+ sve_regs = section + AARCH64_SVE_CONTEXT_REGS_OFFSET;
+
+ section += size;
+ break;
+ }
+
+ case AARCH64_EXTRA_MAGIC:
+ {
+ /* Extra is always the last valid section in reserved and points to
+ an additional block of memory filled with more sections. Reset
+ the address to the extra section and continue looking for more
+ structures. */
+ gdb_byte buf[8];
+
+ if (target_read_memory (section + AARCH64_EXTRA_DATAP_OFFSET,
+ buf, 8) != 0)
+ {
+ section += size;
+ break;
+ }
+
+ /* NOTE(review): datap is read as an 8-byte absolute address --
+ confirm against the kernel's extra_context layout.  */
+ section = extract_unsigned_integer (buf, 8, byte_order);
+ extra_found = true;
+ break;
+ }
+
+ default:
+ section += size;
+ break;
+ }
+
+ /* Prevent searching past the end of the reserved section. The extra
+ section does not have a hard coded limit - we have to rely on it ending
+ with nulls. */
+ if (!extra_found && section > section_end)
+ break;
+ }
+
+ if (sve_regs != 0)
+ {
+ CORE_ADDR offset;
+
+ /* Each Z register overlays the corresponding V/Q/D/S/H/B views at the
+ same address; the num_regs offset presumably selects the
+ pseudo-register numbering -- confirm against aarch64-tdep.  */
+ for (int i = 0; i < 32; i++)
+ {
+ offset = sve_regs + (i * tdep->vq * 16);
+ trad_frame_set_reg_addr (this_cache, AARCH64_SVE_Z0_REGNUM + i,
+ offset);
+ trad_frame_set_reg_addr (this_cache,
+ num_regs + AARCH64_SVE_V0_REGNUM + i,
+ offset);
+ trad_frame_set_reg_addr (this_cache, num_regs + AARCH64_Q0_REGNUM + i,
+ offset);
+ trad_frame_set_reg_addr (this_cache, num_regs + AARCH64_D0_REGNUM + i,
+ offset);
+ trad_frame_set_reg_addr (this_cache, num_regs + AARCH64_S0_REGNUM + i,
+ offset);
+ trad_frame_set_reg_addr (this_cache, num_regs + AARCH64_H0_REGNUM + i,
+ offset);
+ trad_frame_set_reg_addr (this_cache, num_regs + AARCH64_B0_REGNUM + i,
+ offset);
+ }
+
+ offset = sve_regs + AARCH64_SVE_CONTEXT_P_REGS_OFFSET (tdep->vq);
+ for (int i = 0; i < 16; i++)
+ trad_frame_set_reg_addr (this_cache, AARCH64_SVE_P0_REGNUM + i,
+ offset + (i * tdep->vq * 2));
+
+ offset = sve_regs + AARCH64_SVE_CONTEXT_FFR_OFFSET (tdep->vq);
+ trad_frame_set_reg_addr (this_cache, AARCH64_SVE_FFR_REGNUM, offset);
+ }
+
+ if (fpsimd != 0)
+ {
+ trad_frame_set_reg_addr (this_cache, AARCH64_FPSR_REGNUM,
+ fpsimd + AARCH64_FPSIMD_FPSR_OFFSET);
+ trad_frame_set_reg_addr (this_cache, AARCH64_FPCR_REGNUM,
+ fpsimd + AARCH64_FPSIMD_FPCR_OFFSET);
+
+ /* If there was no SVE section then set up the V registers. */
+ if (sve_regs == 0)
+ for (int i = 0; i < 32; i++)
+ {
+ CORE_ADDR offset = (fpsimd + AARCH64_FPSIMD_V0_OFFSET
+ + (i * AARCH64_FPSIMD_VREG_SIZE));
+
+ trad_frame_set_reg_addr (this_cache, AARCH64_V0_REGNUM + i, offset);
+ trad_frame_set_reg_addr (this_cache,
+ num_regs + AARCH64_Q0_REGNUM + i, offset);
+ trad_frame_set_reg_addr (this_cache,
+ num_regs + AARCH64_D0_REGNUM + i, offset);
+ trad_frame_set_reg_addr (this_cache,
+ num_regs + AARCH64_S0_REGNUM + i, offset);
+ trad_frame_set_reg_addr (this_cache,
+ num_regs + AARCH64_H0_REGNUM + i, offset);
+ trad_frame_set_reg_addr (this_cache,
+ num_regs + AARCH64_B0_REGNUM + i, offset);
+ if (tdep->has_sve ())
+ trad_frame_set_reg_addr (this_cache,
+ num_regs + AARCH64_SVE_V0_REGNUM + i,
+ offset);
+ }
+ }
+
+ /* Anchor the frame ID on the SP at the point the signal was delivered.  */
 trad_frame_set_id (this_cache, frame_id_build (sp, func));
 }
{
/* movz x8, 0x8b (S=1,o=10,h=0,i=0x8b,r=8)
Soo1 0010 1hhi iiii iiii iiii iiir rrrr */
- {0xd2801168, -1},
+ {0xd2801168, ULONGEST_MAX},
/* svc 0x0 (o=0, l=1)
1101 0100 oooi iiii iiii iiii iii0 00ll */
- {0xd4000001, -1},
- {TRAMP_SENTINEL_INSN, -1}
+ {0xd4000001, ULONGEST_MAX},
+ {TRAMP_SENTINEL_INSN, ULONGEST_MAX}
},
aarch64_linux_sigframe_init
};
#define SVE_HEADER_SIZE \
(SVE_HEADER_RESERVED_OFFSET + SVE_HEADER_RESERVED_LENGTH)
+#define SVE_HEADER_FLAG_SVE 1
+
/* Get VQ value from SVE section in the core dump. */
static uint64_t
return 0;
}
- size_t size = bfd_section_size (abfd, sve_section);
+ size_t size = bfd_section_size (sve_section);
/* Check extended state size. */
if (size < SVE_HEADER_SIZE)
if (vq > AARCH64_MAX_SVE_VQ)
{
warning (_("SVE Vector length in core file not supported by this version"
- " of GDB. (VQ=%ld)"), vq);
+ " of GDB. (VQ=%s)"), pulongest (vq));
return 0;
}
else if (vq == 0)
{
- warning (_("SVE Vector length in core file is invalid. (VQ=%ld"), vq);
+ /* Close the parenthesis opened by "(VQ=" -- the %ld version of this
+ message was missing it too.  */
+ warning (_("SVE Vector length in core file is invalid. (VQ=%s)"),
+ pulongest (vq));
return 0;
}
return vq;
}
+/* Supply register REGNUM from BUF to REGCACHE, using the register map
+ in REGSET. If REGNUM is -1, do this for all registers in REGSET.
+ If BUF is NULL, set the registers to "unavailable" status. */
+
+static void
+aarch64_linux_supply_sve_regset (const struct regset *regset,
+ struct regcache *regcache,
+ int regnum, const void *buf, size_t size)
+{
+ /* BUF is const-qualified; read the header through a const pointer
+ rather than casting the qualifier away.  */
+ const gdb_byte *header = (const gdb_byte *) buf;
+ struct gdbarch *gdbarch = regcache->arch ();
+ enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
+
+ if (buf == nullptr)
+ return regcache->supply_regset (regset, regnum, nullptr, size);
+ gdb_assert (size > SVE_HEADER_SIZE);
+
+ /* BUF contains an SVE header followed by a register dump of either the
+ passed in SVE regset or a NEON fpregset. */
+
+ /* Extract required fields from the header. */
+ ULONGEST vl = extract_unsigned_integer (header + SVE_HEADER_VL_OFFSET,
+ SVE_HEADER_VL_LENGTH, byte_order);
+ uint16_t flags = extract_unsigned_integer (header + SVE_HEADER_FLAGS_OFFSET,
+ SVE_HEADER_FLAGS_LENGTH,
+ byte_order);
+
+ if (regnum == -1 || regnum == AARCH64_SVE_VG_REGNUM)
+ {
+ gdb_byte vg_target[8];
+ store_integer ((gdb_byte *)&vg_target, sizeof (uint64_t), byte_order,
+ sve_vg_from_vl (vl));
+ regcache->raw_supply (AARCH64_SVE_VG_REGNUM, &vg_target);
+ }
+
+ if (flags & SVE_HEADER_FLAG_SVE)
+ {
+ /* Register dump is a SVE structure. */
+ regcache->supply_regset (regset, regnum,
+ (gdb_byte *) buf + SVE_HEADER_SIZE,
+ size - SVE_HEADER_SIZE);
+ }
+ else
+ {
+ /* Register dump is a fpsimd structure. First clear the SVE
+ registers. */
+ for (int i = 0; i < AARCH64_SVE_Z_REGS_NUM; i++)
+ regcache->raw_supply_zeroed (AARCH64_SVE_Z0_REGNUM + i);
+ for (int i = 0; i < AARCH64_SVE_P_REGS_NUM; i++)
+ regcache->raw_supply_zeroed (AARCH64_SVE_P0_REGNUM + i);
+ regcache->raw_supply_zeroed (AARCH64_SVE_FFR_REGNUM);
+
+ /* Then supply the fpsimd registers. */
+ regcache->supply_regset (&aarch64_linux_fpregset, regnum,
+ (gdb_byte *) buf + SVE_HEADER_SIZE,
+ size - SVE_HEADER_SIZE);
+ }
+}
+
+/* Collect register REGNUM from REGCACHE to BUF, using the register
+ map in REGSET. If REGNUM is -1, do this for all registers in
+ REGSET. */
+
+static void
+aarch64_linux_collect_sve_regset (const struct regset *regset,
+ const struct regcache *regcache,
+ int regnum, void *buf, size_t size)
+{
+ gdb_byte *header = (gdb_byte *) buf;
+ struct gdbarch *gdbarch = regcache->arch ();
+ enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
+ uint64_t vq = gdbarch_tdep (gdbarch)->vq;
+
+ /* NOTE(review): the caller is assumed to provide at least SIZE bytes,
+ i.e. SVE_HEADER_SIZE plus a full SVE register dump for VQ -- confirm
+ against the regset iteration code.  */
+ gdb_assert (buf != NULL);
+ gdb_assert (size > SVE_HEADER_SIZE);
+
+ /* BUF starts with a SVE header prior to the register dump. */
+
+ store_unsigned_integer (header + SVE_HEADER_SIZE_OFFSET,
+ SVE_HEADER_SIZE_LENGTH, byte_order, size);
+ store_unsigned_integer (header + SVE_HEADER_MAX_SIZE_OFFSET,
+ SVE_HEADER_MAX_SIZE_LENGTH, byte_order, size);
+ store_unsigned_integer (header + SVE_HEADER_VL_OFFSET, SVE_HEADER_VL_LENGTH,
+ byte_order, sve_vl_from_vq (vq));
+ store_unsigned_integer (header + SVE_HEADER_MAX_VL_OFFSET,
+ SVE_HEADER_MAX_VL_LENGTH, byte_order,
+ sve_vl_from_vq (vq));
+ store_unsigned_integer (header + SVE_HEADER_FLAGS_OFFSET,
+ SVE_HEADER_FLAGS_LENGTH, byte_order,
+ SVE_HEADER_FLAG_SVE);
+ store_unsigned_integer (header + SVE_HEADER_RESERVED_OFFSET,
+ SVE_HEADER_RESERVED_LENGTH, byte_order, 0);
+
+ /* The SVE register dump follows. */
+ regcache->collect_regset (regset, regnum, (gdb_byte *) buf + SVE_HEADER_SIZE,
+ size - SVE_HEADER_SIZE);
+}
+
/* Implement the "regset_from_core_section" gdbarch method. */
static void
void *cb_data,
const struct regcache *regcache)
{
+ struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
+
 cb (".reg", AARCH64_LINUX_SIZEOF_GREGSET, AARCH64_LINUX_SIZEOF_GREGSET,
 &aarch64_linux_gregset, NULL, cb_data);
- cb (".reg2", AARCH64_LINUX_SIZEOF_FPREGSET, AARCH64_LINUX_SIZEOF_FPREGSET,
- &aarch64_linux_fpregset, NULL, cb_data);
+
+ if (tdep->has_sve ())
+ {
+ /* Create this on the fly in order to handle vector register sizes. */
+ const struct regcache_map_entry sve_regmap[] =
+ {
+ { 32, AARCH64_SVE_Z0_REGNUM, (int) (tdep->vq * 16) },
+ { 16, AARCH64_SVE_P0_REGNUM, (int) (tdep->vq * 16 / 8) },
+ { 1, AARCH64_SVE_FFR_REGNUM, 4 },
+ { 1, AARCH64_FPSR_REGNUM, 4 },
+ { 1, AARCH64_FPCR_REGNUM, 4 },
+ { 0 }
+ };
+
+ /* NOTE(review): this regset is stack-local; cb is assumed to consume
+ it immediately (as core-dump iteration does) rather than retaining
+ the pointer -- confirm.  */
+ const struct regset aarch64_linux_sve_regset =
+ {
+ sve_regmap,
+ aarch64_linux_supply_sve_regset, aarch64_linux_collect_sve_regset,
+ REGSET_VARIABLE_SIZE
+ };
+
+ cb (".reg-aarch-sve",
+ SVE_HEADER_SIZE + regcache_map_entry_size (aarch64_linux_fpregmap),
+ SVE_HEADER_SIZE + regcache_map_entry_size (sve_regmap),
+ &aarch64_linux_sve_regset, "SVE registers", cb_data);
+ }
+ else
+ cb (".reg2", AARCH64_LINUX_SIZEOF_FPREGSET, AARCH64_LINUX_SIZEOF_FPREGSET,
+ &aarch64_linux_fpregset, NULL, cb_data);
+
+
+ if (tdep->has_pauth ())
+ {
+ /* Create this on the fly in order to handle the variable location. */
+ const struct regcache_map_entry pauth_regmap[] =
+ {
+ { 2, AARCH64_PAUTH_DMASK_REGNUM (tdep->pauth_reg_base), 8},
+ { 0 }
+ };
+
+ const struct regset aarch64_linux_pauth_regset =
+ {
+ pauth_regmap, regcache_supply_regset, regcache_collect_regset
+ };
+
+ cb (".reg-aarch-pauth", AARCH64_LINUX_SIZEOF_PAUTH,
+ AARCH64_LINUX_SIZEOF_PAUTH, &aarch64_linux_pauth_regset,
+ "pauth registers", cb_data);
+ }
}
/* Implement the "core_read_description" gdbarch method. */
aarch64_linux_core_read_description (struct gdbarch *gdbarch,
struct target_ops *target, bfd *abfd)
{
- CORE_ADDR aarch64_hwcap = 0;
-
- if (target_auxv_search (target, AT_HWCAP, &aarch64_hwcap) != 1)
- return NULL;
+ CORE_ADDR hwcap = linux_get_hwcap (target);
- return aarch64_read_description (aarch64_linux_core_read_vq (gdbarch, abfd));
+ return aarch64_read_description (aarch64_linux_core_read_vq (gdbarch, abfd),
+ hwcap & AARCH64_HWCAP_PACA);
}
/* Implementation of `gdbarch_stap_is_single_operand', as defined in
return 1;
}
-/* Implement the "get_syscall_number" gdbarch method. */
-
-static LONGEST
-aarch64_linux_get_syscall_number (struct gdbarch *gdbarch,
- thread_info *thread)
-{
- struct regcache *regs = get_thread_regcache (thread);
- enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
-
- /* The content of register x8. */
- gdb_byte buf[X_REGISTER_SIZE];
- /* The result. */
- LONGEST ret;
-
- /* Getting the system call number from the register x8. */
- regs->cooked_read (AARCH64_DWARF_X0 + 8, buf);
-
- ret = extract_signed_integer (buf, X_REGISTER_SIZE, byte_order);
-
- return ret;
-}
-
/* AArch64 process record-replay constructs: syscall, signal etc. */
struct linux_record_tdep aarch64_linux_record_tdep;
}
}
+/* Retrieve the syscall number at a ptrace syscall-stop, either on syscall entry
+ or exit. Return -1 upon error. */
+
+static LONGEST
+aarch64_linux_get_syscall_number (struct gdbarch *gdbarch, thread_info *thread)
+{
+ struct regcache *regs = get_thread_regcache (thread);
+ LONGEST ret;
+
+ /* Get the system call number from register x8. */
+ regs->cooked_read (AARCH64_X0_REGNUM + 8, &ret);
+
+ /* On exit from a successful execve, we will be in a new process and all the
+ registers will be cleared - x0 to x30 will be 0, except for a 1 in x7.
+ This function will only ever get called when stopped at the entry or exit
+ of a syscall, so by checking for 0 in x0 (arg0/retval), x1 (arg1), x8
+ (syscall), x29 (FP) and x30 (LR) we can infer:
+ 1) Either inferior is at exit from successful execve.
+ 2) Or inferior is at entry to a call to io_setup with invalid arguments and
+ a corrupted FP and LR.
+ It should be safe enough to assume case 1. */
+ if (ret == 0)
+ {
+ LONGEST x1 = -1, fp = -1, lr = -1;
+ regs->cooked_read (AARCH64_X0_REGNUM + 1, &x1);
+ regs->cooked_read (AARCH64_FP_REGNUM, &fp);
+ regs->cooked_read (AARCH64_LR_REGNUM, &lr);
+ if (x1 == 0 && fp == 0 && lr == 0)
+ return aarch64_sys_execve;
+ }
+
+ return ret;
+}
+
/* Record all registers but PC register for process-record. */
static int
/* Implement the "gcc_target_options" gdbarch method. */
-static char *
+static std::string
aarch64_linux_gcc_target_options (struct gdbarch *gdbarch)
{
/* GCC doesn't know "-m64". */
- return NULL;
+ return {};
}
static void
set_gdbarch_get_syscall_number (gdbarch, aarch64_linux_get_syscall_number);
/* Displaced stepping. */
- set_gdbarch_max_insn_length (gdbarch, 4 * DISPLACED_MODIFIED_INSNS);
+ set_gdbarch_max_insn_length (gdbarch, 4 * AARCH64_DISPLACED_MODIFIED_INSNS);
set_gdbarch_displaced_step_copy_insn (gdbarch,
aarch64_displaced_step_copy_insn);
set_gdbarch_displaced_step_fixup (gdbarch, aarch64_displaced_step_fixup);