/* Offset within the signal frame where the extension context records
   begin, and the total size of that reserved area.  */
#define AARCH64_SIGCONTEXT_RESERVED_OFFSET 288

#define AARCH64_SIGCONTEXT_RESERVED_SIZE 4096

/* Unique identifiers that may be used for aarch64_ctx.magic.  */
#define AARCH64_EXTRA_MAGIC 0x45585401
#define AARCH64_FPSIMD_MAGIC 0x46508001
#define AARCH64_SVE_MAGIC 0x53564501

/* Defines for the extra_context that follows an AARCH64_EXTRA_MAGIC.  */
#define AARCH64_EXTRA_DATAP_OFFSET 8

/* Defines for the fpsimd that follows an AARCH64_FPSIMD_MAGIC.  */
#define AARCH64_FPSIMD_FPSR_OFFSET 8
#define AARCH64_FPSIMD_FPCR_OFFSET 12
#define AARCH64_FPSIMD_V0_OFFSET 16
#define AARCH64_FPSIMD_VREG_SIZE 16

/* Defines for the sve structure that follows an AARCH64_SVE_MAGIC.
   The VQ argument is fully parenthesized in the expansions so that an
   expression argument (e.g. "vq + 1") expands correctly.  */
#define AARCH64_SVE_CONTEXT_VL_OFFSET 8
#define AARCH64_SVE_CONTEXT_REGS_OFFSET 16
#define AARCH64_SVE_CONTEXT_P_REGS_OFFSET(vq) (32 * (vq) * 16)
#define AARCH64_SVE_CONTEXT_FFR_OFFSET(vq) \
  (AARCH64_SVE_CONTEXT_P_REGS_OFFSET (vq) + (16 * (vq) * 2))
#define AARCH64_SVE_CONTEXT_SIZE(vq) \
  (AARCH64_SVE_CONTEXT_FFR_OFFSET (vq) + ((vq) * 2))
+
+
+/* Read an aarch64_ctx, returning the magic value, and setting *SIZE to the
+ size, or return 0 on error. */
+
+static uint32_t
+read_aarch64_ctx (CORE_ADDR ctx_addr, enum bfd_endian byte_order,
+ uint32_t *size)
+{
+ uint32_t magic = 0;
+ gdb_byte buf[4];
+
+ if (target_read_memory (ctx_addr, buf, 4) != 0)
+ return 0;
+ magic = extract_unsigned_integer (buf, 4, byte_order);
+
+ if (target_read_memory (ctx_addr + 4, buf, 4) != 0)
+ return 0;
+ *size = extract_unsigned_integer (buf, 4, byte_order);
+
+ return magic;
+}
+
+/* Given CACHE, use the trad_frame* functions to restore the FPSIMD
+ registers from a signal frame.
+
+ VREG_NUM is the number of the V register being restored, OFFSET is the
+ address containing the register value, BYTE_ORDER is the endianness and
+ HAS_SVE tells us if we have a valid SVE context or not. */
+
+static void
+aarch64_linux_restore_vreg (struct trad_frame_cache *cache, int num_regs,
+ int vreg_num, CORE_ADDR offset,
+ enum bfd_endian byte_order, bool has_sve)
+{
+ /* WARNING: SIMD state is laid out in memory in target-endian format.
+
+ So we have a couple cases to consider:
+
+ 1 - If the target is big endian, then SIMD state is big endian,
+ requiring a byteswap.
+
+ 2 - If the target is little endian, then SIMD state is little endian, so
+ no byteswap is needed. */
+
+ if (byte_order == BFD_ENDIAN_BIG)
+ {
+ gdb_byte buf[V_REGISTER_SIZE];
+
+ if (target_read_memory (offset, buf, V_REGISTER_SIZE) != 0)
+ {
+ size_t size = V_REGISTER_SIZE/2;
+
+ /* Read the two halves of the V register in reverse byte order. */
+ CORE_ADDR u64 = extract_unsigned_integer (buf, size,
+ byte_order);
+ CORE_ADDR l64 = extract_unsigned_integer (buf + size, size,
+ byte_order);
+
+ /* Copy the reversed bytes to the buffer. */
+ store_unsigned_integer (buf, size, BFD_ENDIAN_LITTLE, l64);
+ store_unsigned_integer (buf + size , size, BFD_ENDIAN_LITTLE, u64);
+
+ /* Now we can store the correct bytes for the V register. */
+ trad_frame_set_reg_value_bytes (cache, AARCH64_V0_REGNUM + vreg_num,
+ buf, V_REGISTER_SIZE);
+ trad_frame_set_reg_value_bytes (cache,
+ num_regs + AARCH64_Q0_REGNUM
+ + vreg_num, buf, Q_REGISTER_SIZE);
+ trad_frame_set_reg_value_bytes (cache,
+ num_regs + AARCH64_D0_REGNUM
+ + vreg_num, buf, D_REGISTER_SIZE);
+ trad_frame_set_reg_value_bytes (cache,
+ num_regs + AARCH64_S0_REGNUM
+ + vreg_num, buf, S_REGISTER_SIZE);
+ trad_frame_set_reg_value_bytes (cache,
+ num_regs + AARCH64_H0_REGNUM
+ + vreg_num, buf, H_REGISTER_SIZE);
+ trad_frame_set_reg_value_bytes (cache,
+ num_regs + AARCH64_B0_REGNUM
+ + vreg_num, buf, B_REGISTER_SIZE);
+
+ if (has_sve)
+ trad_frame_set_reg_value_bytes (cache,
+ num_regs + AARCH64_SVE_V0_REGNUM
+ + vreg_num, buf, V_REGISTER_SIZE);
+ }
+ return;
+ }
+
+ /* Little endian, just point at the address containing the register
+ value. */
+ trad_frame_set_reg_addr (cache, AARCH64_V0_REGNUM + vreg_num, offset);
+ trad_frame_set_reg_addr (cache, num_regs + AARCH64_Q0_REGNUM + vreg_num,
+ offset);
+ trad_frame_set_reg_addr (cache, num_regs + AARCH64_D0_REGNUM + vreg_num,
+ offset);
+ trad_frame_set_reg_addr (cache, num_regs + AARCH64_S0_REGNUM + vreg_num,
+ offset);
+ trad_frame_set_reg_addr (cache, num_regs + AARCH64_H0_REGNUM + vreg_num,
+ offset);
+ trad_frame_set_reg_addr (cache, num_regs + AARCH64_B0_REGNUM + vreg_num,
+ offset);
+
+ if (has_sve)
+ trad_frame_set_reg_addr (cache, num_regs + AARCH64_SVE_V0_REGNUM
+ + vreg_num, offset);
+
+}