+ /* NOTE(review): this hunk is the interior of a larger function; SECTION,
+ SECTION_END, TDEP, SVE_REGS, FPSIMD, NUM_REGS, EXTRA_FOUND, MAGIC, SIZE
+ and THIS_CACHE are all declared/initialised outside this view.  */
+ /* Search for the FP and SVE sections, stopping at null. */
+ while ((magic = read_aarch64_ctx (section, byte_order, &size)) != 0
+ && size != 0)
+ {
+ switch (magic)
+ {
+ /* Remember the FPSIMD section's start address; FPSR/FPCR and, if no
+ SVE dump is found, the V registers are mapped from it after the
+ scan completes (see below).  */
+ case AARCH64_FPSIMD_MAGIC:
+ fpsimd = section;
+ section += size;
+ break;
+
+ case AARCH64_SVE_MAGIC:
+ {
+ /* Check if the section is followed by a full SVE dump, and set
+ sve_regs if it is. */
+ gdb_byte buf[4];
+ uint16_t vq;
+
+ /* NOTE(review): this break leaves SECTION unchanged, so the next
+ read_aarch64_ctx call re-reads the same SVE header -- looks like
+ a potential infinite loop when the signal frame contains an SVE
+ section but the target description has no SVE; confirm against
+ the full function / upstream history.  */
+ if (!tdep->has_sve ())
+ break;
+
+ /* The vector length is a 16-bit field at a fixed offset into the
+ sve_context record; only 2 of BUF's 4 bytes are used.  */
+ if (target_read_memory (section + AARCH64_SVE_CONTEXT_VL_OFFSET,
+ buf, 2) != 0)
+ {
+ section += size;
+ break;
+ }
+ vq = sve_vq_from_vl (extract_unsigned_integer (buf, 2, byte_order));
+
+ /* NOTE(review): message formats VQ with %d but TDEP->VQ via
+ pulongest/%s -- works (integer promotion), but inconsistent.  */
+ if (vq != tdep->vq)
+ error (_("Invalid vector length in signal frame %d vs %s."), vq,
+ pulongest (tdep->vq));
+
+ /* A header-only (minimal-size) SVE record carries no register
+ data; only a full-size record sets SVE_REGS.  */
+ if (size >= AARCH64_SVE_CONTEXT_SIZE (vq))
+ sve_regs = section + AARCH64_SVE_CONTEXT_REGS_OFFSET;
+
+ section += size;
+ break;
+ }
+
+ case AARCH64_EXTRA_MAGIC:
+ {
+ /* Extra is always the last valid section in reserved and points to
+ an additional block of memory filled with more sections. Reset
+ the address to the extra section and continue looking for more
+ structures. */
+ gdb_byte buf[8];
+
+ /* The extra record holds a 64-bit pointer to the continuation
+ block; on read failure, fall through to the next section.  */
+ if (target_read_memory (section + AARCH64_EXTRA_DATAP_OFFSET,
+ buf, 8) != 0)
+ {
+ section += size;
+ break;
+ }
+
+ section = extract_unsigned_integer (buf, 8, byte_order);
+ extra_found = true;
+ break;
+ }
+
+ default:
+ section += size;
+ break;
+ }
+
+ /* Prevent searching past the end of the reserved section. The extra
+ section does not have a hard coded limit - we have to rely on it ending
+ with nulls. */
+ /* NOTE(review): EXTRA_FOUND is never cleared, so once the extra block is
+ entered the SECTION_END bound check is disabled for good -- the null
+ terminator is the only stop condition from then on.  */
+ if (!extra_found && section > section_end)
+ break;
+ }
+
+ /* A full SVE dump was found: each Z register occupies VQ*16 bytes, and
+ the V/Q/D/S/H/B views are mapped to the same address (NUM_REGS offsets
+ presumably select the pseudo-register bank -- TODO confirm against the
+ gdbarch register numbering).  */
+ if (sve_regs != 0)
+ {
+ CORE_ADDR offset;
+
+ for (int i = 0; i < 32; i++)
+ {
+ offset = sve_regs + (i * tdep->vq * 16);
+ trad_frame_set_reg_addr (this_cache, AARCH64_SVE_Z0_REGNUM + i,
+ offset);
+ trad_frame_set_reg_addr (this_cache,
+ num_regs + AARCH64_SVE_V0_REGNUM + i,
+ offset);
+ trad_frame_set_reg_addr (this_cache, num_regs + AARCH64_Q0_REGNUM + i,
+ offset);
+ trad_frame_set_reg_addr (this_cache, num_regs + AARCH64_D0_REGNUM + i,
+ offset);
+ trad_frame_set_reg_addr (this_cache, num_regs + AARCH64_S0_REGNUM + i,
+ offset);
+ trad_frame_set_reg_addr (this_cache, num_regs + AARCH64_H0_REGNUM + i,
+ offset);
+ trad_frame_set_reg_addr (this_cache, num_regs + AARCH64_B0_REGNUM + i,
+ offset);
+ }
+
+ /* Predicate registers follow the Z block; each is VQ*2 bytes.  */
+ offset = sve_regs + AARCH64_SVE_CONTEXT_P_REGS_OFFSET (tdep->vq);
+ for (int i = 0; i < 16; i++)
+ trad_frame_set_reg_addr (this_cache, AARCH64_SVE_P0_REGNUM + i,
+ offset + (i * tdep->vq * 2));
+
+ offset = sve_regs + AARCH64_SVE_CONTEXT_FFR_OFFSET (tdep->vq);
+ trad_frame_set_reg_addr (this_cache, AARCH64_SVE_FFR_REGNUM, offset);
+ }
+
+ /* The FPSIMD section always supplies FPSR/FPCR; the V registers are taken
+ from it only when no SVE dump provided them above.  */
+ if (fpsimd != 0)
+ {
+ trad_frame_set_reg_addr (this_cache, AARCH64_FPSR_REGNUM,
+ fpsimd + AARCH64_FPSIMD_FPSR_OFFSET);
+ trad_frame_set_reg_addr (this_cache, AARCH64_FPCR_REGNUM,
+ fpsimd + AARCH64_FPSIMD_FPCR_OFFSET);
+
+ /* If there was no SVE section then set up the V registers. */
+ if (sve_regs == 0)
+ for (int i = 0; i < 32; i++)
+ {
+ CORE_ADDR offset = (fpsimd + AARCH64_FPSIMD_V0_OFFSET
+ + (i * AARCH64_FPSIMD_VREG_SIZE));
+
+ trad_frame_set_reg_addr (this_cache, AARCH64_V0_REGNUM + i, offset);
+ trad_frame_set_reg_addr (this_cache,
+ num_regs + AARCH64_Q0_REGNUM + i, offset);
+ trad_frame_set_reg_addr (this_cache,
+ num_regs + AARCH64_D0_REGNUM + i, offset);
+ trad_frame_set_reg_addr (this_cache,
+ num_regs + AARCH64_S0_REGNUM + i, offset);
+ trad_frame_set_reg_addr (this_cache,
+ num_regs + AARCH64_H0_REGNUM + i, offset);
+ trad_frame_set_reg_addr (this_cache,
+ num_regs + AARCH64_B0_REGNUM + i, offset);
+ if (tdep->has_sve ())
+ trad_frame_set_reg_addr (this_cache,
+ num_regs + AARCH64_SVE_V0_REGNUM + i,
+ offset);
+ }
+ }
+