1 /* Common target dependent code for GDB on AArch64 systems.
3 Copyright (C) 2009-2019 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
6 This file is part of GDB.
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
29 #include "reggroups.h"
31 #include "arch-utils.h"
33 #include "frame-unwind.h"
34 #include "frame-base.h"
35 #include "trad-frame.h"
37 #include "dwarf2-frame.h"
39 #include "prologue-value.h"
40 #include "target-descriptions.h"
41 #include "user-regs.h"
46 #include "common/selftest.h"
48 #include "aarch64-tdep.h"
49 #include "aarch64-ravenscar-thread.h"
52 #include "elf/aarch64.h"
54 #include "common/vec.h"
57 #include "record-full.h"
58 #include "arch/aarch64-insn.h"
60 #include "opcode/aarch64.h"
/* Bit-extraction helpers for decoding AArch64 instruction words.
   submask(x): a mask of bits [0, x] inclusive, i.e. x+1 low-order ones.
   NOTE(review): uses 1L, so x must be < 63 to avoid shift overflow —
   callers pass field widths well below that.  */
63 #define submask(x) ((1L << ((x) + 1)) - 1)
/* bit(obj, st): extract the single bit at position ST of OBJ.  */
64 #define bit(obj,st) (((obj) >> (st)) & 1)
/* bits(obj, st, fn): extract the bit-field [ST, FN] inclusive of OBJ.  */
65 #define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
/* A Homogeneous Floating-Point or Short-Vector Aggregate may have at most
   four members.  */
#define HA_MAX_NUM_FLDS		4
71 /* All possible aarch64 target descriptors. */
72 struct target_desc
*tdesc_aarch64_list
[AARCH64_MAX_SVE_VQ
+ 1][2/*pauth*/];
74 /* The standard register names, and all the valid aliases for them. */
77 const char *const name
;
79 } aarch64_register_aliases
[] =
81 /* 64-bit register names. */
82 {"fp", AARCH64_FP_REGNUM
},
83 {"lr", AARCH64_LR_REGNUM
},
84 {"sp", AARCH64_SP_REGNUM
},
86 /* 32-bit register names. */
87 {"w0", AARCH64_X0_REGNUM
+ 0},
88 {"w1", AARCH64_X0_REGNUM
+ 1},
89 {"w2", AARCH64_X0_REGNUM
+ 2},
90 {"w3", AARCH64_X0_REGNUM
+ 3},
91 {"w4", AARCH64_X0_REGNUM
+ 4},
92 {"w5", AARCH64_X0_REGNUM
+ 5},
93 {"w6", AARCH64_X0_REGNUM
+ 6},
94 {"w7", AARCH64_X0_REGNUM
+ 7},
95 {"w8", AARCH64_X0_REGNUM
+ 8},
96 {"w9", AARCH64_X0_REGNUM
+ 9},
97 {"w10", AARCH64_X0_REGNUM
+ 10},
98 {"w11", AARCH64_X0_REGNUM
+ 11},
99 {"w12", AARCH64_X0_REGNUM
+ 12},
100 {"w13", AARCH64_X0_REGNUM
+ 13},
101 {"w14", AARCH64_X0_REGNUM
+ 14},
102 {"w15", AARCH64_X0_REGNUM
+ 15},
103 {"w16", AARCH64_X0_REGNUM
+ 16},
104 {"w17", AARCH64_X0_REGNUM
+ 17},
105 {"w18", AARCH64_X0_REGNUM
+ 18},
106 {"w19", AARCH64_X0_REGNUM
+ 19},
107 {"w20", AARCH64_X0_REGNUM
+ 20},
108 {"w21", AARCH64_X0_REGNUM
+ 21},
109 {"w22", AARCH64_X0_REGNUM
+ 22},
110 {"w23", AARCH64_X0_REGNUM
+ 23},
111 {"w24", AARCH64_X0_REGNUM
+ 24},
112 {"w25", AARCH64_X0_REGNUM
+ 25},
113 {"w26", AARCH64_X0_REGNUM
+ 26},
114 {"w27", AARCH64_X0_REGNUM
+ 27},
115 {"w28", AARCH64_X0_REGNUM
+ 28},
116 {"w29", AARCH64_X0_REGNUM
+ 29},
117 {"w30", AARCH64_X0_REGNUM
+ 30},
120 {"ip0", AARCH64_X0_REGNUM
+ 16},
121 {"ip1", AARCH64_X0_REGNUM
+ 17}
/* The required core 'R' registers.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM! */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  "pc", "cpsr"
};
/* The FP/SIMD 'V' registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM! */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  "fpsr",
  "fpcr"
};
/* The SVE 'Z' and 'P' registers.  */
static const char *const aarch64_sve_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_SVE_Z0_REGNUM! */
  "z0", "z1", "z2", "z3",
  "z4", "z5", "z6", "z7",
  "z8", "z9", "z10", "z11",
  "z12", "z13", "z14", "z15",
  "z16", "z17", "z18", "z19",
  "z20", "z21", "z22", "z23",
  "z24", "z25", "z26", "z27",
  "z28", "z29", "z30", "z31",
  "fpsr", "fpcr", /* status registers */
  "p0", "p1", "p2", "p3",
  "p4", "p5", "p6", "p7",
  "p8", "p9", "p10", "p11",
  "p12", "p13", "p14", "p15",
  "ffr", "vg"
};
/* The pointer-authentication mask pseudo registers.  */
static const char *const aarch64_pauth_register_names[] =
{
  /* Authentication mask for data pointer.  */
  "pauth_dmask",
  /* Authentication mask for code pointer.  */
  "pauth_cmask"
};
186 /* AArch64 prologue cache structure. */
187 struct aarch64_prologue_cache
189 /* The program counter at the start of the function. It is used to
190 identify this frame as a prologue frame. */
193 /* The program counter at the time this frame was created; i.e. where
194 this function was called from. It is used to identify this frame as a
198 /* The stack pointer at the time this frame was created; i.e. the
199 caller's stack pointer when this function was called. It is used
200 to identify this frame. */
203 /* Is the target available to read from? */
206 /* The frame base for this frame is just prev_sp - frame size.
207 FRAMESIZE is the distance from the frame pointer to the
208 initial stack pointer. */
211 /* The register used to hold the frame pointer for this frame. */
214 /* Saved register offsets. */
215 struct trad_frame_saved_reg
*saved_regs
;
219 show_aarch64_debug (struct ui_file
*file
, int from_tty
,
220 struct cmd_list_element
*c
, const char *value
)
222 fprintf_filtered (file
, _("AArch64 debugging is %s.\n"), value
);
227 /* Abstract instruction reader. */
229 class abstract_instruction_reader
232 /* Read in one instruction. */
233 virtual ULONGEST
read (CORE_ADDR memaddr
, int len
,
234 enum bfd_endian byte_order
) = 0;
237 /* Instruction reader from real target. */
239 class instruction_reader
: public abstract_instruction_reader
242 ULONGEST
read (CORE_ADDR memaddr
, int len
, enum bfd_endian byte_order
)
245 return read_code_unsigned_integer (memaddr
, len
, byte_order
);
251 /* Analyze a prologue, looking for a recognizable stack frame
252 and frame pointer. Scan until we encounter a store that could
253 clobber the stack frame unexpectedly, or an unknown instruction. */
256 aarch64_analyze_prologue (struct gdbarch
*gdbarch
,
257 CORE_ADDR start
, CORE_ADDR limit
,
258 struct aarch64_prologue_cache
*cache
,
259 abstract_instruction_reader
& reader
)
261 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
263 /* Track X registers and D registers in prologue. */
264 pv_t regs
[AARCH64_X_REGISTER_COUNT
+ AARCH64_D_REGISTER_COUNT
];
266 for (i
= 0; i
< AARCH64_X_REGISTER_COUNT
+ AARCH64_D_REGISTER_COUNT
; i
++)
267 regs
[i
] = pv_register (i
, 0);
268 pv_area
stack (AARCH64_SP_REGNUM
, gdbarch_addr_bit (gdbarch
));
270 for (; start
< limit
; start
+= 4)
275 insn
= reader
.read (start
, 4, byte_order_for_code
);
277 if (aarch64_decode_insn (insn
, &inst
, 1, NULL
) != 0)
280 if (inst
.opcode
->iclass
== addsub_imm
281 && (inst
.opcode
->op
== OP_ADD
282 || strcmp ("sub", inst
.opcode
->name
) == 0))
284 unsigned rd
= inst
.operands
[0].reg
.regno
;
285 unsigned rn
= inst
.operands
[1].reg
.regno
;
287 gdb_assert (aarch64_num_of_operands (inst
.opcode
) == 3);
288 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_Rd_SP
);
289 gdb_assert (inst
.operands
[1].type
== AARCH64_OPND_Rn_SP
);
290 gdb_assert (inst
.operands
[2].type
== AARCH64_OPND_AIMM
);
292 if (inst
.opcode
->op
== OP_ADD
)
294 regs
[rd
] = pv_add_constant (regs
[rn
],
295 inst
.operands
[2].imm
.value
);
299 regs
[rd
] = pv_add_constant (regs
[rn
],
300 -inst
.operands
[2].imm
.value
);
303 else if (inst
.opcode
->iclass
== pcreladdr
304 && inst
.operands
[1].type
== AARCH64_OPND_ADDR_ADRP
)
306 gdb_assert (aarch64_num_of_operands (inst
.opcode
) == 2);
307 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_Rd
);
309 regs
[inst
.operands
[0].reg
.regno
] = pv_unknown ();
311 else if (inst
.opcode
->iclass
== branch_imm
)
313 /* Stop analysis on branch. */
316 else if (inst
.opcode
->iclass
== condbranch
)
318 /* Stop analysis on branch. */
321 else if (inst
.opcode
->iclass
== branch_reg
)
323 /* Stop analysis on branch. */
326 else if (inst
.opcode
->iclass
== compbranch
)
328 /* Stop analysis on branch. */
331 else if (inst
.opcode
->op
== OP_MOVZ
)
333 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_Rd
);
334 regs
[inst
.operands
[0].reg
.regno
] = pv_unknown ();
336 else if (inst
.opcode
->iclass
== log_shift
337 && strcmp (inst
.opcode
->name
, "orr") == 0)
339 unsigned rd
= inst
.operands
[0].reg
.regno
;
340 unsigned rn
= inst
.operands
[1].reg
.regno
;
341 unsigned rm
= inst
.operands
[2].reg
.regno
;
343 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_Rd
);
344 gdb_assert (inst
.operands
[1].type
== AARCH64_OPND_Rn
);
345 gdb_assert (inst
.operands
[2].type
== AARCH64_OPND_Rm_SFT
);
347 if (inst
.operands
[2].shifter
.amount
== 0
348 && rn
== AARCH64_SP_REGNUM
)
354 debug_printf ("aarch64: prologue analysis gave up "
355 "addr=%s opcode=0x%x (orr x register)\n",
356 core_addr_to_string_nz (start
), insn
);
361 else if (inst
.opcode
->op
== OP_STUR
)
363 unsigned rt
= inst
.operands
[0].reg
.regno
;
364 unsigned rn
= inst
.operands
[1].addr
.base_regno
;
366 = (aarch64_get_qualifier_esize (inst
.operands
[0].qualifier
) == 8);
368 gdb_assert (aarch64_num_of_operands (inst
.opcode
) == 2);
369 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_Rt
);
370 gdb_assert (inst
.operands
[1].type
== AARCH64_OPND_ADDR_SIMM9
);
371 gdb_assert (!inst
.operands
[1].addr
.offset
.is_reg
);
373 stack
.store (pv_add_constant (regs
[rn
],
374 inst
.operands
[1].addr
.offset
.imm
),
375 is64
? 8 : 4, regs
[rt
]);
377 else if ((inst
.opcode
->iclass
== ldstpair_off
378 || (inst
.opcode
->iclass
== ldstpair_indexed
379 && inst
.operands
[2].addr
.preind
))
380 && strcmp ("stp", inst
.opcode
->name
) == 0)
382 /* STP with addressing mode Pre-indexed and Base register. */
385 unsigned rn
= inst
.operands
[2].addr
.base_regno
;
386 int32_t imm
= inst
.operands
[2].addr
.offset
.imm
;
388 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_Rt
389 || inst
.operands
[0].type
== AARCH64_OPND_Ft
);
390 gdb_assert (inst
.operands
[1].type
== AARCH64_OPND_Rt2
391 || inst
.operands
[1].type
== AARCH64_OPND_Ft2
);
392 gdb_assert (inst
.operands
[2].type
== AARCH64_OPND_ADDR_SIMM7
);
393 gdb_assert (!inst
.operands
[2].addr
.offset
.is_reg
);
395 /* If recording this store would invalidate the store area
396 (perhaps because rn is not known) then we should abandon
397 further prologue analysis. */
398 if (stack
.store_would_trash (pv_add_constant (regs
[rn
], imm
)))
401 if (stack
.store_would_trash (pv_add_constant (regs
[rn
], imm
+ 8)))
404 rt1
= inst
.operands
[0].reg
.regno
;
405 rt2
= inst
.operands
[1].reg
.regno
;
406 if (inst
.operands
[0].type
== AARCH64_OPND_Ft
)
408 /* Only bottom 64-bit of each V register (D register) need
410 gdb_assert (inst
.operands
[0].qualifier
== AARCH64_OPND_QLF_S_D
);
411 rt1
+= AARCH64_X_REGISTER_COUNT
;
412 rt2
+= AARCH64_X_REGISTER_COUNT
;
415 stack
.store (pv_add_constant (regs
[rn
], imm
), 8,
417 stack
.store (pv_add_constant (regs
[rn
], imm
+ 8), 8,
420 if (inst
.operands
[2].addr
.writeback
)
421 regs
[rn
] = pv_add_constant (regs
[rn
], imm
);
424 else if ((inst
.opcode
->iclass
== ldst_imm9
/* Signed immediate. */
425 || (inst
.opcode
->iclass
== ldst_pos
/* Unsigned immediate. */
426 && (inst
.opcode
->op
== OP_STR_POS
427 || inst
.opcode
->op
== OP_STRF_POS
)))
428 && inst
.operands
[1].addr
.base_regno
== AARCH64_SP_REGNUM
429 && strcmp ("str", inst
.opcode
->name
) == 0)
431 /* STR (immediate) */
432 unsigned int rt
= inst
.operands
[0].reg
.regno
;
433 int32_t imm
= inst
.operands
[1].addr
.offset
.imm
;
434 unsigned int rn
= inst
.operands
[1].addr
.base_regno
;
436 = (aarch64_get_qualifier_esize (inst
.operands
[0].qualifier
) == 8);
437 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_Rt
438 || inst
.operands
[0].type
== AARCH64_OPND_Ft
);
440 if (inst
.operands
[0].type
== AARCH64_OPND_Ft
)
442 /* Only bottom 64-bit of each V register (D register) need
444 gdb_assert (inst
.operands
[0].qualifier
== AARCH64_OPND_QLF_S_D
);
445 rt
+= AARCH64_X_REGISTER_COUNT
;
448 stack
.store (pv_add_constant (regs
[rn
], imm
),
449 is64
? 8 : 4, regs
[rt
]);
450 if (inst
.operands
[1].addr
.writeback
)
451 regs
[rn
] = pv_add_constant (regs
[rn
], imm
);
453 else if (inst
.opcode
->iclass
== testbranch
)
455 /* Stop analysis on branch. */
462 debug_printf ("aarch64: prologue analysis gave up addr=%s"
464 core_addr_to_string_nz (start
), insn
);
473 if (pv_is_register (regs
[AARCH64_FP_REGNUM
], AARCH64_SP_REGNUM
))
475 /* Frame pointer is fp. Frame size is constant. */
476 cache
->framereg
= AARCH64_FP_REGNUM
;
477 cache
->framesize
= -regs
[AARCH64_FP_REGNUM
].k
;
479 else if (pv_is_register (regs
[AARCH64_SP_REGNUM
], AARCH64_SP_REGNUM
))
481 /* Try the stack pointer. */
482 cache
->framesize
= -regs
[AARCH64_SP_REGNUM
].k
;
483 cache
->framereg
= AARCH64_SP_REGNUM
;
487 /* We're just out of luck. We don't know where the frame is. */
488 cache
->framereg
= -1;
489 cache
->framesize
= 0;
492 for (i
= 0; i
< AARCH64_X_REGISTER_COUNT
; i
++)
496 if (stack
.find_reg (gdbarch
, i
, &offset
))
497 cache
->saved_regs
[i
].addr
= offset
;
500 for (i
= 0; i
< AARCH64_D_REGISTER_COUNT
; i
++)
502 int regnum
= gdbarch_num_regs (gdbarch
);
505 if (stack
.find_reg (gdbarch
, i
+ AARCH64_X_REGISTER_COUNT
,
507 cache
->saved_regs
[i
+ regnum
+ AARCH64_D0_REGNUM
].addr
= offset
;
514 aarch64_analyze_prologue (struct gdbarch
*gdbarch
,
515 CORE_ADDR start
, CORE_ADDR limit
,
516 struct aarch64_prologue_cache
*cache
)
518 instruction_reader reader
;
520 return aarch64_analyze_prologue (gdbarch
, start
, limit
, cache
,
526 namespace selftests
{
528 /* Instruction reader from manually cooked instruction sequences. */
530 class instruction_reader_test
: public abstract_instruction_reader
533 template<size_t SIZE
>
534 explicit instruction_reader_test (const uint32_t (&insns
)[SIZE
])
535 : m_insns (insns
), m_insns_size (SIZE
)
538 ULONGEST
read (CORE_ADDR memaddr
, int len
, enum bfd_endian byte_order
)
541 SELF_CHECK (len
== 4);
542 SELF_CHECK (memaddr
% 4 == 0);
543 SELF_CHECK (memaddr
/ 4 < m_insns_size
);
545 return m_insns
[memaddr
/ 4];
549 const uint32_t *m_insns
;
554 aarch64_analyze_prologue_test (void)
556 struct gdbarch_info info
;
558 gdbarch_info_init (&info
);
559 info
.bfd_arch_info
= bfd_scan_arch ("aarch64");
561 struct gdbarch
*gdbarch
= gdbarch_find_by_info (info
);
562 SELF_CHECK (gdbarch
!= NULL
);
564 /* Test the simple prologue in which frame pointer is used. */
566 struct aarch64_prologue_cache cache
;
567 cache
.saved_regs
= trad_frame_alloc_saved_regs (gdbarch
);
569 static const uint32_t insns
[] = {
570 0xa9af7bfd, /* stp x29, x30, [sp,#-272]! */
571 0x910003fd, /* mov x29, sp */
572 0x97ffffe6, /* bl 0x400580 */
574 instruction_reader_test
reader (insns
);
576 CORE_ADDR end
= aarch64_analyze_prologue (gdbarch
, 0, 128, &cache
, reader
);
577 SELF_CHECK (end
== 4 * 2);
579 SELF_CHECK (cache
.framereg
== AARCH64_FP_REGNUM
);
580 SELF_CHECK (cache
.framesize
== 272);
582 for (int i
= 0; i
< AARCH64_X_REGISTER_COUNT
; i
++)
584 if (i
== AARCH64_FP_REGNUM
)
585 SELF_CHECK (cache
.saved_regs
[i
].addr
== -272);
586 else if (i
== AARCH64_LR_REGNUM
)
587 SELF_CHECK (cache
.saved_regs
[i
].addr
== -264);
589 SELF_CHECK (cache
.saved_regs
[i
].addr
== -1);
592 for (int i
= 0; i
< AARCH64_D_REGISTER_COUNT
; i
++)
594 int regnum
= gdbarch_num_regs (gdbarch
);
596 SELF_CHECK (cache
.saved_regs
[i
+ regnum
+ AARCH64_D0_REGNUM
].addr
601 /* Test a prologue in which STR is used and frame pointer is not
604 struct aarch64_prologue_cache cache
;
605 cache
.saved_regs
= trad_frame_alloc_saved_regs (gdbarch
);
607 static const uint32_t insns
[] = {
608 0xf81d0ff3, /* str x19, [sp, #-48]! */
609 0xb9002fe0, /* str w0, [sp, #44] */
610 0xf90013e1, /* str x1, [sp, #32]*/
611 0xfd000fe0, /* str d0, [sp, #24] */
612 0xaa0203f3, /* mov x19, x2 */
613 0xf94013e0, /* ldr x0, [sp, #32] */
615 instruction_reader_test
reader (insns
);
617 CORE_ADDR end
= aarch64_analyze_prologue (gdbarch
, 0, 128, &cache
, reader
);
619 SELF_CHECK (end
== 4 * 5);
621 SELF_CHECK (cache
.framereg
== AARCH64_SP_REGNUM
);
622 SELF_CHECK (cache
.framesize
== 48);
624 for (int i
= 0; i
< AARCH64_X_REGISTER_COUNT
; i
++)
627 SELF_CHECK (cache
.saved_regs
[i
].addr
== -16);
629 SELF_CHECK (cache
.saved_regs
[i
].addr
== -48);
631 SELF_CHECK (cache
.saved_regs
[i
].addr
== -1);
634 for (int i
= 0; i
< AARCH64_D_REGISTER_COUNT
; i
++)
636 int regnum
= gdbarch_num_regs (gdbarch
);
639 SELF_CHECK (cache
.saved_regs
[i
+ regnum
+ AARCH64_D0_REGNUM
].addr
642 SELF_CHECK (cache
.saved_regs
[i
+ regnum
+ AARCH64_D0_REGNUM
].addr
647 } // namespace selftests
648 #endif /* GDB_SELF_TEST */
650 /* Implement the "skip_prologue" gdbarch method. */
653 aarch64_skip_prologue (struct gdbarch
*gdbarch
, CORE_ADDR pc
)
655 CORE_ADDR func_addr
, limit_pc
;
657 /* See if we can determine the end of the prologue via the symbol
658 table. If so, then return either PC, or the PC after the
659 prologue, whichever is greater. */
660 if (find_pc_partial_function (pc
, NULL
, &func_addr
, NULL
))
662 CORE_ADDR post_prologue_pc
663 = skip_prologue_using_sal (gdbarch
, func_addr
);
665 if (post_prologue_pc
!= 0)
666 return std::max (pc
, post_prologue_pc
);
669 /* Can't determine prologue from the symbol table, need to examine
672 /* Find an upper limit on the function prologue using the debug
673 information. If the debug information could not be used to
674 provide that bound, then use an arbitrary large number as the
676 limit_pc
= skip_prologue_using_sal (gdbarch
, pc
);
678 limit_pc
= pc
+ 128; /* Magic. */
680 /* Try disassembling prologue. */
681 return aarch64_analyze_prologue (gdbarch
, pc
, limit_pc
, NULL
);
684 /* Scan the function prologue for THIS_FRAME and populate the prologue
688 aarch64_scan_prologue (struct frame_info
*this_frame
,
689 struct aarch64_prologue_cache
*cache
)
691 CORE_ADDR block_addr
= get_frame_address_in_block (this_frame
);
692 CORE_ADDR prologue_start
;
693 CORE_ADDR prologue_end
;
694 CORE_ADDR prev_pc
= get_frame_pc (this_frame
);
695 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
697 cache
->prev_pc
= prev_pc
;
699 /* Assume we do not find a frame. */
700 cache
->framereg
= -1;
701 cache
->framesize
= 0;
703 if (find_pc_partial_function (block_addr
, NULL
, &prologue_start
,
706 struct symtab_and_line sal
= find_pc_line (prologue_start
, 0);
710 /* No line info so use the current PC. */
711 prologue_end
= prev_pc
;
713 else if (sal
.end
< prologue_end
)
715 /* The next line begins after the function end. */
716 prologue_end
= sal
.end
;
719 prologue_end
= std::min (prologue_end
, prev_pc
);
720 aarch64_analyze_prologue (gdbarch
, prologue_start
, prologue_end
, cache
);
726 frame_loc
= get_frame_register_unsigned (this_frame
, AARCH64_FP_REGNUM
);
730 cache
->framereg
= AARCH64_FP_REGNUM
;
731 cache
->framesize
= 16;
732 cache
->saved_regs
[29].addr
= 0;
733 cache
->saved_regs
[30].addr
= 8;
737 /* Fill in *CACHE with information about the prologue of *THIS_FRAME. This
738 function may throw an exception if the inferior's registers or memory is
742 aarch64_make_prologue_cache_1 (struct frame_info
*this_frame
,
743 struct aarch64_prologue_cache
*cache
)
745 CORE_ADDR unwound_fp
;
748 aarch64_scan_prologue (this_frame
, cache
);
750 if (cache
->framereg
== -1)
753 unwound_fp
= get_frame_register_unsigned (this_frame
, cache
->framereg
);
757 cache
->prev_sp
= unwound_fp
+ cache
->framesize
;
759 /* Calculate actual addresses of saved registers using offsets
760 determined by aarch64_analyze_prologue. */
761 for (reg
= 0; reg
< gdbarch_num_regs (get_frame_arch (this_frame
)); reg
++)
762 if (trad_frame_addr_p (cache
->saved_regs
, reg
))
763 cache
->saved_regs
[reg
].addr
+= cache
->prev_sp
;
765 cache
->func
= get_frame_func (this_frame
);
767 cache
->available_p
= 1;
770 /* Allocate and fill in *THIS_CACHE with information about the prologue of
771 *THIS_FRAME. Do not do this is if *THIS_CACHE was already allocated.
772 Return a pointer to the current aarch64_prologue_cache in
775 static struct aarch64_prologue_cache
*
776 aarch64_make_prologue_cache (struct frame_info
*this_frame
, void **this_cache
)
778 struct aarch64_prologue_cache
*cache
;
780 if (*this_cache
!= NULL
)
781 return (struct aarch64_prologue_cache
*) *this_cache
;
783 cache
= FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache
);
784 cache
->saved_regs
= trad_frame_alloc_saved_regs (this_frame
);
789 aarch64_make_prologue_cache_1 (this_frame
, cache
);
791 CATCH (ex
, RETURN_MASK_ERROR
)
793 if (ex
.error
!= NOT_AVAILABLE_ERROR
)
794 throw_exception (ex
);
801 /* Implement the "stop_reason" frame_unwind method. */
803 static enum unwind_stop_reason
804 aarch64_prologue_frame_unwind_stop_reason (struct frame_info
*this_frame
,
807 struct aarch64_prologue_cache
*cache
808 = aarch64_make_prologue_cache (this_frame
, this_cache
);
810 if (!cache
->available_p
)
811 return UNWIND_UNAVAILABLE
;
813 /* Halt the backtrace at "_start". */
814 if (cache
->prev_pc
<= gdbarch_tdep (get_frame_arch (this_frame
))->lowest_pc
)
815 return UNWIND_OUTERMOST
;
817 /* We've hit a wall, stop. */
818 if (cache
->prev_sp
== 0)
819 return UNWIND_OUTERMOST
;
821 return UNWIND_NO_REASON
;
824 /* Our frame ID for a normal frame is the current function's starting
825 PC and the caller's SP when we were called. */
828 aarch64_prologue_this_id (struct frame_info
*this_frame
,
829 void **this_cache
, struct frame_id
*this_id
)
831 struct aarch64_prologue_cache
*cache
832 = aarch64_make_prologue_cache (this_frame
, this_cache
);
834 if (!cache
->available_p
)
835 *this_id
= frame_id_build_unavailable_stack (cache
->func
);
837 *this_id
= frame_id_build (cache
->prev_sp
, cache
->func
);
840 /* Implement the "prev_register" frame_unwind method. */
842 static struct value
*
843 aarch64_prologue_prev_register (struct frame_info
*this_frame
,
844 void **this_cache
, int prev_regnum
)
846 struct aarch64_prologue_cache
*cache
847 = aarch64_make_prologue_cache (this_frame
, this_cache
);
849 /* If we are asked to unwind the PC, then we need to return the LR
850 instead. The prologue may save PC, but it will point into this
851 frame's prologue, not the next frame's resume location. */
852 if (prev_regnum
== AARCH64_PC_REGNUM
)
856 lr
= frame_unwind_register_unsigned (this_frame
, AARCH64_LR_REGNUM
);
857 return frame_unwind_got_constant (this_frame
, prev_regnum
, lr
);
860 /* SP is generally not saved to the stack, but this frame is
861 identified by the next frame's stack pointer at the time of the
862 call. The value was already reconstructed into PREV_SP. */
875 if (prev_regnum
== AARCH64_SP_REGNUM
)
876 return frame_unwind_got_constant (this_frame
, prev_regnum
,
879 return trad_frame_get_prev_register (this_frame
, cache
->saved_regs
,
883 /* AArch64 prologue unwinder. */
884 struct frame_unwind aarch64_prologue_unwind
=
887 aarch64_prologue_frame_unwind_stop_reason
,
888 aarch64_prologue_this_id
,
889 aarch64_prologue_prev_register
,
891 default_frame_sniffer
894 /* Allocate and fill in *THIS_CACHE with information about the prologue of
895 *THIS_FRAME. Do not do this is if *THIS_CACHE was already allocated.
896 Return a pointer to the current aarch64_prologue_cache in
899 static struct aarch64_prologue_cache
*
900 aarch64_make_stub_cache (struct frame_info
*this_frame
, void **this_cache
)
902 struct aarch64_prologue_cache
*cache
;
904 if (*this_cache
!= NULL
)
905 return (struct aarch64_prologue_cache
*) *this_cache
;
907 cache
= FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache
);
908 cache
->saved_regs
= trad_frame_alloc_saved_regs (this_frame
);
913 cache
->prev_sp
= get_frame_register_unsigned (this_frame
,
915 cache
->prev_pc
= get_frame_pc (this_frame
);
916 cache
->available_p
= 1;
918 CATCH (ex
, RETURN_MASK_ERROR
)
920 if (ex
.error
!= NOT_AVAILABLE_ERROR
)
921 throw_exception (ex
);
928 /* Implement the "stop_reason" frame_unwind method. */
930 static enum unwind_stop_reason
931 aarch64_stub_frame_unwind_stop_reason (struct frame_info
*this_frame
,
934 struct aarch64_prologue_cache
*cache
935 = aarch64_make_stub_cache (this_frame
, this_cache
);
937 if (!cache
->available_p
)
938 return UNWIND_UNAVAILABLE
;
940 return UNWIND_NO_REASON
;
943 /* Our frame ID for a stub frame is the current SP and LR. */
946 aarch64_stub_this_id (struct frame_info
*this_frame
,
947 void **this_cache
, struct frame_id
*this_id
)
949 struct aarch64_prologue_cache
*cache
950 = aarch64_make_stub_cache (this_frame
, this_cache
);
952 if (cache
->available_p
)
953 *this_id
= frame_id_build (cache
->prev_sp
, cache
->prev_pc
);
955 *this_id
= frame_id_build_unavailable_stack (cache
->prev_pc
);
958 /* Implement the "sniffer" frame_unwind method. */
961 aarch64_stub_unwind_sniffer (const struct frame_unwind
*self
,
962 struct frame_info
*this_frame
,
963 void **this_prologue_cache
)
965 CORE_ADDR addr_in_block
;
968 addr_in_block
= get_frame_address_in_block (this_frame
);
969 if (in_plt_section (addr_in_block
)
970 /* We also use the stub winder if the target memory is unreadable
971 to avoid having the prologue unwinder trying to read it. */
972 || target_read_memory (get_frame_pc (this_frame
), dummy
, 4) != 0)
978 /* AArch64 stub unwinder. */
979 struct frame_unwind aarch64_stub_unwind
=
982 aarch64_stub_frame_unwind_stop_reason
,
983 aarch64_stub_this_id
,
984 aarch64_prologue_prev_register
,
986 aarch64_stub_unwind_sniffer
989 /* Return the frame base address of *THIS_FRAME. */
992 aarch64_normal_frame_base (struct frame_info
*this_frame
, void **this_cache
)
994 struct aarch64_prologue_cache
*cache
995 = aarch64_make_prologue_cache (this_frame
, this_cache
);
997 return cache
->prev_sp
- cache
->framesize
;
1000 /* AArch64 default frame base information. */
1001 struct frame_base aarch64_normal_base
=
1003 &aarch64_prologue_unwind
,
1004 aarch64_normal_frame_base
,
1005 aarch64_normal_frame_base
,
1006 aarch64_normal_frame_base
1009 /* Return the value of the REGNUM register in the previous frame of
1012 static struct value
*
1013 aarch64_dwarf2_prev_register (struct frame_info
*this_frame
,
1014 void **this_cache
, int regnum
)
1020 case AARCH64_PC_REGNUM
:
1021 lr
= frame_unwind_register_unsigned (this_frame
, AARCH64_LR_REGNUM
);
1022 return frame_unwind_got_constant (this_frame
, regnum
, lr
);
1025 internal_error (__FILE__
, __LINE__
,
1026 _("Unexpected register %d"), regnum
);
1030 /* Implement the "init_reg" dwarf2_frame_ops method. */
1033 aarch64_dwarf2_frame_init_reg (struct gdbarch
*gdbarch
, int regnum
,
1034 struct dwarf2_frame_state_reg
*reg
,
1035 struct frame_info
*this_frame
)
1039 case AARCH64_PC_REGNUM
:
1040 reg
->how
= DWARF2_FRAME_REG_FN
;
1041 reg
->loc
.fn
= aarch64_dwarf2_prev_register
;
1043 case AARCH64_SP_REGNUM
:
1044 reg
->how
= DWARF2_FRAME_REG_CFA
;
1049 /* When arguments must be pushed onto the stack, they go on in reverse
1050 order. The code below implements a FILO (stack) to do this. */
1054 /* Value to pass on stack. It can be NULL if this item is for stack
1056 const gdb_byte
*data
;
1058 /* Size in bytes of value to pass on stack. */
1062 DEF_VEC_O (stack_item_t
);
1064 /* Return the alignment (in bytes) of the given type. */
1067 aarch64_type_align (struct type
*t
)
1073 t
= check_typedef (t
);
1074 switch (TYPE_CODE (t
))
1077 /* Should never happen. */
1078 internal_error (__FILE__
, __LINE__
, _("unknown type alignment"));
1082 case TYPE_CODE_ENUM
:
1086 case TYPE_CODE_RANGE
:
1087 case TYPE_CODE_BITSTRING
:
1089 case TYPE_CODE_RVALUE_REF
:
1090 case TYPE_CODE_CHAR
:
1091 case TYPE_CODE_BOOL
:
1092 return TYPE_LENGTH (t
);
1094 case TYPE_CODE_ARRAY
:
1095 if (TYPE_VECTOR (t
))
1097 /* Use the natural alignment for vector types (the same for
1098 scalar type), but the maximum alignment is 128-bit. */
1099 if (TYPE_LENGTH (t
) > 16)
1102 return TYPE_LENGTH (t
);
1105 return aarch64_type_align (TYPE_TARGET_TYPE (t
));
1106 case TYPE_CODE_COMPLEX
:
1107 return aarch64_type_align (TYPE_TARGET_TYPE (t
));
1109 case TYPE_CODE_STRUCT
:
1110 case TYPE_CODE_UNION
:
1112 for (n
= 0; n
< TYPE_NFIELDS (t
); n
++)
1114 falign
= aarch64_type_align (TYPE_FIELD_TYPE (t
, n
));
1122 /* Worker function for aapcs_is_vfp_call_or_return_candidate.
1124 Return the number of register required, or -1 on failure.
1126 When encountering a base element, if FUNDAMENTAL_TYPE is not set then set it
1127 to the element, else fail if the type of this element does not match the
1131 aapcs_is_vfp_call_or_return_candidate_1 (struct type
*type
,
1132 struct type
**fundamental_type
)
1134 if (type
== nullptr)
1137 switch (TYPE_CODE (type
))
1140 if (TYPE_LENGTH (type
) > 16)
1143 if (*fundamental_type
== nullptr)
1144 *fundamental_type
= type
;
1145 else if (TYPE_LENGTH (type
) != TYPE_LENGTH (*fundamental_type
)
1146 || TYPE_CODE (type
) != TYPE_CODE (*fundamental_type
))
1151 case TYPE_CODE_COMPLEX
:
1153 struct type
*target_type
= check_typedef (TYPE_TARGET_TYPE (type
));
1154 if (TYPE_LENGTH (target_type
) > 16)
1157 if (*fundamental_type
== nullptr)
1158 *fundamental_type
= target_type
;
1159 else if (TYPE_LENGTH (target_type
) != TYPE_LENGTH (*fundamental_type
)
1160 || TYPE_CODE (target_type
) != TYPE_CODE (*fundamental_type
))
1166 case TYPE_CODE_ARRAY
:
1168 if (TYPE_VECTOR (type
))
1170 if (TYPE_LENGTH (type
) != 8 && TYPE_LENGTH (type
) != 16)
1173 if (*fundamental_type
== nullptr)
1174 *fundamental_type
= type
;
1175 else if (TYPE_LENGTH (type
) != TYPE_LENGTH (*fundamental_type
)
1176 || TYPE_CODE (type
) != TYPE_CODE (*fundamental_type
))
1183 struct type
*target_type
= TYPE_TARGET_TYPE (type
);
1184 int count
= aapcs_is_vfp_call_or_return_candidate_1
1185 (target_type
, fundamental_type
);
1190 count
*= (TYPE_LENGTH (type
) / TYPE_LENGTH (target_type
));
1195 case TYPE_CODE_STRUCT
:
1196 case TYPE_CODE_UNION
:
1200 for (int i
= 0; i
< TYPE_NFIELDS (type
); i
++)
1202 /* Ignore any static fields. */
1203 if (field_is_static (&TYPE_FIELD (type
, i
)))
1206 struct type
*member
= check_typedef (TYPE_FIELD_TYPE (type
, i
));
1208 int sub_count
= aapcs_is_vfp_call_or_return_candidate_1
1209 (member
, fundamental_type
);
1210 if (sub_count
== -1)
1215 /* Ensure there is no padding between the fields (allowing for empty
1216 zero length structs) */
1217 int ftype_length
= (*fundamental_type
== nullptr)
1218 ? 0 : TYPE_LENGTH (*fundamental_type
);
1219 if (count
* ftype_length
!= TYPE_LENGTH (type
))
1232 /* Return true if an argument, whose type is described by TYPE, can be passed or
1233 returned in simd/fp registers, providing enough parameter passing registers
1234 are available. This is as described in the AAPCS64.
1236 Upon successful return, *COUNT returns the number of needed registers,
1237 *FUNDAMENTAL_TYPE contains the type of those registers.
1239 Candidate as per the AAPCS64 5.4.2.C is either a:
1242 - HFA (Homogeneous Floating-point Aggregate, 4.3.5.1). A Composite type where
1243 all the members are floats and has at most 4 members.
1244 - HVA (Homogeneous Short-vector Aggregate, 4.3.5.2). A Composite type where
1245 all the members are short vectors and has at most 4 members.
1248 Note that HFAs and HVAs can include nested structures and arrays. */
1251 aapcs_is_vfp_call_or_return_candidate (struct type
*type
, int *count
,
1252 struct type
**fundamental_type
)
1254 if (type
== nullptr)
1257 *fundamental_type
= nullptr;
1259 int ag_count
= aapcs_is_vfp_call_or_return_candidate_1 (type
,
1262 if (ag_count
> 0 && ag_count
<= HA_MAX_NUM_FLDS
)
1271 /* AArch64 function call information structure. */
1272 struct aarch64_call_info
1274 /* the current argument number. */
1277 /* The next general purpose register number, equivalent to NGRN as
1278 described in the AArch64 Procedure Call Standard. */
1281 /* The next SIMD and floating point register number, equivalent to
1282 NSRN as described in the AArch64 Procedure Call Standard. */
1285 /* The next stacked argument address, equivalent to NSAA as
1286 described in the AArch64 Procedure Call Standard. */
1289 /* Stack item vector. */
1290 VEC(stack_item_t
) *si
;
1293 /* Pass a value in a sequence of consecutive X registers. The caller
1294 is responsbile for ensuring sufficient registers are available. */
1297 pass_in_x (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
1298 struct aarch64_call_info
*info
, struct type
*type
,
1301 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1302 int len
= TYPE_LENGTH (type
);
1303 enum type_code typecode
= TYPE_CODE (type
);
1304 int regnum
= AARCH64_X0_REGNUM
+ info
->ngrn
;
1305 const bfd_byte
*buf
= value_contents (arg
);
1311 int partial_len
= len
< X_REGISTER_SIZE
? len
: X_REGISTER_SIZE
;
1312 CORE_ADDR regval
= extract_unsigned_integer (buf
, partial_len
,
1316 /* Adjust sub-word struct/union args when big-endian. */
1317 if (byte_order
== BFD_ENDIAN_BIG
1318 && partial_len
< X_REGISTER_SIZE
1319 && (typecode
== TYPE_CODE_STRUCT
|| typecode
== TYPE_CODE_UNION
))
1320 regval
<<= ((X_REGISTER_SIZE
- partial_len
) * TARGET_CHAR_BIT
);
1324 debug_printf ("arg %d in %s = 0x%s\n", info
->argnum
,
1325 gdbarch_register_name (gdbarch
, regnum
),
1326 phex (regval
, X_REGISTER_SIZE
));
1328 regcache_cooked_write_unsigned (regcache
, regnum
, regval
);
1335 /* Attempt to marshall a value in a V register. Return 1 if
1336 successful, or 0 if insufficient registers are available. This
1337 function, unlike the equivalent pass_in_x() function does not
1338 handle arguments spread across multiple registers. */
1341 pass_in_v (struct gdbarch
*gdbarch
,
1342 struct regcache
*regcache
,
1343 struct aarch64_call_info
*info
,
1344 int len
, const bfd_byte
*buf
)
1348 int regnum
= AARCH64_V0_REGNUM
+ info
->nsrn
;
1349 /* Enough space for a full vector register. */
1350 gdb_byte reg
[register_size (gdbarch
, regnum
)];
1351 gdb_assert (len
<= sizeof (reg
));
1356 memset (reg
, 0, sizeof (reg
));
1357 /* PCS C.1, the argument is allocated to the least significant
1358 bits of V register. */
1359 memcpy (reg
, buf
, len
);
1360 regcache
->cooked_write (regnum
, reg
);
1364 debug_printf ("arg %d in %s\n", info
->argnum
,
1365 gdbarch_register_name (gdbarch
, regnum
));
1373 /* Marshall an argument onto the stack. */
1376 pass_on_stack (struct aarch64_call_info
*info
, struct type
*type
,
1379 const bfd_byte
*buf
= value_contents (arg
);
1380 int len
= TYPE_LENGTH (type
);
1386 align
= aarch64_type_align (type
);
1388 /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
1389 Natural alignment of the argument's type. */
1390 align
= align_up (align
, 8);
1392 /* The AArch64 PCS requires at most doubleword alignment. */
1398 debug_printf ("arg %d len=%d @ sp + %d\n", info
->argnum
, len
,
1404 VEC_safe_push (stack_item_t
, info
->si
, &item
);
1407 if (info
->nsaa
& (align
- 1))
1409 /* Push stack alignment padding. */
1410 int pad
= align
- (info
->nsaa
& (align
- 1));
1415 VEC_safe_push (stack_item_t
, info
->si
, &item
);
1420 /* Marshall an argument into a sequence of one or more consecutive X
1421 registers or, if insufficient X registers are available then onto
1425 pass_in_x_or_stack (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
1426 struct aarch64_call_info
*info
, struct type
*type
,
1429 int len
= TYPE_LENGTH (type
);
1430 int nregs
= (len
+ X_REGISTER_SIZE
- 1) / X_REGISTER_SIZE
;
1432 /* PCS C.13 - Pass in registers if we have enough spare */
1433 if (info
->ngrn
+ nregs
<= 8)
1435 pass_in_x (gdbarch
, regcache
, info
, type
, arg
);
1436 info
->ngrn
+= nregs
;
1441 pass_on_stack (info
, type
, arg
);
1445 /* Pass a value, which is of type arg_type, in a V register. Assumes value is a
1446 aapcs_is_vfp_call_or_return_candidate and there are enough spare V
1447 registers. A return value of false is an error state as the value will have
1448 been partially passed to the stack. */
1450 pass_in_v_vfp_candidate (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
1451 struct aarch64_call_info
*info
, struct type
*arg_type
,
1454 switch (TYPE_CODE (arg_type
))
1457 return pass_in_v (gdbarch
, regcache
, info
, TYPE_LENGTH (arg_type
),
1458 value_contents (arg
));
1461 case TYPE_CODE_COMPLEX
:
1463 const bfd_byte
*buf
= value_contents (arg
);
1464 struct type
*target_type
= check_typedef (TYPE_TARGET_TYPE (arg_type
));
1466 if (!pass_in_v (gdbarch
, regcache
, info
, TYPE_LENGTH (target_type
),
1470 return pass_in_v (gdbarch
, regcache
, info
, TYPE_LENGTH (target_type
),
1471 buf
+ TYPE_LENGTH (target_type
));
1474 case TYPE_CODE_ARRAY
:
1475 if (TYPE_VECTOR (arg_type
))
1476 return pass_in_v (gdbarch
, regcache
, info
, TYPE_LENGTH (arg_type
),
1477 value_contents (arg
));
1480 case TYPE_CODE_STRUCT
:
1481 case TYPE_CODE_UNION
:
1482 for (int i
= 0; i
< TYPE_NFIELDS (arg_type
); i
++)
1484 /* Don't include static fields. */
1485 if (field_is_static (&TYPE_FIELD (arg_type
, i
)))
1488 struct value
*field
= value_primitive_field (arg
, 0, i
, arg_type
);
1489 struct type
*field_type
= check_typedef (value_type (field
));
1491 if (!pass_in_v_vfp_candidate (gdbarch
, regcache
, info
, field_type
,
1502 /* Implement the "push_dummy_call" gdbarch method. */
1505 aarch64_push_dummy_call (struct gdbarch
*gdbarch
, struct value
*function
,
1506 struct regcache
*regcache
, CORE_ADDR bp_addr
,
1508 struct value
**args
, CORE_ADDR sp
,
1509 function_call_return_method return_method
,
1510 CORE_ADDR struct_addr
)
1513 struct aarch64_call_info info
;
1515 memset (&info
, 0, sizeof (info
));
1517 /* We need to know what the type of the called function is in order
1518 to determine the number of named/anonymous arguments for the
1519 actual argument placement, and the return type in order to handle
1520 return value correctly.
1522 The generic code above us views the decision of return in memory
1523 or return in registers as a two stage processes. The language
1524 handler is consulted first and may decide to return in memory (eg
1525 class with copy constructor returned by value), this will cause
1526 the generic code to allocate space AND insert an initial leading
1529 If the language code does not decide to pass in memory then the
1530 target code is consulted.
1532 If the language code decides to pass in memory we want to move
1533 the pointer inserted as the initial argument from the argument
1534 list and into X8, the conventional AArch64 struct return pointer
1537 /* Set the return address. For the AArch64, the return breakpoint
1538 is always at BP_ADDR. */
1539 regcache_cooked_write_unsigned (regcache
, AARCH64_LR_REGNUM
, bp_addr
);
1541 /* If we were given an initial argument for the return slot, lose it. */
1542 if (return_method
== return_method_hidden_param
)
1548 /* The struct_return pointer occupies X8. */
1549 if (return_method
!= return_method_normal
)
1553 debug_printf ("struct return in %s = 0x%s\n",
1554 gdbarch_register_name (gdbarch
,
1555 AARCH64_STRUCT_RETURN_REGNUM
),
1556 paddress (gdbarch
, struct_addr
));
1558 regcache_cooked_write_unsigned (regcache
, AARCH64_STRUCT_RETURN_REGNUM
,
1562 for (argnum
= 0; argnum
< nargs
; argnum
++)
1564 struct value
*arg
= args
[argnum
];
1565 struct type
*arg_type
, *fundamental_type
;
1568 arg_type
= check_typedef (value_type (arg
));
1569 len
= TYPE_LENGTH (arg_type
);
1571 /* If arg can be passed in v registers as per the AAPCS64, then do so if
1572 if there are enough spare registers. */
1573 if (aapcs_is_vfp_call_or_return_candidate (arg_type
, &elements
,
1576 if (info
.nsrn
+ elements
<= 8)
1578 /* We know that we have sufficient registers available therefore
1579 this will never need to fallback to the stack. */
1580 if (!pass_in_v_vfp_candidate (gdbarch
, regcache
, &info
, arg_type
,
1582 gdb_assert_not_reached ("Failed to push args");
1587 pass_on_stack (&info
, arg_type
, arg
);
1592 switch (TYPE_CODE (arg_type
))
1595 case TYPE_CODE_BOOL
:
1596 case TYPE_CODE_CHAR
:
1597 case TYPE_CODE_RANGE
:
1598 case TYPE_CODE_ENUM
:
1601 /* Promote to 32 bit integer. */
1602 if (TYPE_UNSIGNED (arg_type
))
1603 arg_type
= builtin_type (gdbarch
)->builtin_uint32
;
1605 arg_type
= builtin_type (gdbarch
)->builtin_int32
;
1606 arg
= value_cast (arg_type
, arg
);
1608 pass_in_x_or_stack (gdbarch
, regcache
, &info
, arg_type
, arg
);
1611 case TYPE_CODE_STRUCT
:
1612 case TYPE_CODE_ARRAY
:
1613 case TYPE_CODE_UNION
:
1616 /* PCS B.7 Aggregates larger than 16 bytes are passed by
1617 invisible reference. */
1619 /* Allocate aligned storage. */
1620 sp
= align_down (sp
- len
, 16);
1622 /* Write the real data into the stack. */
1623 write_memory (sp
, value_contents (arg
), len
);
1625 /* Construct the indirection. */
1626 arg_type
= lookup_pointer_type (arg_type
);
1627 arg
= value_from_pointer (arg_type
, sp
);
1628 pass_in_x_or_stack (gdbarch
, regcache
, &info
, arg_type
, arg
);
1631 /* PCS C.15 / C.18 multiple values pass. */
1632 pass_in_x_or_stack (gdbarch
, regcache
, &info
, arg_type
, arg
);
1636 pass_in_x_or_stack (gdbarch
, regcache
, &info
, arg_type
, arg
);
1641 /* Make sure stack retains 16 byte alignment. */
1643 sp
-= 16 - (info
.nsaa
& 15);
1645 while (!VEC_empty (stack_item_t
, info
.si
))
1647 stack_item_t
*si
= VEC_last (stack_item_t
, info
.si
);
1650 if (si
->data
!= NULL
)
1651 write_memory (sp
, si
->data
, si
->len
);
1652 VEC_pop (stack_item_t
, info
.si
);
1655 VEC_free (stack_item_t
, info
.si
);
1657 /* Finally, update the SP register. */
1658 regcache_cooked_write_unsigned (regcache
, AARCH64_SP_REGNUM
, sp
);
1663 /* Implement the "frame_align" gdbarch method. */
1666 aarch64_frame_align (struct gdbarch
*gdbarch
, CORE_ADDR sp
)
1668 /* Align the stack to sixteen bytes. */
1669 return sp
& ~(CORE_ADDR
) 15;
1672 /* Return the type for an AdvSISD Q register. */
1674 static struct type
*
1675 aarch64_vnq_type (struct gdbarch
*gdbarch
)
1677 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1679 if (tdep
->vnq_type
== NULL
)
1684 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnq",
1687 elem
= builtin_type (gdbarch
)->builtin_uint128
;
1688 append_composite_type_field (t
, "u", elem
);
1690 elem
= builtin_type (gdbarch
)->builtin_int128
;
1691 append_composite_type_field (t
, "s", elem
);
1696 return tdep
->vnq_type
;
1699 /* Return the type for an AdvSISD D register. */
1701 static struct type
*
1702 aarch64_vnd_type (struct gdbarch
*gdbarch
)
1704 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1706 if (tdep
->vnd_type
== NULL
)
1711 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnd",
1714 elem
= builtin_type (gdbarch
)->builtin_double
;
1715 append_composite_type_field (t
, "f", elem
);
1717 elem
= builtin_type (gdbarch
)->builtin_uint64
;
1718 append_composite_type_field (t
, "u", elem
);
1720 elem
= builtin_type (gdbarch
)->builtin_int64
;
1721 append_composite_type_field (t
, "s", elem
);
1726 return tdep
->vnd_type
;
1729 /* Return the type for an AdvSISD S register. */
1731 static struct type
*
1732 aarch64_vns_type (struct gdbarch
*gdbarch
)
1734 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1736 if (tdep
->vns_type
== NULL
)
1741 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vns",
1744 elem
= builtin_type (gdbarch
)->builtin_float
;
1745 append_composite_type_field (t
, "f", elem
);
1747 elem
= builtin_type (gdbarch
)->builtin_uint32
;
1748 append_composite_type_field (t
, "u", elem
);
1750 elem
= builtin_type (gdbarch
)->builtin_int32
;
1751 append_composite_type_field (t
, "s", elem
);
1756 return tdep
->vns_type
;
1759 /* Return the type for an AdvSISD H register. */
1761 static struct type
*
1762 aarch64_vnh_type (struct gdbarch
*gdbarch
)
1764 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1766 if (tdep
->vnh_type
== NULL
)
1771 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnh",
1774 elem
= builtin_type (gdbarch
)->builtin_uint16
;
1775 append_composite_type_field (t
, "u", elem
);
1777 elem
= builtin_type (gdbarch
)->builtin_int16
;
1778 append_composite_type_field (t
, "s", elem
);
1783 return tdep
->vnh_type
;
1786 /* Return the type for an AdvSISD B register. */
1788 static struct type
*
1789 aarch64_vnb_type (struct gdbarch
*gdbarch
)
1791 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1793 if (tdep
->vnb_type
== NULL
)
1798 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnb",
1801 elem
= builtin_type (gdbarch
)->builtin_uint8
;
1802 append_composite_type_field (t
, "u", elem
);
1804 elem
= builtin_type (gdbarch
)->builtin_int8
;
1805 append_composite_type_field (t
, "s", elem
);
1810 return tdep
->vnb_type
;
1813 /* Return the type for an AdvSISD V register. */
1815 static struct type
*
1816 aarch64_vnv_type (struct gdbarch
*gdbarch
)
1818 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1820 if (tdep
->vnv_type
== NULL
)
1822 struct type
*t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnv",
1825 append_composite_type_field (t
, "d", aarch64_vnd_type (gdbarch
));
1826 append_composite_type_field (t
, "s", aarch64_vns_type (gdbarch
));
1827 append_composite_type_field (t
, "h", aarch64_vnh_type (gdbarch
));
1828 append_composite_type_field (t
, "b", aarch64_vnb_type (gdbarch
));
1829 append_composite_type_field (t
, "q", aarch64_vnq_type (gdbarch
));
1834 return tdep
->vnv_type
;
1837 /* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
1840 aarch64_dwarf_reg_to_regnum (struct gdbarch
*gdbarch
, int reg
)
1842 if (reg
>= AARCH64_DWARF_X0
&& reg
<= AARCH64_DWARF_X0
+ 30)
1843 return AARCH64_X0_REGNUM
+ reg
- AARCH64_DWARF_X0
;
1845 if (reg
== AARCH64_DWARF_SP
)
1846 return AARCH64_SP_REGNUM
;
1848 if (reg
>= AARCH64_DWARF_V0
&& reg
<= AARCH64_DWARF_V0
+ 31)
1849 return AARCH64_V0_REGNUM
+ reg
- AARCH64_DWARF_V0
;
1851 if (reg
== AARCH64_DWARF_SVE_VG
)
1852 return AARCH64_SVE_VG_REGNUM
;
1854 if (reg
== AARCH64_DWARF_SVE_FFR
)
1855 return AARCH64_SVE_FFR_REGNUM
;
1857 if (reg
>= AARCH64_DWARF_SVE_P0
&& reg
<= AARCH64_DWARF_SVE_P0
+ 15)
1858 return AARCH64_SVE_P0_REGNUM
+ reg
- AARCH64_DWARF_SVE_P0
;
1860 if (reg
>= AARCH64_DWARF_SVE_Z0
&& reg
<= AARCH64_DWARF_SVE_Z0
+ 15)
1861 return AARCH64_SVE_Z0_REGNUM
+ reg
- AARCH64_DWARF_SVE_Z0
;
1866 /* Implement the "print_insn" gdbarch method. */
1869 aarch64_gdb_print_insn (bfd_vma memaddr
, disassemble_info
*info
)
1871 info
->symbols
= NULL
;
1872 return default_print_insn (memaddr
, info
);
1875 /* AArch64 BRK software debug mode instruction.
1876 Note that AArch64 code is always little-endian.
1877 1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000. */
1878 constexpr gdb_byte aarch64_default_breakpoint
[] = {0x00, 0x00, 0x20, 0xd4};
1880 typedef BP_MANIPULATION (aarch64_default_breakpoint
) aarch64_breakpoint
;
1882 /* Extract from an array REGS containing the (raw) register state a
1883 function return value of type TYPE, and copy that, in virtual
1884 format, into VALBUF. */
1887 aarch64_extract_return_value (struct type
*type
, struct regcache
*regs
,
1890 struct gdbarch
*gdbarch
= regs
->arch ();
1891 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1893 struct type
*fundamental_type
;
1895 if (aapcs_is_vfp_call_or_return_candidate (type
, &elements
,
1898 int len
= TYPE_LENGTH (fundamental_type
);
1900 for (int i
= 0; i
< elements
; i
++)
1902 int regno
= AARCH64_V0_REGNUM
+ i
;
1903 /* Enough space for a full vector register. */
1904 gdb_byte buf
[register_size (gdbarch
, regno
)];
1905 gdb_assert (len
<= sizeof (buf
));
1909 debug_printf ("read HFA or HVA return value element %d from %s\n",
1911 gdbarch_register_name (gdbarch
, regno
));
1913 regs
->cooked_read (regno
, buf
);
1915 memcpy (valbuf
, buf
, len
);
1919 else if (TYPE_CODE (type
) == TYPE_CODE_INT
1920 || TYPE_CODE (type
) == TYPE_CODE_CHAR
1921 || TYPE_CODE (type
) == TYPE_CODE_BOOL
1922 || TYPE_CODE (type
) == TYPE_CODE_PTR
1923 || TYPE_IS_REFERENCE (type
)
1924 || TYPE_CODE (type
) == TYPE_CODE_ENUM
)
1926 /* If the type is a plain integer, then the access is
1927 straight-forward. Otherwise we have to play around a bit
1929 int len
= TYPE_LENGTH (type
);
1930 int regno
= AARCH64_X0_REGNUM
;
1935 /* By using store_unsigned_integer we avoid having to do
1936 anything special for small big-endian values. */
1937 regcache_cooked_read_unsigned (regs
, regno
++, &tmp
);
1938 store_unsigned_integer (valbuf
,
1939 (len
> X_REGISTER_SIZE
1940 ? X_REGISTER_SIZE
: len
), byte_order
, tmp
);
1941 len
-= X_REGISTER_SIZE
;
1942 valbuf
+= X_REGISTER_SIZE
;
1947 /* For a structure or union the behaviour is as if the value had
1948 been stored to word-aligned memory and then loaded into
1949 registers with 64-bit load instruction(s). */
1950 int len
= TYPE_LENGTH (type
);
1951 int regno
= AARCH64_X0_REGNUM
;
1952 bfd_byte buf
[X_REGISTER_SIZE
];
1956 regs
->cooked_read (regno
++, buf
);
1957 memcpy (valbuf
, buf
, len
> X_REGISTER_SIZE
? X_REGISTER_SIZE
: len
);
1958 len
-= X_REGISTER_SIZE
;
1959 valbuf
+= X_REGISTER_SIZE
;
1965 /* Will a function return an aggregate type in memory or in a
1966 register? Return 0 if an aggregate type can be returned in a
1967 register, 1 if it must be returned in memory. */
1970 aarch64_return_in_memory (struct gdbarch
*gdbarch
, struct type
*type
)
1972 type
= check_typedef (type
);
1974 struct type
*fundamental_type
;
1976 if (aapcs_is_vfp_call_or_return_candidate (type
, &elements
,
1979 /* v0-v7 are used to return values and one register is allocated
1980 for one member. However, HFA or HVA has at most four members. */
1984 if (TYPE_LENGTH (type
) > 16)
1986 /* PCS B.6 Aggregates larger than 16 bytes are passed by
1987 invisible reference. */
1995 /* Write into appropriate registers a function return value of type
1996 TYPE, given in virtual format. */
1999 aarch64_store_return_value (struct type
*type
, struct regcache
*regs
,
2000 const gdb_byte
*valbuf
)
2002 struct gdbarch
*gdbarch
= regs
->arch ();
2003 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
2005 struct type
*fundamental_type
;
2007 if (aapcs_is_vfp_call_or_return_candidate (type
, &elements
,
2010 int len
= TYPE_LENGTH (fundamental_type
);
2012 for (int i
= 0; i
< elements
; i
++)
2014 int regno
= AARCH64_V0_REGNUM
+ i
;
2015 /* Enough space for a full vector register. */
2016 gdb_byte tmpbuf
[register_size (gdbarch
, regno
)];
2017 gdb_assert (len
<= sizeof (tmpbuf
));
2021 debug_printf ("write HFA or HVA return value element %d to %s\n",
2023 gdbarch_register_name (gdbarch
, regno
));
2026 memcpy (tmpbuf
, valbuf
,
2027 len
> V_REGISTER_SIZE
? V_REGISTER_SIZE
: len
);
2028 regs
->cooked_write (regno
, tmpbuf
);
2032 else if (TYPE_CODE (type
) == TYPE_CODE_INT
2033 || TYPE_CODE (type
) == TYPE_CODE_CHAR
2034 || TYPE_CODE (type
) == TYPE_CODE_BOOL
2035 || TYPE_CODE (type
) == TYPE_CODE_PTR
2036 || TYPE_IS_REFERENCE (type
)
2037 || TYPE_CODE (type
) == TYPE_CODE_ENUM
)
2039 if (TYPE_LENGTH (type
) <= X_REGISTER_SIZE
)
2041 /* Values of one word or less are zero/sign-extended and
2043 bfd_byte tmpbuf
[X_REGISTER_SIZE
];
2044 LONGEST val
= unpack_long (type
, valbuf
);
2046 store_signed_integer (tmpbuf
, X_REGISTER_SIZE
, byte_order
, val
);
2047 regs
->cooked_write (AARCH64_X0_REGNUM
, tmpbuf
);
2051 /* Integral values greater than one word are stored in
2052 consecutive registers starting with r0. This will always
2053 be a multiple of the regiser size. */
2054 int len
= TYPE_LENGTH (type
);
2055 int regno
= AARCH64_X0_REGNUM
;
2059 regs
->cooked_write (regno
++, valbuf
);
2060 len
-= X_REGISTER_SIZE
;
2061 valbuf
+= X_REGISTER_SIZE
;
2067 /* For a structure or union the behaviour is as if the value had
2068 been stored to word-aligned memory and then loaded into
2069 registers with 64-bit load instruction(s). */
2070 int len
= TYPE_LENGTH (type
);
2071 int regno
= AARCH64_X0_REGNUM
;
2072 bfd_byte tmpbuf
[X_REGISTER_SIZE
];
2076 memcpy (tmpbuf
, valbuf
,
2077 len
> X_REGISTER_SIZE
? X_REGISTER_SIZE
: len
);
2078 regs
->cooked_write (regno
++, tmpbuf
);
2079 len
-= X_REGISTER_SIZE
;
2080 valbuf
+= X_REGISTER_SIZE
;
2085 /* Implement the "return_value" gdbarch method. */
2087 static enum return_value_convention
2088 aarch64_return_value (struct gdbarch
*gdbarch
, struct value
*func_value
,
2089 struct type
*valtype
, struct regcache
*regcache
,
2090 gdb_byte
*readbuf
, const gdb_byte
*writebuf
)
2093 if (TYPE_CODE (valtype
) == TYPE_CODE_STRUCT
2094 || TYPE_CODE (valtype
) == TYPE_CODE_UNION
2095 || TYPE_CODE (valtype
) == TYPE_CODE_ARRAY
)
2097 if (aarch64_return_in_memory (gdbarch
, valtype
))
2100 debug_printf ("return value in memory\n");
2101 return RETURN_VALUE_STRUCT_CONVENTION
;
2106 aarch64_store_return_value (valtype
, regcache
, writebuf
);
2109 aarch64_extract_return_value (valtype
, regcache
, readbuf
);
2112 debug_printf ("return value in registers\n");
2114 return RETURN_VALUE_REGISTER_CONVENTION
;
2117 /* Implement the "get_longjmp_target" gdbarch method. */
2120 aarch64_get_longjmp_target (struct frame_info
*frame
, CORE_ADDR
*pc
)
2123 gdb_byte buf
[X_REGISTER_SIZE
];
2124 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
2125 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2126 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
2128 jb_addr
= get_frame_register_unsigned (frame
, AARCH64_X0_REGNUM
);
2130 if (target_read_memory (jb_addr
+ tdep
->jb_pc
* tdep
->jb_elt_size
, buf
,
2134 *pc
= extract_unsigned_integer (buf
, X_REGISTER_SIZE
, byte_order
);
2138 /* Implement the "gen_return_address" gdbarch method. */
2141 aarch64_gen_return_address (struct gdbarch
*gdbarch
,
2142 struct agent_expr
*ax
, struct axs_value
*value
,
2145 value
->type
= register_type (gdbarch
, AARCH64_LR_REGNUM
);
2146 value
->kind
= axs_lvalue_register
;
2147 value
->u
.reg
= AARCH64_LR_REGNUM
;
2151 /* Return the pseudo register name corresponding to register regnum. */
2154 aarch64_pseudo_register_name (struct gdbarch
*gdbarch
, int regnum
)
2156 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2158 static const char *const q_name
[] =
2160 "q0", "q1", "q2", "q3",
2161 "q4", "q5", "q6", "q7",
2162 "q8", "q9", "q10", "q11",
2163 "q12", "q13", "q14", "q15",
2164 "q16", "q17", "q18", "q19",
2165 "q20", "q21", "q22", "q23",
2166 "q24", "q25", "q26", "q27",
2167 "q28", "q29", "q30", "q31",
2170 static const char *const d_name
[] =
2172 "d0", "d1", "d2", "d3",
2173 "d4", "d5", "d6", "d7",
2174 "d8", "d9", "d10", "d11",
2175 "d12", "d13", "d14", "d15",
2176 "d16", "d17", "d18", "d19",
2177 "d20", "d21", "d22", "d23",
2178 "d24", "d25", "d26", "d27",
2179 "d28", "d29", "d30", "d31",
2182 static const char *const s_name
[] =
2184 "s0", "s1", "s2", "s3",
2185 "s4", "s5", "s6", "s7",
2186 "s8", "s9", "s10", "s11",
2187 "s12", "s13", "s14", "s15",
2188 "s16", "s17", "s18", "s19",
2189 "s20", "s21", "s22", "s23",
2190 "s24", "s25", "s26", "s27",
2191 "s28", "s29", "s30", "s31",
2194 static const char *const h_name
[] =
2196 "h0", "h1", "h2", "h3",
2197 "h4", "h5", "h6", "h7",
2198 "h8", "h9", "h10", "h11",
2199 "h12", "h13", "h14", "h15",
2200 "h16", "h17", "h18", "h19",
2201 "h20", "h21", "h22", "h23",
2202 "h24", "h25", "h26", "h27",
2203 "h28", "h29", "h30", "h31",
2206 static const char *const b_name
[] =
2208 "b0", "b1", "b2", "b3",
2209 "b4", "b5", "b6", "b7",
2210 "b8", "b9", "b10", "b11",
2211 "b12", "b13", "b14", "b15",
2212 "b16", "b17", "b18", "b19",
2213 "b20", "b21", "b22", "b23",
2214 "b24", "b25", "b26", "b27",
2215 "b28", "b29", "b30", "b31",
2218 regnum
-= gdbarch_num_regs (gdbarch
);
2220 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
2221 return q_name
[regnum
- AARCH64_Q0_REGNUM
];
2223 if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
2224 return d_name
[regnum
- AARCH64_D0_REGNUM
];
2226 if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
2227 return s_name
[regnum
- AARCH64_S0_REGNUM
];
2229 if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
2230 return h_name
[regnum
- AARCH64_H0_REGNUM
];
2232 if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
2233 return b_name
[regnum
- AARCH64_B0_REGNUM
];
2235 if (tdep
->has_sve ())
2237 static const char *const sve_v_name
[] =
2239 "v0", "v1", "v2", "v3",
2240 "v4", "v5", "v6", "v7",
2241 "v8", "v9", "v10", "v11",
2242 "v12", "v13", "v14", "v15",
2243 "v16", "v17", "v18", "v19",
2244 "v20", "v21", "v22", "v23",
2245 "v24", "v25", "v26", "v27",
2246 "v28", "v29", "v30", "v31",
2249 if (regnum
>= AARCH64_SVE_V0_REGNUM
2250 && regnum
< AARCH64_SVE_V0_REGNUM
+ AARCH64_V_REGS_NUM
)
2251 return sve_v_name
[regnum
- AARCH64_SVE_V0_REGNUM
];
2254 internal_error (__FILE__
, __LINE__
,
2255 _("aarch64_pseudo_register_name: bad register number %d"),
2259 /* Implement the "pseudo_register_type" tdesc_arch_data method. */
2261 static struct type
*
2262 aarch64_pseudo_register_type (struct gdbarch
*gdbarch
, int regnum
)
2264 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2266 regnum
-= gdbarch_num_regs (gdbarch
);
2268 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
2269 return aarch64_vnq_type (gdbarch
);
2271 if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
2272 return aarch64_vnd_type (gdbarch
);
2274 if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
2275 return aarch64_vns_type (gdbarch
);
2277 if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
2278 return aarch64_vnh_type (gdbarch
);
2280 if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
2281 return aarch64_vnb_type (gdbarch
);
2283 if (tdep
->has_sve () && regnum
>= AARCH64_SVE_V0_REGNUM
2284 && regnum
< AARCH64_SVE_V0_REGNUM
+ AARCH64_V_REGS_NUM
)
2285 return aarch64_vnv_type (gdbarch
);
2287 internal_error (__FILE__
, __LINE__
,
2288 _("aarch64_pseudo_register_type: bad register number %d"),
2292 /* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2295 aarch64_pseudo_register_reggroup_p (struct gdbarch
*gdbarch
, int regnum
,
2296 struct reggroup
*group
)
2298 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2300 regnum
-= gdbarch_num_regs (gdbarch
);
2302 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
2303 return group
== all_reggroup
|| group
== vector_reggroup
;
2304 else if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
2305 return (group
== all_reggroup
|| group
== vector_reggroup
2306 || group
== float_reggroup
);
2307 else if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
2308 return (group
== all_reggroup
|| group
== vector_reggroup
2309 || group
== float_reggroup
);
2310 else if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
2311 return group
== all_reggroup
|| group
== vector_reggroup
;
2312 else if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
2313 return group
== all_reggroup
|| group
== vector_reggroup
;
2314 else if (tdep
->has_sve () && regnum
>= AARCH64_SVE_V0_REGNUM
2315 && regnum
< AARCH64_SVE_V0_REGNUM
+ AARCH64_V_REGS_NUM
)
2316 return group
== all_reggroup
|| group
== vector_reggroup
;
2318 return group
== all_reggroup
;
2321 /* Helper for aarch64_pseudo_read_value. */
2323 static struct value
*
2324 aarch64_pseudo_read_value_1 (struct gdbarch
*gdbarch
,
2325 readable_regcache
*regcache
, int regnum_offset
,
2326 int regsize
, struct value
*result_value
)
2328 unsigned v_regnum
= AARCH64_V0_REGNUM
+ regnum_offset
;
2330 /* Enough space for a full vector register. */
2331 gdb_byte reg_buf
[register_size (gdbarch
, AARCH64_V0_REGNUM
)];
2332 gdb_static_assert (AARCH64_V0_REGNUM
== AARCH64_SVE_Z0_REGNUM
);
2334 if (regcache
->raw_read (v_regnum
, reg_buf
) != REG_VALID
)
2335 mark_value_bytes_unavailable (result_value
, 0,
2336 TYPE_LENGTH (value_type (result_value
)));
2338 memcpy (value_contents_raw (result_value
), reg_buf
, regsize
);
2340 return result_value
;
2343 /* Implement the "pseudo_register_read_value" gdbarch method. */
2345 static struct value
*
2346 aarch64_pseudo_read_value (struct gdbarch
*gdbarch
, readable_regcache
*regcache
,
2349 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2350 struct value
*result_value
= allocate_value (register_type (gdbarch
, regnum
));
2352 VALUE_LVAL (result_value
) = lval_register
;
2353 VALUE_REGNUM (result_value
) = regnum
;
2355 regnum
-= gdbarch_num_regs (gdbarch
);
2357 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
2358 return aarch64_pseudo_read_value_1 (gdbarch
, regcache
,
2359 regnum
- AARCH64_Q0_REGNUM
,
2360 Q_REGISTER_SIZE
, result_value
);
2362 if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
2363 return aarch64_pseudo_read_value_1 (gdbarch
, regcache
,
2364 regnum
- AARCH64_D0_REGNUM
,
2365 D_REGISTER_SIZE
, result_value
);
2367 if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
2368 return aarch64_pseudo_read_value_1 (gdbarch
, regcache
,
2369 regnum
- AARCH64_S0_REGNUM
,
2370 S_REGISTER_SIZE
, result_value
);
2372 if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
2373 return aarch64_pseudo_read_value_1 (gdbarch
, regcache
,
2374 regnum
- AARCH64_H0_REGNUM
,
2375 H_REGISTER_SIZE
, result_value
);
2377 if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
2378 return aarch64_pseudo_read_value_1 (gdbarch
, regcache
,
2379 regnum
- AARCH64_B0_REGNUM
,
2380 B_REGISTER_SIZE
, result_value
);
2382 if (tdep
->has_sve () && regnum
>= AARCH64_SVE_V0_REGNUM
2383 && regnum
< AARCH64_SVE_V0_REGNUM
+ 32)
2384 return aarch64_pseudo_read_value_1 (gdbarch
, regcache
,
2385 regnum
- AARCH64_SVE_V0_REGNUM
,
2386 V_REGISTER_SIZE
, result_value
);
2388 gdb_assert_not_reached ("regnum out of bound");
2391 /* Helper for aarch64_pseudo_write. */
2394 aarch64_pseudo_write_1 (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
2395 int regnum_offset
, int regsize
, const gdb_byte
*buf
)
2397 unsigned v_regnum
= AARCH64_V0_REGNUM
+ regnum_offset
;
2399 /* Enough space for a full vector register. */
2400 gdb_byte reg_buf
[register_size (gdbarch
, AARCH64_V0_REGNUM
)];
2401 gdb_static_assert (AARCH64_V0_REGNUM
== AARCH64_SVE_Z0_REGNUM
);
2403 /* Ensure the register buffer is zero, we want gdb writes of the
2404 various 'scalar' pseudo registers to behavior like architectural
2405 writes, register width bytes are written the remainder are set to
2407 memset (reg_buf
, 0, register_size (gdbarch
, AARCH64_V0_REGNUM
));
2409 memcpy (reg_buf
, buf
, regsize
);
2410 regcache
->raw_write (v_regnum
, reg_buf
);
2413 /* Implement the "pseudo_register_write" gdbarch method. */
2416 aarch64_pseudo_write (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
2417 int regnum
, const gdb_byte
*buf
)
2419 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2420 regnum
-= gdbarch_num_regs (gdbarch
);
2422 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
2423 return aarch64_pseudo_write_1 (gdbarch
, regcache
,
2424 regnum
- AARCH64_Q0_REGNUM
, Q_REGISTER_SIZE
,
2427 if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
2428 return aarch64_pseudo_write_1 (gdbarch
, regcache
,
2429 regnum
- AARCH64_D0_REGNUM
, D_REGISTER_SIZE
,
2432 if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
2433 return aarch64_pseudo_write_1 (gdbarch
, regcache
,
2434 regnum
- AARCH64_S0_REGNUM
, S_REGISTER_SIZE
,
2437 if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
2438 return aarch64_pseudo_write_1 (gdbarch
, regcache
,
2439 regnum
- AARCH64_H0_REGNUM
, H_REGISTER_SIZE
,
2442 if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
2443 return aarch64_pseudo_write_1 (gdbarch
, regcache
,
2444 regnum
- AARCH64_B0_REGNUM
, B_REGISTER_SIZE
,
2447 if (tdep
->has_sve () && regnum
>= AARCH64_SVE_V0_REGNUM
2448 && regnum
< AARCH64_SVE_V0_REGNUM
+ 32)
2449 return aarch64_pseudo_write_1 (gdbarch
, regcache
,
2450 regnum
- AARCH64_SVE_V0_REGNUM
,
2451 V_REGISTER_SIZE
, buf
);
2453 gdb_assert_not_reached ("regnum out of bound");
2456 /* Callback function for user_reg_add. */
2458 static struct value
*
2459 value_of_aarch64_user_reg (struct frame_info
*frame
, const void *baton
)
2461 const int *reg_p
= (const int *) baton
;
2463 return value_of_register (*reg_p
, frame
);
2467 /* Implement the "software_single_step" gdbarch method, needed to
2468 single step through atomic sequences on AArch64. */
2470 static std::vector
<CORE_ADDR
>
2471 aarch64_software_single_step (struct regcache
*regcache
)
2473 struct gdbarch
*gdbarch
= regcache
->arch ();
2474 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
2475 const int insn_size
= 4;
2476 const int atomic_sequence_length
= 16; /* Instruction sequence length. */
2477 CORE_ADDR pc
= regcache_read_pc (regcache
);
2478 CORE_ADDR breaks
[2] = { CORE_ADDR_MAX
, CORE_ADDR_MAX
};
2480 CORE_ADDR closing_insn
= 0;
2481 uint32_t insn
= read_memory_unsigned_integer (loc
, insn_size
,
2482 byte_order_for_code
);
2485 int bc_insn_count
= 0; /* Conditional branch instruction count. */
2486 int last_breakpoint
= 0; /* Defaults to 0 (no breakpoints placed). */
2489 if (aarch64_decode_insn (insn
, &inst
, 1, NULL
) != 0)
2492 /* Look for a Load Exclusive instruction which begins the sequence. */
2493 if (inst
.opcode
->iclass
!= ldstexcl
|| bit (insn
, 22) == 0)
2496 for (insn_count
= 0; insn_count
< atomic_sequence_length
; ++insn_count
)
2499 insn
= read_memory_unsigned_integer (loc
, insn_size
,
2500 byte_order_for_code
);
2502 if (aarch64_decode_insn (insn
, &inst
, 1, NULL
) != 0)
2504 /* Check if the instruction is a conditional branch. */
2505 if (inst
.opcode
->iclass
== condbranch
)
2507 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_ADDR_PCREL19
);
2509 if (bc_insn_count
>= 1)
2512 /* It is, so we'll try to set a breakpoint at the destination. */
2513 breaks
[1] = loc
+ inst
.operands
[0].imm
.value
;
2519 /* Look for the Store Exclusive which closes the atomic sequence. */
2520 if (inst
.opcode
->iclass
== ldstexcl
&& bit (insn
, 22) == 0)
2527 /* We didn't find a closing Store Exclusive instruction, fall back. */
2531 /* Insert breakpoint after the end of the atomic sequence. */
2532 breaks
[0] = loc
+ insn_size
;
2534 /* Check for duplicated breakpoints, and also check that the second
2535 breakpoint is not within the atomic sequence. */
2537 && (breaks
[1] == breaks
[0]
2538 || (breaks
[1] >= pc
&& breaks
[1] <= closing_insn
)))
2539 last_breakpoint
= 0;
2541 std::vector
<CORE_ADDR
> next_pcs
;
2543 /* Insert the breakpoint at the end of the sequence, and one at the
2544 destination of the conditional branch, if it exists. */
2545 for (index
= 0; index
<= last_breakpoint
; index
++)
2546 next_pcs
.push_back (breaks
[index
]);
2551 struct aarch64_displaced_step_closure
: public displaced_step_closure
2553 /* It is true when condition instruction, such as B.CON, TBZ, etc,
2554 is being displaced stepping. */
2557 /* PC adjustment offset after displaced stepping. */
2558 int32_t pc_adjust
= 0;
2561 /* Data when visiting instructions for displaced stepping. */
2563 struct aarch64_displaced_step_data
2565 struct aarch64_insn_data base
;
2567 /* The address where the instruction will be executed at. */
2569 /* Buffer of instructions to be copied to NEW_ADDR to execute. */
2570 uint32_t insn_buf
[DISPLACED_MODIFIED_INSNS
];
2571 /* Number of instructions in INSN_BUF. */
2572 unsigned insn_count
;
2573 /* Registers when doing displaced stepping. */
2574 struct regcache
*regs
;
2576 aarch64_displaced_step_closure
*dsc
;
2579 /* Implementation of aarch64_insn_visitor method "b". */
2582 aarch64_displaced_step_b (const int is_bl
, const int32_t offset
,
2583 struct aarch64_insn_data
*data
)
2585 struct aarch64_displaced_step_data
*dsd
2586 = (struct aarch64_displaced_step_data
*) data
;
2587 int64_t new_offset
= data
->insn_addr
- dsd
->new_addr
+ offset
;
2589 if (can_encode_int32 (new_offset
, 28))
2591 /* Emit B rather than BL, because executing BL on a new address
2592 will get the wrong address into LR. In order to avoid this,
2593 we emit B, and update LR if the instruction is BL. */
2594 emit_b (dsd
->insn_buf
, 0, new_offset
);
2600 emit_nop (dsd
->insn_buf
);
2602 dsd
->dsc
->pc_adjust
= offset
;
2608 regcache_cooked_write_unsigned (dsd
->regs
, AARCH64_LR_REGNUM
,
2609 data
->insn_addr
+ 4);
2613 /* Implementation of aarch64_insn_visitor method "b_cond". */
2616 aarch64_displaced_step_b_cond (const unsigned cond
, const int32_t offset
,
2617 struct aarch64_insn_data
*data
)
2619 struct aarch64_displaced_step_data
*dsd
2620 = (struct aarch64_displaced_step_data
*) data
;
2622 /* GDB has to fix up PC after displaced step this instruction
2623 differently according to the condition is true or false. Instead
2624 of checking COND against conditional flags, we can use
2625 the following instructions, and GDB can tell how to fix up PC
2626 according to the PC value.
2628 B.COND TAKEN ; If cond is true, then jump to TAKEN.
2634 emit_bcond (dsd
->insn_buf
, cond
, 8);
2636 dsd
->dsc
->pc_adjust
= offset
;
2637 dsd
->insn_count
= 1;
2640 /* Dynamically allocate a new register. If we know the register
2641 statically, we should make it a global as above instead of using this
2644 static struct aarch64_register
2645 aarch64_register (unsigned num
, int is64
)
2647 return (struct aarch64_register
) { num
, is64
};
2650 /* Implementation of aarch64_insn_visitor method "cb". */
2653 aarch64_displaced_step_cb (const int32_t offset
, const int is_cbnz
,
2654 const unsigned rn
, int is64
,
2655 struct aarch64_insn_data
*data
)
2657 struct aarch64_displaced_step_data
*dsd
2658 = (struct aarch64_displaced_step_data
*) data
;
2660 /* The offset is out of range for a compare and branch
2661 instruction. We can use the following instructions instead:
2663 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
2668 emit_cb (dsd
->insn_buf
, is_cbnz
, aarch64_register (rn
, is64
), 8);
2669 dsd
->insn_count
= 1;
2671 dsd
->dsc
->pc_adjust
= offset
;
2674 /* Implementation of aarch64_insn_visitor method "tb". */
2677 aarch64_displaced_step_tb (const int32_t offset
, int is_tbnz
,
2678 const unsigned rt
, unsigned bit
,
2679 struct aarch64_insn_data
*data
)
2681 struct aarch64_displaced_step_data
*dsd
2682 = (struct aarch64_displaced_step_data
*) data
;
2684 /* The offset is out of range for a test bit and branch
2685 instruction We can use the following instructions instead:
2687 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
2693 emit_tb (dsd
->insn_buf
, is_tbnz
, bit
, aarch64_register (rt
, 1), 8);
2694 dsd
->insn_count
= 1;
2696 dsd
->dsc
->pc_adjust
= offset
;
2699 /* Implementation of aarch64_insn_visitor method "adr". */
2702 aarch64_displaced_step_adr (const int32_t offset
, const unsigned rd
,
2703 const int is_adrp
, struct aarch64_insn_data
*data
)
2705 struct aarch64_displaced_step_data
*dsd
2706 = (struct aarch64_displaced_step_data
*) data
;
2707 /* We know exactly the address the ADR{P,} instruction will compute.
2708 We can just write it to the destination register. */
2709 CORE_ADDR address
= data
->insn_addr
+ offset
;
2713 /* Clear the lower 12 bits of the offset to get the 4K page. */
2714 regcache_cooked_write_unsigned (dsd
->regs
, AARCH64_X0_REGNUM
+ rd
,
2718 regcache_cooked_write_unsigned (dsd
->regs
, AARCH64_X0_REGNUM
+ rd
,
2721 dsd
->dsc
->pc_adjust
= 4;
2722 emit_nop (dsd
->insn_buf
);
2723 dsd
->insn_count
= 1;
2726 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
2729 aarch64_displaced_step_ldr_literal (const int32_t offset
, const int is_sw
,
2730 const unsigned rt
, const int is64
,
2731 struct aarch64_insn_data
*data
)
2733 struct aarch64_displaced_step_data
*dsd
2734 = (struct aarch64_displaced_step_data
*) data
;
2735 CORE_ADDR address
= data
->insn_addr
+ offset
;
2736 struct aarch64_memory_operand zero
= { MEMORY_OPERAND_OFFSET
, 0 };
2738 regcache_cooked_write_unsigned (dsd
->regs
, AARCH64_X0_REGNUM
+ rt
,
2742 dsd
->insn_count
= emit_ldrsw (dsd
->insn_buf
, aarch64_register (rt
, 1),
2743 aarch64_register (rt
, 1), zero
);
2745 dsd
->insn_count
= emit_ldr (dsd
->insn_buf
, aarch64_register (rt
, is64
),
2746 aarch64_register (rt
, 1), zero
);
2748 dsd
->dsc
->pc_adjust
= 4;
2751 /* Implementation of aarch64_insn_visitor method "others". */
2754 aarch64_displaced_step_others (const uint32_t insn
,
2755 struct aarch64_insn_data
*data
)
2757 struct aarch64_displaced_step_data
*dsd
2758 = (struct aarch64_displaced_step_data
*) data
;
2760 aarch64_emit_insn (dsd
->insn_buf
, insn
);
2761 dsd
->insn_count
= 1;
2763 if ((insn
& 0xfffffc1f) == 0xd65f0000)
2766 dsd
->dsc
->pc_adjust
= 0;
2769 dsd
->dsc
->pc_adjust
= 4;
2772 static const struct aarch64_insn_visitor visitor
=
2774 aarch64_displaced_step_b
,
2775 aarch64_displaced_step_b_cond
,
2776 aarch64_displaced_step_cb
,
2777 aarch64_displaced_step_tb
,
2778 aarch64_displaced_step_adr
,
2779 aarch64_displaced_step_ldr_literal
,
2780 aarch64_displaced_step_others
,
2783 /* Implement the "displaced_step_copy_insn" gdbarch method. */
2785 struct displaced_step_closure
*
2786 aarch64_displaced_step_copy_insn (struct gdbarch
*gdbarch
,
2787 CORE_ADDR from
, CORE_ADDR to
,
2788 struct regcache
*regs
)
2790 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
2791 uint32_t insn
= read_memory_unsigned_integer (from
, 4, byte_order_for_code
);
2792 struct aarch64_displaced_step_data dsd
;
2795 if (aarch64_decode_insn (insn
, &inst
, 1, NULL
) != 0)
2798 /* Look for a Load Exclusive instruction which begins the sequence. */
2799 if (inst
.opcode
->iclass
== ldstexcl
&& bit (insn
, 22))
2801 /* We can't displaced step atomic sequences. */
2805 std::unique_ptr
<aarch64_displaced_step_closure
> dsc
2806 (new aarch64_displaced_step_closure
);
2807 dsd
.base
.insn_addr
= from
;
2810 dsd
.dsc
= dsc
.get ();
2812 aarch64_relocate_instruction (insn
, &visitor
,
2813 (struct aarch64_insn_data
*) &dsd
);
2814 gdb_assert (dsd
.insn_count
<= DISPLACED_MODIFIED_INSNS
);
2816 if (dsd
.insn_count
!= 0)
2820 /* Instruction can be relocated to scratch pad. Copy
2821 relocated instruction(s) there. */
2822 for (i
= 0; i
< dsd
.insn_count
; i
++)
2824 if (debug_displaced
)
2826 debug_printf ("displaced: writing insn ");
2827 debug_printf ("%.8x", dsd
.insn_buf
[i
]);
2828 debug_printf (" at %s\n", paddress (gdbarch
, to
+ i
* 4));
2830 write_memory_unsigned_integer (to
+ i
* 4, 4, byte_order_for_code
,
2831 (ULONGEST
) dsd
.insn_buf
[i
]);
2839 return dsc
.release ();
2842 /* Implement the "displaced_step_fixup" gdbarch method. */
2845 aarch64_displaced_step_fixup (struct gdbarch
*gdbarch
,
2846 struct displaced_step_closure
*dsc_
,
2847 CORE_ADDR from
, CORE_ADDR to
,
2848 struct regcache
*regs
)
2850 aarch64_displaced_step_closure
*dsc
= (aarch64_displaced_step_closure
*) dsc_
;
2856 regcache_cooked_read_unsigned (regs
, AARCH64_PC_REGNUM
, &pc
);
2859 /* Condition is true. */
2861 else if (pc
- to
== 4)
2863 /* Condition is false. */
2867 gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
2870 if (dsc
->pc_adjust
!= 0)
2872 if (debug_displaced
)
2874 debug_printf ("displaced: fixup: set PC to %s:%d\n",
2875 paddress (gdbarch
, from
), dsc
->pc_adjust
);
2877 regcache_cooked_write_unsigned (regs
, AARCH64_PC_REGNUM
,
2878 from
+ dsc
->pc_adjust
);
/* Implement the "displaced_step_hw_singlestep" gdbarch method.
   Displaced-stepped instructions are always hardware single-stepped.  */

int
aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch,
                                      struct displaced_step_closure *closure)
{
  return 1;
}
2891 /* Get the correct target description for the given VQ value.
2892 If VQ is zero then it is assumed SVE is not supported.
2893 (It is not possible to set VQ to zero on an SVE system). */
2896 aarch64_read_description (uint64_t vq
, bool pauth_p
)
2898 if (vq
> AARCH64_MAX_SVE_VQ
)
2899 error (_("VQ is %" PRIu64
", maximum supported value is %d"), vq
,
2900 AARCH64_MAX_SVE_VQ
);
2902 struct target_desc
*tdesc
= tdesc_aarch64_list
[vq
][pauth_p
];
2906 tdesc
= aarch64_create_target_description (vq
, pauth_p
);
2907 tdesc_aarch64_list
[vq
][pauth_p
] = tdesc
;
2913 /* Return the VQ used when creating the target description TDESC. */
2916 aarch64_get_tdesc_vq (const struct target_desc
*tdesc
)
2918 const struct tdesc_feature
*feature_sve
;
2920 if (!tdesc_has_registers (tdesc
))
2923 feature_sve
= tdesc_find_feature (tdesc
, "org.gnu.gdb.aarch64.sve");
2925 if (feature_sve
== nullptr)
2928 uint64_t vl
= tdesc_register_bitsize (feature_sve
,
2929 aarch64_sve_register_names
[0]) / 8;
2930 return sve_vq_from_vl (vl
);
2933 /* Add all the expected register sets into GDBARCH. */
2936 aarch64_add_reggroups (struct gdbarch
*gdbarch
)
2938 reggroup_add (gdbarch
, general_reggroup
);
2939 reggroup_add (gdbarch
, float_reggroup
);
2940 reggroup_add (gdbarch
, system_reggroup
);
2941 reggroup_add (gdbarch
, vector_reggroup
);
2942 reggroup_add (gdbarch
, all_reggroup
);
2943 reggroup_add (gdbarch
, save_reggroup
);
2944 reggroup_add (gdbarch
, restore_reggroup
);
2947 /* Implement the "cannot_store_register" gdbarch method. */
2950 aarch64_cannot_store_register (struct gdbarch
*gdbarch
, int regnum
)
2952 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2954 if (!tdep
->has_pauth ())
2957 /* Pointer authentication registers are read-only. */
2958 return (regnum
== AARCH64_PAUTH_DMASK_REGNUM (tdep
->pauth_reg_base
)
2959 || regnum
== AARCH64_PAUTH_CMASK_REGNUM (tdep
->pauth_reg_base
));
2962 /* Initialize the current architecture based on INFO. If possible,
2963 re-use an architecture from ARCHES, which is a list of
2964 architectures already created during this debugging session.
2966 Called e.g. at program startup, when reading a core file, and when
2967 reading a binary file. */
2969 static struct gdbarch
*
2970 aarch64_gdbarch_init (struct gdbarch_info info
, struct gdbarch_list
*arches
)
2972 struct gdbarch_tdep
*tdep
;
2973 struct gdbarch
*gdbarch
;
2974 struct gdbarch_list
*best_arch
;
2975 struct tdesc_arch_data
*tdesc_data
= NULL
;
2976 const struct target_desc
*tdesc
= info
.target_desc
;
2979 const struct tdesc_feature
*feature_core
;
2980 const struct tdesc_feature
*feature_fpu
;
2981 const struct tdesc_feature
*feature_sve
;
2982 const struct tdesc_feature
*feature_pauth
;
2984 int num_pseudo_regs
= 0;
2985 int first_pauth_regnum
= -1;
2987 /* Ensure we always have a target description. */
2988 if (!tdesc_has_registers (tdesc
))
2989 tdesc
= aarch64_read_description (0, false);
2992 feature_core
= tdesc_find_feature (tdesc
, "org.gnu.gdb.aarch64.core");
2993 feature_fpu
= tdesc_find_feature (tdesc
, "org.gnu.gdb.aarch64.fpu");
2994 feature_sve
= tdesc_find_feature (tdesc
, "org.gnu.gdb.aarch64.sve");
2995 feature_pauth
= tdesc_find_feature (tdesc
, "org.gnu.gdb.aarch64.pauth");
2997 if (feature_core
== NULL
)
3000 tdesc_data
= tdesc_data_alloc ();
3002 /* Validate the description provides the mandatory core R registers
3003 and allocate their numbers. */
3004 for (i
= 0; i
< ARRAY_SIZE (aarch64_r_register_names
); i
++)
3005 valid_p
&= tdesc_numbered_register (feature_core
, tdesc_data
,
3006 AARCH64_X0_REGNUM
+ i
,
3007 aarch64_r_register_names
[i
]);
3009 num_regs
= AARCH64_X0_REGNUM
+ i
;
3011 /* Add the V registers. */
3012 if (feature_fpu
!= NULL
)
3014 if (feature_sve
!= NULL
)
3015 error (_("Program contains both fpu and SVE features."));
3017 /* Validate the description provides the mandatory V registers
3018 and allocate their numbers. */
3019 for (i
= 0; i
< ARRAY_SIZE (aarch64_v_register_names
); i
++)
3020 valid_p
&= tdesc_numbered_register (feature_fpu
, tdesc_data
,
3021 AARCH64_V0_REGNUM
+ i
,
3022 aarch64_v_register_names
[i
]);
3024 num_regs
= AARCH64_V0_REGNUM
+ i
;
3027 /* Add the SVE registers. */
3028 if (feature_sve
!= NULL
)
3030 /* Validate the description provides the mandatory SVE registers
3031 and allocate their numbers. */
3032 for (i
= 0; i
< ARRAY_SIZE (aarch64_sve_register_names
); i
++)
3033 valid_p
&= tdesc_numbered_register (feature_sve
, tdesc_data
,
3034 AARCH64_SVE_Z0_REGNUM
+ i
,
3035 aarch64_sve_register_names
[i
]);
3037 num_regs
= AARCH64_SVE_Z0_REGNUM
+ i
;
3038 num_pseudo_regs
+= 32; /* add the Vn register pseudos. */
3041 if (feature_fpu
!= NULL
|| feature_sve
!= NULL
)
3043 num_pseudo_regs
+= 32; /* add the Qn scalar register pseudos */
3044 num_pseudo_regs
+= 32; /* add the Dn scalar register pseudos */
3045 num_pseudo_regs
+= 32; /* add the Sn scalar register pseudos */
3046 num_pseudo_regs
+= 32; /* add the Hn scalar register pseudos */
3047 num_pseudo_regs
+= 32; /* add the Bn scalar register pseudos */
3050 /* Add the pauth registers. */
3051 if (feature_pauth
!= NULL
)
3053 first_pauth_regnum
= num_regs
;
3055 /* Validate the descriptor provides the mandatory PAUTH registers and
3056 allocate their numbers. */
3057 for (i
= 0; i
< ARRAY_SIZE (aarch64_pauth_register_names
); i
++)
3058 valid_p
&= tdesc_numbered_register (feature_pauth
, tdesc_data
,
3059 first_pauth_regnum
+ i
,
3060 aarch64_pauth_register_names
[i
]);
3067 tdesc_data_cleanup (tdesc_data
);
3071 /* AArch64 code is always little-endian. */
3072 info
.byte_order_for_code
= BFD_ENDIAN_LITTLE
;
3074 /* If there is already a candidate, use it. */
3075 for (best_arch
= gdbarch_list_lookup_by_info (arches
, &info
);
3077 best_arch
= gdbarch_list_lookup_by_info (best_arch
->next
, &info
))
3079 /* Found a match. */
3083 if (best_arch
!= NULL
)
3085 if (tdesc_data
!= NULL
)
3086 tdesc_data_cleanup (tdesc_data
);
3087 return best_arch
->gdbarch
;
3090 tdep
= XCNEW (struct gdbarch_tdep
);
3091 gdbarch
= gdbarch_alloc (&info
, tdep
);
3093 /* This should be low enough for everything. */
3094 tdep
->lowest_pc
= 0x20;
3095 tdep
->jb_pc
= -1; /* Longjump support not enabled by default. */
3096 tdep
->jb_elt_size
= 8;
3097 tdep
->vq
= aarch64_get_tdesc_vq (tdesc
);
3098 tdep
->pauth_reg_base
= first_pauth_regnum
;
3100 set_gdbarch_push_dummy_call (gdbarch
, aarch64_push_dummy_call
);
3101 set_gdbarch_frame_align (gdbarch
, aarch64_frame_align
);
3103 /* Advance PC across function entry code. */
3104 set_gdbarch_skip_prologue (gdbarch
, aarch64_skip_prologue
);
3106 /* The stack grows downward. */
3107 set_gdbarch_inner_than (gdbarch
, core_addr_lessthan
);
3109 /* Breakpoint manipulation. */
3110 set_gdbarch_breakpoint_kind_from_pc (gdbarch
,
3111 aarch64_breakpoint::kind_from_pc
);
3112 set_gdbarch_sw_breakpoint_from_kind (gdbarch
,
3113 aarch64_breakpoint::bp_from_kind
);
3114 set_gdbarch_have_nonsteppable_watchpoint (gdbarch
, 1);
3115 set_gdbarch_software_single_step (gdbarch
, aarch64_software_single_step
);
3117 /* Information about registers, etc. */
3118 set_gdbarch_sp_regnum (gdbarch
, AARCH64_SP_REGNUM
);
3119 set_gdbarch_pc_regnum (gdbarch
, AARCH64_PC_REGNUM
);
3120 set_gdbarch_num_regs (gdbarch
, num_regs
);
3122 set_gdbarch_num_pseudo_regs (gdbarch
, num_pseudo_regs
);
3123 set_gdbarch_pseudo_register_read_value (gdbarch
, aarch64_pseudo_read_value
);
3124 set_gdbarch_pseudo_register_write (gdbarch
, aarch64_pseudo_write
);
3125 set_tdesc_pseudo_register_name (gdbarch
, aarch64_pseudo_register_name
);
3126 set_tdesc_pseudo_register_type (gdbarch
, aarch64_pseudo_register_type
);
3127 set_tdesc_pseudo_register_reggroup_p (gdbarch
,
3128 aarch64_pseudo_register_reggroup_p
);
3129 set_gdbarch_cannot_store_register (gdbarch
, aarch64_cannot_store_register
);
3132 set_gdbarch_short_bit (gdbarch
, 16);
3133 set_gdbarch_int_bit (gdbarch
, 32);
3134 set_gdbarch_float_bit (gdbarch
, 32);
3135 set_gdbarch_double_bit (gdbarch
, 64);
3136 set_gdbarch_long_double_bit (gdbarch
, 128);
3137 set_gdbarch_long_bit (gdbarch
, 64);
3138 set_gdbarch_long_long_bit (gdbarch
, 64);
3139 set_gdbarch_ptr_bit (gdbarch
, 64);
3140 set_gdbarch_char_signed (gdbarch
, 0);
3141 set_gdbarch_wchar_signed (gdbarch
, 0);
3142 set_gdbarch_float_format (gdbarch
, floatformats_ieee_single
);
3143 set_gdbarch_double_format (gdbarch
, floatformats_ieee_double
);
3144 set_gdbarch_long_double_format (gdbarch
, floatformats_ia64_quad
);
3146 /* Internal <-> external register number maps. */
3147 set_gdbarch_dwarf2_reg_to_regnum (gdbarch
, aarch64_dwarf_reg_to_regnum
);
3149 /* Returning results. */
3150 set_gdbarch_return_value (gdbarch
, aarch64_return_value
);
3153 set_gdbarch_print_insn (gdbarch
, aarch64_gdb_print_insn
);
3155 /* Virtual tables. */
3156 set_gdbarch_vbit_in_delta (gdbarch
, 1);
3158 /* Register architecture. */
3159 aarch64_add_reggroups (gdbarch
);
3161 /* Hook in the ABI-specific overrides, if they have been registered. */
3162 info
.target_desc
= tdesc
;
3163 info
.tdesc_data
= tdesc_data
;
3164 gdbarch_init_osabi (info
, gdbarch
);
3166 dwarf2_frame_set_init_reg (gdbarch
, aarch64_dwarf2_frame_init_reg
);
3168 /* Add some default predicates. */
3169 frame_unwind_append_unwinder (gdbarch
, &aarch64_stub_unwind
);
3170 dwarf2_append_unwinders (gdbarch
);
3171 frame_unwind_append_unwinder (gdbarch
, &aarch64_prologue_unwind
);
3173 frame_base_set_default (gdbarch
, &aarch64_normal_base
);
3175 /* Now we have tuned the configuration, set a few final things,
3176 based on what the OS ABI has told us. */
3178 if (tdep
->jb_pc
>= 0)
3179 set_gdbarch_get_longjmp_target (gdbarch
, aarch64_get_longjmp_target
);
3181 set_gdbarch_gen_return_address (gdbarch
, aarch64_gen_return_address
);
3183 tdesc_use_registers (gdbarch
, tdesc
, tdesc_data
);
3185 /* Add standard register aliases. */
3186 for (i
= 0; i
< ARRAY_SIZE (aarch64_register_aliases
); i
++)
3187 user_reg_add (gdbarch
, aarch64_register_aliases
[i
].name
,
3188 value_of_aarch64_user_reg
,
3189 &aarch64_register_aliases
[i
].regnum
);
3191 register_aarch64_ravenscar_ops (gdbarch
);
3197 aarch64_dump_tdep (struct gdbarch
*gdbarch
, struct ui_file
*file
)
3199 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
3204 fprintf_unfiltered (file
, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
3205 paddress (gdbarch
, tdep
->lowest_pc
));
3211 static void aarch64_process_record_test (void);
3216 _initialize_aarch64_tdep (void)
3218 gdbarch_register (bfd_arch_aarch64
, aarch64_gdbarch_init
,
3221 /* Debug this file's internals. */
3222 add_setshow_boolean_cmd ("aarch64", class_maintenance
, &aarch64_debug
, _("\
3223 Set AArch64 debugging."), _("\
3224 Show AArch64 debugging."), _("\
3225 When on, AArch64 specific debugging is enabled."),
3228 &setdebuglist
, &showdebuglist
);
3231 selftests::register_test ("aarch64-analyze-prologue",
3232 selftests::aarch64_analyze_prologue_test
);
3233 selftests::register_test ("aarch64-process-record",
3234 selftests::aarch64_process_record_test
);
3235 selftests::record_xml_tdesc ("aarch64.xml",
3236 aarch64_create_target_description (0, false));
/* AArch64 process record-replay related structures, defines etc.  */

/* Allocate and fill the register-record array REGS with the first
   LENGTH entries of RECORD_BUF.  Does nothing when LENGTH is zero.  */
#define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
        do  \
          { \
            unsigned int reg_len = LENGTH; \
            if (reg_len) \
              { \
                REGS = XNEWVEC (uint32_t, reg_len); \
                memcpy(&REGS[0], &RECORD_BUF[0], sizeof(uint32_t)*LENGTH); \
              } \
          } \
        while (0)

/* Allocate and fill the memory-record array MEMS with the first
   LENGTH (len, addr) pairs of RECORD_BUF.  No-op when LENGTH is zero.  */
#define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
        do  \
          { \
            unsigned int mem_len = LENGTH; \
            if (mem_len) \
              { \
                MEMS =  XNEWVEC (struct aarch64_mem_r, mem_len);  \
                memcpy(&MEMS->len, &RECORD_BUF[0], \
                       sizeof(struct aarch64_mem_r) * LENGTH); \
              } \
          } \
          while (0)
3267 /* AArch64 record/replay structures and enumerations. */
3269 struct aarch64_mem_r
3271 uint64_t len
; /* Record length. */
3272 uint64_t addr
; /* Memory address. */
3275 enum aarch64_record_result
3277 AARCH64_RECORD_SUCCESS
,
3278 AARCH64_RECORD_UNSUPPORTED
,
3279 AARCH64_RECORD_UNKNOWN
3282 typedef struct insn_decode_record_t
3284 struct gdbarch
*gdbarch
;
3285 struct regcache
*regcache
;
3286 CORE_ADDR this_addr
; /* Address of insn to be recorded. */
3287 uint32_t aarch64_insn
; /* Insn to be recorded. */
3288 uint32_t mem_rec_count
; /* Count of memory records. */
3289 uint32_t reg_rec_count
; /* Count of register records. */
3290 uint32_t *aarch64_regs
; /* Registers to be recorded. */
3291 struct aarch64_mem_r
*aarch64_mems
; /* Memory locations to be recorded. */
3292 } insn_decode_record
;
3294 /* Record handler for data processing - register instructions. */
3297 aarch64_record_data_proc_reg (insn_decode_record
*aarch64_insn_r
)
3299 uint8_t reg_rd
, insn_bits24_27
, insn_bits21_23
;
3300 uint32_t record_buf
[4];
3302 reg_rd
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3303 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
3304 insn_bits21_23
= bits (aarch64_insn_r
->aarch64_insn
, 21, 23);
3306 if (!bit (aarch64_insn_r
->aarch64_insn
, 28))
3310 /* Logical (shifted register). */
3311 if (insn_bits24_27
== 0x0a)
3312 setflags
= (bits (aarch64_insn_r
->aarch64_insn
, 29, 30) == 0x03);
3314 else if (insn_bits24_27
== 0x0b)
3315 setflags
= bit (aarch64_insn_r
->aarch64_insn
, 29);
3317 return AARCH64_RECORD_UNKNOWN
;
3319 record_buf
[0] = reg_rd
;
3320 aarch64_insn_r
->reg_rec_count
= 1;
3322 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_CPSR_REGNUM
;
3326 if (insn_bits24_27
== 0x0b)
3328 /* Data-processing (3 source). */
3329 record_buf
[0] = reg_rd
;
3330 aarch64_insn_r
->reg_rec_count
= 1;
3332 else if (insn_bits24_27
== 0x0a)
3334 if (insn_bits21_23
== 0x00)
3336 /* Add/subtract (with carry). */
3337 record_buf
[0] = reg_rd
;
3338 aarch64_insn_r
->reg_rec_count
= 1;
3339 if (bit (aarch64_insn_r
->aarch64_insn
, 29))
3341 record_buf
[1] = AARCH64_CPSR_REGNUM
;
3342 aarch64_insn_r
->reg_rec_count
= 2;
3345 else if (insn_bits21_23
== 0x02)
3347 /* Conditional compare (register) and conditional compare
3348 (immediate) instructions. */
3349 record_buf
[0] = AARCH64_CPSR_REGNUM
;
3350 aarch64_insn_r
->reg_rec_count
= 1;
3352 else if (insn_bits21_23
== 0x04 || insn_bits21_23
== 0x06)
3354 /* CConditional select. */
3355 /* Data-processing (2 source). */
3356 /* Data-processing (1 source). */
3357 record_buf
[0] = reg_rd
;
3358 aarch64_insn_r
->reg_rec_count
= 1;
3361 return AARCH64_RECORD_UNKNOWN
;
3365 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3367 return AARCH64_RECORD_SUCCESS
;
3370 /* Record handler for data processing - immediate instructions. */
3373 aarch64_record_data_proc_imm (insn_decode_record
*aarch64_insn_r
)
3375 uint8_t reg_rd
, insn_bit23
, insn_bits24_27
, setflags
;
3376 uint32_t record_buf
[4];
3378 reg_rd
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3379 insn_bit23
= bit (aarch64_insn_r
->aarch64_insn
, 23);
3380 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
3382 if (insn_bits24_27
== 0x00 /* PC rel addressing. */
3383 || insn_bits24_27
== 0x03 /* Bitfield and Extract. */
3384 || (insn_bits24_27
== 0x02 && insn_bit23
)) /* Move wide (immediate). */
3386 record_buf
[0] = reg_rd
;
3387 aarch64_insn_r
->reg_rec_count
= 1;
3389 else if (insn_bits24_27
== 0x01)
3391 /* Add/Subtract (immediate). */
3392 setflags
= bit (aarch64_insn_r
->aarch64_insn
, 29);
3393 record_buf
[0] = reg_rd
;
3394 aarch64_insn_r
->reg_rec_count
= 1;
3396 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_CPSR_REGNUM
;
3398 else if (insn_bits24_27
== 0x02 && !insn_bit23
)
3400 /* Logical (immediate). */
3401 setflags
= bits (aarch64_insn_r
->aarch64_insn
, 29, 30) == 0x03;
3402 record_buf
[0] = reg_rd
;
3403 aarch64_insn_r
->reg_rec_count
= 1;
3405 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_CPSR_REGNUM
;
3408 return AARCH64_RECORD_UNKNOWN
;
3410 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3412 return AARCH64_RECORD_SUCCESS
;
3415 /* Record handler for branch, exception generation and system instructions. */
3418 aarch64_record_branch_except_sys (insn_decode_record
*aarch64_insn_r
)
3420 struct gdbarch_tdep
*tdep
= gdbarch_tdep (aarch64_insn_r
->gdbarch
);
3421 uint8_t insn_bits24_27
, insn_bits28_31
, insn_bits22_23
;
3422 uint32_t record_buf
[4];
3424 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
3425 insn_bits28_31
= bits (aarch64_insn_r
->aarch64_insn
, 28, 31);
3426 insn_bits22_23
= bits (aarch64_insn_r
->aarch64_insn
, 22, 23);
3428 if (insn_bits28_31
== 0x0d)
3430 /* Exception generation instructions. */
3431 if (insn_bits24_27
== 0x04)
3433 if (!bits (aarch64_insn_r
->aarch64_insn
, 2, 4)
3434 && !bits (aarch64_insn_r
->aarch64_insn
, 21, 23)
3435 && bits (aarch64_insn_r
->aarch64_insn
, 0, 1) == 0x01)
3437 ULONGEST svc_number
;
3439 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, 8,
3441 return tdep
->aarch64_syscall_record (aarch64_insn_r
->regcache
,
3445 return AARCH64_RECORD_UNSUPPORTED
;
3447 /* System instructions. */
3448 else if (insn_bits24_27
== 0x05 && insn_bits22_23
== 0x00)
3450 uint32_t reg_rt
, reg_crn
;
3452 reg_rt
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3453 reg_crn
= bits (aarch64_insn_r
->aarch64_insn
, 12, 15);
3455 /* Record rt in case of sysl and mrs instructions. */
3456 if (bit (aarch64_insn_r
->aarch64_insn
, 21))
3458 record_buf
[0] = reg_rt
;
3459 aarch64_insn_r
->reg_rec_count
= 1;
3461 /* Record cpsr for hint and msr(immediate) instructions. */
3462 else if (reg_crn
== 0x02 || reg_crn
== 0x04)
3464 record_buf
[0] = AARCH64_CPSR_REGNUM
;
3465 aarch64_insn_r
->reg_rec_count
= 1;
3468 /* Unconditional branch (register). */
3469 else if((insn_bits24_27
& 0x0e) == 0x06)
3471 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_PC_REGNUM
;
3472 if (bits (aarch64_insn_r
->aarch64_insn
, 21, 22) == 0x01)
3473 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_LR_REGNUM
;
3476 return AARCH64_RECORD_UNKNOWN
;
3478 /* Unconditional branch (immediate). */
3479 else if ((insn_bits28_31
& 0x07) == 0x01 && (insn_bits24_27
& 0x0c) == 0x04)
3481 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_PC_REGNUM
;
3482 if (bit (aarch64_insn_r
->aarch64_insn
, 31))
3483 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_LR_REGNUM
;
3486 /* Compare & branch (immediate), Test & branch (immediate) and
3487 Conditional branch (immediate). */
3488 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_PC_REGNUM
;
3490 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3492 return AARCH64_RECORD_SUCCESS
;
3495 /* Record handler for advanced SIMD load and store instructions. */
3498 aarch64_record_asimd_load_store (insn_decode_record
*aarch64_insn_r
)
3501 uint64_t addr_offset
= 0;
3502 uint32_t record_buf
[24];
3503 uint64_t record_buf_mem
[24];
3504 uint32_t reg_rn
, reg_rt
;
3505 uint32_t reg_index
= 0, mem_index
= 0;
3506 uint8_t opcode_bits
, size_bits
;
3508 reg_rt
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3509 reg_rn
= bits (aarch64_insn_r
->aarch64_insn
, 5, 9);
3510 size_bits
= bits (aarch64_insn_r
->aarch64_insn
, 10, 11);
3511 opcode_bits
= bits (aarch64_insn_r
->aarch64_insn
, 12, 15);
3512 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
, &address
);
3515 debug_printf ("Process record: Advanced SIMD load/store\n");
3517 /* Load/store single structure. */
3518 if (bit (aarch64_insn_r
->aarch64_insn
, 24))
3520 uint8_t sindex
, scale
, selem
, esize
, replicate
= 0;
3521 scale
= opcode_bits
>> 2;
3522 selem
= ((opcode_bits
& 0x02) |
3523 bit (aarch64_insn_r
->aarch64_insn
, 21)) + 1;
3527 if (size_bits
& 0x01)
3528 return AARCH64_RECORD_UNKNOWN
;
3531 if ((size_bits
>> 1) & 0x01)
3532 return AARCH64_RECORD_UNKNOWN
;
3533 if (size_bits
& 0x01)
3535 if (!((opcode_bits
>> 1) & 0x01))
3538 return AARCH64_RECORD_UNKNOWN
;
3542 if (bit (aarch64_insn_r
->aarch64_insn
, 22) && !(opcode_bits
& 0x01))
3549 return AARCH64_RECORD_UNKNOWN
;
3555 for (sindex
= 0; sindex
< selem
; sindex
++)
3557 record_buf
[reg_index
++] = reg_rt
+ AARCH64_V0_REGNUM
;
3558 reg_rt
= (reg_rt
+ 1) % 32;
3562 for (sindex
= 0; sindex
< selem
; sindex
++)
3564 if (bit (aarch64_insn_r
->aarch64_insn
, 22))
3565 record_buf
[reg_index
++] = reg_rt
+ AARCH64_V0_REGNUM
;
3568 record_buf_mem
[mem_index
++] = esize
/ 8;
3569 record_buf_mem
[mem_index
++] = address
+ addr_offset
;
3571 addr_offset
= addr_offset
+ (esize
/ 8);
3572 reg_rt
= (reg_rt
+ 1) % 32;
3576 /* Load/store multiple structure. */
3579 uint8_t selem
, esize
, rpt
, elements
;
3580 uint8_t eindex
, rindex
;
3582 esize
= 8 << size_bits
;
3583 if (bit (aarch64_insn_r
->aarch64_insn
, 30))
3584 elements
= 128 / esize
;
3586 elements
= 64 / esize
;
3588 switch (opcode_bits
)
3590 /*LD/ST4 (4 Registers). */
3595 /*LD/ST1 (4 Registers). */
3600 /*LD/ST3 (3 Registers). */
3605 /*LD/ST1 (3 Registers). */
3610 /*LD/ST1 (1 Register). */
3615 /*LD/ST2 (2 Registers). */
3620 /*LD/ST1 (2 Registers). */
3626 return AARCH64_RECORD_UNSUPPORTED
;
3629 for (rindex
= 0; rindex
< rpt
; rindex
++)
3630 for (eindex
= 0; eindex
< elements
; eindex
++)
3632 uint8_t reg_tt
, sindex
;
3633 reg_tt
= (reg_rt
+ rindex
) % 32;
3634 for (sindex
= 0; sindex
< selem
; sindex
++)
3636 if (bit (aarch64_insn_r
->aarch64_insn
, 22))
3637 record_buf
[reg_index
++] = reg_tt
+ AARCH64_V0_REGNUM
;
3640 record_buf_mem
[mem_index
++] = esize
/ 8;
3641 record_buf_mem
[mem_index
++] = address
+ addr_offset
;
3643 addr_offset
= addr_offset
+ (esize
/ 8);
3644 reg_tt
= (reg_tt
+ 1) % 32;
3649 if (bit (aarch64_insn_r
->aarch64_insn
, 23))
3650 record_buf
[reg_index
++] = reg_rn
;
3652 aarch64_insn_r
->reg_rec_count
= reg_index
;
3653 aarch64_insn_r
->mem_rec_count
= mem_index
/ 2;
3654 MEM_ALLOC (aarch64_insn_r
->aarch64_mems
, aarch64_insn_r
->mem_rec_count
,
3656 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3658 return AARCH64_RECORD_SUCCESS
;
3661 /* Record handler for load and store instructions. */
3664 aarch64_record_load_store (insn_decode_record
*aarch64_insn_r
)
3666 uint8_t insn_bits24_27
, insn_bits28_29
, insn_bits10_11
;
3667 uint8_t insn_bit23
, insn_bit21
;
3668 uint8_t opc
, size_bits
, ld_flag
, vector_flag
;
3669 uint32_t reg_rn
, reg_rt
, reg_rt2
;
3670 uint64_t datasize
, offset
;
3671 uint32_t record_buf
[8];
3672 uint64_t record_buf_mem
[8];
3675 insn_bits10_11
= bits (aarch64_insn_r
->aarch64_insn
, 10, 11);
3676 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
3677 insn_bits28_29
= bits (aarch64_insn_r
->aarch64_insn
, 28, 29);
3678 insn_bit21
= bit (aarch64_insn_r
->aarch64_insn
, 21);
3679 insn_bit23
= bit (aarch64_insn_r
->aarch64_insn
, 23);
3680 ld_flag
= bit (aarch64_insn_r
->aarch64_insn
, 22);
3681 vector_flag
= bit (aarch64_insn_r
->aarch64_insn
, 26);
3682 reg_rt
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3683 reg_rn
= bits (aarch64_insn_r
->aarch64_insn
, 5, 9);
3684 reg_rt2
= bits (aarch64_insn_r
->aarch64_insn
, 10, 14);
3685 size_bits
= bits (aarch64_insn_r
->aarch64_insn
, 30, 31);
3687 /* Load/store exclusive. */
3688 if (insn_bits24_27
== 0x08 && insn_bits28_29
== 0x00)
3691 debug_printf ("Process record: load/store exclusive\n");
3695 record_buf
[0] = reg_rt
;
3696 aarch64_insn_r
->reg_rec_count
= 1;
3699 record_buf
[1] = reg_rt2
;
3700 aarch64_insn_r
->reg_rec_count
= 2;
3706 datasize
= (8 << size_bits
) * 2;
3708 datasize
= (8 << size_bits
);
3709 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
3711 record_buf_mem
[0] = datasize
/ 8;
3712 record_buf_mem
[1] = address
;
3713 aarch64_insn_r
->mem_rec_count
= 1;
3716 /* Save register rs. */
3717 record_buf
[0] = bits (aarch64_insn_r
->aarch64_insn
, 16, 20);
3718 aarch64_insn_r
->reg_rec_count
= 1;
3722 /* Load register (literal) instructions decoding. */
3723 else if ((insn_bits24_27
& 0x0b) == 0x08 && insn_bits28_29
== 0x01)
3726 debug_printf ("Process record: load register (literal)\n");
3728 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
3730 record_buf
[0] = reg_rt
;
3731 aarch64_insn_r
->reg_rec_count
= 1;
3733 /* All types of load/store pair instructions decoding. */
3734 else if ((insn_bits24_27
& 0x0a) == 0x08 && insn_bits28_29
== 0x02)
3737 debug_printf ("Process record: load/store pair\n");
3743 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
3744 record_buf
[1] = reg_rt2
+ AARCH64_V0_REGNUM
;
3748 record_buf
[0] = reg_rt
;
3749 record_buf
[1] = reg_rt2
;
3751 aarch64_insn_r
->reg_rec_count
= 2;
3756 imm7_off
= bits (aarch64_insn_r
->aarch64_insn
, 15, 21);
3758 size_bits
= size_bits
>> 1;
3759 datasize
= 8 << (2 + size_bits
);
3760 offset
= (imm7_off
& 0x40) ? (~imm7_off
& 0x007f) + 1 : imm7_off
;
3761 offset
= offset
<< (2 + size_bits
);
3762 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
3764 if (!((insn_bits24_27
& 0x0b) == 0x08 && insn_bit23
))
3766 if (imm7_off
& 0x40)
3767 address
= address
- offset
;
3769 address
= address
+ offset
;
3772 record_buf_mem
[0] = datasize
/ 8;
3773 record_buf_mem
[1] = address
;
3774 record_buf_mem
[2] = datasize
/ 8;
3775 record_buf_mem
[3] = address
+ (datasize
/ 8);
3776 aarch64_insn_r
->mem_rec_count
= 2;
3778 if (bit (aarch64_insn_r
->aarch64_insn
, 23))
3779 record_buf
[aarch64_insn_r
->reg_rec_count
++] = reg_rn
;
3781 /* Load/store register (unsigned immediate) instructions. */
3782 else if ((insn_bits24_27
& 0x0b) == 0x09 && insn_bits28_29
== 0x03)
3784 opc
= bits (aarch64_insn_r
->aarch64_insn
, 22, 23);
3794 if (size_bits
== 0x3 && vector_flag
== 0x0 && opc
== 0x2)
3796 /* PRFM (immediate) */
3797 return AARCH64_RECORD_SUCCESS
;
3799 else if (size_bits
== 0x2 && vector_flag
== 0x0 && opc
== 0x2)
3801 /* LDRSW (immediate) */
3815 debug_printf ("Process record: load/store (unsigned immediate):"
3816 " size %x V %d opc %x\n", size_bits
, vector_flag
,
3822 offset
= bits (aarch64_insn_r
->aarch64_insn
, 10, 21);
3823 datasize
= 8 << size_bits
;
3824 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
3826 offset
= offset
<< size_bits
;
3827 address
= address
+ offset
;
3829 record_buf_mem
[0] = datasize
>> 3;
3830 record_buf_mem
[1] = address
;
3831 aarch64_insn_r
->mem_rec_count
= 1;
3836 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
3838 record_buf
[0] = reg_rt
;
3839 aarch64_insn_r
->reg_rec_count
= 1;
3842 /* Load/store register (register offset) instructions. */
3843 else if ((insn_bits24_27
& 0x0b) == 0x08 && insn_bits28_29
== 0x03
3844 && insn_bits10_11
== 0x02 && insn_bit21
)
3847 debug_printf ("Process record: load/store (register offset)\n");
3848 opc
= bits (aarch64_insn_r
->aarch64_insn
, 22, 23);
3855 if (size_bits
!= 0x03)
3858 return AARCH64_RECORD_UNKNOWN
;
3862 ULONGEST reg_rm_val
;
3864 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
,
3865 bits (aarch64_insn_r
->aarch64_insn
, 16, 20), ®_rm_val
);
3866 if (bit (aarch64_insn_r
->aarch64_insn
, 12))
3867 offset
= reg_rm_val
<< size_bits
;
3869 offset
= reg_rm_val
;
3870 datasize
= 8 << size_bits
;
3871 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
3873 address
= address
+ offset
;
3874 record_buf_mem
[0] = datasize
>> 3;
3875 record_buf_mem
[1] = address
;
3876 aarch64_insn_r
->mem_rec_count
= 1;
3881 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
3883 record_buf
[0] = reg_rt
;
3884 aarch64_insn_r
->reg_rec_count
= 1;
3887 /* Load/store register (immediate and unprivileged) instructions. */
3888 else if ((insn_bits24_27
& 0x0b) == 0x08 && insn_bits28_29
== 0x03
3893 debug_printf ("Process record: load/store "
3894 "(immediate and unprivileged)\n");
3896 opc
= bits (aarch64_insn_r
->aarch64_insn
, 22, 23);
3903 if (size_bits
!= 0x03)
3906 return AARCH64_RECORD_UNKNOWN
;
3911 imm9_off
= bits (aarch64_insn_r
->aarch64_insn
, 12, 20);
3912 offset
= (imm9_off
& 0x0100) ? (((~imm9_off
) & 0x01ff) + 1) : imm9_off
;
3913 datasize
= 8 << size_bits
;
3914 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
3916 if (insn_bits10_11
!= 0x01)
3918 if (imm9_off
& 0x0100)
3919 address
= address
- offset
;
3921 address
= address
+ offset
;
3923 record_buf_mem
[0] = datasize
>> 3;
3924 record_buf_mem
[1] = address
;
3925 aarch64_insn_r
->mem_rec_count
= 1;
3930 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
3932 record_buf
[0] = reg_rt
;
3933 aarch64_insn_r
->reg_rec_count
= 1;
3935 if (insn_bits10_11
== 0x01 || insn_bits10_11
== 0x03)
3936 record_buf
[aarch64_insn_r
->reg_rec_count
++] = reg_rn
;
3938 /* Advanced SIMD load/store instructions. */
3940 return aarch64_record_asimd_load_store (aarch64_insn_r
);
3942 MEM_ALLOC (aarch64_insn_r
->aarch64_mems
, aarch64_insn_r
->mem_rec_count
,
3944 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3946 return AARCH64_RECORD_SUCCESS
;
3949 /* Record handler for data processing SIMD and floating point instructions. */
3952 aarch64_record_data_proc_simd_fp (insn_decode_record
*aarch64_insn_r
)
3954 uint8_t insn_bit21
, opcode
, rmode
, reg_rd
;
3955 uint8_t insn_bits24_27
, insn_bits28_31
, insn_bits10_11
, insn_bits12_15
;
3956 uint8_t insn_bits11_14
;
3957 uint32_t record_buf
[2];
3959 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
3960 insn_bits28_31
= bits (aarch64_insn_r
->aarch64_insn
, 28, 31);
3961 insn_bits10_11
= bits (aarch64_insn_r
->aarch64_insn
, 10, 11);
3962 insn_bits12_15
= bits (aarch64_insn_r
->aarch64_insn
, 12, 15);
3963 insn_bits11_14
= bits (aarch64_insn_r
->aarch64_insn
, 11, 14);
3964 opcode
= bits (aarch64_insn_r
->aarch64_insn
, 16, 18);
3965 rmode
= bits (aarch64_insn_r
->aarch64_insn
, 19, 20);
3966 reg_rd
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3967 insn_bit21
= bit (aarch64_insn_r
->aarch64_insn
, 21);
3970 debug_printf ("Process record: data processing SIMD/FP: ");
3972 if ((insn_bits28_31
& 0x05) == 0x01 && insn_bits24_27
== 0x0e)
3974 /* Floating point - fixed point conversion instructions. */
3978 debug_printf ("FP - fixed point conversion");
3980 if ((opcode
>> 1) == 0x0 && rmode
== 0x03)
3981 record_buf
[0] = reg_rd
;
3983 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3985 /* Floating point - conditional compare instructions. */
3986 else if (insn_bits10_11
== 0x01)
3989 debug_printf ("FP - conditional compare");
3991 record_buf
[0] = AARCH64_CPSR_REGNUM
;
3993 /* Floating point - data processing (2-source) and
3994 conditional select instructions. */
3995 else if (insn_bits10_11
== 0x02 || insn_bits10_11
== 0x03)
3998 debug_printf ("FP - DP (2-source)");
4000 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
4002 else if (insn_bits10_11
== 0x00)
4004 /* Floating point - immediate instructions. */
4005 if ((insn_bits12_15
& 0x01) == 0x01
4006 || (insn_bits12_15
& 0x07) == 0x04)
4009 debug_printf ("FP - immediate");
4010 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
4012 /* Floating point - compare instructions. */
4013 else if ((insn_bits12_15
& 0x03) == 0x02)
4016 debug_printf ("FP - immediate");
4017 record_buf
[0] = AARCH64_CPSR_REGNUM
;
4019 /* Floating point - integer conversions instructions. */
4020 else if (insn_bits12_15
== 0x00)
4022 /* Convert float to integer instruction. */
4023 if (!(opcode
>> 1) || ((opcode
>> 1) == 0x02 && !rmode
))
4026 debug_printf ("float to int conversion");
4028 record_buf
[0] = reg_rd
+ AARCH64_X0_REGNUM
;
4030 /* Convert integer to float instruction. */
4031 else if ((opcode
>> 1) == 0x01 && !rmode
)
4034 debug_printf ("int to float conversion");
4036 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
4038 /* Move float to integer instruction. */
4039 else if ((opcode
>> 1) == 0x03)
4042 debug_printf ("move float to int");
4044 if (!(opcode
& 0x01))
4045 record_buf
[0] = reg_rd
+ AARCH64_X0_REGNUM
;
4047 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
4050 return AARCH64_RECORD_UNKNOWN
;
4053 return AARCH64_RECORD_UNKNOWN
;
4056 return AARCH64_RECORD_UNKNOWN
;
4058 else if ((insn_bits28_31
& 0x09) == 0x00 && insn_bits24_27
== 0x0e)
4061 debug_printf ("SIMD copy");
4063 /* Advanced SIMD copy instructions. */
4064 if (!bits (aarch64_insn_r
->aarch64_insn
, 21, 23)
4065 && !bit (aarch64_insn_r
->aarch64_insn
, 15)
4066 && bit (aarch64_insn_r
->aarch64_insn
, 10))
4068 if (insn_bits11_14
== 0x05 || insn_bits11_14
== 0x07)
4069 record_buf
[0] = reg_rd
+ AARCH64_X0_REGNUM
;
4071 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
4074 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
4076 /* All remaining floating point or advanced SIMD instructions. */
4080 debug_printf ("all remain");
4082 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
4086 debug_printf ("\n");
4088 aarch64_insn_r
->reg_rec_count
++;
4089 gdb_assert (aarch64_insn_r
->reg_rec_count
== 1);
4090 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
4092 return AARCH64_RECORD_SUCCESS
;
4095 /* Decodes insns type and invokes its record handler. */
4098 aarch64_record_decode_insn_handler (insn_decode_record
*aarch64_insn_r
)
4100 uint32_t ins_bit25
, ins_bit26
, ins_bit27
, ins_bit28
;
4102 ins_bit25
= bit (aarch64_insn_r
->aarch64_insn
, 25);
4103 ins_bit26
= bit (aarch64_insn_r
->aarch64_insn
, 26);
4104 ins_bit27
= bit (aarch64_insn_r
->aarch64_insn
, 27);
4105 ins_bit28
= bit (aarch64_insn_r
->aarch64_insn
, 28);
4107 /* Data processing - immediate instructions. */
4108 if (!ins_bit26
&& !ins_bit27
&& ins_bit28
)
4109 return aarch64_record_data_proc_imm (aarch64_insn_r
);
4111 /* Branch, exception generation and system instructions. */
4112 if (ins_bit26
&& !ins_bit27
&& ins_bit28
)
4113 return aarch64_record_branch_except_sys (aarch64_insn_r
);
4115 /* Load and store instructions. */
4116 if (!ins_bit25
&& ins_bit27
)
4117 return aarch64_record_load_store (aarch64_insn_r
);
4119 /* Data processing - register instructions. */
4120 if (ins_bit25
&& !ins_bit26
&& ins_bit27
)
4121 return aarch64_record_data_proc_reg (aarch64_insn_r
);
4123 /* Data processing - SIMD and floating point instructions. */
4124 if (ins_bit25
&& ins_bit26
&& ins_bit27
)
4125 return aarch64_record_data_proc_simd_fp (aarch64_insn_r
);
4127 return AARCH64_RECORD_UNSUPPORTED
;
4130 /* Cleans up local record registers and memory allocations. */
4133 deallocate_reg_mem (insn_decode_record
*record
)
4135 xfree (record
->aarch64_regs
);
4136 xfree (record
->aarch64_mems
);
4140 namespace selftests
{
4143 aarch64_process_record_test (void)
4145 struct gdbarch_info info
;
4148 gdbarch_info_init (&info
);
4149 info
.bfd_arch_info
= bfd_scan_arch ("aarch64");
4151 struct gdbarch
*gdbarch
= gdbarch_find_by_info (info
);
4152 SELF_CHECK (gdbarch
!= NULL
);
4154 insn_decode_record aarch64_record
;
4156 memset (&aarch64_record
, 0, sizeof (insn_decode_record
));
4157 aarch64_record
.regcache
= NULL
;
4158 aarch64_record
.this_addr
= 0;
4159 aarch64_record
.gdbarch
= gdbarch
;
4161 /* 20 00 80 f9 prfm pldl1keep, [x1] */
4162 aarch64_record
.aarch64_insn
= 0xf9800020;
4163 ret
= aarch64_record_decode_insn_handler (&aarch64_record
);
4164 SELF_CHECK (ret
== AARCH64_RECORD_SUCCESS
);
4165 SELF_CHECK (aarch64_record
.reg_rec_count
== 0);
4166 SELF_CHECK (aarch64_record
.mem_rec_count
== 0);
4168 deallocate_reg_mem (&aarch64_record
);
4171 } // namespace selftests
4172 #endif /* GDB_SELF_TEST */
4174 /* Parse the current instruction and record the values of the registers and
4175 memory that will be changed in current instruction to record_arch_list
4176 return -1 if something is wrong. */
4179 aarch64_process_record (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
4180 CORE_ADDR insn_addr
)
4182 uint32_t rec_no
= 0;
4183 uint8_t insn_size
= 4;
4185 gdb_byte buf
[insn_size
];
4186 insn_decode_record aarch64_record
;
4188 memset (&buf
[0], 0, insn_size
);
4189 memset (&aarch64_record
, 0, sizeof (insn_decode_record
));
4190 target_read_memory (insn_addr
, &buf
[0], insn_size
);
4191 aarch64_record
.aarch64_insn
4192 = (uint32_t) extract_unsigned_integer (&buf
[0],
4194 gdbarch_byte_order (gdbarch
));
4195 aarch64_record
.regcache
= regcache
;
4196 aarch64_record
.this_addr
= insn_addr
;
4197 aarch64_record
.gdbarch
= gdbarch
;
4199 ret
= aarch64_record_decode_insn_handler (&aarch64_record
);
4200 if (ret
== AARCH64_RECORD_UNSUPPORTED
)
4202 printf_unfiltered (_("Process record does not support instruction "
4203 "0x%0x at address %s.\n"),
4204 aarch64_record
.aarch64_insn
,
4205 paddress (gdbarch
, insn_addr
));
4211 /* Record registers. */
4212 record_full_arch_list_add_reg (aarch64_record
.regcache
,
4214 /* Always record register CPSR. */
4215 record_full_arch_list_add_reg (aarch64_record
.regcache
,
4216 AARCH64_CPSR_REGNUM
);
4217 if (aarch64_record
.aarch64_regs
)
4218 for (rec_no
= 0; rec_no
< aarch64_record
.reg_rec_count
; rec_no
++)
4219 if (record_full_arch_list_add_reg (aarch64_record
.regcache
,
4220 aarch64_record
.aarch64_regs
[rec_no
]))
4223 /* Record memories. */
4224 if (aarch64_record
.aarch64_mems
)
4225 for (rec_no
= 0; rec_no
< aarch64_record
.mem_rec_count
; rec_no
++)
4226 if (record_full_arch_list_add_mem
4227 ((CORE_ADDR
)aarch64_record
.aarch64_mems
[rec_no
].addr
,
4228 aarch64_record
.aarch64_mems
[rec_no
].len
))
4231 if (record_full_arch_list_add_end ())
4235 deallocate_reg_mem (&aarch64_record
);