1 /* Common target dependent code for GDB on AArch64 systems.
3 Copyright (C) 2009-2019 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
6 This file is part of GDB.
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
29 #include "reggroups.h"
31 #include "arch-utils.h"
33 #include "frame-unwind.h"
34 #include "frame-base.h"
35 #include "trad-frame.h"
38 #include "dwarf2-frame.h"
40 #include "prologue-value.h"
41 #include "target-descriptions.h"
42 #include "user-regs.h"
47 #include "gdbsupport/selftest.h"
49 #include "aarch64-tdep.h"
50 #include "aarch64-ravenscar-thread.h"
53 #include "elf/aarch64.h"
55 #include "gdbsupport/vec.h"
58 #include "record-full.h"
59 #include "arch/aarch64-insn.h"
62 #include "opcode/aarch64.h"
/* Bit-field extraction helpers for decoding AArch64 instruction words.

   submask (x)       -- mask covering bits [0, x] inclusive.
   bit (obj, st)     -- bit ST of OBJ.
   bits (obj, st, fn)-- bits [ST, FN] inclusive of OBJ.

   Use 1ULL rather than 1L: with a 32-bit long, submask (31) would
   shift by the full width of the type, which is undefined behavior.
   An unsigned 64-bit constant makes every field width up to 63 bits
   well defined on all hosts.  */
#define submask(x) ((1ULL << ((x) + 1)) - 1)
#define bit(obj,st) (((obj) >> (st)) & 1)
#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))

/* A Homogeneous Floating-Point or Short-Vector Aggregate may have at most
   four members.  */
#define HA_MAX_NUM_FLDS 4
73 /* All possible aarch64 target descriptors. */
/* Cache of target descriptions, indexed by SVE vector-length quotient
   (index 0 == no SVE) and by whether pauth registers are present.
   NOTE(review): extraction appears to have dropped tokens around this
   declaration; verify against the upstream file.  */
74 struct target_desc
*tdesc_aarch64_list
[AARCH64_MAX_SVE_VQ
+ 1][2/*pauth*/];
76 /* The standard register names, and all the valid aliases for them. */
/* Table mapping user-visible alias names to raw register numbers:
   "fp"/"lr"/"sp" for the 64-bit frame/link/stack registers, and
   "w0".."w30" as 32-bit views of x0..x30.  "ip0"/"ip1" are the
   intra-procedure-call scratch registers x16/x17.
   NOTE(review): the struct head and its regnum field appear to have
   been lost in extraction; verify against upstream.  */
79 const char *const name
;
81 } aarch64_register_aliases
[] =
83 /* 64-bit register names. */
84 {"fp", AARCH64_FP_REGNUM
},
85 {"lr", AARCH64_LR_REGNUM
},
86 {"sp", AARCH64_SP_REGNUM
},
88 /* 32-bit register names. */
89 {"w0", AARCH64_X0_REGNUM
+ 0},
90 {"w1", AARCH64_X0_REGNUM
+ 1},
91 {"w2", AARCH64_X0_REGNUM
+ 2},
92 {"w3", AARCH64_X0_REGNUM
+ 3},
93 {"w4", AARCH64_X0_REGNUM
+ 4},
94 {"w5", AARCH64_X0_REGNUM
+ 5},
95 {"w6", AARCH64_X0_REGNUM
+ 6},
96 {"w7", AARCH64_X0_REGNUM
+ 7},
97 {"w8", AARCH64_X0_REGNUM
+ 8},
98 {"w9", AARCH64_X0_REGNUM
+ 9},
99 {"w10", AARCH64_X0_REGNUM
+ 10},
100 {"w11", AARCH64_X0_REGNUM
+ 11},
101 {"w12", AARCH64_X0_REGNUM
+ 12},
102 {"w13", AARCH64_X0_REGNUM
+ 13},
103 {"w14", AARCH64_X0_REGNUM
+ 14},
104 {"w15", AARCH64_X0_REGNUM
+ 15},
105 {"w16", AARCH64_X0_REGNUM
+ 16},
106 {"w17", AARCH64_X0_REGNUM
+ 17},
107 {"w18", AARCH64_X0_REGNUM
+ 18},
108 {"w19", AARCH64_X0_REGNUM
+ 19},
109 {"w20", AARCH64_X0_REGNUM
+ 20},
110 {"w21", AARCH64_X0_REGNUM
+ 21},
111 {"w22", AARCH64_X0_REGNUM
+ 22},
112 {"w23", AARCH64_X0_REGNUM
+ 23},
113 {"w24", AARCH64_X0_REGNUM
+ 24},
114 {"w25", AARCH64_X0_REGNUM
+ 25},
115 {"w26", AARCH64_X0_REGNUM
+ 26},
116 {"w27", AARCH64_X0_REGNUM
+ 27},
117 {"w28", AARCH64_X0_REGNUM
+ 28},
118 {"w29", AARCH64_X0_REGNUM
+ 29},
119 {"w30", AARCH64_X0_REGNUM
+ 30},
/* x16 and x17 may also be referred to by their AAPCS64 role names.  */
122 {"ip0", AARCH64_X0_REGNUM
+ 16},
123 {"ip1", AARCH64_X0_REGNUM
+ 17}
126 /* The required core 'R' registers. */
/* Names for the 31 general-purpose X registers plus SP; index in this
   array is the offset from AARCH64_X0_REGNUM.  */
127 static const char *const aarch64_r_register_names
[] =
129 /* These registers must appear in consecutive RAW register number
130 order and they must begin with AARCH64_X0_REGNUM! */
131 "x0", "x1", "x2", "x3",
132 "x4", "x5", "x6", "x7",
133 "x8", "x9", "x10", "x11",
134 "x12", "x13", "x14", "x15",
135 "x16", "x17", "x18", "x19",
136 "x20", "x21", "x22", "x23",
137 "x24", "x25", "x26", "x27",
138 "x28", "x29", "x30", "sp",
142 /* The FP/SIMD 'V' registers. */
/* Names for the 32 FP/Advanced-SIMD vector registers; index is the
   offset from AARCH64_V0_REGNUM.  */
143 static const char *const aarch64_v_register_names
[] =
145 /* These registers must appear in consecutive RAW register number
146 order and they must begin with AARCH64_V0_REGNUM! */
147 "v0", "v1", "v2", "v3",
148 "v4", "v5", "v6", "v7",
149 "v8", "v9", "v10", "v11",
150 "v12", "v13", "v14", "v15",
151 "v16", "v17", "v18", "v19",
152 "v20", "v21", "v22", "v23",
153 "v24", "v25", "v26", "v27",
154 "v28", "v29", "v30", "v31",
159 /* The SVE 'Z' and 'P' registers. */
/* Names for the 32 SVE vector (Z) registers followed by the 16 SVE
   predicate (P) registers; index is the offset from
   AARCH64_SVE_Z0_REGNUM.  */
160 static const char *const aarch64_sve_register_names
[] =
162 /* These registers must appear in consecutive RAW register number
163 order and they must begin with AARCH64_SVE_Z0_REGNUM! */
164 "z0", "z1", "z2", "z3",
165 "z4", "z5", "z6", "z7",
166 "z8", "z9", "z10", "z11",
167 "z12", "z13", "z14", "z15",
168 "z16", "z17", "z18", "z19",
169 "z20", "z21", "z22", "z23",
170 "z24", "z25", "z26", "z27",
171 "z28", "z29", "z30", "z31",
173 "p0", "p1", "p2", "p3",
174 "p4", "p5", "p6", "p7",
175 "p8", "p9", "p10", "p11",
176 "p12", "p13", "p14", "p15",
/* Names for the pointer-authentication mask pseudo registers.
   NOTE(review): the string entries themselves appear to have been lost
   in extraction; only the per-entry comments remain.  */
180 static const char *const aarch64_pauth_register_names
[] =
182 /* Authentication mask for data pointer. */
184 /* Authentication mask for code pointer. */
188 /* AArch64 prologue cache structure. */
/* Per-frame cache filled by the prologue analyzer and used by the
   prologue/stub unwinders below.  NOTE(review): most field
   declarations appear to have been dropped in extraction; the comments
   describe (in order) func, prev_pc, prev_sp, available_p, framesize
   and framereg -- verify against upstream.  */
189 struct aarch64_prologue_cache
191 /* The program counter at the start of the function. It is used to
192 identify this frame as a prologue frame. */
195 /* The program counter at the time this frame was created; i.e. where
196 this function was called from. It is used to identify this frame as a
200 /* The stack pointer at the time this frame was created; i.e. the
201 caller's stack pointer when this function was called. It is used
202 to identify this frame. */
205 /* Is the target available to read from? */
208 /* The frame base for this frame is just prev_sp - frame size.
209 FRAMESIZE is the distance from the frame pointer to the
210 initial stack pointer. */
213 /* The register used to hold the frame pointer for this frame. */
216 /* Saved register offsets. */
217 struct trad_frame_saved_reg
*saved_regs
;
/* "show debug aarch64" handler: print to FILE whether AArch64
   debugging output is enabled (VALUE is the setting's printable
   value).  FROM_TTY and C are unused here.  */
221 show_aarch64_debug (struct ui_file
*file
, int from_tty
,
222 struct cmd_list_element
*c
, const char *value
)
224 fprintf_filtered (file
, _("AArch64 debugging is %s.\n"), value
);
229 /* Abstract instruction reader. */
/* Interface that lets the prologue analyzer fetch instructions either
   from the live target or from a canned test vector (see the
   selftests namespace below).  */
231 class abstract_instruction_reader
234 /* Read in one instruction. */
/* Read LEN bytes at MEMADDR in BYTE_ORDER and return them as an
   unsigned integer.  */
235 virtual ULONGEST
read (CORE_ADDR memaddr
, int len
,
236 enum bfd_endian byte_order
) = 0;
239 /* Instruction reader from real target. */
/* Concrete reader that fetches instruction bytes from target (code)
   memory via read_code_unsigned_integer.  */
241 class instruction_reader
: public abstract_instruction_reader
244 ULONGEST
read (CORE_ADDR memaddr
, int len
, enum bfd_endian byte_order
)
247 return read_code_unsigned_integer (memaddr
, len
, byte_order
);
253 /* If address signing is enabled, mask off the signature bits from the link
254 register, which is passed by value in ADDR, using the register values in
/* THIS_FRAME.  If the unwound RA_STATE pseudo register is non-zero
   (return address mangled), clear the PAC signature bits in ADDR with
   the code-pointer mask (CMASK) register and note on the frame that
   the PC required unmasking.  Returns the (possibly) stripped
   address.  */
258 aarch64_frame_unmask_lr (struct gdbarch_tdep
*tdep
,
259 struct frame_info
*this_frame
, CORE_ADDR addr
)
261 if (tdep
->has_pauth ()
262 && frame_unwind_register_unsigned (this_frame
,
263 tdep
->pauth_ra_state_regnum
))
265 int cmask_num
= AARCH64_PAUTH_CMASK_REGNUM (tdep
->pauth_reg_base
);
266 CORE_ADDR cmask
= frame_unwind_register_unsigned (this_frame
, cmask_num
);
267 addr
= addr
& ~cmask
;
269 /* Record in the frame that the link register required unmasking. */
270 set_frame_previous_pc_masked (this_frame
);
276 /* Analyze a prologue, looking for a recognizable stack frame
277 and frame pointer. Scan until we encounter a store that could
278 clobber the stack frame unexpectedly, or an unknown instruction. */
/* GDBARCH       -- architecture being debugged.
   START, LIMIT  -- address range to scan.
   CACHE         -- filled in with the frame register, frame size and
                    saved-register offsets; may be NULL when only the
                    prologue end address is wanted.
   READER        -- instruction source (live memory or test vector).
   Returns the address of the first instruction past the recognized
   prologue (the selftests below check this return value).
   NOTE(review): a number of lines appear to have been lost in
   extraction (braces, local declarations such as the loop index and
   `inst'); compare against the upstream file before trusting
   details.  */
281 aarch64_analyze_prologue (struct gdbarch
*gdbarch
,
282 CORE_ADDR start
, CORE_ADDR limit
,
283 struct aarch64_prologue_cache
*cache
,
284 abstract_instruction_reader
& reader
)
286 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
288 /* Track X registers and D registers in prologue. */
289 pv_t regs
[AARCH64_X_REGISTER_COUNT
+ AARCH64_D_REGISTER_COUNT
];
291 for (i
= 0; i
< AARCH64_X_REGISTER_COUNT
+ AARCH64_D_REGISTER_COUNT
; i
++)
292 regs
[i
] = pv_register (i
, 0);
293 pv_area
stack (AARCH64_SP_REGNUM
, gdbarch_addr_bit (gdbarch
));
/* Walk forward one 4-byte instruction at a time, simulating the
   effect of each recognized instruction on the tracked registers and
   the abstract stack; stop at the first instruction we cannot
   model.  */
295 for (; start
< limit
; start
+= 4)
300 insn
= reader
.read (start
, 4, byte_order_for_code
);
302 if (aarch64_decode_insn (insn
, &inst
, 1, NULL
) != 0)
/* ADD/SUB (immediate): track stack-pointer and frame-register
   adjustments symbolically.  */
305 if (inst
.opcode
->iclass
== addsub_imm
306 && (inst
.opcode
->op
== OP_ADD
307 || strcmp ("sub", inst
.opcode
->name
) == 0))
309 unsigned rd
= inst
.operands
[0].reg
.regno
;
310 unsigned rn
= inst
.operands
[1].reg
.regno
;
312 gdb_assert (aarch64_num_of_operands (inst
.opcode
) == 3);
313 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_Rd_SP
);
314 gdb_assert (inst
.operands
[1].type
== AARCH64_OPND_Rn_SP
);
315 gdb_assert (inst
.operands
[2].type
== AARCH64_OPND_AIMM
);
317 if (inst
.opcode
->op
== OP_ADD
)
319 regs
[rd
] = pv_add_constant (regs
[rn
],
320 inst
.operands
[2].imm
.value
);
324 regs
[rd
] = pv_add_constant (regs
[rn
],
325 -inst
.operands
[2].imm
.value
);
/* ADRP: destination becomes an unknown (PC-relative) value.  */
328 else if (inst
.opcode
->iclass
== pcreladdr
329 && inst
.operands
[1].type
== AARCH64_OPND_ADDR_ADRP
)
331 gdb_assert (aarch64_num_of_operands (inst
.opcode
) == 2);
332 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_Rd
);
334 regs
[inst
.operands
[0].reg
.regno
] = pv_unknown ();
336 else if (inst
.opcode
->iclass
== branch_imm
)
338 /* Stop analysis on branch. */
341 else if (inst
.opcode
->iclass
== condbranch
)
343 /* Stop analysis on branch. */
346 else if (inst
.opcode
->iclass
== branch_reg
)
348 /* Stop analysis on branch. */
351 else if (inst
.opcode
->iclass
== compbranch
)
353 /* Stop analysis on branch. */
356 else if (inst
.opcode
->op
== OP_MOVZ
)
358 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_Rd
);
359 regs
[inst
.operands
[0].reg
.regno
] = pv_unknown ();
/* ORR (shifted register): only the register-move form with a zero
   shift amount and SP as source is modeled; anything else aborts
   the scan.  */
361 else if (inst
.opcode
->iclass
== log_shift
362 && strcmp (inst
.opcode
->name
, "orr") == 0)
364 unsigned rd
= inst
.operands
[0].reg
.regno
;
365 unsigned rn
= inst
.operands
[1].reg
.regno
;
366 unsigned rm
= inst
.operands
[2].reg
.regno
;
368 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_Rd
);
369 gdb_assert (inst
.operands
[1].type
== AARCH64_OPND_Rn
);
370 gdb_assert (inst
.operands
[2].type
== AARCH64_OPND_Rm_SFT
);
372 if (inst
.operands
[2].shifter
.amount
== 0
373 && rn
== AARCH64_SP_REGNUM
)
379 debug_printf ("aarch64: prologue analysis gave up "
380 "addr=%s opcode=0x%x (orr x register)\n",
381 core_addr_to_string_nz (start
), insn
);
/* STUR: record the store into the abstract stack.  */
386 else if (inst
.opcode
->op
== OP_STUR
)
388 unsigned rt
= inst
.operands
[0].reg
.regno
;
389 unsigned rn
= inst
.operands
[1].addr
.base_regno
;
391 = (aarch64_get_qualifier_esize (inst
.operands
[0].qualifier
) == 8);
393 gdb_assert (aarch64_num_of_operands (inst
.opcode
) == 2);
394 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_Rt
);
395 gdb_assert (inst
.operands
[1].type
== AARCH64_OPND_ADDR_SIMM9
);
396 gdb_assert (!inst
.operands
[1].addr
.offset
.is_reg
);
398 stack
.store (pv_add_constant (regs
[rn
],
399 inst
.operands
[1].addr
.offset
.imm
),
400 is64
? 8 : 4, regs
[rt
]);
402 else if ((inst
.opcode
->iclass
== ldstpair_off
403 || (inst
.opcode
->iclass
== ldstpair_indexed
404 && inst
.operands
[2].addr
.preind
))
405 && strcmp ("stp", inst
.opcode
->name
) == 0)
407 /* STP with addressing mode Pre-indexed and Base register. */
410 unsigned rn
= inst
.operands
[2].addr
.base_regno
;
411 int32_t imm
= inst
.operands
[2].addr
.offset
.imm
;
413 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_Rt
414 || inst
.operands
[0].type
== AARCH64_OPND_Ft
);
415 gdb_assert (inst
.operands
[1].type
== AARCH64_OPND_Rt2
416 || inst
.operands
[1].type
== AARCH64_OPND_Ft2
);
417 gdb_assert (inst
.operands
[2].type
== AARCH64_OPND_ADDR_SIMM7
);
418 gdb_assert (!inst
.operands
[2].addr
.offset
.is_reg
);
420 /* If recording this store would invalidate the store area
421 (perhaps because rn is not known) then we should abandon
422 further prologue analysis. */
423 if (stack
.store_would_trash (pv_add_constant (regs
[rn
], imm
)))
426 if (stack
.store_would_trash (pv_add_constant (regs
[rn
], imm
+ 8)))
429 rt1
= inst
.operands
[0].reg
.regno
;
430 rt2
= inst
.operands
[1].reg
.regno
;
431 if (inst
.operands
[0].type
== AARCH64_OPND_Ft
)
433 /* Only bottom 64-bit of each V register (D register) need
435 gdb_assert (inst
.operands
[0].qualifier
== AARCH64_OPND_QLF_S_D
);
/* FP/SIMD registers are tracked after the X registers in REGS,
   hence the AARCH64_X_REGISTER_COUNT offset.  */
436 rt1
+= AARCH64_X_REGISTER_COUNT
;
437 rt2
+= AARCH64_X_REGISTER_COUNT
;
440 stack
.store (pv_add_constant (regs
[rn
], imm
), 8,
442 stack
.store (pv_add_constant (regs
[rn
], imm
+ 8), 8,
445 if (inst
.operands
[2].addr
.writeback
)
446 regs
[rn
] = pv_add_constant (regs
[rn
], imm
);
449 else if ((inst
.opcode
->iclass
== ldst_imm9
/* Signed immediate. */
450 || (inst
.opcode
->iclass
== ldst_pos
/* Unsigned immediate. */
451 && (inst
.opcode
->op
== OP_STR_POS
452 || inst
.opcode
->op
== OP_STRF_POS
)))
453 && inst
.operands
[1].addr
.base_regno
== AARCH64_SP_REGNUM
454 && strcmp ("str", inst
.opcode
->name
) == 0)
456 /* STR (immediate) */
457 unsigned int rt
= inst
.operands
[0].reg
.regno
;
458 int32_t imm
= inst
.operands
[1].addr
.offset
.imm
;
459 unsigned int rn
= inst
.operands
[1].addr
.base_regno
;
461 = (aarch64_get_qualifier_esize (inst
.operands
[0].qualifier
) == 8);
462 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_Rt
463 || inst
.operands
[0].type
== AARCH64_OPND_Ft
);
465 if (inst
.operands
[0].type
== AARCH64_OPND_Ft
)
467 /* Only bottom 64-bit of each V register (D register) need
469 gdb_assert (inst
.operands
[0].qualifier
== AARCH64_OPND_QLF_S_D
);
470 rt
+= AARCH64_X_REGISTER_COUNT
;
473 stack
.store (pv_add_constant (regs
[rn
], imm
),
474 is64
? 8 : 4, regs
[rt
]);
475 if (inst
.operands
[1].addr
.writeback
)
476 regs
[rn
] = pv_add_constant (regs
[rn
], imm
);
478 else if (inst
.opcode
->iclass
== testbranch
)
480 /* Stop analysis on branch. */
/* System instructions: recognize the PAC hint instructions that
   toggle the return-address-signing state; anything else aborts
   the scan.  */
483 else if (inst
.opcode
->iclass
== ic_system
)
485 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
486 int ra_state_val
= 0;
488 if (insn
== 0xd503233f /* paciasp. */
489 || insn
== 0xd503237f /* pacibsp. */)
491 /* Return addresses are mangled. */
494 else if (insn
== 0xd50323bf /* autiasp. */
495 || insn
== 0xd50323ff /* autibsp. */)
497 /* Return addresses are not mangled. */
503 debug_printf ("aarch64: prologue analysis gave up addr=%s"
504 " opcode=0x%x (iclass)\n",
505 core_addr_to_string_nz (start
), insn
);
509 if (tdep
->has_pauth () && cache
!= nullptr)
510 trad_frame_set_value (cache
->saved_regs
,
511 tdep
->pauth_ra_state_regnum
,
518 debug_printf ("aarch64: prologue analysis gave up addr=%s"
520 core_addr_to_string_nz (start
), insn
);
/* Scan finished: decide which register the function uses as its
   frame base and how big the frame is, based on the tracked values
   of FP and SP.  */
529 if (pv_is_register (regs
[AARCH64_FP_REGNUM
], AARCH64_SP_REGNUM
))
531 /* Frame pointer is fp. Frame size is constant. */
532 cache
->framereg
= AARCH64_FP_REGNUM
;
533 cache
->framesize
= -regs
[AARCH64_FP_REGNUM
].k
;
535 else if (pv_is_register (regs
[AARCH64_SP_REGNUM
], AARCH64_SP_REGNUM
))
537 /* Try the stack pointer. */
538 cache
->framesize
= -regs
[AARCH64_SP_REGNUM
].k
;
539 cache
->framereg
= AARCH64_SP_REGNUM
;
543 /* We're just out of luck. We don't know where the frame is. */
544 cache
->framereg
= -1;
545 cache
->framesize
= 0;
/* Record, for every X register found saved on the abstract stack,
   its offset from the frame base.  */
548 for (i
= 0; i
< AARCH64_X_REGISTER_COUNT
; i
++)
552 if (stack
.find_reg (gdbarch
, i
, &offset
))
553 cache
->saved_regs
[i
].addr
= offset
;
/* Likewise for the D registers; their slots in SAVED_REGS live in
   the pseudo-register range (hence the gdbarch_num_regs offset).  */
556 for (i
= 0; i
< AARCH64_D_REGISTER_COUNT
; i
++)
558 int regnum
= gdbarch_num_regs (gdbarch
);
561 if (stack
.find_reg (gdbarch
, i
+ AARCH64_X_REGISTER_COUNT
,
563 cache
->saved_regs
[i
+ regnum
+ AARCH64_D0_REGNUM
].addr
= offset
;
/* Convenience overload of aarch64_analyze_prologue that reads
   instructions from the real target.  */
570 aarch64_analyze_prologue (struct gdbarch
*gdbarch
,
571 CORE_ADDR start
, CORE_ADDR limit
,
572 struct aarch64_prologue_cache
*cache
)
574 instruction_reader reader
;
576 return aarch64_analyze_prologue (gdbarch
, start
, limit
, cache
,
/* Unit tests for the prologue analyzer, run via GDB's "maintenance
   selftest" machinery.  NOTE(review): several lines (braces, member
   declarations, expected-value fragments) appear to have been lost in
   extraction; verify against upstream.  */
582 namespace selftests
{
584 /* Instruction reader from manually cooked instruction sequences. */
/* Serves instructions from a fixed uint32_t array instead of target
   memory; the SELF_CHECKs enforce aligned, in-bounds 4-byte reads.  */
586 class instruction_reader_test
: public abstract_instruction_reader
589 template<size_t SIZE
>
590 explicit instruction_reader_test (const uint32_t (&insns
)[SIZE
])
591 : m_insns (insns
), m_insns_size (SIZE
)
594 ULONGEST
read (CORE_ADDR memaddr
, int len
, enum bfd_endian byte_order
)
597 SELF_CHECK (len
== 4);
598 SELF_CHECK (memaddr
% 4 == 0);
599 SELF_CHECK (memaddr
/ 4 < m_insns_size
);
601 return m_insns
[memaddr
/ 4];
605 const uint32_t *m_insns
;
/* Exercise aarch64_analyze_prologue on three canned prologues:
   FP-based, SP-based with STR stores, and (when pauth is available)
   one beginning with a return-address signing instruction.  */
610 aarch64_analyze_prologue_test (void)
612 struct gdbarch_info info
;
614 gdbarch_info_init (&info
);
615 info
.bfd_arch_info
= bfd_scan_arch ("aarch64");
617 struct gdbarch
*gdbarch
= gdbarch_find_by_info (info
);
618 SELF_CHECK (gdbarch
!= NULL
);
620 struct aarch64_prologue_cache cache
;
621 cache
.saved_regs
= trad_frame_alloc_saved_regs (gdbarch
);
623 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
625 /* Test the simple prologue in which frame pointer is used. */
627 static const uint32_t insns
[] = {
628 0xa9af7bfd, /* stp x29, x30, [sp,#-272]! */
629 0x910003fd, /* mov x29, sp */
630 0x97ffffe6, /* bl 0x400580 */
632 instruction_reader_test
reader (insns
);
634 CORE_ADDR end
= aarch64_analyze_prologue (gdbarch
, 0, 128, &cache
, reader
);
635 SELF_CHECK (end
== 4 * 2);
637 SELF_CHECK (cache
.framereg
== AARCH64_FP_REGNUM
);
638 SELF_CHECK (cache
.framesize
== 272);
640 for (int i
= 0; i
< AARCH64_X_REGISTER_COUNT
; i
++)
642 if (i
== AARCH64_FP_REGNUM
)
643 SELF_CHECK (cache
.saved_regs
[i
].addr
== -272);
644 else if (i
== AARCH64_LR_REGNUM
)
645 SELF_CHECK (cache
.saved_regs
[i
].addr
== -264);
647 SELF_CHECK (cache
.saved_regs
[i
].addr
== -1);
650 for (int i
= 0; i
< AARCH64_D_REGISTER_COUNT
; i
++)
652 int regnum
= gdbarch_num_regs (gdbarch
);
654 SELF_CHECK (cache
.saved_regs
[i
+ regnum
+ AARCH64_D0_REGNUM
].addr
659 /* Test a prologue in which STR is used and frame pointer is not
662 static const uint32_t insns
[] = {
663 0xf81d0ff3, /* str x19, [sp, #-48]! */
664 0xb9002fe0, /* str w0, [sp, #44] */
665 0xf90013e1, /* str x1, [sp, #32]*/
666 0xfd000fe0, /* str d0, [sp, #24] */
667 0xaa0203f3, /* mov x19, x2 */
668 0xf94013e0, /* ldr x0, [sp, #32] */
670 instruction_reader_test
reader (insns
);
672 trad_frame_reset_saved_regs (gdbarch
, cache
.saved_regs
);
673 CORE_ADDR end
= aarch64_analyze_prologue (gdbarch
, 0, 128, &cache
, reader
);
675 SELF_CHECK (end
== 4 * 5);
677 SELF_CHECK (cache
.framereg
== AARCH64_SP_REGNUM
);
678 SELF_CHECK (cache
.framesize
== 48);
680 for (int i
= 0; i
< AARCH64_X_REGISTER_COUNT
; i
++)
683 SELF_CHECK (cache
.saved_regs
[i
].addr
== -16);
685 SELF_CHECK (cache
.saved_regs
[i
].addr
== -48);
687 SELF_CHECK (cache
.saved_regs
[i
].addr
== -1);
690 for (int i
= 0; i
< AARCH64_D_REGISTER_COUNT
; i
++)
692 int regnum
= gdbarch_num_regs (gdbarch
);
695 SELF_CHECK (cache
.saved_regs
[i
+ regnum
+ AARCH64_D0_REGNUM
].addr
698 SELF_CHECK (cache
.saved_regs
[i
+ regnum
+ AARCH64_D0_REGNUM
].addr
703 /* Test a prologue in which there is a return address signing instruction. */
704 if (tdep
->has_pauth ())
706 static const uint32_t insns
[] = {
707 0xd503233f, /* paciasp */
708 0xa9bd7bfd, /* stp x29, x30, [sp, #-48]! */
709 0x910003fd, /* mov x29, sp */
710 0xf801c3f3, /* str x19, [sp, #28] */
711 0xb9401fa0, /* ldr x19, [x29, #28] */
713 instruction_reader_test
reader (insns
);
715 trad_frame_reset_saved_regs (gdbarch
, cache
.saved_regs
);
716 CORE_ADDR end
= aarch64_analyze_prologue (gdbarch
, 0, 128, &cache
,
719 SELF_CHECK (end
== 4 * 4);
720 SELF_CHECK (cache
.framereg
== AARCH64_FP_REGNUM
);
721 SELF_CHECK (cache
.framesize
== 48);
723 for (int i
= 0; i
< AARCH64_X_REGISTER_COUNT
; i
++)
726 SELF_CHECK (cache
.saved_regs
[i
].addr
== -20);
727 else if (i
== AARCH64_FP_REGNUM
)
728 SELF_CHECK (cache
.saved_regs
[i
].addr
== -48);
729 else if (i
== AARCH64_LR_REGNUM
)
730 SELF_CHECK (cache
.saved_regs
[i
].addr
== -40);
732 SELF_CHECK (cache
.saved_regs
[i
].addr
== -1);
735 if (tdep
->has_pauth ())
737 SELF_CHECK (trad_frame_value_p (cache
.saved_regs
,
738 tdep
->pauth_ra_state_regnum
));
739 SELF_CHECK (cache
.saved_regs
[tdep
->pauth_ra_state_regnum
].addr
== 1);
743 } // namespace selftests
744 #endif /* GDB_SELF_TEST */
746 /* Implement the "skip_prologue" gdbarch method. */
/* Return the address just past the prologue of the function containing
   PC, preferring line-table (SAL) information and falling back to
   instruction analysis bounded by LIMIT_PC.  */
749 aarch64_skip_prologue (struct gdbarch
*gdbarch
, CORE_ADDR pc
)
751 CORE_ADDR func_addr
, limit_pc
;
753 /* See if we can determine the end of the prologue via the symbol
754 table. If so, then return either PC, or the PC after the
755 prologue, whichever is greater. */
756 if (find_pc_partial_function (pc
, NULL
, &func_addr
, NULL
))
758 CORE_ADDR post_prologue_pc
759 = skip_prologue_using_sal (gdbarch
, func_addr
);
761 if (post_prologue_pc
!= 0)
762 return std::max (pc
, post_prologue_pc
);
765 /* Can't determine prologue from the symbol table, need to examine
768 /* Find an upper limit on the function prologue using the debug
769 information. If the debug information could not be used to
770 provide that bound, then use an arbitrary large number as the
772 limit_pc
= skip_prologue_using_sal (gdbarch
, pc
);
774 limit_pc
= pc
+ 128; /* Magic. */
776 /* Try disassembling prologue. */
777 return aarch64_analyze_prologue (gdbarch
, pc
, limit_pc
, NULL
);
780 /* Scan the function prologue for THIS_FRAME and populate the prologue
/* cache CACHE.  Falls back to assuming a bare FP-based frame (fp/lr
   saved at offsets 0/8) when no symbolic function bounds are
   available.  NOTE(review): some lines appear lost in extraction;
   verify against upstream.  */
784 aarch64_scan_prologue (struct frame_info
*this_frame
,
785 struct aarch64_prologue_cache
*cache
)
787 CORE_ADDR block_addr
= get_frame_address_in_block (this_frame
);
788 CORE_ADDR prologue_start
;
789 CORE_ADDR prologue_end
;
790 CORE_ADDR prev_pc
= get_frame_pc (this_frame
);
791 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
793 cache
->prev_pc
= prev_pc
;
795 /* Assume we do not find a frame. */
796 cache
->framereg
= -1;
797 cache
->framesize
= 0;
799 if (find_pc_partial_function (block_addr
, NULL
, &prologue_start
,
802 struct symtab_and_line sal
= find_pc_line (prologue_start
, 0);
806 /* No line info so use the current PC. */
807 prologue_end
= prev_pc
;
809 else if (sal
.end
< prologue_end
)
811 /* The next line begins after the function end. */
812 prologue_end
= sal
.end
;
/* Never scan past the PC we are actually stopped at.  */
815 prologue_end
= std::min (prologue_end
, prev_pc
)
;
816 aarch64_analyze_prologue (gdbarch
, prologue_start
, prologue_end
, cache
);
/* No function bounds: assume a conventional frame with fp (x29) and
   lr (x30) stored at the frame base.  */
822 frame_loc
= get_frame_register_unsigned (this_frame
, AARCH64_FP_REGNUM
);
826 cache
->framereg
= AARCH64_FP_REGNUM
;
827 cache
->framesize
= 16;
828 cache
->saved_regs
[29].addr
= 0;
829 cache
->saved_regs
[30].addr
= 8;
833 /* Fill in *CACHE with information about the prologue of *THIS_FRAME. This
834 function may throw an exception if the inferior's registers or memory is
/* not available.  Computes PREV_SP from the unwound frame register
   plus the frame size, converts saved-register offsets to absolute
   addresses, and marks the cache available.  */
838 aarch64_make_prologue_cache_1 (struct frame_info
*this_frame
,
839 struct aarch64_prologue_cache
*cache
)
841 CORE_ADDR unwound_fp
;
844 aarch64_scan_prologue (this_frame
, cache
);
846 if (cache
->framereg
== -1)
849 unwound_fp
= get_frame_register_unsigned (this_frame
, cache
->framereg
);
853 cache
->prev_sp
= unwound_fp
+ cache
->framesize
;
855 /* Calculate actual addresses of saved registers using offsets
856 determined by aarch64_analyze_prologue. */
857 for (reg
= 0; reg
< gdbarch_num_regs (get_frame_arch (this_frame
)); reg
++)
858 if (trad_frame_addr_p (cache
->saved_regs
, reg
))
859 cache
->saved_regs
[reg
].addr
+= cache
->prev_sp
;
861 cache
->func
= get_frame_func (this_frame
);
863 cache
->available_p
= 1;
866 /* Allocate and fill in *THIS_CACHE with information about the prologue of
867 *THIS_FRAME. Do not do this if *THIS_CACHE was already allocated.
868 Return a pointer to the current aarch64_prologue_cache in
/* *THIS_CACHE.  A NOT_AVAILABLE_ERROR while filling the cache is
   swallowed (the cache stays marked unavailable); other errors
   propagate.  */
871 static struct aarch64_prologue_cache
*
872 aarch64_make_prologue_cache (struct frame_info
*this_frame
, void **this_cache
)
874 struct aarch64_prologue_cache
*cache
;
876 if (*this_cache
!= NULL
)
877 return (struct aarch64_prologue_cache
*) *this_cache
;
879 cache
= FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache
);
880 cache
->saved_regs
= trad_frame_alloc_saved_regs (this_frame
);
885 aarch64_make_prologue_cache_1 (this_frame
, cache
);
887 catch (const gdb_exception_error
&ex
)
889 if (ex
.error
!= NOT_AVAILABLE_ERROR
)
896 /* Implement the "stop_reason" frame_unwind method. */
/* Unavailable target state -> UNWIND_UNAVAILABLE; PC at or below the
   arch's lowest_pc (e.g. "_start"), or a zero PREV_SP, terminates the
   backtrace with UNWIND_OUTERMOST.  */
898 static enum unwind_stop_reason
899 aarch64_prologue_frame_unwind_stop_reason (struct frame_info
*this_frame
,
902 struct aarch64_prologue_cache
*cache
903 = aarch64_make_prologue_cache (this_frame
, this_cache
);
905 if (!cache
->available_p
)
906 return UNWIND_UNAVAILABLE
;
908 /* Halt the backtrace at "_start". */
909 if (cache
->prev_pc
<= gdbarch_tdep (get_frame_arch (this_frame
))->lowest_pc
)
910 return UNWIND_OUTERMOST
;
912 /* We've hit a wall, stop. */
913 if (cache
->prev_sp
== 0)
914 return UNWIND_OUTERMOST
;
916 return UNWIND_NO_REASON
;
919 /* Our frame ID for a normal frame is the current function's starting
920 PC and the caller's SP when we were called. */
/* Implement the "this_id" frame_unwind method.  */
923 aarch64_prologue_this_id (struct frame_info
*this_frame
,
924 void **this_cache
, struct frame_id
*this_id
)
926 struct aarch64_prologue_cache
*cache
927 = aarch64_make_prologue_cache (this_frame
, this_cache
);
929 if (!cache
->available_p
)
930 *this_id
= frame_id_build_unavailable_stack (cache
->func
);
932 *this_id
= frame_id_build (cache
->prev_sp
, cache
->func
);
935 /* Implement the "prev_register" frame_unwind method. */
/* PC is synthesized from the saved LR (with PAC bits stripped when the
   RA_STATE pseudo register says the return address was signed); SP is
   the reconstructed PREV_SP; everything else comes from the
   saved-register table.  */
937 static struct value
*
938 aarch64_prologue_prev_register (struct frame_info
*this_frame
,
939 void **this_cache
, int prev_regnum
)
941 struct aarch64_prologue_cache
*cache
942 = aarch64_make_prologue_cache (this_frame
, this_cache
);
944 /* If we are asked to unwind the PC, then we need to return the LR
945 instead. The prologue may save PC, but it will point into this
946 frame's prologue, not the next frame's resume location. */
947 if (prev_regnum
== AARCH64_PC_REGNUM
)
950 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
951 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
953 lr
= frame_unwind_register_unsigned (this_frame
, AARCH64_LR_REGNUM
);
955 if (tdep
->has_pauth ()
956 && trad_frame_value_p (cache
->saved_regs
,
957 tdep
->pauth_ra_state_regnum
))
958 lr
= aarch64_frame_unmask_lr (tdep
, this_frame
, lr
);
960 return frame_unwind_got_constant (this_frame
, prev_regnum
, lr
);
963 /* SP is generally not saved to the stack, but this frame is
964 identified by the next frame's stack pointer at the time of the
965 call. The value was already reconstructed into PREV_SP. */
978 if (prev_regnum
== AARCH64_SP_REGNUM
)
979 return frame_unwind_got_constant (this_frame
, prev_regnum
,
982 return trad_frame_get_prev_register (this_frame
, cache
->saved_regs
,
986 /* AArch64 prologue unwinder. */
/* frame_unwind vtable wiring the stop_reason/this_id/prev_register
   methods defined above, using the default sniffer.  */
987 struct frame_unwind aarch64_prologue_unwind
=
990 aarch64_prologue_frame_unwind_stop_reason
,
991 aarch64_prologue_this_id
,
992 aarch64_prologue_prev_register
,
994 default_frame_sniffer
997 /* Allocate and fill in *THIS_CACHE with information about the prologue of
998 *THIS_FRAME. Do not do this if *THIS_CACHE was already allocated.
999 Return a pointer to the current aarch64_prologue_cache in
/* *THIS_CACHE.  Stub frames (e.g. PLT entries) have no prologue to
   scan; PREV_SP/PREV_PC are taken directly from the current frame.
   NOT_AVAILABLE_ERROR is swallowed; other errors propagate.  */
1002 static struct aarch64_prologue_cache
*
1003 aarch64_make_stub_cache (struct frame_info
*this_frame
, void **this_cache
)
1005 struct aarch64_prologue_cache
*cache
;
1007 if (*this_cache
!= NULL
)
1008 return (struct aarch64_prologue_cache
*) *this_cache
;
1010 cache
= FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache
);
1011 cache
->saved_regs
= trad_frame_alloc_saved_regs (this_frame
);
1012 *this_cache
= cache
;
1016 cache
->prev_sp
= get_frame_register_unsigned (this_frame
,
1018 cache
->prev_pc
= get_frame_pc (this_frame
);
1019 cache
->available_p
= 1;
1021 catch (const gdb_exception_error
&ex
)
1023 if (ex
.error
!= NOT_AVAILABLE_ERROR
)
1030 /* Implement the "stop_reason" frame_unwind method. */
/* Stub-frame variant: only distinguishes unavailable target state.  */
1032 static enum unwind_stop_reason
1033 aarch64_stub_frame_unwind_stop_reason (struct frame_info
*this_frame
,
1036 struct aarch64_prologue_cache
*cache
1037 = aarch64_make_stub_cache (this_frame
, this_cache
);
1039 if (!cache
->available_p
)
1040 return UNWIND_UNAVAILABLE
;
1042 return UNWIND_NO_REASON
;
1045 /* Our frame ID for a stub frame is the current SP and LR. */
/* Implement the "this_id" frame_unwind method for stub frames.  */
1048 aarch64_stub_this_id (struct frame_info
*this_frame
,
1049 void **this_cache
, struct frame_id
*this_id
)
1051 struct aarch64_prologue_cache
*cache
1052 = aarch64_make_stub_cache (this_frame
, this_cache
);
1054 if (cache
->available_p
)
1055 *this_id
= frame_id_build (cache
->prev_sp
, cache
->prev_pc
);
1057 *this_id
= frame_id_build_unavailable_stack (cache
->prev_pc
);
1060 /* Implement the "sniffer" frame_unwind method. */
/* Claim the frame when the PC is in a PLT section, or when the code at
   the PC cannot be read (so the prologue unwinder would fail).  */
1063 aarch64_stub_unwind_sniffer (const struct frame_unwind
*self
,
1064 struct frame_info
*this_frame
,
1065 void **this_prologue_cache
)
1067 CORE_ADDR addr_in_block
;
1070 addr_in_block
= get_frame_address_in_block (this_frame
);
1071 if (in_plt_section (addr_in_block
)
1072 /* We also use the stub winder if the target memory is unreadable
1073 to avoid having the prologue unwinder trying to read it. */
1074 || target_read_memory (get_frame_pc (this_frame
), dummy
, 4) != 0)
1080 /* AArch64 stub unwinder. */
/* frame_unwind vtable for stub (PLT / unreadable-code) frames; shares
   prev_register with the prologue unwinder.  */
1081 struct frame_unwind aarch64_stub_unwind
=
1084 aarch64_stub_frame_unwind_stop_reason
,
1085 aarch64_stub_this_id
,
1086 aarch64_prologue_prev_register
,
1088 aarch64_stub_unwind_sniffer
1091 /* Return the frame base address of *THIS_FRAME. */
/* The frame base is the caller's SP minus the frame size (see the
   aarch64_prologue_cache comments).  */
1094 aarch64_normal_frame_base (struct frame_info
*this_frame
, void **this_cache
)
1096 struct aarch64_prologue_cache
*cache
1097 = aarch64_make_prologue_cache (this_frame
, this_cache
);
1099 return cache
->prev_sp
- cache
->framesize
;
1102 /* AArch64 default frame base information. */
/* frame_base vtable: the same address serves as frame base, locals
   base and args base.  */
1103 struct frame_base aarch64_normal_base
=
1105 &aarch64_prologue_unwind
,
1106 aarch64_normal_frame_base
,
1107 aarch64_normal_frame_base
,
1108 aarch64_normal_frame_base
1111 /* Return the value of the REGNUM register in the previous frame of
/* THIS_FRAME, for the DWARF unwinder.  Only AARCH64_PC_REGNUM is
   handled: the PC is recovered from the saved LR with any PAC
   signature bits stripped; any other REGNUM is an internal error.  */
1114 static struct value
*
1115 aarch64_dwarf2_prev_register (struct frame_info
*this_frame
,
1116 void **this_cache
, int regnum
)
1118 struct gdbarch_tdep
*tdep
= gdbarch_tdep (get_frame_arch (this_frame
));
1123 case AARCH64_PC_REGNUM
:
1124 lr
= frame_unwind_register_unsigned (this_frame
, AARCH64_LR_REGNUM
);
1125 lr
= aarch64_frame_unmask_lr (tdep
, this_frame
, lr
);
1126 return frame_unwind_got_constant (this_frame
, regnum
, lr
);
1129 internal_error (__FILE__
, __LINE__
,
1130 _("Unexpected register %d"), regnum
);
/* One-byte DWARF expressions used as the saved-value expression for
   the RA_STATE pseudo register (0 = not mangled, 1 = mangled); their
   addresses are also compared to detect the current state in
   aarch64_execute_dwarf_cfa_vendor_op below.  */
1134 static const unsigned char op_lit0
= DW_OP_lit0
;
1135 static const unsigned char op_lit1
= DW_OP_lit1
;
1137 /* Implement the "init_reg" dwarf2_frame_ops method. */
/* Set the default DWARF unwind rule for REGNUM: PC is computed via
   aarch64_dwarf2_prev_register, SP is the CFA, RA_STATE defaults to
   the constant 0 (not mangled), and the pauth mask registers keep
   their value across frames.  */
1140 aarch64_dwarf2_frame_init_reg (struct gdbarch
*gdbarch
, int regnum
,
1141 struct dwarf2_frame_state_reg
*reg
,
1142 struct frame_info
*this_frame
)
1144 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1148 case AARCH64_PC_REGNUM
:
1149 reg
->how
= DWARF2_FRAME_REG_FN
;
1150 reg
->loc
.fn
= aarch64_dwarf2_prev_register
;
1153 case AARCH64_SP_REGNUM
:
1154 reg
->how
= DWARF2_FRAME_REG_CFA
;
1158 /* Init pauth registers. */
1159 if (tdep
->has_pauth ())
1161 if (regnum
== tdep
->pauth_ra_state_regnum
)
1163 /* Initialize RA_STATE to zero. */
1164 reg
->how
= DWARF2_FRAME_REG_SAVED_VAL_EXP
;
1165 reg
->loc
.exp
.start
= &op_lit0
;
1166 reg
->loc
.exp
.len
= 1;
1169 else if (regnum
== AARCH64_PAUTH_DMASK_REGNUM (tdep
->pauth_reg_base
)
1170 || regnum
== AARCH64_PAUTH_CMASK_REGNUM (tdep
->pauth_reg_base
))
1172 reg
->how
= DWARF2_FRAME_REG_SAME_VALUE
;
1178 /* Implement the execute_dwarf_cfa_vendor_op method. */
/* Handle the vendor CFA opcode DW_CFA_AARCH64_negate_ra_state by
   toggling the RA_STATE column between the one-byte DW_OP_lit0 and
   DW_OP_lit1 expressions (a nop when pauth is absent).  The state is
   read back by comparing the expression's start pointer against
   &op_lit0/&op_lit1.  */
1181 aarch64_execute_dwarf_cfa_vendor_op (struct gdbarch
*gdbarch
, gdb_byte op
,
1182 struct dwarf2_frame_state
*fs
)
1184 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1185 struct dwarf2_frame_state_reg
*ra_state
;
1187 if (op
== DW_CFA_AARCH64_negate_ra_state
)
1189 /* On systems without pauth, treat as a nop. */
1190 if (!tdep
->has_pauth ())
1193 /* Allocate RA_STATE column if it's not allocated yet. */
1194 fs
->regs
.alloc_regs (AARCH64_DWARF_PAUTH_RA_STATE
+ 1);
1196 /* Toggle the status of RA_STATE between 0 and 1. */
1197 ra_state
= &(fs
->regs
.reg
[AARCH64_DWARF_PAUTH_RA_STATE
]);
1198 ra_state
->how
= DWARF2_FRAME_REG_SAVED_VAL_EXP
;
1200 if (ra_state
->loc
.exp
.start
== nullptr
1201 || ra_state
->loc
.exp
.start
== &op_lit0
)
1202 ra_state
->loc
.exp
.start
= &op_lit1
;
1204 ra_state
->loc
.exp
.start
= &op_lit0
;
1206 ra_state
->loc
.exp
.len
= 1;
1214 /* When arguments must be pushed onto the stack, they go on in reverse
1215 order. The code below implements a FILO (stack) to do this. */
1219 /* Value to pass on stack. It can be NULL if this item is for stack
1221 const gdb_byte
*data
;
1223 /* Size in bytes of value to pass on stack. */
1227 /* Implement the gdbarch type alignment method, overrides the generic
1228 alignment algorithm for anything that is aarch64 specific. */
1231 aarch64_type_align (gdbarch
*gdbarch
, struct type
*t
)
1233 t
= check_typedef (t
);
1234 if (TYPE_CODE (t
) == TYPE_CODE_ARRAY
&& TYPE_VECTOR (t
))
1236 /* Use the natural alignment for vector types (the same for
1237 scalar type), but the maximum alignment is 128-bit. */
1238 if (TYPE_LENGTH (t
) > 16)
1241 return TYPE_LENGTH (t
);
1244 /* Allow the common code to calculate the alignment. */
1248 /* Worker function for aapcs_is_vfp_call_or_return_candidate.
1250 Return the number of register required, or -1 on failure.
1252 When encountering a base element, if FUNDAMENTAL_TYPE is not set then set it
1253 to the element, else fail if the type of this element does not match the
1257 aapcs_is_vfp_call_or_return_candidate_1 (struct type
*type
,
1258 struct type
**fundamental_type
)
1260 if (type
== nullptr)
1263 switch (TYPE_CODE (type
))
1266 if (TYPE_LENGTH (type
) > 16)
1269 if (*fundamental_type
== nullptr)
1270 *fundamental_type
= type
;
1271 else if (TYPE_LENGTH (type
) != TYPE_LENGTH (*fundamental_type
)
1272 || TYPE_CODE (type
) != TYPE_CODE (*fundamental_type
))
1277 case TYPE_CODE_COMPLEX
:
1279 struct type
*target_type
= check_typedef (TYPE_TARGET_TYPE (type
));
1280 if (TYPE_LENGTH (target_type
) > 16)
1283 if (*fundamental_type
== nullptr)
1284 *fundamental_type
= target_type
;
1285 else if (TYPE_LENGTH (target_type
) != TYPE_LENGTH (*fundamental_type
)
1286 || TYPE_CODE (target_type
) != TYPE_CODE (*fundamental_type
))
1292 case TYPE_CODE_ARRAY
:
1294 if (TYPE_VECTOR (type
))
1296 if (TYPE_LENGTH (type
) != 8 && TYPE_LENGTH (type
) != 16)
1299 if (*fundamental_type
== nullptr)
1300 *fundamental_type
= type
;
1301 else if (TYPE_LENGTH (type
) != TYPE_LENGTH (*fundamental_type
)
1302 || TYPE_CODE (type
) != TYPE_CODE (*fundamental_type
))
1309 struct type
*target_type
= TYPE_TARGET_TYPE (type
);
1310 int count
= aapcs_is_vfp_call_or_return_candidate_1
1311 (target_type
, fundamental_type
);
1316 count
*= (TYPE_LENGTH (type
) / TYPE_LENGTH (target_type
));
1321 case TYPE_CODE_STRUCT
:
1322 case TYPE_CODE_UNION
:
1326 for (int i
= 0; i
< TYPE_NFIELDS (type
); i
++)
1328 /* Ignore any static fields. */
1329 if (field_is_static (&TYPE_FIELD (type
, i
)))
1332 struct type
*member
= check_typedef (TYPE_FIELD_TYPE (type
, i
));
1334 int sub_count
= aapcs_is_vfp_call_or_return_candidate_1
1335 (member
, fundamental_type
);
1336 if (sub_count
== -1)
1341 /* Ensure there is no padding between the fields (allowing for empty
1342 zero length structs) */
1343 int ftype_length
= (*fundamental_type
== nullptr)
1344 ? 0 : TYPE_LENGTH (*fundamental_type
);
1345 if (count
* ftype_length
!= TYPE_LENGTH (type
))
1358 /* Return true if an argument, whose type is described by TYPE, can be passed or
1359 returned in simd/fp registers, providing enough parameter passing registers
1360 are available. This is as described in the AAPCS64.
1362 Upon successful return, *COUNT returns the number of needed registers,
1363 *FUNDAMENTAL_TYPE contains the type of those registers.
1365 Candidate as per the AAPCS64 5.4.2.C is either a:
1368 - HFA (Homogeneous Floating-point Aggregate, 4.3.5.1). A Composite type where
1369 all the members are floats and has at most 4 members.
1370 - HVA (Homogeneous Short-vector Aggregate, 4.3.5.2). A Composite type where
1371 all the members are short vectors and has at most 4 members.
1374 Note that HFAs and HVAs can include nested structures and arrays. */
1377 aapcs_is_vfp_call_or_return_candidate (struct type
*type
, int *count
,
1378 struct type
**fundamental_type
)
1380 if (type
== nullptr)
1383 *fundamental_type
= nullptr;
1385 int ag_count
= aapcs_is_vfp_call_or_return_candidate_1 (type
,
1388 if (ag_count
> 0 && ag_count
<= HA_MAX_NUM_FLDS
)
1397 /* AArch64 function call information structure. */
1398 struct aarch64_call_info
1400 /* the current argument number. */
1401 unsigned argnum
= 0;
1403 /* The next general purpose register number, equivalent to NGRN as
1404 described in the AArch64 Procedure Call Standard. */
1407 /* The next SIMD and floating point register number, equivalent to
1408 NSRN as described in the AArch64 Procedure Call Standard. */
1411 /* The next stacked argument address, equivalent to NSAA as
1412 described in the AArch64 Procedure Call Standard. */
1415 /* Stack item vector. */
1416 std::vector
<stack_item_t
> si
;
1419 /* Pass a value in a sequence of consecutive X registers. The caller
1420 is responsbile for ensuring sufficient registers are available. */
1423 pass_in_x (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
1424 struct aarch64_call_info
*info
, struct type
*type
,
1427 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1428 int len
= TYPE_LENGTH (type
);
1429 enum type_code typecode
= TYPE_CODE (type
);
1430 int regnum
= AARCH64_X0_REGNUM
+ info
->ngrn
;
1431 const bfd_byte
*buf
= value_contents (arg
);
1437 int partial_len
= len
< X_REGISTER_SIZE
? len
: X_REGISTER_SIZE
;
1438 CORE_ADDR regval
= extract_unsigned_integer (buf
, partial_len
,
1442 /* Adjust sub-word struct/union args when big-endian. */
1443 if (byte_order
== BFD_ENDIAN_BIG
1444 && partial_len
< X_REGISTER_SIZE
1445 && (typecode
== TYPE_CODE_STRUCT
|| typecode
== TYPE_CODE_UNION
))
1446 regval
<<= ((X_REGISTER_SIZE
- partial_len
) * TARGET_CHAR_BIT
);
1450 debug_printf ("arg %d in %s = 0x%s\n", info
->argnum
,
1451 gdbarch_register_name (gdbarch
, regnum
),
1452 phex (regval
, X_REGISTER_SIZE
));
1454 regcache_cooked_write_unsigned (regcache
, regnum
, regval
);
1461 /* Attempt to marshall a value in a V register. Return 1 if
1462 successful, or 0 if insufficient registers are available. This
1463 function, unlike the equivalent pass_in_x() function does not
1464 handle arguments spread across multiple registers. */
1467 pass_in_v (struct gdbarch
*gdbarch
,
1468 struct regcache
*regcache
,
1469 struct aarch64_call_info
*info
,
1470 int len
, const bfd_byte
*buf
)
1474 int regnum
= AARCH64_V0_REGNUM
+ info
->nsrn
;
1475 /* Enough space for a full vector register. */
1476 gdb_byte reg
[register_size (gdbarch
, regnum
)];
1477 gdb_assert (len
<= sizeof (reg
));
1482 memset (reg
, 0, sizeof (reg
));
1483 /* PCS C.1, the argument is allocated to the least significant
1484 bits of V register. */
1485 memcpy (reg
, buf
, len
);
1486 regcache
->cooked_write (regnum
, reg
);
1490 debug_printf ("arg %d in %s\n", info
->argnum
,
1491 gdbarch_register_name (gdbarch
, regnum
));
1499 /* Marshall an argument onto the stack. */
1502 pass_on_stack (struct aarch64_call_info
*info
, struct type
*type
,
1505 const bfd_byte
*buf
= value_contents (arg
);
1506 int len
= TYPE_LENGTH (type
);
1512 align
= type_align (type
);
1514 /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
1515 Natural alignment of the argument's type. */
1516 align
= align_up (align
, 8);
1518 /* The AArch64 PCS requires at most doubleword alignment. */
1524 debug_printf ("arg %d len=%d @ sp + %d\n", info
->argnum
, len
,
1530 info
->si
.push_back (item
);
1533 if (info
->nsaa
& (align
- 1))
1535 /* Push stack alignment padding. */
1536 int pad
= align
- (info
->nsaa
& (align
- 1));
1541 info
->si
.push_back (item
);
1546 /* Marshall an argument into a sequence of one or more consecutive X
1547 registers or, if insufficient X registers are available then onto
1551 pass_in_x_or_stack (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
1552 struct aarch64_call_info
*info
, struct type
*type
,
1555 int len
= TYPE_LENGTH (type
);
1556 int nregs
= (len
+ X_REGISTER_SIZE
- 1) / X_REGISTER_SIZE
;
1558 /* PCS C.13 - Pass in registers if we have enough spare */
1559 if (info
->ngrn
+ nregs
<= 8)
1561 pass_in_x (gdbarch
, regcache
, info
, type
, arg
);
1562 info
->ngrn
+= nregs
;
1567 pass_on_stack (info
, type
, arg
);
1571 /* Pass a value, which is of type arg_type, in a V register. Assumes value is a
1572 aapcs_is_vfp_call_or_return_candidate and there are enough spare V
1573 registers. A return value of false is an error state as the value will have
1574 been partially passed to the stack. */
1576 pass_in_v_vfp_candidate (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
1577 struct aarch64_call_info
*info
, struct type
*arg_type
,
1580 switch (TYPE_CODE (arg_type
))
1583 return pass_in_v (gdbarch
, regcache
, info
, TYPE_LENGTH (arg_type
),
1584 value_contents (arg
));
1587 case TYPE_CODE_COMPLEX
:
1589 const bfd_byte
*buf
= value_contents (arg
);
1590 struct type
*target_type
= check_typedef (TYPE_TARGET_TYPE (arg_type
));
1592 if (!pass_in_v (gdbarch
, regcache
, info
, TYPE_LENGTH (target_type
),
1596 return pass_in_v (gdbarch
, regcache
, info
, TYPE_LENGTH (target_type
),
1597 buf
+ TYPE_LENGTH (target_type
));
1600 case TYPE_CODE_ARRAY
:
1601 if (TYPE_VECTOR (arg_type
))
1602 return pass_in_v (gdbarch
, regcache
, info
, TYPE_LENGTH (arg_type
),
1603 value_contents (arg
));
1606 case TYPE_CODE_STRUCT
:
1607 case TYPE_CODE_UNION
:
1608 for (int i
= 0; i
< TYPE_NFIELDS (arg_type
); i
++)
1610 /* Don't include static fields. */
1611 if (field_is_static (&TYPE_FIELD (arg_type
, i
)))
1614 struct value
*field
= value_primitive_field (arg
, 0, i
, arg_type
);
1615 struct type
*field_type
= check_typedef (value_type (field
));
1617 if (!pass_in_v_vfp_candidate (gdbarch
, regcache
, info
, field_type
,
1628 /* Implement the "push_dummy_call" gdbarch method. */
1631 aarch64_push_dummy_call (struct gdbarch
*gdbarch
, struct value
*function
,
1632 struct regcache
*regcache
, CORE_ADDR bp_addr
,
1634 struct value
**args
, CORE_ADDR sp
,
1635 function_call_return_method return_method
,
1636 CORE_ADDR struct_addr
)
1639 struct aarch64_call_info info
;
1641 /* We need to know what the type of the called function is in order
1642 to determine the number of named/anonymous arguments for the
1643 actual argument placement, and the return type in order to handle
1644 return value correctly.
1646 The generic code above us views the decision of return in memory
1647 or return in registers as a two stage processes. The language
1648 handler is consulted first and may decide to return in memory (eg
1649 class with copy constructor returned by value), this will cause
1650 the generic code to allocate space AND insert an initial leading
1653 If the language code does not decide to pass in memory then the
1654 target code is consulted.
1656 If the language code decides to pass in memory we want to move
1657 the pointer inserted as the initial argument from the argument
1658 list and into X8, the conventional AArch64 struct return pointer
1661 /* Set the return address. For the AArch64, the return breakpoint
1662 is always at BP_ADDR. */
1663 regcache_cooked_write_unsigned (regcache
, AARCH64_LR_REGNUM
, bp_addr
);
1665 /* If we were given an initial argument for the return slot, lose it. */
1666 if (return_method
== return_method_hidden_param
)
1672 /* The struct_return pointer occupies X8. */
1673 if (return_method
!= return_method_normal
)
1677 debug_printf ("struct return in %s = 0x%s\n",
1678 gdbarch_register_name (gdbarch
,
1679 AARCH64_STRUCT_RETURN_REGNUM
),
1680 paddress (gdbarch
, struct_addr
));
1682 regcache_cooked_write_unsigned (regcache
, AARCH64_STRUCT_RETURN_REGNUM
,
1686 for (argnum
= 0; argnum
< nargs
; argnum
++)
1688 struct value
*arg
= args
[argnum
];
1689 struct type
*arg_type
, *fundamental_type
;
1692 arg_type
= check_typedef (value_type (arg
));
1693 len
= TYPE_LENGTH (arg_type
);
1695 /* If arg can be passed in v registers as per the AAPCS64, then do so if
1696 if there are enough spare registers. */
1697 if (aapcs_is_vfp_call_or_return_candidate (arg_type
, &elements
,
1700 if (info
.nsrn
+ elements
<= 8)
1702 /* We know that we have sufficient registers available therefore
1703 this will never need to fallback to the stack. */
1704 if (!pass_in_v_vfp_candidate (gdbarch
, regcache
, &info
, arg_type
,
1706 gdb_assert_not_reached ("Failed to push args");
1711 pass_on_stack (&info
, arg_type
, arg
);
1716 switch (TYPE_CODE (arg_type
))
1719 case TYPE_CODE_BOOL
:
1720 case TYPE_CODE_CHAR
:
1721 case TYPE_CODE_RANGE
:
1722 case TYPE_CODE_ENUM
:
1725 /* Promote to 32 bit integer. */
1726 if (TYPE_UNSIGNED (arg_type
))
1727 arg_type
= builtin_type (gdbarch
)->builtin_uint32
;
1729 arg_type
= builtin_type (gdbarch
)->builtin_int32
;
1730 arg
= value_cast (arg_type
, arg
);
1732 pass_in_x_or_stack (gdbarch
, regcache
, &info
, arg_type
, arg
);
1735 case TYPE_CODE_STRUCT
:
1736 case TYPE_CODE_ARRAY
:
1737 case TYPE_CODE_UNION
:
1740 /* PCS B.7 Aggregates larger than 16 bytes are passed by
1741 invisible reference. */
1743 /* Allocate aligned storage. */
1744 sp
= align_down (sp
- len
, 16);
1746 /* Write the real data into the stack. */
1747 write_memory (sp
, value_contents (arg
), len
);
1749 /* Construct the indirection. */
1750 arg_type
= lookup_pointer_type (arg_type
);
1751 arg
= value_from_pointer (arg_type
, sp
);
1752 pass_in_x_or_stack (gdbarch
, regcache
, &info
, arg_type
, arg
);
1755 /* PCS C.15 / C.18 multiple values pass. */
1756 pass_in_x_or_stack (gdbarch
, regcache
, &info
, arg_type
, arg
);
1760 pass_in_x_or_stack (gdbarch
, regcache
, &info
, arg_type
, arg
);
1765 /* Make sure stack retains 16 byte alignment. */
1767 sp
-= 16 - (info
.nsaa
& 15);
1769 while (!info
.si
.empty ())
1771 const stack_item_t
&si
= info
.si
.back ();
1774 if (si
.data
!= NULL
)
1775 write_memory (sp
, si
.data
, si
.len
);
1776 info
.si
.pop_back ();
1779 /* Finally, update the SP register. */
1780 regcache_cooked_write_unsigned (regcache
, AARCH64_SP_REGNUM
, sp
);
1785 /* Implement the "frame_align" gdbarch method. */
1788 aarch64_frame_align (struct gdbarch
*gdbarch
, CORE_ADDR sp
)
1790 /* Align the stack to sixteen bytes. */
1791 return sp
& ~(CORE_ADDR
) 15;
1794 /* Return the type for an AdvSISD Q register. */
1796 static struct type
*
1797 aarch64_vnq_type (struct gdbarch
*gdbarch
)
1799 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1801 if (tdep
->vnq_type
== NULL
)
1806 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnq",
1809 elem
= builtin_type (gdbarch
)->builtin_uint128
;
1810 append_composite_type_field (t
, "u", elem
);
1812 elem
= builtin_type (gdbarch
)->builtin_int128
;
1813 append_composite_type_field (t
, "s", elem
);
1818 return tdep
->vnq_type
;
1821 /* Return the type for an AdvSISD D register. */
1823 static struct type
*
1824 aarch64_vnd_type (struct gdbarch
*gdbarch
)
1826 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1828 if (tdep
->vnd_type
== NULL
)
1833 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnd",
1836 elem
= builtin_type (gdbarch
)->builtin_double
;
1837 append_composite_type_field (t
, "f", elem
);
1839 elem
= builtin_type (gdbarch
)->builtin_uint64
;
1840 append_composite_type_field (t
, "u", elem
);
1842 elem
= builtin_type (gdbarch
)->builtin_int64
;
1843 append_composite_type_field (t
, "s", elem
);
1848 return tdep
->vnd_type
;
1851 /* Return the type for an AdvSISD S register. */
1853 static struct type
*
1854 aarch64_vns_type (struct gdbarch
*gdbarch
)
1856 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1858 if (tdep
->vns_type
== NULL
)
1863 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vns",
1866 elem
= builtin_type (gdbarch
)->builtin_float
;
1867 append_composite_type_field (t
, "f", elem
);
1869 elem
= builtin_type (gdbarch
)->builtin_uint32
;
1870 append_composite_type_field (t
, "u", elem
);
1872 elem
= builtin_type (gdbarch
)->builtin_int32
;
1873 append_composite_type_field (t
, "s", elem
);
1878 return tdep
->vns_type
;
1881 /* Return the type for an AdvSISD H register. */
1883 static struct type
*
1884 aarch64_vnh_type (struct gdbarch
*gdbarch
)
1886 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1888 if (tdep
->vnh_type
== NULL
)
1893 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnh",
1896 elem
= builtin_type (gdbarch
)->builtin_half
;
1897 append_composite_type_field (t
, "f", elem
);
1899 elem
= builtin_type (gdbarch
)->builtin_uint16
;
1900 append_composite_type_field (t
, "u", elem
);
1902 elem
= builtin_type (gdbarch
)->builtin_int16
;
1903 append_composite_type_field (t
, "s", elem
);
1908 return tdep
->vnh_type
;
1911 /* Return the type for an AdvSISD B register. */
1913 static struct type
*
1914 aarch64_vnb_type (struct gdbarch
*gdbarch
)
1916 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1918 if (tdep
->vnb_type
== NULL
)
1923 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnb",
1926 elem
= builtin_type (gdbarch
)->builtin_uint8
;
1927 append_composite_type_field (t
, "u", elem
);
1929 elem
= builtin_type (gdbarch
)->builtin_int8
;
1930 append_composite_type_field (t
, "s", elem
);
1935 return tdep
->vnb_type
;
1938 /* Return the type for an AdvSISD V register. */
1940 static struct type
*
1941 aarch64_vnv_type (struct gdbarch
*gdbarch
)
1943 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1945 if (tdep
->vnv_type
== NULL
)
1947 /* The other AArch64 psuedo registers (Q,D,H,S,B) refer to a single value
1948 slice from the non-pseudo vector registers. However NEON V registers
1949 are always vector registers, and need constructing as such. */
1950 const struct builtin_type
*bt
= builtin_type (gdbarch
);
1952 struct type
*t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnv",
1955 struct type
*sub
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnd",
1957 append_composite_type_field (sub
, "f",
1958 init_vector_type (bt
->builtin_double
, 2));
1959 append_composite_type_field (sub
, "u",
1960 init_vector_type (bt
->builtin_uint64
, 2));
1961 append_composite_type_field (sub
, "s",
1962 init_vector_type (bt
->builtin_int64
, 2));
1963 append_composite_type_field (t
, "d", sub
);
1965 sub
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vns",
1967 append_composite_type_field (sub
, "f",
1968 init_vector_type (bt
->builtin_float
, 4));
1969 append_composite_type_field (sub
, "u",
1970 init_vector_type (bt
->builtin_uint32
, 4));
1971 append_composite_type_field (sub
, "s",
1972 init_vector_type (bt
->builtin_int32
, 4));
1973 append_composite_type_field (t
, "s", sub
);
1975 sub
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnh",
1977 append_composite_type_field (sub
, "f",
1978 init_vector_type (bt
->builtin_half
, 8));
1979 append_composite_type_field (sub
, "u",
1980 init_vector_type (bt
->builtin_uint16
, 8));
1981 append_composite_type_field (sub
, "s",
1982 init_vector_type (bt
->builtin_int16
, 8));
1983 append_composite_type_field (t
, "h", sub
);
1985 sub
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnb",
1987 append_composite_type_field (sub
, "u",
1988 init_vector_type (bt
->builtin_uint8
, 16));
1989 append_composite_type_field (sub
, "s",
1990 init_vector_type (bt
->builtin_int8
, 16));
1991 append_composite_type_field (t
, "b", sub
);
1993 sub
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnq",
1995 append_composite_type_field (sub
, "u",
1996 init_vector_type (bt
->builtin_uint128
, 1));
1997 append_composite_type_field (sub
, "s",
1998 init_vector_type (bt
->builtin_int128
, 1));
1999 append_composite_type_field (t
, "q", sub
);
2004 return tdep
->vnv_type
;
2007 /* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
2010 aarch64_dwarf_reg_to_regnum (struct gdbarch
*gdbarch
, int reg
)
2012 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2014 if (reg
>= AARCH64_DWARF_X0
&& reg
<= AARCH64_DWARF_X0
+ 30)
2015 return AARCH64_X0_REGNUM
+ reg
- AARCH64_DWARF_X0
;
2017 if (reg
== AARCH64_DWARF_SP
)
2018 return AARCH64_SP_REGNUM
;
2020 if (reg
>= AARCH64_DWARF_V0
&& reg
<= AARCH64_DWARF_V0
+ 31)
2021 return AARCH64_V0_REGNUM
+ reg
- AARCH64_DWARF_V0
;
2023 if (reg
== AARCH64_DWARF_SVE_VG
)
2024 return AARCH64_SVE_VG_REGNUM
;
2026 if (reg
== AARCH64_DWARF_SVE_FFR
)
2027 return AARCH64_SVE_FFR_REGNUM
;
2029 if (reg
>= AARCH64_DWARF_SVE_P0
&& reg
<= AARCH64_DWARF_SVE_P0
+ 15)
2030 return AARCH64_SVE_P0_REGNUM
+ reg
- AARCH64_DWARF_SVE_P0
;
2032 if (reg
>= AARCH64_DWARF_SVE_Z0
&& reg
<= AARCH64_DWARF_SVE_Z0
+ 15)
2033 return AARCH64_SVE_Z0_REGNUM
+ reg
- AARCH64_DWARF_SVE_Z0
;
2035 if (tdep
->has_pauth ())
2037 if (reg
>= AARCH64_DWARF_PAUTH_DMASK
&& reg
<= AARCH64_DWARF_PAUTH_CMASK
)
2038 return tdep
->pauth_reg_base
+ reg
- AARCH64_DWARF_PAUTH_DMASK
;
2040 if (reg
== AARCH64_DWARF_PAUTH_RA_STATE
)
2041 return tdep
->pauth_ra_state_regnum
;
2047 /* Implement the "print_insn" gdbarch method. */
2050 aarch64_gdb_print_insn (bfd_vma memaddr
, disassemble_info
*info
)
2052 info
->symbols
= NULL
;
2053 return default_print_insn (memaddr
, info
);
2056 /* AArch64 BRK software debug mode instruction.
2057 Note that AArch64 code is always little-endian.
2058 1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000. */
2059 constexpr gdb_byte aarch64_default_breakpoint
[] = {0x00, 0x00, 0x20, 0xd4};
2061 typedef BP_MANIPULATION (aarch64_default_breakpoint
) aarch64_breakpoint
;
2063 /* Extract from an array REGS containing the (raw) register state a
2064 function return value of type TYPE, and copy that, in virtual
2065 format, into VALBUF. */
2068 aarch64_extract_return_value (struct type
*type
, struct regcache
*regs
,
2071 struct gdbarch
*gdbarch
= regs
->arch ();
2072 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
2074 struct type
*fundamental_type
;
2076 if (aapcs_is_vfp_call_or_return_candidate (type
, &elements
,
2079 int len
= TYPE_LENGTH (fundamental_type
);
2081 for (int i
= 0; i
< elements
; i
++)
2083 int regno
= AARCH64_V0_REGNUM
+ i
;
2084 /* Enough space for a full vector register. */
2085 gdb_byte buf
[register_size (gdbarch
, regno
)];
2086 gdb_assert (len
<= sizeof (buf
));
2090 debug_printf ("read HFA or HVA return value element %d from %s\n",
2092 gdbarch_register_name (gdbarch
, regno
));
2094 regs
->cooked_read (regno
, buf
);
2096 memcpy (valbuf
, buf
, len
);
2100 else if (TYPE_CODE (type
) == TYPE_CODE_INT
2101 || TYPE_CODE (type
) == TYPE_CODE_CHAR
2102 || TYPE_CODE (type
) == TYPE_CODE_BOOL
2103 || TYPE_CODE (type
) == TYPE_CODE_PTR
2104 || TYPE_IS_REFERENCE (type
)
2105 || TYPE_CODE (type
) == TYPE_CODE_ENUM
)
2107 /* If the type is a plain integer, then the access is
2108 straight-forward. Otherwise we have to play around a bit
2110 int len
= TYPE_LENGTH (type
);
2111 int regno
= AARCH64_X0_REGNUM
;
2116 /* By using store_unsigned_integer we avoid having to do
2117 anything special for small big-endian values. */
2118 regcache_cooked_read_unsigned (regs
, regno
++, &tmp
);
2119 store_unsigned_integer (valbuf
,
2120 (len
> X_REGISTER_SIZE
2121 ? X_REGISTER_SIZE
: len
), byte_order
, tmp
);
2122 len
-= X_REGISTER_SIZE
;
2123 valbuf
+= X_REGISTER_SIZE
;
2128 /* For a structure or union the behaviour is as if the value had
2129 been stored to word-aligned memory and then loaded into
2130 registers with 64-bit load instruction(s). */
2131 int len
= TYPE_LENGTH (type
);
2132 int regno
= AARCH64_X0_REGNUM
;
2133 bfd_byte buf
[X_REGISTER_SIZE
];
2137 regs
->cooked_read (regno
++, buf
);
2138 memcpy (valbuf
, buf
, len
> X_REGISTER_SIZE
? X_REGISTER_SIZE
: len
);
2139 len
-= X_REGISTER_SIZE
;
2140 valbuf
+= X_REGISTER_SIZE
;
2146 /* Will a function return an aggregate type in memory or in a
2147 register? Return 0 if an aggregate type can be returned in a
2148 register, 1 if it must be returned in memory. */
2151 aarch64_return_in_memory (struct gdbarch
*gdbarch
, struct type
*type
)
2153 type
= check_typedef (type
);
2155 struct type
*fundamental_type
;
2157 if (aapcs_is_vfp_call_or_return_candidate (type
, &elements
,
2160 /* v0-v7 are used to return values and one register is allocated
2161 for one member. However, HFA or HVA has at most four members. */
2165 if (TYPE_LENGTH (type
) > 16)
2167 /* PCS B.6 Aggregates larger than 16 bytes are passed by
2168 invisible reference. */
2176 /* Write into appropriate registers a function return value of type
2177 TYPE, given in virtual format. */
2180 aarch64_store_return_value (struct type
*type
, struct regcache
*regs
,
2181 const gdb_byte
*valbuf
)
2183 struct gdbarch
*gdbarch
= regs
->arch ();
2184 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
2186 struct type
*fundamental_type
;
2188 if (aapcs_is_vfp_call_or_return_candidate (type
, &elements
,
2191 int len
= TYPE_LENGTH (fundamental_type
);
2193 for (int i
= 0; i
< elements
; i
++)
2195 int regno
= AARCH64_V0_REGNUM
+ i
;
2196 /* Enough space for a full vector register. */
2197 gdb_byte tmpbuf
[register_size (gdbarch
, regno
)];
2198 gdb_assert (len
<= sizeof (tmpbuf
));
2202 debug_printf ("write HFA or HVA return value element %d to %s\n",
2204 gdbarch_register_name (gdbarch
, regno
));
2207 memcpy (tmpbuf
, valbuf
,
2208 len
> V_REGISTER_SIZE
? V_REGISTER_SIZE
: len
);
2209 regs
->cooked_write (regno
, tmpbuf
);
2213 else if (TYPE_CODE (type
) == TYPE_CODE_INT
2214 || TYPE_CODE (type
) == TYPE_CODE_CHAR
2215 || TYPE_CODE (type
) == TYPE_CODE_BOOL
2216 || TYPE_CODE (type
) == TYPE_CODE_PTR
2217 || TYPE_IS_REFERENCE (type
)
2218 || TYPE_CODE (type
) == TYPE_CODE_ENUM
)
2220 if (TYPE_LENGTH (type
) <= X_REGISTER_SIZE
)
2222 /* Values of one word or less are zero/sign-extended and
2224 bfd_byte tmpbuf
[X_REGISTER_SIZE
];
2225 LONGEST val
= unpack_long (type
, valbuf
);
2227 store_signed_integer (tmpbuf
, X_REGISTER_SIZE
, byte_order
, val
);
2228 regs
->cooked_write (AARCH64_X0_REGNUM
, tmpbuf
);
2232 /* Integral values greater than one word are stored in
2233 consecutive registers starting with r0. This will always
2234 be a multiple of the regiser size. */
2235 int len
= TYPE_LENGTH (type
);
2236 int regno
= AARCH64_X0_REGNUM
;
2240 regs
->cooked_write (regno
++, valbuf
);
2241 len
-= X_REGISTER_SIZE
;
2242 valbuf
+= X_REGISTER_SIZE
;
2248 /* For a structure or union the behaviour is as if the value had
2249 been stored to word-aligned memory and then loaded into
2250 registers with 64-bit load instruction(s). */
2251 int len
= TYPE_LENGTH (type
);
2252 int regno
= AARCH64_X0_REGNUM
;
2253 bfd_byte tmpbuf
[X_REGISTER_SIZE
];
2257 memcpy (tmpbuf
, valbuf
,
2258 len
> X_REGISTER_SIZE
? X_REGISTER_SIZE
: len
);
2259 regs
->cooked_write (regno
++, tmpbuf
);
2260 len
-= X_REGISTER_SIZE
;
2261 valbuf
+= X_REGISTER_SIZE
;
2266 /* Implement the "return_value" gdbarch method. */
2268 static enum return_value_convention
2269 aarch64_return_value (struct gdbarch
*gdbarch
, struct value
*func_value
,
2270 struct type
*valtype
, struct regcache
*regcache
,
2271 gdb_byte
*readbuf
, const gdb_byte
*writebuf
)
2274 if (TYPE_CODE (valtype
) == TYPE_CODE_STRUCT
2275 || TYPE_CODE (valtype
) == TYPE_CODE_UNION
2276 || TYPE_CODE (valtype
) == TYPE_CODE_ARRAY
)
2278 if (aarch64_return_in_memory (gdbarch
, valtype
))
2281 debug_printf ("return value in memory\n");
2282 return RETURN_VALUE_STRUCT_CONVENTION
;
2287 aarch64_store_return_value (valtype
, regcache
, writebuf
);
2290 aarch64_extract_return_value (valtype
, regcache
, readbuf
);
2293 debug_printf ("return value in registers\n");
2295 return RETURN_VALUE_REGISTER_CONVENTION
;
2298 /* Implement the "get_longjmp_target" gdbarch method. */
2301 aarch64_get_longjmp_target (struct frame_info
*frame
, CORE_ADDR
*pc
)
2304 gdb_byte buf
[X_REGISTER_SIZE
];
2305 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
2306 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2307 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
2309 jb_addr
= get_frame_register_unsigned (frame
, AARCH64_X0_REGNUM
);
2311 if (target_read_memory (jb_addr
+ tdep
->jb_pc
* tdep
->jb_elt_size
, buf
,
2315 *pc
= extract_unsigned_integer (buf
, X_REGISTER_SIZE
, byte_order
);
2319 /* Implement the "gen_return_address" gdbarch method. */
2322 aarch64_gen_return_address (struct gdbarch
*gdbarch
,
2323 struct agent_expr
*ax
, struct axs_value
*value
,
2326 value
->type
= register_type (gdbarch
, AARCH64_LR_REGNUM
);
2327 value
->kind
= axs_lvalue_register
;
2328 value
->u
.reg
= AARCH64_LR_REGNUM
;
2332 /* Return the pseudo register name corresponding to register regnum. */
2335 aarch64_pseudo_register_name (struct gdbarch
*gdbarch
, int regnum
)
2337 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2339 static const char *const q_name
[] =
2341 "q0", "q1", "q2", "q3",
2342 "q4", "q5", "q6", "q7",
2343 "q8", "q9", "q10", "q11",
2344 "q12", "q13", "q14", "q15",
2345 "q16", "q17", "q18", "q19",
2346 "q20", "q21", "q22", "q23",
2347 "q24", "q25", "q26", "q27",
2348 "q28", "q29", "q30", "q31",
2351 static const char *const d_name
[] =
2353 "d0", "d1", "d2", "d3",
2354 "d4", "d5", "d6", "d7",
2355 "d8", "d9", "d10", "d11",
2356 "d12", "d13", "d14", "d15",
2357 "d16", "d17", "d18", "d19",
2358 "d20", "d21", "d22", "d23",
2359 "d24", "d25", "d26", "d27",
2360 "d28", "d29", "d30", "d31",
2363 static const char *const s_name
[] =
2365 "s0", "s1", "s2", "s3",
2366 "s4", "s5", "s6", "s7",
2367 "s8", "s9", "s10", "s11",
2368 "s12", "s13", "s14", "s15",
2369 "s16", "s17", "s18", "s19",
2370 "s20", "s21", "s22", "s23",
2371 "s24", "s25", "s26", "s27",
2372 "s28", "s29", "s30", "s31",
2375 static const char *const h_name
[] =
2377 "h0", "h1", "h2", "h3",
2378 "h4", "h5", "h6", "h7",
2379 "h8", "h9", "h10", "h11",
2380 "h12", "h13", "h14", "h15",
2381 "h16", "h17", "h18", "h19",
2382 "h20", "h21", "h22", "h23",
2383 "h24", "h25", "h26", "h27",
2384 "h28", "h29", "h30", "h31",
2387 static const char *const b_name
[] =
2389 "b0", "b1", "b2", "b3",
2390 "b4", "b5", "b6", "b7",
2391 "b8", "b9", "b10", "b11",
2392 "b12", "b13", "b14", "b15",
2393 "b16", "b17", "b18", "b19",
2394 "b20", "b21", "b22", "b23",
2395 "b24", "b25", "b26", "b27",
2396 "b28", "b29", "b30", "b31",
2399 int p_regnum
= regnum
- gdbarch_num_regs (gdbarch
);
2401 if (p_regnum
>= AARCH64_Q0_REGNUM
&& p_regnum
< AARCH64_Q0_REGNUM
+ 32)
2402 return q_name
[p_regnum
- AARCH64_Q0_REGNUM
];
2404 if (p_regnum
>= AARCH64_D0_REGNUM
&& p_regnum
< AARCH64_D0_REGNUM
+ 32)
2405 return d_name
[p_regnum
- AARCH64_D0_REGNUM
];
2407 if (p_regnum
>= AARCH64_S0_REGNUM
&& p_regnum
< AARCH64_S0_REGNUM
+ 32)
2408 return s_name
[p_regnum
- AARCH64_S0_REGNUM
];
2410 if (p_regnum
>= AARCH64_H0_REGNUM
&& p_regnum
< AARCH64_H0_REGNUM
+ 32)
2411 return h_name
[p_regnum
- AARCH64_H0_REGNUM
];
2413 if (p_regnum
>= AARCH64_B0_REGNUM
&& p_regnum
< AARCH64_B0_REGNUM
+ 32)
2414 return b_name
[p_regnum
- AARCH64_B0_REGNUM
];
2416 if (tdep
->has_sve ())
2418 static const char *const sve_v_name
[] =
2420 "v0", "v1", "v2", "v3",
2421 "v4", "v5", "v6", "v7",
2422 "v8", "v9", "v10", "v11",
2423 "v12", "v13", "v14", "v15",
2424 "v16", "v17", "v18", "v19",
2425 "v20", "v21", "v22", "v23",
2426 "v24", "v25", "v26", "v27",
2427 "v28", "v29", "v30", "v31",
2430 if (p_regnum
>= AARCH64_SVE_V0_REGNUM
2431 && p_regnum
< AARCH64_SVE_V0_REGNUM
+ AARCH64_V_REGS_NUM
)
2432 return sve_v_name
[p_regnum
- AARCH64_SVE_V0_REGNUM
];
2435 /* RA_STATE is used for unwinding only. Do not assign it a name - this
2436 prevents it from being read by methods such as
2437 mi_cmd_trace_frame_collected. */
2438 if (tdep
->has_pauth () && regnum
== tdep
->pauth_ra_state_regnum
)
2441 internal_error (__FILE__
, __LINE__
,
2442 _("aarch64_pseudo_register_name: bad register number %d"),
2446 /* Implement the "pseudo_register_type" tdesc_arch_data method. */
2448 static struct type
*
2449 aarch64_pseudo_register_type (struct gdbarch
*gdbarch
, int regnum
)
2451 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2453 int p_regnum
= regnum
- gdbarch_num_regs (gdbarch
);
2455 if (p_regnum
>= AARCH64_Q0_REGNUM
&& p_regnum
< AARCH64_Q0_REGNUM
+ 32)
2456 return aarch64_vnq_type (gdbarch
);
2458 if (p_regnum
>= AARCH64_D0_REGNUM
&& p_regnum
< AARCH64_D0_REGNUM
+ 32)
2459 return aarch64_vnd_type (gdbarch
);
2461 if (p_regnum
>= AARCH64_S0_REGNUM
&& p_regnum
< AARCH64_S0_REGNUM
+ 32)
2462 return aarch64_vns_type (gdbarch
);
2464 if (p_regnum
>= AARCH64_H0_REGNUM
&& p_regnum
< AARCH64_H0_REGNUM
+ 32)
2465 return aarch64_vnh_type (gdbarch
);
2467 if (p_regnum
>= AARCH64_B0_REGNUM
&& p_regnum
< AARCH64_B0_REGNUM
+ 32)
2468 return aarch64_vnb_type (gdbarch
);
2470 if (tdep
->has_sve () && p_regnum
>= AARCH64_SVE_V0_REGNUM
2471 && p_regnum
< AARCH64_SVE_V0_REGNUM
+ AARCH64_V_REGS_NUM
)
2472 return aarch64_vnv_type (gdbarch
);
2474 if (tdep
->has_pauth () && regnum
== tdep
->pauth_ra_state_regnum
)
2475 return builtin_type (gdbarch
)->builtin_uint64
;
2477 internal_error (__FILE__
, __LINE__
,
2478 _("aarch64_pseudo_register_type: bad register number %d"),
2482 /* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2485 aarch64_pseudo_register_reggroup_p (struct gdbarch
*gdbarch
, int regnum
,
2486 struct reggroup
*group
)
2488 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2490 int p_regnum
= regnum
- gdbarch_num_regs (gdbarch
);
2492 if (p_regnum
>= AARCH64_Q0_REGNUM
&& p_regnum
< AARCH64_Q0_REGNUM
+ 32)
2493 return group
== all_reggroup
|| group
== vector_reggroup
;
2494 else if (p_regnum
>= AARCH64_D0_REGNUM
&& p_regnum
< AARCH64_D0_REGNUM
+ 32)
2495 return (group
== all_reggroup
|| group
== vector_reggroup
2496 || group
== float_reggroup
);
2497 else if (p_regnum
>= AARCH64_S0_REGNUM
&& p_regnum
< AARCH64_S0_REGNUM
+ 32)
2498 return (group
== all_reggroup
|| group
== vector_reggroup
2499 || group
== float_reggroup
);
2500 else if (p_regnum
>= AARCH64_H0_REGNUM
&& p_regnum
< AARCH64_H0_REGNUM
+ 32)
2501 return group
== all_reggroup
|| group
== vector_reggroup
;
2502 else if (p_regnum
>= AARCH64_B0_REGNUM
&& p_regnum
< AARCH64_B0_REGNUM
+ 32)
2503 return group
== all_reggroup
|| group
== vector_reggroup
;
2504 else if (tdep
->has_sve () && p_regnum
>= AARCH64_SVE_V0_REGNUM
2505 && p_regnum
< AARCH64_SVE_V0_REGNUM
+ AARCH64_V_REGS_NUM
)
2506 return group
== all_reggroup
|| group
== vector_reggroup
;
2507 /* RA_STATE is used for unwinding only. Do not assign it to any groups. */
2508 if (tdep
->has_pauth () && regnum
== tdep
->pauth_ra_state_regnum
)
2511 return group
== all_reggroup
;
2514 /* Helper for aarch64_pseudo_read_value. */
2516 static struct value
*
2517 aarch64_pseudo_read_value_1 (struct gdbarch
*gdbarch
,
2518 readable_regcache
*regcache
, int regnum_offset
,
2519 int regsize
, struct value
*result_value
)
2521 unsigned v_regnum
= AARCH64_V0_REGNUM
+ regnum_offset
;
2523 /* Enough space for a full vector register. */
2524 gdb_byte reg_buf
[register_size (gdbarch
, AARCH64_V0_REGNUM
)];
2525 gdb_static_assert (AARCH64_V0_REGNUM
== AARCH64_SVE_Z0_REGNUM
);
2527 if (regcache
->raw_read (v_regnum
, reg_buf
) != REG_VALID
)
2528 mark_value_bytes_unavailable (result_value
, 0,
2529 TYPE_LENGTH (value_type (result_value
)));
2531 memcpy (value_contents_raw (result_value
), reg_buf
, regsize
);
2533 return result_value
;
2536 /* Implement the "pseudo_register_read_value" gdbarch method. */
2538 static struct value
*
2539 aarch64_pseudo_read_value (struct gdbarch
*gdbarch
, readable_regcache
*regcache
,
2542 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2543 struct value
*result_value
= allocate_value (register_type (gdbarch
, regnum
));
2545 VALUE_LVAL (result_value
) = lval_register
;
2546 VALUE_REGNUM (result_value
) = regnum
;
2548 regnum
-= gdbarch_num_regs (gdbarch
);
2550 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
2551 return aarch64_pseudo_read_value_1 (gdbarch
, regcache
,
2552 regnum
- AARCH64_Q0_REGNUM
,
2553 Q_REGISTER_SIZE
, result_value
);
2555 if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
2556 return aarch64_pseudo_read_value_1 (gdbarch
, regcache
,
2557 regnum
- AARCH64_D0_REGNUM
,
2558 D_REGISTER_SIZE
, result_value
);
2560 if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
2561 return aarch64_pseudo_read_value_1 (gdbarch
, regcache
,
2562 regnum
- AARCH64_S0_REGNUM
,
2563 S_REGISTER_SIZE
, result_value
);
2565 if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
2566 return aarch64_pseudo_read_value_1 (gdbarch
, regcache
,
2567 regnum
- AARCH64_H0_REGNUM
,
2568 H_REGISTER_SIZE
, result_value
);
2570 if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
2571 return aarch64_pseudo_read_value_1 (gdbarch
, regcache
,
2572 regnum
- AARCH64_B0_REGNUM
,
2573 B_REGISTER_SIZE
, result_value
);
2575 if (tdep
->has_sve () && regnum
>= AARCH64_SVE_V0_REGNUM
2576 && regnum
< AARCH64_SVE_V0_REGNUM
+ 32)
2577 return aarch64_pseudo_read_value_1 (gdbarch
, regcache
,
2578 regnum
- AARCH64_SVE_V0_REGNUM
,
2579 V_REGISTER_SIZE
, result_value
);
2581 gdb_assert_not_reached ("regnum out of bound");
2584 /* Helper for aarch64_pseudo_write. */
2587 aarch64_pseudo_write_1 (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
2588 int regnum_offset
, int regsize
, const gdb_byte
*buf
)
2590 unsigned v_regnum
= AARCH64_V0_REGNUM
+ regnum_offset
;
2592 /* Enough space for a full vector register. */
2593 gdb_byte reg_buf
[register_size (gdbarch
, AARCH64_V0_REGNUM
)];
2594 gdb_static_assert (AARCH64_V0_REGNUM
== AARCH64_SVE_Z0_REGNUM
);
2596 /* Ensure the register buffer is zero, we want gdb writes of the
2597 various 'scalar' pseudo registers to behavior like architectural
2598 writes, register width bytes are written the remainder are set to
2600 memset (reg_buf
, 0, register_size (gdbarch
, AARCH64_V0_REGNUM
));
2602 memcpy (reg_buf
, buf
, regsize
);
2603 regcache
->raw_write (v_regnum
, reg_buf
);
2606 /* Implement the "pseudo_register_write" gdbarch method. */
2609 aarch64_pseudo_write (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
2610 int regnum
, const gdb_byte
*buf
)
2612 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2613 regnum
-= gdbarch_num_regs (gdbarch
);
2615 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
2616 return aarch64_pseudo_write_1 (gdbarch
, regcache
,
2617 regnum
- AARCH64_Q0_REGNUM
, Q_REGISTER_SIZE
,
2620 if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
2621 return aarch64_pseudo_write_1 (gdbarch
, regcache
,
2622 regnum
- AARCH64_D0_REGNUM
, D_REGISTER_SIZE
,
2625 if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
2626 return aarch64_pseudo_write_1 (gdbarch
, regcache
,
2627 regnum
- AARCH64_S0_REGNUM
, S_REGISTER_SIZE
,
2630 if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
2631 return aarch64_pseudo_write_1 (gdbarch
, regcache
,
2632 regnum
- AARCH64_H0_REGNUM
, H_REGISTER_SIZE
,
2635 if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
2636 return aarch64_pseudo_write_1 (gdbarch
, regcache
,
2637 regnum
- AARCH64_B0_REGNUM
, B_REGISTER_SIZE
,
2640 if (tdep
->has_sve () && regnum
>= AARCH64_SVE_V0_REGNUM
2641 && regnum
< AARCH64_SVE_V0_REGNUM
+ 32)
2642 return aarch64_pseudo_write_1 (gdbarch
, regcache
,
2643 regnum
- AARCH64_SVE_V0_REGNUM
,
2644 V_REGISTER_SIZE
, buf
);
2646 gdb_assert_not_reached ("regnum out of bound");
2649 /* Callback function for user_reg_add. */
2651 static struct value
*
2652 value_of_aarch64_user_reg (struct frame_info
*frame
, const void *baton
)
2654 const int *reg_p
= (const int *) baton
;
2656 return value_of_register (*reg_p
, frame
);
2660 /* Implement the "software_single_step" gdbarch method, needed to
2661 single step through atomic sequences on AArch64. */
2663 static std::vector
<CORE_ADDR
>
2664 aarch64_software_single_step (struct regcache
*regcache
)
2666 struct gdbarch
*gdbarch
= regcache
->arch ();
2667 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
2668 const int insn_size
= 4;
2669 const int atomic_sequence_length
= 16; /* Instruction sequence length. */
2670 CORE_ADDR pc
= regcache_read_pc (regcache
);
2671 CORE_ADDR breaks
[2] = { CORE_ADDR_MAX
, CORE_ADDR_MAX
};
2673 CORE_ADDR closing_insn
= 0;
2674 uint32_t insn
= read_memory_unsigned_integer (loc
, insn_size
,
2675 byte_order_for_code
);
2678 int bc_insn_count
= 0; /* Conditional branch instruction count. */
2679 int last_breakpoint
= 0; /* Defaults to 0 (no breakpoints placed). */
2682 if (aarch64_decode_insn (insn
, &inst
, 1, NULL
) != 0)
2685 /* Look for a Load Exclusive instruction which begins the sequence. */
2686 if (inst
.opcode
->iclass
!= ldstexcl
|| bit (insn
, 22) == 0)
2689 for (insn_count
= 0; insn_count
< atomic_sequence_length
; ++insn_count
)
2692 insn
= read_memory_unsigned_integer (loc
, insn_size
,
2693 byte_order_for_code
);
2695 if (aarch64_decode_insn (insn
, &inst
, 1, NULL
) != 0)
2697 /* Check if the instruction is a conditional branch. */
2698 if (inst
.opcode
->iclass
== condbranch
)
2700 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_ADDR_PCREL19
);
2702 if (bc_insn_count
>= 1)
2705 /* It is, so we'll try to set a breakpoint at the destination. */
2706 breaks
[1] = loc
+ inst
.operands
[0].imm
.value
;
2712 /* Look for the Store Exclusive which closes the atomic sequence. */
2713 if (inst
.opcode
->iclass
== ldstexcl
&& bit (insn
, 22) == 0)
2720 /* We didn't find a closing Store Exclusive instruction, fall back. */
2724 /* Insert breakpoint after the end of the atomic sequence. */
2725 breaks
[0] = loc
+ insn_size
;
2727 /* Check for duplicated breakpoints, and also check that the second
2728 breakpoint is not within the atomic sequence. */
2730 && (breaks
[1] == breaks
[0]
2731 || (breaks
[1] >= pc
&& breaks
[1] <= closing_insn
)))
2732 last_breakpoint
= 0;
2734 std::vector
<CORE_ADDR
> next_pcs
;
2736 /* Insert the breakpoint at the end of the sequence, and one at the
2737 destination of the conditional branch, if it exists. */
2738 for (index
= 0; index
<= last_breakpoint
; index
++)
2739 next_pcs
.push_back (breaks
[index
]);
2744 struct aarch64_displaced_step_closure
: public displaced_step_closure
2746 /* It is true when condition instruction, such as B.CON, TBZ, etc,
2747 is being displaced stepping. */
2750 /* PC adjustment offset after displaced stepping. */
2751 int32_t pc_adjust
= 0;
2754 /* Data when visiting instructions for displaced stepping. */
2756 struct aarch64_displaced_step_data
2758 struct aarch64_insn_data base
;
2760 /* The address where the instruction will be executed at. */
2762 /* Buffer of instructions to be copied to NEW_ADDR to execute. */
2763 uint32_t insn_buf
[AARCH64_DISPLACED_MODIFIED_INSNS
];
2764 /* Number of instructions in INSN_BUF. */
2765 unsigned insn_count
;
2766 /* Registers when doing displaced stepping. */
2767 struct regcache
*regs
;
2769 aarch64_displaced_step_closure
*dsc
;
2772 /* Implementation of aarch64_insn_visitor method "b". */
2775 aarch64_displaced_step_b (const int is_bl
, const int32_t offset
,
2776 struct aarch64_insn_data
*data
)
2778 struct aarch64_displaced_step_data
*dsd
2779 = (struct aarch64_displaced_step_data
*) data
;
2780 int64_t new_offset
= data
->insn_addr
- dsd
->new_addr
+ offset
;
2782 if (can_encode_int32 (new_offset
, 28))
2784 /* Emit B rather than BL, because executing BL on a new address
2785 will get the wrong address into LR. In order to avoid this,
2786 we emit B, and update LR if the instruction is BL. */
2787 emit_b (dsd
->insn_buf
, 0, new_offset
);
2793 emit_nop (dsd
->insn_buf
);
2795 dsd
->dsc
->pc_adjust
= offset
;
2801 regcache_cooked_write_unsigned (dsd
->regs
, AARCH64_LR_REGNUM
,
2802 data
->insn_addr
+ 4);
2806 /* Implementation of aarch64_insn_visitor method "b_cond". */
2809 aarch64_displaced_step_b_cond (const unsigned cond
, const int32_t offset
,
2810 struct aarch64_insn_data
*data
)
2812 struct aarch64_displaced_step_data
*dsd
2813 = (struct aarch64_displaced_step_data
*) data
;
2815 /* GDB has to fix up PC after displaced step this instruction
2816 differently according to the condition is true or false. Instead
2817 of checking COND against conditional flags, we can use
2818 the following instructions, and GDB can tell how to fix up PC
2819 according to the PC value.
2821 B.COND TAKEN ; If cond is true, then jump to TAKEN.
2827 emit_bcond (dsd
->insn_buf
, cond
, 8);
2829 dsd
->dsc
->pc_adjust
= offset
;
2830 dsd
->insn_count
= 1;
2833 /* Dynamically allocate a new register. If we know the register
2834 statically, we should make it a global as above instead of using this
2837 static struct aarch64_register
2838 aarch64_register (unsigned num
, int is64
)
2840 return (struct aarch64_register
) { num
, is64
};
2843 /* Implementation of aarch64_insn_visitor method "cb". */
2846 aarch64_displaced_step_cb (const int32_t offset
, const int is_cbnz
,
2847 const unsigned rn
, int is64
,
2848 struct aarch64_insn_data
*data
)
2850 struct aarch64_displaced_step_data
*dsd
2851 = (struct aarch64_displaced_step_data
*) data
;
2853 /* The offset is out of range for a compare and branch
2854 instruction. We can use the following instructions instead:
2856 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
2861 emit_cb (dsd
->insn_buf
, is_cbnz
, aarch64_register (rn
, is64
), 8);
2862 dsd
->insn_count
= 1;
2864 dsd
->dsc
->pc_adjust
= offset
;
2867 /* Implementation of aarch64_insn_visitor method "tb". */
2870 aarch64_displaced_step_tb (const int32_t offset
, int is_tbnz
,
2871 const unsigned rt
, unsigned bit
,
2872 struct aarch64_insn_data
*data
)
2874 struct aarch64_displaced_step_data
*dsd
2875 = (struct aarch64_displaced_step_data
*) data
;
2877 /* The offset is out of range for a test bit and branch
2878 instruction We can use the following instructions instead:
2880 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
2886 emit_tb (dsd
->insn_buf
, is_tbnz
, bit
, aarch64_register (rt
, 1), 8);
2887 dsd
->insn_count
= 1;
2889 dsd
->dsc
->pc_adjust
= offset
;
2892 /* Implementation of aarch64_insn_visitor method "adr". */
2895 aarch64_displaced_step_adr (const int32_t offset
, const unsigned rd
,
2896 const int is_adrp
, struct aarch64_insn_data
*data
)
2898 struct aarch64_displaced_step_data
*dsd
2899 = (struct aarch64_displaced_step_data
*) data
;
2900 /* We know exactly the address the ADR{P,} instruction will compute.
2901 We can just write it to the destination register. */
2902 CORE_ADDR address
= data
->insn_addr
+ offset
;
2906 /* Clear the lower 12 bits of the offset to get the 4K page. */
2907 regcache_cooked_write_unsigned (dsd
->regs
, AARCH64_X0_REGNUM
+ rd
,
2911 regcache_cooked_write_unsigned (dsd
->regs
, AARCH64_X0_REGNUM
+ rd
,
2914 dsd
->dsc
->pc_adjust
= 4;
2915 emit_nop (dsd
->insn_buf
);
2916 dsd
->insn_count
= 1;
2919 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
2922 aarch64_displaced_step_ldr_literal (const int32_t offset
, const int is_sw
,
2923 const unsigned rt
, const int is64
,
2924 struct aarch64_insn_data
*data
)
2926 struct aarch64_displaced_step_data
*dsd
2927 = (struct aarch64_displaced_step_data
*) data
;
2928 CORE_ADDR address
= data
->insn_addr
+ offset
;
2929 struct aarch64_memory_operand zero
= { MEMORY_OPERAND_OFFSET
, 0 };
2931 regcache_cooked_write_unsigned (dsd
->regs
, AARCH64_X0_REGNUM
+ rt
,
2935 dsd
->insn_count
= emit_ldrsw (dsd
->insn_buf
, aarch64_register (rt
, 1),
2936 aarch64_register (rt
, 1), zero
);
2938 dsd
->insn_count
= emit_ldr (dsd
->insn_buf
, aarch64_register (rt
, is64
),
2939 aarch64_register (rt
, 1), zero
);
2941 dsd
->dsc
->pc_adjust
= 4;
2944 /* Implementation of aarch64_insn_visitor method "others". */
2947 aarch64_displaced_step_others (const uint32_t insn
,
2948 struct aarch64_insn_data
*data
)
2950 struct aarch64_displaced_step_data
*dsd
2951 = (struct aarch64_displaced_step_data
*) data
;
2953 aarch64_emit_insn (dsd
->insn_buf
, insn
);
2954 dsd
->insn_count
= 1;
2956 if ((insn
& 0xfffffc1f) == 0xd65f0000)
2959 dsd
->dsc
->pc_adjust
= 0;
2962 dsd
->dsc
->pc_adjust
= 4;
2965 static const struct aarch64_insn_visitor visitor
=
2967 aarch64_displaced_step_b
,
2968 aarch64_displaced_step_b_cond
,
2969 aarch64_displaced_step_cb
,
2970 aarch64_displaced_step_tb
,
2971 aarch64_displaced_step_adr
,
2972 aarch64_displaced_step_ldr_literal
,
2973 aarch64_displaced_step_others
,
2976 /* Implement the "displaced_step_copy_insn" gdbarch method. */
2978 struct displaced_step_closure
*
2979 aarch64_displaced_step_copy_insn (struct gdbarch
*gdbarch
,
2980 CORE_ADDR from
, CORE_ADDR to
,
2981 struct regcache
*regs
)
2983 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
2984 uint32_t insn
= read_memory_unsigned_integer (from
, 4, byte_order_for_code
);
2985 struct aarch64_displaced_step_data dsd
;
2988 if (aarch64_decode_insn (insn
, &inst
, 1, NULL
) != 0)
2991 /* Look for a Load Exclusive instruction which begins the sequence. */
2992 if (inst
.opcode
->iclass
== ldstexcl
&& bit (insn
, 22))
2994 /* We can't displaced step atomic sequences. */
2998 std::unique_ptr
<aarch64_displaced_step_closure
> dsc
2999 (new aarch64_displaced_step_closure
);
3000 dsd
.base
.insn_addr
= from
;
3003 dsd
.dsc
= dsc
.get ();
3005 aarch64_relocate_instruction (insn
, &visitor
,
3006 (struct aarch64_insn_data
*) &dsd
);
3007 gdb_assert (dsd
.insn_count
<= AARCH64_DISPLACED_MODIFIED_INSNS
);
3009 if (dsd
.insn_count
!= 0)
3013 /* Instruction can be relocated to scratch pad. Copy
3014 relocated instruction(s) there. */
3015 for (i
= 0; i
< dsd
.insn_count
; i
++)
3017 if (debug_displaced
)
3019 debug_printf ("displaced: writing insn ");
3020 debug_printf ("%.8x", dsd
.insn_buf
[i
]);
3021 debug_printf (" at %s\n", paddress (gdbarch
, to
+ i
* 4));
3023 write_memory_unsigned_integer (to
+ i
* 4, 4, byte_order_for_code
,
3024 (ULONGEST
) dsd
.insn_buf
[i
]);
3032 return dsc
.release ();
3035 /* Implement the "displaced_step_fixup" gdbarch method. */
3038 aarch64_displaced_step_fixup (struct gdbarch
*gdbarch
,
3039 struct displaced_step_closure
*dsc_
,
3040 CORE_ADDR from
, CORE_ADDR to
,
3041 struct regcache
*regs
)
3043 aarch64_displaced_step_closure
*dsc
= (aarch64_displaced_step_closure
*) dsc_
;
3049 regcache_cooked_read_unsigned (regs
, AARCH64_PC_REGNUM
, &pc
);
3052 /* Condition is true. */
3054 else if (pc
- to
== 4)
3056 /* Condition is false. */
3060 gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
3063 if (dsc
->pc_adjust
!= 0)
3065 if (debug_displaced
)
3067 debug_printf ("displaced: fixup: set PC to %s:%d\n",
3068 paddress (gdbarch
, from
), dsc
->pc_adjust
);
3070 regcache_cooked_write_unsigned (regs
, AARCH64_PC_REGNUM
,
3071 from
+ dsc
->pc_adjust
);
/* Implement the "displaced_step_hw_singlestep" gdbarch method.  */

int
aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch,
				      struct displaced_step_closure *closure)
{
  /* NOTE(review): body line lost in extraction; upstream returns
     non-zero here (hardware single-step over the scratch pad) —
     confirm against the original file.  */
  return 1;
}
3084 /* Get the correct target description for the given VQ value.
3085 If VQ is zero then it is assumed SVE is not supported.
3086 (It is not possible to set VQ to zero on an SVE system). */
3089 aarch64_read_description (uint64_t vq
, bool pauth_p
)
3091 if (vq
> AARCH64_MAX_SVE_VQ
)
3092 error (_("VQ is %" PRIu64
", maximum supported value is %d"), vq
,
3093 AARCH64_MAX_SVE_VQ
);
3095 struct target_desc
*tdesc
= tdesc_aarch64_list
[vq
][pauth_p
];
3099 tdesc
= aarch64_create_target_description (vq
, pauth_p
);
3100 tdesc_aarch64_list
[vq
][pauth_p
] = tdesc
;
3106 /* Return the VQ used when creating the target description TDESC. */
3109 aarch64_get_tdesc_vq (const struct target_desc
*tdesc
)
3111 const struct tdesc_feature
*feature_sve
;
3113 if (!tdesc_has_registers (tdesc
))
3116 feature_sve
= tdesc_find_feature (tdesc
, "org.gnu.gdb.aarch64.sve");
3118 if (feature_sve
== nullptr)
3121 uint64_t vl
= tdesc_register_bitsize (feature_sve
,
3122 aarch64_sve_register_names
[0]) / 8;
3123 return sve_vq_from_vl (vl
);
3126 /* Add all the expected register sets into GDBARCH. */
3129 aarch64_add_reggroups (struct gdbarch
*gdbarch
)
3131 reggroup_add (gdbarch
, general_reggroup
);
3132 reggroup_add (gdbarch
, float_reggroup
);
3133 reggroup_add (gdbarch
, system_reggroup
);
3134 reggroup_add (gdbarch
, vector_reggroup
);
3135 reggroup_add (gdbarch
, all_reggroup
);
3136 reggroup_add (gdbarch
, save_reggroup
);
3137 reggroup_add (gdbarch
, restore_reggroup
);
3140 /* Implement the "cannot_store_register" gdbarch method. */
3143 aarch64_cannot_store_register (struct gdbarch
*gdbarch
, int regnum
)
3145 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
3147 if (!tdep
->has_pauth ())
3150 /* Pointer authentication registers are read-only. */
3151 return (regnum
== AARCH64_PAUTH_DMASK_REGNUM (tdep
->pauth_reg_base
)
3152 || regnum
== AARCH64_PAUTH_CMASK_REGNUM (tdep
->pauth_reg_base
));
3155 /* Initialize the current architecture based on INFO. If possible,
3156 re-use an architecture from ARCHES, which is a list of
3157 architectures already created during this debugging session.
3159 Called e.g. at program startup, when reading a core file, and when
3160 reading a binary file. */
3162 static struct gdbarch
*
3163 aarch64_gdbarch_init (struct gdbarch_info info
, struct gdbarch_list
*arches
)
3165 const struct tdesc_feature
*feature_core
, *feature_fpu
, *feature_sve
;
3166 const struct tdesc_feature
*feature_pauth
;
3167 bool valid_p
= true;
3168 int i
, num_regs
= 0, num_pseudo_regs
= 0;
3169 int first_pauth_regnum
= -1, pauth_ra_state_offset
= -1;
3171 /* Use the vector length passed via the target info. Here -1 is used for no
3172 SVE, and 0 is unset. If unset then use the vector length from the existing
3175 if (info
.id
== (int *) -1)
3177 else if (info
.id
!= 0)
3178 vq
= (uint64_t) info
.id
;
3180 vq
= aarch64_get_tdesc_vq (info
.target_desc
);
3182 if (vq
> AARCH64_MAX_SVE_VQ
)
3183 internal_error (__FILE__
, __LINE__
, _("VQ out of bounds: %s (max %d)"),
3184 pulongest (vq
), AARCH64_MAX_SVE_VQ
);
3186 /* If there is already a candidate, use it. */
3187 for (gdbarch_list
*best_arch
= gdbarch_list_lookup_by_info (arches
, &info
);
3188 best_arch
!= nullptr;
3189 best_arch
= gdbarch_list_lookup_by_info (best_arch
->next
, &info
))
3191 struct gdbarch_tdep
*tdep
= gdbarch_tdep (best_arch
->gdbarch
);
3192 if (tdep
&& tdep
->vq
== vq
)
3193 return best_arch
->gdbarch
;
3196 /* Ensure we always have a target descriptor, and that it is for the given VQ
3198 const struct target_desc
*tdesc
= info
.target_desc
;
3199 if (!tdesc_has_registers (tdesc
) || vq
!= aarch64_get_tdesc_vq (tdesc
))
3200 tdesc
= aarch64_read_description (vq
, false);
3203 feature_core
= tdesc_find_feature (tdesc
,"org.gnu.gdb.aarch64.core");
3204 feature_fpu
= tdesc_find_feature (tdesc
, "org.gnu.gdb.aarch64.fpu");
3205 feature_sve
= tdesc_find_feature (tdesc
, "org.gnu.gdb.aarch64.sve");
3206 feature_pauth
= tdesc_find_feature (tdesc
, "org.gnu.gdb.aarch64.pauth");
3208 if (feature_core
== nullptr)
3211 struct tdesc_arch_data
*tdesc_data
= tdesc_data_alloc ();
3213 /* Validate the description provides the mandatory core R registers
3214 and allocate their numbers. */
3215 for (i
= 0; i
< ARRAY_SIZE (aarch64_r_register_names
); i
++)
3216 valid_p
&= tdesc_numbered_register (feature_core
, tdesc_data
,
3217 AARCH64_X0_REGNUM
+ i
,
3218 aarch64_r_register_names
[i
]);
3220 num_regs
= AARCH64_X0_REGNUM
+ i
;
3222 /* Add the V registers. */
3223 if (feature_fpu
!= nullptr)
3225 if (feature_sve
!= nullptr)
3226 error (_("Program contains both fpu and SVE features."));
3228 /* Validate the description provides the mandatory V registers
3229 and allocate their numbers. */
3230 for (i
= 0; i
< ARRAY_SIZE (aarch64_v_register_names
); i
++)
3231 valid_p
&= tdesc_numbered_register (feature_fpu
, tdesc_data
,
3232 AARCH64_V0_REGNUM
+ i
,
3233 aarch64_v_register_names
[i
]);
3235 num_regs
= AARCH64_V0_REGNUM
+ i
;
3238 /* Add the SVE registers. */
3239 if (feature_sve
!= nullptr)
3241 /* Validate the description provides the mandatory SVE registers
3242 and allocate their numbers. */
3243 for (i
= 0; i
< ARRAY_SIZE (aarch64_sve_register_names
); i
++)
3244 valid_p
&= tdesc_numbered_register (feature_sve
, tdesc_data
,
3245 AARCH64_SVE_Z0_REGNUM
+ i
,
3246 aarch64_sve_register_names
[i
]);
3248 num_regs
= AARCH64_SVE_Z0_REGNUM
+ i
;
3249 num_pseudo_regs
+= 32; /* add the Vn register pseudos. */
3252 if (feature_fpu
!= nullptr || feature_sve
!= nullptr)
3254 num_pseudo_regs
+= 32; /* add the Qn scalar register pseudos */
3255 num_pseudo_regs
+= 32; /* add the Dn scalar register pseudos */
3256 num_pseudo_regs
+= 32; /* add the Sn scalar register pseudos */
3257 num_pseudo_regs
+= 32; /* add the Hn scalar register pseudos */
3258 num_pseudo_regs
+= 32; /* add the Bn scalar register pseudos */
3261 /* Add the pauth registers. */
3262 if (feature_pauth
!= NULL
)
3264 first_pauth_regnum
= num_regs
;
3265 pauth_ra_state_offset
= num_pseudo_regs
;
3266 /* Validate the descriptor provides the mandatory PAUTH registers and
3267 allocate their numbers. */
3268 for (i
= 0; i
< ARRAY_SIZE (aarch64_pauth_register_names
); i
++)
3269 valid_p
&= tdesc_numbered_register (feature_pauth
, tdesc_data
,
3270 first_pauth_regnum
+ i
,
3271 aarch64_pauth_register_names
[i
]);
3274 num_pseudo_regs
+= 1; /* Count RA_STATE pseudo register. */
3279 tdesc_data_cleanup (tdesc_data
);
3283 /* AArch64 code is always little-endian. */
3284 info
.byte_order_for_code
= BFD_ENDIAN_LITTLE
;
3286 struct gdbarch_tdep
*tdep
= XCNEW (struct gdbarch_tdep
);
3287 struct gdbarch
*gdbarch
= gdbarch_alloc (&info
, tdep
);
3289 /* This should be low enough for everything. */
3290 tdep
->lowest_pc
= 0x20;
3291 tdep
->jb_pc
= -1; /* Longjump support not enabled by default. */
3292 tdep
->jb_elt_size
= 8;
3294 tdep
->pauth_reg_base
= first_pauth_regnum
;
3295 tdep
->pauth_ra_state_regnum
= (feature_pauth
== NULL
) ? -1
3296 : pauth_ra_state_offset
+ num_regs
;
3298 set_gdbarch_push_dummy_call (gdbarch
, aarch64_push_dummy_call
);
3299 set_gdbarch_frame_align (gdbarch
, aarch64_frame_align
);
3301 /* Advance PC across function entry code. */
3302 set_gdbarch_skip_prologue (gdbarch
, aarch64_skip_prologue
);
3304 /* The stack grows downward. */
3305 set_gdbarch_inner_than (gdbarch
, core_addr_lessthan
);
3307 /* Breakpoint manipulation. */
3308 set_gdbarch_breakpoint_kind_from_pc (gdbarch
,
3309 aarch64_breakpoint::kind_from_pc
);
3310 set_gdbarch_sw_breakpoint_from_kind (gdbarch
,
3311 aarch64_breakpoint::bp_from_kind
);
3312 set_gdbarch_have_nonsteppable_watchpoint (gdbarch
, 1);
3313 set_gdbarch_software_single_step (gdbarch
, aarch64_software_single_step
);
3315 /* Information about registers, etc. */
3316 set_gdbarch_sp_regnum (gdbarch
, AARCH64_SP_REGNUM
);
3317 set_gdbarch_pc_regnum (gdbarch
, AARCH64_PC_REGNUM
);
3318 set_gdbarch_num_regs (gdbarch
, num_regs
);
3320 set_gdbarch_num_pseudo_regs (gdbarch
, num_pseudo_regs
);
3321 set_gdbarch_pseudo_register_read_value (gdbarch
, aarch64_pseudo_read_value
);
3322 set_gdbarch_pseudo_register_write (gdbarch
, aarch64_pseudo_write
);
3323 set_tdesc_pseudo_register_name (gdbarch
, aarch64_pseudo_register_name
);
3324 set_tdesc_pseudo_register_type (gdbarch
, aarch64_pseudo_register_type
);
3325 set_tdesc_pseudo_register_reggroup_p (gdbarch
,
3326 aarch64_pseudo_register_reggroup_p
);
3327 set_gdbarch_cannot_store_register (gdbarch
, aarch64_cannot_store_register
);
3330 set_gdbarch_short_bit (gdbarch
, 16);
3331 set_gdbarch_int_bit (gdbarch
, 32);
3332 set_gdbarch_float_bit (gdbarch
, 32);
3333 set_gdbarch_double_bit (gdbarch
, 64);
3334 set_gdbarch_long_double_bit (gdbarch
, 128);
3335 set_gdbarch_long_bit (gdbarch
, 64);
3336 set_gdbarch_long_long_bit (gdbarch
, 64);
3337 set_gdbarch_ptr_bit (gdbarch
, 64);
3338 set_gdbarch_char_signed (gdbarch
, 0);
3339 set_gdbarch_wchar_signed (gdbarch
, 0);
3340 set_gdbarch_float_format (gdbarch
, floatformats_ieee_single
);
3341 set_gdbarch_double_format (gdbarch
, floatformats_ieee_double
);
3342 set_gdbarch_long_double_format (gdbarch
, floatformats_ia64_quad
);
3343 set_gdbarch_type_align (gdbarch
, aarch64_type_align
);
3345 /* Internal <-> external register number maps. */
3346 set_gdbarch_dwarf2_reg_to_regnum (gdbarch
, aarch64_dwarf_reg_to_regnum
);
3348 /* Returning results. */
3349 set_gdbarch_return_value (gdbarch
, aarch64_return_value
);
3352 set_gdbarch_print_insn (gdbarch
, aarch64_gdb_print_insn
);
3354 /* Virtual tables. */
3355 set_gdbarch_vbit_in_delta (gdbarch
, 1);
3357 /* Register architecture. */
3358 aarch64_add_reggroups (gdbarch
);
3360 /* Hook in the ABI-specific overrides, if they have been registered. */
3361 info
.target_desc
= tdesc
;
3362 info
.tdesc_data
= tdesc_data
;
3363 gdbarch_init_osabi (info
, gdbarch
);
3365 dwarf2_frame_set_init_reg (gdbarch
, aarch64_dwarf2_frame_init_reg
);
3366 /* Register DWARF CFA vendor handler. */
3367 set_gdbarch_execute_dwarf_cfa_vendor_op (gdbarch
,
3368 aarch64_execute_dwarf_cfa_vendor_op
);
3370 /* Add some default predicates. */
3371 frame_unwind_append_unwinder (gdbarch
, &aarch64_stub_unwind
);
3372 dwarf2_append_unwinders (gdbarch
);
3373 frame_unwind_append_unwinder (gdbarch
, &aarch64_prologue_unwind
);
3375 frame_base_set_default (gdbarch
, &aarch64_normal_base
);
3377 /* Now we have tuned the configuration, set a few final things,
3378 based on what the OS ABI has told us. */
3380 if (tdep
->jb_pc
>= 0)
3381 set_gdbarch_get_longjmp_target (gdbarch
, aarch64_get_longjmp_target
);
3383 set_gdbarch_gen_return_address (gdbarch
, aarch64_gen_return_address
);
3385 tdesc_use_registers (gdbarch
, tdesc
, tdesc_data
);
3387 /* Add standard register aliases. */
3388 for (i
= 0; i
< ARRAY_SIZE (aarch64_register_aliases
); i
++)
3389 user_reg_add (gdbarch
, aarch64_register_aliases
[i
].name
,
3390 value_of_aarch64_user_reg
,
3391 &aarch64_register_aliases
[i
].regnum
);
3393 register_aarch64_ravenscar_ops (gdbarch
);
3399 aarch64_dump_tdep (struct gdbarch
*gdbarch
, struct ui_file
*file
)
3401 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
3406 fprintf_unfiltered (file
, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
3407 paddress (gdbarch
, tdep
->lowest_pc
));
3413 static void aarch64_process_record_test (void);
3418 _initialize_aarch64_tdep (void)
3420 gdbarch_register (bfd_arch_aarch64
, aarch64_gdbarch_init
,
3423 /* Debug this file's internals. */
3424 add_setshow_boolean_cmd ("aarch64", class_maintenance
, &aarch64_debug
, _("\
3425 Set AArch64 debugging."), _("\
3426 Show AArch64 debugging."), _("\
3427 When on, AArch64 specific debugging is enabled."),
3430 &setdebuglist
, &showdebuglist
);
3433 selftests::register_test ("aarch64-analyze-prologue",
3434 selftests::aarch64_analyze_prologue_test
);
3435 selftests::register_test ("aarch64-process-record",
3436 selftests::aarch64_process_record_test
);
3440 /* AArch64 process record-replay related structures, defines etc. */
/* Allocate and fill REGS with LENGTH register numbers taken from
   RECORD_BUF.  No-op when LENGTH is zero.  */
#define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
        do  \
          { \
            unsigned int reg_len = LENGTH; \
            if (reg_len) \
              { \
                REGS = XNEWVEC (uint32_t, reg_len); \
                memcpy(&REGS[0], &RECORD_BUF[0], sizeof(uint32_t)*LENGTH); \
              } \
          } \
        while (0)
/* Allocate and fill MEMS with LENGTH memory records taken from
   RECORD_BUF.  No-op when LENGTH is zero.  */
#define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
        do  \
          { \
            unsigned int mem_len = LENGTH; \
            if (mem_len) \
              { \
                MEMS =  XNEWVEC (struct aarch64_mem_r, mem_len);  \
                memcpy(&MEMS->len, &RECORD_BUF[0], \
                       sizeof(struct aarch64_mem_r) * LENGTH); \
              } \
          } \
          while (0)
3467 /* AArch64 record/replay structures and enumerations. */
/* One recorded memory write: how many bytes were written and where.  */
struct aarch64_mem_r
{
  uint64_t len;    /* Record length.  */
  uint64_t addr;   /* Memory address.  */
};
/* Outcome of decoding one instruction for record/replay.  */
enum aarch64_record_result
{
  AARCH64_RECORD_SUCCESS,
  AARCH64_RECORD_UNSUPPORTED,
  AARCH64_RECORD_UNKNOWN
};
3482 typedef struct insn_decode_record_t
3484 struct gdbarch
*gdbarch
;
3485 struct regcache
*regcache
;
3486 CORE_ADDR this_addr
; /* Address of insn to be recorded. */
3487 uint32_t aarch64_insn
; /* Insn to be recorded. */
3488 uint32_t mem_rec_count
; /* Count of memory records. */
3489 uint32_t reg_rec_count
; /* Count of register records. */
3490 uint32_t *aarch64_regs
; /* Registers to be recorded. */
3491 struct aarch64_mem_r
*aarch64_mems
; /* Memory locations to be recorded. */
3492 } insn_decode_record
;
3494 /* Record handler for data processing - register instructions. */
3497 aarch64_record_data_proc_reg (insn_decode_record
*aarch64_insn_r
)
3499 uint8_t reg_rd
, insn_bits24_27
, insn_bits21_23
;
3500 uint32_t record_buf
[4];
3502 reg_rd
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3503 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
3504 insn_bits21_23
= bits (aarch64_insn_r
->aarch64_insn
, 21, 23);
3506 if (!bit (aarch64_insn_r
->aarch64_insn
, 28))
3510 /* Logical (shifted register). */
3511 if (insn_bits24_27
== 0x0a)
3512 setflags
= (bits (aarch64_insn_r
->aarch64_insn
, 29, 30) == 0x03);
3514 else if (insn_bits24_27
== 0x0b)
3515 setflags
= bit (aarch64_insn_r
->aarch64_insn
, 29);
3517 return AARCH64_RECORD_UNKNOWN
;
3519 record_buf
[0] = reg_rd
;
3520 aarch64_insn_r
->reg_rec_count
= 1;
3522 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_CPSR_REGNUM
;
3526 if (insn_bits24_27
== 0x0b)
3528 /* Data-processing (3 source). */
3529 record_buf
[0] = reg_rd
;
3530 aarch64_insn_r
->reg_rec_count
= 1;
3532 else if (insn_bits24_27
== 0x0a)
3534 if (insn_bits21_23
== 0x00)
3536 /* Add/subtract (with carry). */
3537 record_buf
[0] = reg_rd
;
3538 aarch64_insn_r
->reg_rec_count
= 1;
3539 if (bit (aarch64_insn_r
->aarch64_insn
, 29))
3541 record_buf
[1] = AARCH64_CPSR_REGNUM
;
3542 aarch64_insn_r
->reg_rec_count
= 2;
3545 else if (insn_bits21_23
== 0x02)
3547 /* Conditional compare (register) and conditional compare
3548 (immediate) instructions. */
3549 record_buf
[0] = AARCH64_CPSR_REGNUM
;
3550 aarch64_insn_r
->reg_rec_count
= 1;
3552 else if (insn_bits21_23
== 0x04 || insn_bits21_23
== 0x06)
3554 /* CConditional select. */
3555 /* Data-processing (2 source). */
3556 /* Data-processing (1 source). */
3557 record_buf
[0] = reg_rd
;
3558 aarch64_insn_r
->reg_rec_count
= 1;
3561 return AARCH64_RECORD_UNKNOWN
;
3565 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3567 return AARCH64_RECORD_SUCCESS
;
3570 /* Record handler for data processing - immediate instructions. */
3573 aarch64_record_data_proc_imm (insn_decode_record
*aarch64_insn_r
)
3575 uint8_t reg_rd
, insn_bit23
, insn_bits24_27
, setflags
;
3576 uint32_t record_buf
[4];
3578 reg_rd
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3579 insn_bit23
= bit (aarch64_insn_r
->aarch64_insn
, 23);
3580 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
3582 if (insn_bits24_27
== 0x00 /* PC rel addressing. */
3583 || insn_bits24_27
== 0x03 /* Bitfield and Extract. */
3584 || (insn_bits24_27
== 0x02 && insn_bit23
)) /* Move wide (immediate). */
3586 record_buf
[0] = reg_rd
;
3587 aarch64_insn_r
->reg_rec_count
= 1;
3589 else if (insn_bits24_27
== 0x01)
3591 /* Add/Subtract (immediate). */
3592 setflags
= bit (aarch64_insn_r
->aarch64_insn
, 29);
3593 record_buf
[0] = reg_rd
;
3594 aarch64_insn_r
->reg_rec_count
= 1;
3596 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_CPSR_REGNUM
;
3598 else if (insn_bits24_27
== 0x02 && !insn_bit23
)
3600 /* Logical (immediate). */
3601 setflags
= bits (aarch64_insn_r
->aarch64_insn
, 29, 30) == 0x03;
3602 record_buf
[0] = reg_rd
;
3603 aarch64_insn_r
->reg_rec_count
= 1;
3605 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_CPSR_REGNUM
;
3608 return AARCH64_RECORD_UNKNOWN
;
3610 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3612 return AARCH64_RECORD_SUCCESS
;
3615 /* Record handler for branch, exception generation and system instructions. */
3618 aarch64_record_branch_except_sys (insn_decode_record
*aarch64_insn_r
)
3620 struct gdbarch_tdep
*tdep
= gdbarch_tdep (aarch64_insn_r
->gdbarch
);
3621 uint8_t insn_bits24_27
, insn_bits28_31
, insn_bits22_23
;
3622 uint32_t record_buf
[4];
3624 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
3625 insn_bits28_31
= bits (aarch64_insn_r
->aarch64_insn
, 28, 31);
3626 insn_bits22_23
= bits (aarch64_insn_r
->aarch64_insn
, 22, 23);
3628 if (insn_bits28_31
== 0x0d)
3630 /* Exception generation instructions. */
3631 if (insn_bits24_27
== 0x04)
3633 if (!bits (aarch64_insn_r
->aarch64_insn
, 2, 4)
3634 && !bits (aarch64_insn_r
->aarch64_insn
, 21, 23)
3635 && bits (aarch64_insn_r
->aarch64_insn
, 0, 1) == 0x01)
3637 ULONGEST svc_number
;
3639 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, 8,
3641 return tdep
->aarch64_syscall_record (aarch64_insn_r
->regcache
,
3645 return AARCH64_RECORD_UNSUPPORTED
;
3647 /* System instructions. */
3648 else if (insn_bits24_27
== 0x05 && insn_bits22_23
== 0x00)
3650 uint32_t reg_rt
, reg_crn
;
3652 reg_rt
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3653 reg_crn
= bits (aarch64_insn_r
->aarch64_insn
, 12, 15);
3655 /* Record rt in case of sysl and mrs instructions. */
3656 if (bit (aarch64_insn_r
->aarch64_insn
, 21))
3658 record_buf
[0] = reg_rt
;
3659 aarch64_insn_r
->reg_rec_count
= 1;
3661 /* Record cpsr for hint and msr(immediate) instructions. */
3662 else if (reg_crn
== 0x02 || reg_crn
== 0x04)
3664 record_buf
[0] = AARCH64_CPSR_REGNUM
;
3665 aarch64_insn_r
->reg_rec_count
= 1;
3668 /* Unconditional branch (register). */
3669 else if((insn_bits24_27
& 0x0e) == 0x06)
3671 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_PC_REGNUM
;
3672 if (bits (aarch64_insn_r
->aarch64_insn
, 21, 22) == 0x01)
3673 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_LR_REGNUM
;
3676 return AARCH64_RECORD_UNKNOWN
;
3678 /* Unconditional branch (immediate). */
3679 else if ((insn_bits28_31
& 0x07) == 0x01 && (insn_bits24_27
& 0x0c) == 0x04)
3681 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_PC_REGNUM
;
3682 if (bit (aarch64_insn_r
->aarch64_insn
, 31))
3683 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_LR_REGNUM
;
3686 /* Compare & branch (immediate), Test & branch (immediate) and
3687 Conditional branch (immediate). */
3688 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_PC_REGNUM
;
3690 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3692 return AARCH64_RECORD_SUCCESS
;
3695 /* Record handler for advanced SIMD load and store instructions. */
3698 aarch64_record_asimd_load_store (insn_decode_record
*aarch64_insn_r
)
3701 uint64_t addr_offset
= 0;
3702 uint32_t record_buf
[24];
3703 uint64_t record_buf_mem
[24];
3704 uint32_t reg_rn
, reg_rt
;
3705 uint32_t reg_index
= 0, mem_index
= 0;
3706 uint8_t opcode_bits
, size_bits
;
3708 reg_rt
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3709 reg_rn
= bits (aarch64_insn_r
->aarch64_insn
, 5, 9);
3710 size_bits
= bits (aarch64_insn_r
->aarch64_insn
, 10, 11);
3711 opcode_bits
= bits (aarch64_insn_r
->aarch64_insn
, 12, 15);
3712 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
, &address
);
3715 debug_printf ("Process record: Advanced SIMD load/store\n");
3717 /* Load/store single structure. */
3718 if (bit (aarch64_insn_r
->aarch64_insn
, 24))
3720 uint8_t sindex
, scale
, selem
, esize
, replicate
= 0;
3721 scale
= opcode_bits
>> 2;
3722 selem
= ((opcode_bits
& 0x02) |
3723 bit (aarch64_insn_r
->aarch64_insn
, 21)) + 1;
3727 if (size_bits
& 0x01)
3728 return AARCH64_RECORD_UNKNOWN
;
3731 if ((size_bits
>> 1) & 0x01)
3732 return AARCH64_RECORD_UNKNOWN
;
3733 if (size_bits
& 0x01)
3735 if (!((opcode_bits
>> 1) & 0x01))
3738 return AARCH64_RECORD_UNKNOWN
;
3742 if (bit (aarch64_insn_r
->aarch64_insn
, 22) && !(opcode_bits
& 0x01))
3749 return AARCH64_RECORD_UNKNOWN
;
3755 for (sindex
= 0; sindex
< selem
; sindex
++)
3757 record_buf
[reg_index
++] = reg_rt
+ AARCH64_V0_REGNUM
;
3758 reg_rt
= (reg_rt
+ 1) % 32;
3762 for (sindex
= 0; sindex
< selem
; sindex
++)
3764 if (bit (aarch64_insn_r
->aarch64_insn
, 22))
3765 record_buf
[reg_index
++] = reg_rt
+ AARCH64_V0_REGNUM
;
3768 record_buf_mem
[mem_index
++] = esize
/ 8;
3769 record_buf_mem
[mem_index
++] = address
+ addr_offset
;
3771 addr_offset
= addr_offset
+ (esize
/ 8);
3772 reg_rt
= (reg_rt
+ 1) % 32;
3776 /* Load/store multiple structure. */
3779 uint8_t selem
, esize
, rpt
, elements
;
3780 uint8_t eindex
, rindex
;
3782 esize
= 8 << size_bits
;
3783 if (bit (aarch64_insn_r
->aarch64_insn
, 30))
3784 elements
= 128 / esize
;
3786 elements
= 64 / esize
;
3788 switch (opcode_bits
)
3790 /*LD/ST4 (4 Registers). */
3795 /*LD/ST1 (4 Registers). */
3800 /*LD/ST3 (3 Registers). */
3805 /*LD/ST1 (3 Registers). */
3810 /*LD/ST1 (1 Register). */
3815 /*LD/ST2 (2 Registers). */
3820 /*LD/ST1 (2 Registers). */
3826 return AARCH64_RECORD_UNSUPPORTED
;
3829 for (rindex
= 0; rindex
< rpt
; rindex
++)
3830 for (eindex
= 0; eindex
< elements
; eindex
++)
3832 uint8_t reg_tt
, sindex
;
3833 reg_tt
= (reg_rt
+ rindex
) % 32;
3834 for (sindex
= 0; sindex
< selem
; sindex
++)
3836 if (bit (aarch64_insn_r
->aarch64_insn
, 22))
3837 record_buf
[reg_index
++] = reg_tt
+ AARCH64_V0_REGNUM
;
3840 record_buf_mem
[mem_index
++] = esize
/ 8;
3841 record_buf_mem
[mem_index
++] = address
+ addr_offset
;
3843 addr_offset
= addr_offset
+ (esize
/ 8);
3844 reg_tt
= (reg_tt
+ 1) % 32;
3849 if (bit (aarch64_insn_r
->aarch64_insn
, 23))
3850 record_buf
[reg_index
++] = reg_rn
;
3852 aarch64_insn_r
->reg_rec_count
= reg_index
;
3853 aarch64_insn_r
->mem_rec_count
= mem_index
/ 2;
3854 MEM_ALLOC (aarch64_insn_r
->aarch64_mems
, aarch64_insn_r
->mem_rec_count
,
3856 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3858 return AARCH64_RECORD_SUCCESS
;
3861 /* Record handler for load and store instructions. */
3864 aarch64_record_load_store (insn_decode_record
*aarch64_insn_r
)
3866 uint8_t insn_bits24_27
, insn_bits28_29
, insn_bits10_11
;
3867 uint8_t insn_bit23
, insn_bit21
;
3868 uint8_t opc
, size_bits
, ld_flag
, vector_flag
;
3869 uint32_t reg_rn
, reg_rt
, reg_rt2
;
3870 uint64_t datasize
, offset
;
3871 uint32_t record_buf
[8];
3872 uint64_t record_buf_mem
[8];
3875 insn_bits10_11
= bits (aarch64_insn_r
->aarch64_insn
, 10, 11);
3876 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
3877 insn_bits28_29
= bits (aarch64_insn_r
->aarch64_insn
, 28, 29);
3878 insn_bit21
= bit (aarch64_insn_r
->aarch64_insn
, 21);
3879 insn_bit23
= bit (aarch64_insn_r
->aarch64_insn
, 23);
3880 ld_flag
= bit (aarch64_insn_r
->aarch64_insn
, 22);
3881 vector_flag
= bit (aarch64_insn_r
->aarch64_insn
, 26);
3882 reg_rt
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3883 reg_rn
= bits (aarch64_insn_r
->aarch64_insn
, 5, 9);
3884 reg_rt2
= bits (aarch64_insn_r
->aarch64_insn
, 10, 14);
3885 size_bits
= bits (aarch64_insn_r
->aarch64_insn
, 30, 31);
3887 /* Load/store exclusive. */
3888 if (insn_bits24_27
== 0x08 && insn_bits28_29
== 0x00)
3891 debug_printf ("Process record: load/store exclusive\n");
3895 record_buf
[0] = reg_rt
;
3896 aarch64_insn_r
->reg_rec_count
= 1;
3899 record_buf
[1] = reg_rt2
;
3900 aarch64_insn_r
->reg_rec_count
= 2;
3906 datasize
= (8 << size_bits
) * 2;
3908 datasize
= (8 << size_bits
);
3909 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
3911 record_buf_mem
[0] = datasize
/ 8;
3912 record_buf_mem
[1] = address
;
3913 aarch64_insn_r
->mem_rec_count
= 1;
3916 /* Save register rs. */
3917 record_buf
[0] = bits (aarch64_insn_r
->aarch64_insn
, 16, 20);
3918 aarch64_insn_r
->reg_rec_count
= 1;
3922 /* Load register (literal) instructions decoding. */
3923 else if ((insn_bits24_27
& 0x0b) == 0x08 && insn_bits28_29
== 0x01)
3926 debug_printf ("Process record: load register (literal)\n");
3928 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
3930 record_buf
[0] = reg_rt
;
3931 aarch64_insn_r
->reg_rec_count
= 1;
3933 /* All types of load/store pair instructions decoding. */
3934 else if ((insn_bits24_27
& 0x0a) == 0x08 && insn_bits28_29
== 0x02)
3937 debug_printf ("Process record: load/store pair\n");
3943 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
3944 record_buf
[1] = reg_rt2
+ AARCH64_V0_REGNUM
;
3948 record_buf
[0] = reg_rt
;
3949 record_buf
[1] = reg_rt2
;
3951 aarch64_insn_r
->reg_rec_count
= 2;
3956 imm7_off
= bits (aarch64_insn_r
->aarch64_insn
, 15, 21);
3958 size_bits
= size_bits
>> 1;
3959 datasize
= 8 << (2 + size_bits
);
3960 offset
= (imm7_off
& 0x40) ? (~imm7_off
& 0x007f) + 1 : imm7_off
;
3961 offset
= offset
<< (2 + size_bits
);
3962 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
3964 if (!((insn_bits24_27
& 0x0b) == 0x08 && insn_bit23
))
3966 if (imm7_off
& 0x40)
3967 address
= address
- offset
;
3969 address
= address
+ offset
;
3972 record_buf_mem
[0] = datasize
/ 8;
3973 record_buf_mem
[1] = address
;
3974 record_buf_mem
[2] = datasize
/ 8;
3975 record_buf_mem
[3] = address
+ (datasize
/ 8);
3976 aarch64_insn_r
->mem_rec_count
= 2;
3978 if (bit (aarch64_insn_r
->aarch64_insn
, 23))
3979 record_buf
[aarch64_insn_r
->reg_rec_count
++] = reg_rn
;
3981 /* Load/store register (unsigned immediate) instructions. */
3982 else if ((insn_bits24_27
& 0x0b) == 0x09 && insn_bits28_29
== 0x03)
3984 opc
= bits (aarch64_insn_r
->aarch64_insn
, 22, 23);
3994 if (size_bits
== 0x3 && vector_flag
== 0x0 && opc
== 0x2)
3996 /* PRFM (immediate) */
3997 return AARCH64_RECORD_SUCCESS
;
3999 else if (size_bits
== 0x2 && vector_flag
== 0x0 && opc
== 0x2)
4001 /* LDRSW (immediate) */
4015 debug_printf ("Process record: load/store (unsigned immediate):"
4016 " size %x V %d opc %x\n", size_bits
, vector_flag
,
4022 offset
= bits (aarch64_insn_r
->aarch64_insn
, 10, 21);
4023 datasize
= 8 << size_bits
;
4024 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
4026 offset
= offset
<< size_bits
;
4027 address
= address
+ offset
;
4029 record_buf_mem
[0] = datasize
>> 3;
4030 record_buf_mem
[1] = address
;
4031 aarch64_insn_r
->mem_rec_count
= 1;
4036 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
4038 record_buf
[0] = reg_rt
;
4039 aarch64_insn_r
->reg_rec_count
= 1;
4042 /* Load/store register (register offset) instructions. */
4043 else if ((insn_bits24_27
& 0x0b) == 0x08 && insn_bits28_29
== 0x03
4044 && insn_bits10_11
== 0x02 && insn_bit21
)
4047 debug_printf ("Process record: load/store (register offset)\n");
4048 opc
= bits (aarch64_insn_r
->aarch64_insn
, 22, 23);
4055 if (size_bits
!= 0x03)
4058 return AARCH64_RECORD_UNKNOWN
;
4062 ULONGEST reg_rm_val
;
4064 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
,
4065 bits (aarch64_insn_r
->aarch64_insn
, 16, 20), ®_rm_val
);
4066 if (bit (aarch64_insn_r
->aarch64_insn
, 12))
4067 offset
= reg_rm_val
<< size_bits
;
4069 offset
= reg_rm_val
;
4070 datasize
= 8 << size_bits
;
4071 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
4073 address
= address
+ offset
;
4074 record_buf_mem
[0] = datasize
>> 3;
4075 record_buf_mem
[1] = address
;
4076 aarch64_insn_r
->mem_rec_count
= 1;
4081 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
4083 record_buf
[0] = reg_rt
;
4084 aarch64_insn_r
->reg_rec_count
= 1;
4087 /* Load/store register (immediate and unprivileged) instructions. */
4088 else if ((insn_bits24_27
& 0x0b) == 0x08 && insn_bits28_29
== 0x03
4093 debug_printf ("Process record: load/store "
4094 "(immediate and unprivileged)\n");
4096 opc
= bits (aarch64_insn_r
->aarch64_insn
, 22, 23);
4103 if (size_bits
!= 0x03)
4106 return AARCH64_RECORD_UNKNOWN
;
4111 imm9_off
= bits (aarch64_insn_r
->aarch64_insn
, 12, 20);
4112 offset
= (imm9_off
& 0x0100) ? (((~imm9_off
) & 0x01ff) + 1) : imm9_off
;
4113 datasize
= 8 << size_bits
;
4114 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
4116 if (insn_bits10_11
!= 0x01)
4118 if (imm9_off
& 0x0100)
4119 address
= address
- offset
;
4121 address
= address
+ offset
;
4123 record_buf_mem
[0] = datasize
>> 3;
4124 record_buf_mem
[1] = address
;
4125 aarch64_insn_r
->mem_rec_count
= 1;
4130 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
4132 record_buf
[0] = reg_rt
;
4133 aarch64_insn_r
->reg_rec_count
= 1;
4135 if (insn_bits10_11
== 0x01 || insn_bits10_11
== 0x03)
4136 record_buf
[aarch64_insn_r
->reg_rec_count
++] = reg_rn
;
4138 /* Advanced SIMD load/store instructions. */
4140 return aarch64_record_asimd_load_store (aarch64_insn_r
);
4142 MEM_ALLOC (aarch64_insn_r
->aarch64_mems
, aarch64_insn_r
->mem_rec_count
,
4144 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
4146 return AARCH64_RECORD_SUCCESS
;
4149 /* Record handler for data processing SIMD and floating point instructions. */
4152 aarch64_record_data_proc_simd_fp (insn_decode_record
*aarch64_insn_r
)
4154 uint8_t insn_bit21
, opcode
, rmode
, reg_rd
;
4155 uint8_t insn_bits24_27
, insn_bits28_31
, insn_bits10_11
, insn_bits12_15
;
4156 uint8_t insn_bits11_14
;
4157 uint32_t record_buf
[2];
4159 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
4160 insn_bits28_31
= bits (aarch64_insn_r
->aarch64_insn
, 28, 31);
4161 insn_bits10_11
= bits (aarch64_insn_r
->aarch64_insn
, 10, 11);
4162 insn_bits12_15
= bits (aarch64_insn_r
->aarch64_insn
, 12, 15);
4163 insn_bits11_14
= bits (aarch64_insn_r
->aarch64_insn
, 11, 14);
4164 opcode
= bits (aarch64_insn_r
->aarch64_insn
, 16, 18);
4165 rmode
= bits (aarch64_insn_r
->aarch64_insn
, 19, 20);
4166 reg_rd
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
4167 insn_bit21
= bit (aarch64_insn_r
->aarch64_insn
, 21);
4170 debug_printf ("Process record: data processing SIMD/FP: ");
4172 if ((insn_bits28_31
& 0x05) == 0x01 && insn_bits24_27
== 0x0e)
4174 /* Floating point - fixed point conversion instructions. */
4178 debug_printf ("FP - fixed point conversion");
4180 if ((opcode
>> 1) == 0x0 && rmode
== 0x03)
4181 record_buf
[0] = reg_rd
;
4183 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
4185 /* Floating point - conditional compare instructions. */
4186 else if (insn_bits10_11
== 0x01)
4189 debug_printf ("FP - conditional compare");
4191 record_buf
[0] = AARCH64_CPSR_REGNUM
;
4193 /* Floating point - data processing (2-source) and
4194 conditional select instructions. */
4195 else if (insn_bits10_11
== 0x02 || insn_bits10_11
== 0x03)
4198 debug_printf ("FP - DP (2-source)");
4200 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
4202 else if (insn_bits10_11
== 0x00)
4204 /* Floating point - immediate instructions. */
4205 if ((insn_bits12_15
& 0x01) == 0x01
4206 || (insn_bits12_15
& 0x07) == 0x04)
4209 debug_printf ("FP - immediate");
4210 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
4212 /* Floating point - compare instructions. */
4213 else if ((insn_bits12_15
& 0x03) == 0x02)
4216 debug_printf ("FP - immediate");
4217 record_buf
[0] = AARCH64_CPSR_REGNUM
;
4219 /* Floating point - integer conversions instructions. */
4220 else if (insn_bits12_15
== 0x00)
4222 /* Convert float to integer instruction. */
4223 if (!(opcode
>> 1) || ((opcode
>> 1) == 0x02 && !rmode
))
4226 debug_printf ("float to int conversion");
4228 record_buf
[0] = reg_rd
+ AARCH64_X0_REGNUM
;
4230 /* Convert integer to float instruction. */
4231 else if ((opcode
>> 1) == 0x01 && !rmode
)
4234 debug_printf ("int to float conversion");
4236 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
4238 /* Move float to integer instruction. */
4239 else if ((opcode
>> 1) == 0x03)
4242 debug_printf ("move float to int");
4244 if (!(opcode
& 0x01))
4245 record_buf
[0] = reg_rd
+ AARCH64_X0_REGNUM
;
4247 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
4250 return AARCH64_RECORD_UNKNOWN
;
4253 return AARCH64_RECORD_UNKNOWN
;
4256 return AARCH64_RECORD_UNKNOWN
;
4258 else if ((insn_bits28_31
& 0x09) == 0x00 && insn_bits24_27
== 0x0e)
4261 debug_printf ("SIMD copy");
4263 /* Advanced SIMD copy instructions. */
4264 if (!bits (aarch64_insn_r
->aarch64_insn
, 21, 23)
4265 && !bit (aarch64_insn_r
->aarch64_insn
, 15)
4266 && bit (aarch64_insn_r
->aarch64_insn
, 10))
4268 if (insn_bits11_14
== 0x05 || insn_bits11_14
== 0x07)
4269 record_buf
[0] = reg_rd
+ AARCH64_X0_REGNUM
;
4271 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
4274 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
4276 /* All remaining floating point or advanced SIMD instructions. */
4280 debug_printf ("all remain");
4282 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
4286 debug_printf ("\n");
4288 aarch64_insn_r
->reg_rec_count
++;
4289 gdb_assert (aarch64_insn_r
->reg_rec_count
== 1);
4290 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
4292 return AARCH64_RECORD_SUCCESS
;
4295 /* Decodes insns type and invokes its record handler. */
4298 aarch64_record_decode_insn_handler (insn_decode_record
*aarch64_insn_r
)
4300 uint32_t ins_bit25
, ins_bit26
, ins_bit27
, ins_bit28
;
4302 ins_bit25
= bit (aarch64_insn_r
->aarch64_insn
, 25);
4303 ins_bit26
= bit (aarch64_insn_r
->aarch64_insn
, 26);
4304 ins_bit27
= bit (aarch64_insn_r
->aarch64_insn
, 27);
4305 ins_bit28
= bit (aarch64_insn_r
->aarch64_insn
, 28);
4307 /* Data processing - immediate instructions. */
4308 if (!ins_bit26
&& !ins_bit27
&& ins_bit28
)
4309 return aarch64_record_data_proc_imm (aarch64_insn_r
);
4311 /* Branch, exception generation and system instructions. */
4312 if (ins_bit26
&& !ins_bit27
&& ins_bit28
)
4313 return aarch64_record_branch_except_sys (aarch64_insn_r
);
4315 /* Load and store instructions. */
4316 if (!ins_bit25
&& ins_bit27
)
4317 return aarch64_record_load_store (aarch64_insn_r
);
4319 /* Data processing - register instructions. */
4320 if (ins_bit25
&& !ins_bit26
&& ins_bit27
)
4321 return aarch64_record_data_proc_reg (aarch64_insn_r
);
4323 /* Data processing - SIMD and floating point instructions. */
4324 if (ins_bit25
&& ins_bit26
&& ins_bit27
)
4325 return aarch64_record_data_proc_simd_fp (aarch64_insn_r
);
4327 return AARCH64_RECORD_UNSUPPORTED
;
4330 /* Cleans up local record registers and memory allocations. */
4333 deallocate_reg_mem (insn_decode_record
*record
)
4335 xfree (record
->aarch64_regs
);
4336 xfree (record
->aarch64_mems
);
#if GDB_SELF_TEST
namespace selftests {

/* Verify that a PRFM instruction decodes successfully and records
   neither registers nor memory (prefetch has no architectural effect).  */

static void
aarch64_process_record_test (void)
{
  struct gdbarch_info info;
  uint32_t ret;

  gdbarch_info_init (&info);
  info.bfd_arch_info = bfd_scan_arch ("aarch64");

  struct gdbarch *gdbarch = gdbarch_find_by_info (info);
  SELF_CHECK (gdbarch != NULL);

  insn_decode_record aarch64_record;

  memset (&aarch64_record, 0, sizeof (insn_decode_record));
  aarch64_record.regcache = NULL;
  aarch64_record.this_addr = 0;
  aarch64_record.gdbarch = gdbarch;

  /* 20 00 80 f9	prfm	pldl1keep, [x1] */
  aarch64_record.aarch64_insn = 0xf9800020;
  ret = aarch64_record_decode_insn_handler (&aarch64_record);
  SELF_CHECK (ret == AARCH64_RECORD_SUCCESS);
  SELF_CHECK (aarch64_record.reg_rec_count == 0);
  SELF_CHECK (aarch64_record.mem_rec_count == 0);

  deallocate_reg_mem (&aarch64_record);
}

} // namespace selftests
#endif /* GDB_SELF_TEST */
4374 /* Parse the current instruction and record the values of the registers and
4375 memory that will be changed in current instruction to record_arch_list
4376 return -1 if something is wrong. */
4379 aarch64_process_record (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
4380 CORE_ADDR insn_addr
)
4382 uint32_t rec_no
= 0;
4383 uint8_t insn_size
= 4;
4385 gdb_byte buf
[insn_size
];
4386 insn_decode_record aarch64_record
;
4388 memset (&buf
[0], 0, insn_size
);
4389 memset (&aarch64_record
, 0, sizeof (insn_decode_record
));
4390 target_read_memory (insn_addr
, &buf
[0], insn_size
);
4391 aarch64_record
.aarch64_insn
4392 = (uint32_t) extract_unsigned_integer (&buf
[0],
4394 gdbarch_byte_order (gdbarch
));
4395 aarch64_record
.regcache
= regcache
;
4396 aarch64_record
.this_addr
= insn_addr
;
4397 aarch64_record
.gdbarch
= gdbarch
;
4399 ret
= aarch64_record_decode_insn_handler (&aarch64_record
);
4400 if (ret
== AARCH64_RECORD_UNSUPPORTED
)
4402 printf_unfiltered (_("Process record does not support instruction "
4403 "0x%0x at address %s.\n"),
4404 aarch64_record
.aarch64_insn
,
4405 paddress (gdbarch
, insn_addr
));
4411 /* Record registers. */
4412 record_full_arch_list_add_reg (aarch64_record
.regcache
,
4414 /* Always record register CPSR. */
4415 record_full_arch_list_add_reg (aarch64_record
.regcache
,
4416 AARCH64_CPSR_REGNUM
);
4417 if (aarch64_record
.aarch64_regs
)
4418 for (rec_no
= 0; rec_no
< aarch64_record
.reg_rec_count
; rec_no
++)
4419 if (record_full_arch_list_add_reg (aarch64_record
.regcache
,
4420 aarch64_record
.aarch64_regs
[rec_no
]))
4423 /* Record memories. */
4424 if (aarch64_record
.aarch64_mems
)
4425 for (rec_no
= 0; rec_no
< aarch64_record
.mem_rec_count
; rec_no
++)
4426 if (record_full_arch_list_add_mem
4427 ((CORE_ADDR
)aarch64_record
.aarch64_mems
[rec_no
].addr
,
4428 aarch64_record
.aarch64_mems
[rec_no
].len
))
4431 if (record_full_arch_list_add_end ())
4435 deallocate_reg_mem (&aarch64_record
);