/* Common target dependent code for GDB on AArch64 systems.

   Copyright (C) 2009-2018 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
29 #include "reggroups.h"
31 #include "arch-utils.h"
33 #include "frame-unwind.h"
34 #include "frame-base.h"
35 #include "trad-frame.h"
37 #include "dwarf2-frame.h"
39 #include "prologue-value.h"
40 #include "target-descriptions.h"
41 #include "user-regs.h"
48 #include "aarch64-tdep.h"
51 #include "elf/aarch64.h"
56 #include "record-full.h"
57 #include "arch/aarch64-insn.h"
59 #include "opcode/aarch64.h"
/* Bit-field extraction helpers used by the instruction decoder.

   submask (X)       -- mask covering bits [0, X] inclusive.
   bit (OBJ, ST)     -- extract the single bit ST of OBJ.
   bits (OBJ, ST, FN)-- extract the bit-field [ST, FN] of OBJ.

   Use 1ULL rather than 1L so that submask (31) (a full 32-bit field)
   does not shift a 'long' by its entire width, which is undefined
   behavior on hosts where 'long' is 32 bits.  */
#define submask(x) ((1ULL << ((x) + 1)) - 1)
#define bit(obj,st) (((obj) >> (st)) & 1)
#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
66 /* Pseudo register base numbers. */
67 #define AARCH64_Q0_REGNUM 0
68 #define AARCH64_D0_REGNUM (AARCH64_Q0_REGNUM + AARCH64_D_REGISTER_COUNT)
69 #define AARCH64_S0_REGNUM (AARCH64_D0_REGNUM + 32)
70 #define AARCH64_H0_REGNUM (AARCH64_S0_REGNUM + 32)
71 #define AARCH64_B0_REGNUM (AARCH64_H0_REGNUM + 32)
72 #define AARCH64_SVE_V0_REGNUM (AARCH64_B0_REGNUM + 32)
74 /* A Homogeneous Floating-Point or Short-Vector Aggregate may have at most
76 #define HA_MAX_NUM_FLDS 4
78 /* All possible aarch64 target descriptors. */
79 struct target_desc
*tdesc_aarch64_list
[AARCH64_MAX_SVE_VQ
+ 1];
81 /* The standard register names, and all the valid aliases for them. */
84 const char *const name
;
86 } aarch64_register_aliases
[] =
88 /* 64-bit register names. */
89 {"fp", AARCH64_FP_REGNUM
},
90 {"lr", AARCH64_LR_REGNUM
},
91 {"sp", AARCH64_SP_REGNUM
},
93 /* 32-bit register names. */
94 {"w0", AARCH64_X0_REGNUM
+ 0},
95 {"w1", AARCH64_X0_REGNUM
+ 1},
96 {"w2", AARCH64_X0_REGNUM
+ 2},
97 {"w3", AARCH64_X0_REGNUM
+ 3},
98 {"w4", AARCH64_X0_REGNUM
+ 4},
99 {"w5", AARCH64_X0_REGNUM
+ 5},
100 {"w6", AARCH64_X0_REGNUM
+ 6},
101 {"w7", AARCH64_X0_REGNUM
+ 7},
102 {"w8", AARCH64_X0_REGNUM
+ 8},
103 {"w9", AARCH64_X0_REGNUM
+ 9},
104 {"w10", AARCH64_X0_REGNUM
+ 10},
105 {"w11", AARCH64_X0_REGNUM
+ 11},
106 {"w12", AARCH64_X0_REGNUM
+ 12},
107 {"w13", AARCH64_X0_REGNUM
+ 13},
108 {"w14", AARCH64_X0_REGNUM
+ 14},
109 {"w15", AARCH64_X0_REGNUM
+ 15},
110 {"w16", AARCH64_X0_REGNUM
+ 16},
111 {"w17", AARCH64_X0_REGNUM
+ 17},
112 {"w18", AARCH64_X0_REGNUM
+ 18},
113 {"w19", AARCH64_X0_REGNUM
+ 19},
114 {"w20", AARCH64_X0_REGNUM
+ 20},
115 {"w21", AARCH64_X0_REGNUM
+ 21},
116 {"w22", AARCH64_X0_REGNUM
+ 22},
117 {"w23", AARCH64_X0_REGNUM
+ 23},
118 {"w24", AARCH64_X0_REGNUM
+ 24},
119 {"w25", AARCH64_X0_REGNUM
+ 25},
120 {"w26", AARCH64_X0_REGNUM
+ 26},
121 {"w27", AARCH64_X0_REGNUM
+ 27},
122 {"w28", AARCH64_X0_REGNUM
+ 28},
123 {"w29", AARCH64_X0_REGNUM
+ 29},
124 {"w30", AARCH64_X0_REGNUM
+ 30},
127 {"ip0", AARCH64_X0_REGNUM
+ 16},
128 {"ip1", AARCH64_X0_REGNUM
+ 17}
/* The required core 'R' registers.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM!  */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  "pc", "cpsr"
};
/* The FP/SIMD 'V' registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM!  */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  "fpsr",
  "fpcr"
};
/* The SVE 'Z' and 'P' registers.  */
static const char *const aarch64_sve_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_SVE_Z0_REGNUM!  */
  "z0", "z1", "z2", "z3",
  "z4", "z5", "z6", "z7",
  "z8", "z9", "z10", "z11",
  "z12", "z13", "z14", "z15",
  "z16", "z17", "z18", "z19",
  "z20", "z21", "z22", "z23",
  "z24", "z25", "z26", "z27",
  "z28", "z29", "z30", "z31",
  "fpsr", "fpcr",
  "p0", "p1", "p2", "p3",
  "p4", "p5", "p6", "p7",
  "p8", "p9", "p10", "p11",
  "p12", "p13", "p14", "p15",
  "ffr", "vg"
};
185 /* AArch64 prologue cache structure. */
186 struct aarch64_prologue_cache
188 /* The program counter at the start of the function. It is used to
189 identify this frame as a prologue frame. */
192 /* The program counter at the time this frame was created; i.e. where
193 this function was called from. It is used to identify this frame as a
197 /* The stack pointer at the time this frame was created; i.e. the
198 caller's stack pointer when this function was called. It is used
199 to identify this frame. */
202 /* Is the target available to read from? */
205 /* The frame base for this frame is just prev_sp - frame size.
206 FRAMESIZE is the distance from the frame pointer to the
207 initial stack pointer. */
210 /* The register used to hold the frame pointer for this frame. */
213 /* Saved register offsets. */
214 struct trad_frame_saved_reg
*saved_regs
;
218 show_aarch64_debug (struct ui_file
*file
, int from_tty
,
219 struct cmd_list_element
*c
, const char *value
)
221 fprintf_filtered (file
, _("AArch64 debugging is %s.\n"), value
);
226 /* Abstract instruction reader. */
228 class abstract_instruction_reader
231 /* Read in one instruction. */
232 virtual ULONGEST
read (CORE_ADDR memaddr
, int len
,
233 enum bfd_endian byte_order
) = 0;
236 /* Instruction reader from real target. */
238 class instruction_reader
: public abstract_instruction_reader
241 ULONGEST
read (CORE_ADDR memaddr
, int len
, enum bfd_endian byte_order
)
244 return read_code_unsigned_integer (memaddr
, len
, byte_order
);
250 /* Analyze a prologue, looking for a recognizable stack frame
251 and frame pointer. Scan until we encounter a store that could
252 clobber the stack frame unexpectedly, or an unknown instruction. */
255 aarch64_analyze_prologue (struct gdbarch
*gdbarch
,
256 CORE_ADDR start
, CORE_ADDR limit
,
257 struct aarch64_prologue_cache
*cache
,
258 abstract_instruction_reader
& reader
)
260 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
262 /* Track X registers and D registers in prologue. */
263 pv_t regs
[AARCH64_X_REGISTER_COUNT
+ AARCH64_D_REGISTER_COUNT
];
265 for (i
= 0; i
< AARCH64_X_REGISTER_COUNT
+ AARCH64_D_REGISTER_COUNT
; i
++)
266 regs
[i
] = pv_register (i
, 0);
267 pv_area
stack (AARCH64_SP_REGNUM
, gdbarch_addr_bit (gdbarch
));
269 for (; start
< limit
; start
+= 4)
274 insn
= reader
.read (start
, 4, byte_order_for_code
);
276 if (aarch64_decode_insn (insn
, &inst
, 1, NULL
) != 0)
279 if (inst
.opcode
->iclass
== addsub_imm
280 && (inst
.opcode
->op
== OP_ADD
281 || strcmp ("sub", inst
.opcode
->name
) == 0))
283 unsigned rd
= inst
.operands
[0].reg
.regno
;
284 unsigned rn
= inst
.operands
[1].reg
.regno
;
286 gdb_assert (aarch64_num_of_operands (inst
.opcode
) == 3);
287 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_Rd_SP
);
288 gdb_assert (inst
.operands
[1].type
== AARCH64_OPND_Rn_SP
);
289 gdb_assert (inst
.operands
[2].type
== AARCH64_OPND_AIMM
);
291 if (inst
.opcode
->op
== OP_ADD
)
293 regs
[rd
] = pv_add_constant (regs
[rn
],
294 inst
.operands
[2].imm
.value
);
298 regs
[rd
] = pv_add_constant (regs
[rn
],
299 -inst
.operands
[2].imm
.value
);
302 else if (inst
.opcode
->iclass
== pcreladdr
303 && inst
.operands
[1].type
== AARCH64_OPND_ADDR_ADRP
)
305 gdb_assert (aarch64_num_of_operands (inst
.opcode
) == 2);
306 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_Rd
);
308 regs
[inst
.operands
[0].reg
.regno
] = pv_unknown ();
310 else if (inst
.opcode
->iclass
== branch_imm
)
312 /* Stop analysis on branch. */
315 else if (inst
.opcode
->iclass
== condbranch
)
317 /* Stop analysis on branch. */
320 else if (inst
.opcode
->iclass
== branch_reg
)
322 /* Stop analysis on branch. */
325 else if (inst
.opcode
->iclass
== compbranch
)
327 /* Stop analysis on branch. */
330 else if (inst
.opcode
->op
== OP_MOVZ
)
332 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_Rd
);
333 regs
[inst
.operands
[0].reg
.regno
] = pv_unknown ();
335 else if (inst
.opcode
->iclass
== log_shift
336 && strcmp (inst
.opcode
->name
, "orr") == 0)
338 unsigned rd
= inst
.operands
[0].reg
.regno
;
339 unsigned rn
= inst
.operands
[1].reg
.regno
;
340 unsigned rm
= inst
.operands
[2].reg
.regno
;
342 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_Rd
);
343 gdb_assert (inst
.operands
[1].type
== AARCH64_OPND_Rn
);
344 gdb_assert (inst
.operands
[2].type
== AARCH64_OPND_Rm_SFT
);
346 if (inst
.operands
[2].shifter
.amount
== 0
347 && rn
== AARCH64_SP_REGNUM
)
353 debug_printf ("aarch64: prologue analysis gave up "
354 "addr=%s opcode=0x%x (orr x register)\n",
355 core_addr_to_string_nz (start
), insn
);
360 else if (inst
.opcode
->op
== OP_STUR
)
362 unsigned rt
= inst
.operands
[0].reg
.regno
;
363 unsigned rn
= inst
.operands
[1].addr
.base_regno
;
365 = (aarch64_get_qualifier_esize (inst
.operands
[0].qualifier
) == 8);
367 gdb_assert (aarch64_num_of_operands (inst
.opcode
) == 2);
368 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_Rt
);
369 gdb_assert (inst
.operands
[1].type
== AARCH64_OPND_ADDR_SIMM9
);
370 gdb_assert (!inst
.operands
[1].addr
.offset
.is_reg
);
372 stack
.store (pv_add_constant (regs
[rn
],
373 inst
.operands
[1].addr
.offset
.imm
),
374 is64
? 8 : 4, regs
[rt
]);
376 else if ((inst
.opcode
->iclass
== ldstpair_off
377 || (inst
.opcode
->iclass
== ldstpair_indexed
378 && inst
.operands
[2].addr
.preind
))
379 && strcmp ("stp", inst
.opcode
->name
) == 0)
381 /* STP with addressing mode Pre-indexed and Base register. */
384 unsigned rn
= inst
.operands
[2].addr
.base_regno
;
385 int32_t imm
= inst
.operands
[2].addr
.offset
.imm
;
387 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_Rt
388 || inst
.operands
[0].type
== AARCH64_OPND_Ft
);
389 gdb_assert (inst
.operands
[1].type
== AARCH64_OPND_Rt2
390 || inst
.operands
[1].type
== AARCH64_OPND_Ft2
);
391 gdb_assert (inst
.operands
[2].type
== AARCH64_OPND_ADDR_SIMM7
);
392 gdb_assert (!inst
.operands
[2].addr
.offset
.is_reg
);
394 /* If recording this store would invalidate the store area
395 (perhaps because rn is not known) then we should abandon
396 further prologue analysis. */
397 if (stack
.store_would_trash (pv_add_constant (regs
[rn
], imm
)))
400 if (stack
.store_would_trash (pv_add_constant (regs
[rn
], imm
+ 8)))
403 rt1
= inst
.operands
[0].reg
.regno
;
404 rt2
= inst
.operands
[1].reg
.regno
;
405 if (inst
.operands
[0].type
== AARCH64_OPND_Ft
)
407 /* Only bottom 64-bit of each V register (D register) need
409 gdb_assert (inst
.operands
[0].qualifier
== AARCH64_OPND_QLF_S_D
);
410 rt1
+= AARCH64_X_REGISTER_COUNT
;
411 rt2
+= AARCH64_X_REGISTER_COUNT
;
414 stack
.store (pv_add_constant (regs
[rn
], imm
), 8,
416 stack
.store (pv_add_constant (regs
[rn
], imm
+ 8), 8,
419 if (inst
.operands
[2].addr
.writeback
)
420 regs
[rn
] = pv_add_constant (regs
[rn
], imm
);
423 else if ((inst
.opcode
->iclass
== ldst_imm9
/* Signed immediate. */
424 || (inst
.opcode
->iclass
== ldst_pos
/* Unsigned immediate. */
425 && (inst
.opcode
->op
== OP_STR_POS
426 || inst
.opcode
->op
== OP_STRF_POS
)))
427 && inst
.operands
[1].addr
.base_regno
== AARCH64_SP_REGNUM
428 && strcmp ("str", inst
.opcode
->name
) == 0)
430 /* STR (immediate) */
431 unsigned int rt
= inst
.operands
[0].reg
.regno
;
432 int32_t imm
= inst
.operands
[1].addr
.offset
.imm
;
433 unsigned int rn
= inst
.operands
[1].addr
.base_regno
;
435 = (aarch64_get_qualifier_esize (inst
.operands
[0].qualifier
) == 8);
436 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_Rt
437 || inst
.operands
[0].type
== AARCH64_OPND_Ft
);
439 if (inst
.operands
[0].type
== AARCH64_OPND_Ft
)
441 /* Only bottom 64-bit of each V register (D register) need
443 gdb_assert (inst
.operands
[0].qualifier
== AARCH64_OPND_QLF_S_D
);
444 rt
+= AARCH64_X_REGISTER_COUNT
;
447 stack
.store (pv_add_constant (regs
[rn
], imm
),
448 is64
? 8 : 4, regs
[rt
]);
449 if (inst
.operands
[1].addr
.writeback
)
450 regs
[rn
] = pv_add_constant (regs
[rn
], imm
);
452 else if (inst
.opcode
->iclass
== testbranch
)
454 /* Stop analysis on branch. */
461 debug_printf ("aarch64: prologue analysis gave up addr=%s"
463 core_addr_to_string_nz (start
), insn
);
472 if (pv_is_register (regs
[AARCH64_FP_REGNUM
], AARCH64_SP_REGNUM
))
474 /* Frame pointer is fp. Frame size is constant. */
475 cache
->framereg
= AARCH64_FP_REGNUM
;
476 cache
->framesize
= -regs
[AARCH64_FP_REGNUM
].k
;
478 else if (pv_is_register (regs
[AARCH64_SP_REGNUM
], AARCH64_SP_REGNUM
))
480 /* Try the stack pointer. */
481 cache
->framesize
= -regs
[AARCH64_SP_REGNUM
].k
;
482 cache
->framereg
= AARCH64_SP_REGNUM
;
486 /* We're just out of luck. We don't know where the frame is. */
487 cache
->framereg
= -1;
488 cache
->framesize
= 0;
491 for (i
= 0; i
< AARCH64_X_REGISTER_COUNT
; i
++)
495 if (stack
.find_reg (gdbarch
, i
, &offset
))
496 cache
->saved_regs
[i
].addr
= offset
;
499 for (i
= 0; i
< AARCH64_D_REGISTER_COUNT
; i
++)
501 int regnum
= gdbarch_num_regs (gdbarch
);
504 if (stack
.find_reg (gdbarch
, i
+ AARCH64_X_REGISTER_COUNT
,
506 cache
->saved_regs
[i
+ regnum
+ AARCH64_D0_REGNUM
].addr
= offset
;
513 aarch64_analyze_prologue (struct gdbarch
*gdbarch
,
514 CORE_ADDR start
, CORE_ADDR limit
,
515 struct aarch64_prologue_cache
*cache
)
517 instruction_reader reader
;
519 return aarch64_analyze_prologue (gdbarch
, start
, limit
, cache
,
525 namespace selftests
{
527 /* Instruction reader from manually cooked instruction sequences. */
529 class instruction_reader_test
: public abstract_instruction_reader
532 template<size_t SIZE
>
533 explicit instruction_reader_test (const uint32_t (&insns
)[SIZE
])
534 : m_insns (insns
), m_insns_size (SIZE
)
537 ULONGEST
read (CORE_ADDR memaddr
, int len
, enum bfd_endian byte_order
)
540 SELF_CHECK (len
== 4);
541 SELF_CHECK (memaddr
% 4 == 0);
542 SELF_CHECK (memaddr
/ 4 < m_insns_size
);
544 return m_insns
[memaddr
/ 4];
548 const uint32_t *m_insns
;
553 aarch64_analyze_prologue_test (void)
555 struct gdbarch_info info
;
557 gdbarch_info_init (&info
);
558 info
.bfd_arch_info
= bfd_scan_arch ("aarch64");
560 struct gdbarch
*gdbarch
= gdbarch_find_by_info (info
);
561 SELF_CHECK (gdbarch
!= NULL
);
563 /* Test the simple prologue in which frame pointer is used. */
565 struct aarch64_prologue_cache cache
;
566 cache
.saved_regs
= trad_frame_alloc_saved_regs (gdbarch
);
568 static const uint32_t insns
[] = {
569 0xa9af7bfd, /* stp x29, x30, [sp,#-272]! */
570 0x910003fd, /* mov x29, sp */
571 0x97ffffe6, /* bl 0x400580 */
573 instruction_reader_test
reader (insns
);
575 CORE_ADDR end
= aarch64_analyze_prologue (gdbarch
, 0, 128, &cache
, reader
);
576 SELF_CHECK (end
== 4 * 2);
578 SELF_CHECK (cache
.framereg
== AARCH64_FP_REGNUM
);
579 SELF_CHECK (cache
.framesize
== 272);
581 for (int i
= 0; i
< AARCH64_X_REGISTER_COUNT
; i
++)
583 if (i
== AARCH64_FP_REGNUM
)
584 SELF_CHECK (cache
.saved_regs
[i
].addr
== -272);
585 else if (i
== AARCH64_LR_REGNUM
)
586 SELF_CHECK (cache
.saved_regs
[i
].addr
== -264);
588 SELF_CHECK (cache
.saved_regs
[i
].addr
== -1);
591 for (int i
= 0; i
< AARCH64_D_REGISTER_COUNT
; i
++)
593 int regnum
= gdbarch_num_regs (gdbarch
);
595 SELF_CHECK (cache
.saved_regs
[i
+ regnum
+ AARCH64_D0_REGNUM
].addr
600 /* Test a prologue in which STR is used and frame pointer is not
603 struct aarch64_prologue_cache cache
;
604 cache
.saved_regs
= trad_frame_alloc_saved_regs (gdbarch
);
606 static const uint32_t insns
[] = {
607 0xf81d0ff3, /* str x19, [sp, #-48]! */
608 0xb9002fe0, /* str w0, [sp, #44] */
609 0xf90013e1, /* str x1, [sp, #32]*/
610 0xfd000fe0, /* str d0, [sp, #24] */
611 0xaa0203f3, /* mov x19, x2 */
612 0xf94013e0, /* ldr x0, [sp, #32] */
614 instruction_reader_test
reader (insns
);
616 CORE_ADDR end
= aarch64_analyze_prologue (gdbarch
, 0, 128, &cache
, reader
);
618 SELF_CHECK (end
== 4 * 5);
620 SELF_CHECK (cache
.framereg
== AARCH64_SP_REGNUM
);
621 SELF_CHECK (cache
.framesize
== 48);
623 for (int i
= 0; i
< AARCH64_X_REGISTER_COUNT
; i
++)
626 SELF_CHECK (cache
.saved_regs
[i
].addr
== -16);
628 SELF_CHECK (cache
.saved_regs
[i
].addr
== -48);
630 SELF_CHECK (cache
.saved_regs
[i
].addr
== -1);
633 for (int i
= 0; i
< AARCH64_D_REGISTER_COUNT
; i
++)
635 int regnum
= gdbarch_num_regs (gdbarch
);
638 SELF_CHECK (cache
.saved_regs
[i
+ regnum
+ AARCH64_D0_REGNUM
].addr
641 SELF_CHECK (cache
.saved_regs
[i
+ regnum
+ AARCH64_D0_REGNUM
].addr
646 } // namespace selftests
647 #endif /* GDB_SELF_TEST */
649 /* Implement the "skip_prologue" gdbarch method. */
652 aarch64_skip_prologue (struct gdbarch
*gdbarch
, CORE_ADDR pc
)
654 CORE_ADDR func_addr
, limit_pc
;
656 /* See if we can determine the end of the prologue via the symbol
657 table. If so, then return either PC, or the PC after the
658 prologue, whichever is greater. */
659 if (find_pc_partial_function (pc
, NULL
, &func_addr
, NULL
))
661 CORE_ADDR post_prologue_pc
662 = skip_prologue_using_sal (gdbarch
, func_addr
);
664 if (post_prologue_pc
!= 0)
665 return std::max (pc
, post_prologue_pc
);
668 /* Can't determine prologue from the symbol table, need to examine
671 /* Find an upper limit on the function prologue using the debug
672 information. If the debug information could not be used to
673 provide that bound, then use an arbitrary large number as the
675 limit_pc
= skip_prologue_using_sal (gdbarch
, pc
);
677 limit_pc
= pc
+ 128; /* Magic. */
679 /* Try disassembling prologue. */
680 return aarch64_analyze_prologue (gdbarch
, pc
, limit_pc
, NULL
);
683 /* Scan the function prologue for THIS_FRAME and populate the prologue
687 aarch64_scan_prologue (struct frame_info
*this_frame
,
688 struct aarch64_prologue_cache
*cache
)
690 CORE_ADDR block_addr
= get_frame_address_in_block (this_frame
);
691 CORE_ADDR prologue_start
;
692 CORE_ADDR prologue_end
;
693 CORE_ADDR prev_pc
= get_frame_pc (this_frame
);
694 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
696 cache
->prev_pc
= prev_pc
;
698 /* Assume we do not find a frame. */
699 cache
->framereg
= -1;
700 cache
->framesize
= 0;
702 if (find_pc_partial_function (block_addr
, NULL
, &prologue_start
,
705 struct symtab_and_line sal
= find_pc_line (prologue_start
, 0);
709 /* No line info so use the current PC. */
710 prologue_end
= prev_pc
;
712 else if (sal
.end
< prologue_end
)
714 /* The next line begins after the function end. */
715 prologue_end
= sal
.end
;
718 prologue_end
= std::min (prologue_end
, prev_pc
);
719 aarch64_analyze_prologue (gdbarch
, prologue_start
, prologue_end
, cache
);
725 frame_loc
= get_frame_register_unsigned (this_frame
, AARCH64_FP_REGNUM
);
729 cache
->framereg
= AARCH64_FP_REGNUM
;
730 cache
->framesize
= 16;
731 cache
->saved_regs
[29].addr
= 0;
732 cache
->saved_regs
[30].addr
= 8;
736 /* Fill in *CACHE with information about the prologue of *THIS_FRAME. This
737 function may throw an exception if the inferior's registers or memory is
741 aarch64_make_prologue_cache_1 (struct frame_info
*this_frame
,
742 struct aarch64_prologue_cache
*cache
)
744 CORE_ADDR unwound_fp
;
747 aarch64_scan_prologue (this_frame
, cache
);
749 if (cache
->framereg
== -1)
752 unwound_fp
= get_frame_register_unsigned (this_frame
, cache
->framereg
);
756 cache
->prev_sp
= unwound_fp
+ cache
->framesize
;
758 /* Calculate actual addresses of saved registers using offsets
759 determined by aarch64_analyze_prologue. */
760 for (reg
= 0; reg
< gdbarch_num_regs (get_frame_arch (this_frame
)); reg
++)
761 if (trad_frame_addr_p (cache
->saved_regs
, reg
))
762 cache
->saved_regs
[reg
].addr
+= cache
->prev_sp
;
764 cache
->func
= get_frame_func (this_frame
);
766 cache
->available_p
= 1;
769 /* Allocate and fill in *THIS_CACHE with information about the prologue of
770 *THIS_FRAME. Do not do this is if *THIS_CACHE was already allocated.
771 Return a pointer to the current aarch64_prologue_cache in
774 static struct aarch64_prologue_cache
*
775 aarch64_make_prologue_cache (struct frame_info
*this_frame
, void **this_cache
)
777 struct aarch64_prologue_cache
*cache
;
779 if (*this_cache
!= NULL
)
780 return (struct aarch64_prologue_cache
*) *this_cache
;
782 cache
= FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache
);
783 cache
->saved_regs
= trad_frame_alloc_saved_regs (this_frame
);
788 aarch64_make_prologue_cache_1 (this_frame
, cache
);
790 CATCH (ex
, RETURN_MASK_ERROR
)
792 if (ex
.error
!= NOT_AVAILABLE_ERROR
)
793 throw_exception (ex
);
800 /* Implement the "stop_reason" frame_unwind method. */
802 static enum unwind_stop_reason
803 aarch64_prologue_frame_unwind_stop_reason (struct frame_info
*this_frame
,
806 struct aarch64_prologue_cache
*cache
807 = aarch64_make_prologue_cache (this_frame
, this_cache
);
809 if (!cache
->available_p
)
810 return UNWIND_UNAVAILABLE
;
812 /* Halt the backtrace at "_start". */
813 if (cache
->prev_pc
<= gdbarch_tdep (get_frame_arch (this_frame
))->lowest_pc
)
814 return UNWIND_OUTERMOST
;
816 /* We've hit a wall, stop. */
817 if (cache
->prev_sp
== 0)
818 return UNWIND_OUTERMOST
;
820 return UNWIND_NO_REASON
;
823 /* Our frame ID for a normal frame is the current function's starting
824 PC and the caller's SP when we were called. */
827 aarch64_prologue_this_id (struct frame_info
*this_frame
,
828 void **this_cache
, struct frame_id
*this_id
)
830 struct aarch64_prologue_cache
*cache
831 = aarch64_make_prologue_cache (this_frame
, this_cache
);
833 if (!cache
->available_p
)
834 *this_id
= frame_id_build_unavailable_stack (cache
->func
);
836 *this_id
= frame_id_build (cache
->prev_sp
, cache
->func
);
839 /* Implement the "prev_register" frame_unwind method. */
841 static struct value
*
842 aarch64_prologue_prev_register (struct frame_info
*this_frame
,
843 void **this_cache
, int prev_regnum
)
845 struct aarch64_prologue_cache
*cache
846 = aarch64_make_prologue_cache (this_frame
, this_cache
);
848 /* If we are asked to unwind the PC, then we need to return the LR
849 instead. The prologue may save PC, but it will point into this
850 frame's prologue, not the next frame's resume location. */
851 if (prev_regnum
== AARCH64_PC_REGNUM
)
855 lr
= frame_unwind_register_unsigned (this_frame
, AARCH64_LR_REGNUM
);
856 return frame_unwind_got_constant (this_frame
, prev_regnum
, lr
);
859 /* SP is generally not saved to the stack, but this frame is
860 identified by the next frame's stack pointer at the time of the
861 call. The value was already reconstructed into PREV_SP. */
874 if (prev_regnum
== AARCH64_SP_REGNUM
)
875 return frame_unwind_got_constant (this_frame
, prev_regnum
,
878 return trad_frame_get_prev_register (this_frame
, cache
->saved_regs
,
882 /* AArch64 prologue unwinder. */
883 struct frame_unwind aarch64_prologue_unwind
=
886 aarch64_prologue_frame_unwind_stop_reason
,
887 aarch64_prologue_this_id
,
888 aarch64_prologue_prev_register
,
890 default_frame_sniffer
893 /* Allocate and fill in *THIS_CACHE with information about the prologue of
894 *THIS_FRAME. Do not do this is if *THIS_CACHE was already allocated.
895 Return a pointer to the current aarch64_prologue_cache in
898 static struct aarch64_prologue_cache
*
899 aarch64_make_stub_cache (struct frame_info
*this_frame
, void **this_cache
)
901 struct aarch64_prologue_cache
*cache
;
903 if (*this_cache
!= NULL
)
904 return (struct aarch64_prologue_cache
*) *this_cache
;
906 cache
= FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache
);
907 cache
->saved_regs
= trad_frame_alloc_saved_regs (this_frame
);
912 cache
->prev_sp
= get_frame_register_unsigned (this_frame
,
914 cache
->prev_pc
= get_frame_pc (this_frame
);
915 cache
->available_p
= 1;
917 CATCH (ex
, RETURN_MASK_ERROR
)
919 if (ex
.error
!= NOT_AVAILABLE_ERROR
)
920 throw_exception (ex
);
927 /* Implement the "stop_reason" frame_unwind method. */
929 static enum unwind_stop_reason
930 aarch64_stub_frame_unwind_stop_reason (struct frame_info
*this_frame
,
933 struct aarch64_prologue_cache
*cache
934 = aarch64_make_stub_cache (this_frame
, this_cache
);
936 if (!cache
->available_p
)
937 return UNWIND_UNAVAILABLE
;
939 return UNWIND_NO_REASON
;
942 /* Our frame ID for a stub frame is the current SP and LR. */
945 aarch64_stub_this_id (struct frame_info
*this_frame
,
946 void **this_cache
, struct frame_id
*this_id
)
948 struct aarch64_prologue_cache
*cache
949 = aarch64_make_stub_cache (this_frame
, this_cache
);
951 if (cache
->available_p
)
952 *this_id
= frame_id_build (cache
->prev_sp
, cache
->prev_pc
);
954 *this_id
= frame_id_build_unavailable_stack (cache
->prev_pc
);
957 /* Implement the "sniffer" frame_unwind method. */
960 aarch64_stub_unwind_sniffer (const struct frame_unwind
*self
,
961 struct frame_info
*this_frame
,
962 void **this_prologue_cache
)
964 CORE_ADDR addr_in_block
;
967 addr_in_block
= get_frame_address_in_block (this_frame
);
968 if (in_plt_section (addr_in_block
)
969 /* We also use the stub winder if the target memory is unreadable
970 to avoid having the prologue unwinder trying to read it. */
971 || target_read_memory (get_frame_pc (this_frame
), dummy
, 4) != 0)
977 /* AArch64 stub unwinder. */
978 struct frame_unwind aarch64_stub_unwind
=
981 aarch64_stub_frame_unwind_stop_reason
,
982 aarch64_stub_this_id
,
983 aarch64_prologue_prev_register
,
985 aarch64_stub_unwind_sniffer
988 /* Return the frame base address of *THIS_FRAME. */
991 aarch64_normal_frame_base (struct frame_info
*this_frame
, void **this_cache
)
993 struct aarch64_prologue_cache
*cache
994 = aarch64_make_prologue_cache (this_frame
, this_cache
);
996 return cache
->prev_sp
- cache
->framesize
;
999 /* AArch64 default frame base information. */
1000 struct frame_base aarch64_normal_base
=
1002 &aarch64_prologue_unwind
,
1003 aarch64_normal_frame_base
,
1004 aarch64_normal_frame_base
,
1005 aarch64_normal_frame_base
1008 /* Assuming THIS_FRAME is a dummy, return the frame ID of that
1009 dummy frame. The frame ID's base needs to match the TOS value
1010 saved by save_dummy_frame_tos () and returned from
1011 aarch64_push_dummy_call, and the PC needs to match the dummy
1012 frame's breakpoint. */
1014 static struct frame_id
1015 aarch64_dummy_id (struct gdbarch
*gdbarch
, struct frame_info
*this_frame
)
1017 return frame_id_build (get_frame_register_unsigned (this_frame
,
1019 get_frame_pc (this_frame
));
1022 /* Implement the "unwind_pc" gdbarch method. */
1025 aarch64_unwind_pc (struct gdbarch
*gdbarch
, struct frame_info
*this_frame
)
1028 = frame_unwind_register_unsigned (this_frame
, AARCH64_PC_REGNUM
);
1033 /* Implement the "unwind_sp" gdbarch method. */
1036 aarch64_unwind_sp (struct gdbarch
*gdbarch
, struct frame_info
*this_frame
)
1038 return frame_unwind_register_unsigned (this_frame
, AARCH64_SP_REGNUM
);
1041 /* Return the value of the REGNUM register in the previous frame of
1044 static struct value
*
1045 aarch64_dwarf2_prev_register (struct frame_info
*this_frame
,
1046 void **this_cache
, int regnum
)
1052 case AARCH64_PC_REGNUM
:
1053 lr
= frame_unwind_register_unsigned (this_frame
, AARCH64_LR_REGNUM
);
1054 return frame_unwind_got_constant (this_frame
, regnum
, lr
);
1057 internal_error (__FILE__
, __LINE__
,
1058 _("Unexpected register %d"), regnum
);
1062 /* Implement the "init_reg" dwarf2_frame_ops method. */
1065 aarch64_dwarf2_frame_init_reg (struct gdbarch
*gdbarch
, int regnum
,
1066 struct dwarf2_frame_state_reg
*reg
,
1067 struct frame_info
*this_frame
)
1071 case AARCH64_PC_REGNUM
:
1072 reg
->how
= DWARF2_FRAME_REG_FN
;
1073 reg
->loc
.fn
= aarch64_dwarf2_prev_register
;
1075 case AARCH64_SP_REGNUM
:
1076 reg
->how
= DWARF2_FRAME_REG_CFA
;
1081 /* When arguments must be pushed onto the stack, they go on in reverse
1082 order. The code below implements a FILO (stack) to do this. */
1086 /* Value to pass on stack. It can be NULL if this item is for stack
1088 const gdb_byte
*data
;
1090 /* Size in bytes of value to pass on stack. */
1094 DEF_VEC_O (stack_item_t
);
1096 /* Return the alignment (in bytes) of the given type. */
1099 aarch64_type_align (struct type
*t
)
1105 t
= check_typedef (t
);
1106 switch (TYPE_CODE (t
))
1109 /* Should never happen. */
1110 internal_error (__FILE__
, __LINE__
, _("unknown type alignment"));
1114 case TYPE_CODE_ENUM
:
1118 case TYPE_CODE_RANGE
:
1119 case TYPE_CODE_BITSTRING
:
1121 case TYPE_CODE_RVALUE_REF
:
1122 case TYPE_CODE_CHAR
:
1123 case TYPE_CODE_BOOL
:
1124 return TYPE_LENGTH (t
);
1126 case TYPE_CODE_ARRAY
:
1127 if (TYPE_VECTOR (t
))
1129 /* Use the natural alignment for vector types (the same for
1130 scalar type), but the maximum alignment is 128-bit. */
1131 if (TYPE_LENGTH (t
) > 16)
1134 return TYPE_LENGTH (t
);
1137 return aarch64_type_align (TYPE_TARGET_TYPE (t
));
1138 case TYPE_CODE_COMPLEX
:
1139 return aarch64_type_align (TYPE_TARGET_TYPE (t
));
1141 case TYPE_CODE_STRUCT
:
1142 case TYPE_CODE_UNION
:
1144 for (n
= 0; n
< TYPE_NFIELDS (t
); n
++)
1146 falign
= aarch64_type_align (TYPE_FIELD_TYPE (t
, n
));
1154 /* Worker function for aapcs_is_vfp_call_or_return_candidate.
1156 Return the number of register required, or -1 on failure.
1158 When encountering a base element, if FUNDAMENTAL_TYPE is not set then set it
1159 to the element, else fail if the type of this element does not match the
1163 aapcs_is_vfp_call_or_return_candidate_1 (struct type
*type
,
1164 struct type
**fundamental_type
)
1166 if (type
== nullptr)
1169 switch (TYPE_CODE (type
))
1172 if (TYPE_LENGTH (type
) > 16)
1175 if (*fundamental_type
== nullptr)
1176 *fundamental_type
= type
;
1177 else if (TYPE_LENGTH (type
) != TYPE_LENGTH (*fundamental_type
)
1178 || TYPE_CODE (type
) != TYPE_CODE (*fundamental_type
))
1183 case TYPE_CODE_COMPLEX
:
1185 struct type
*target_type
= check_typedef (TYPE_TARGET_TYPE (type
));
1186 if (TYPE_LENGTH (target_type
) > 16)
1189 if (*fundamental_type
== nullptr)
1190 *fundamental_type
= target_type
;
1191 else if (TYPE_LENGTH (target_type
) != TYPE_LENGTH (*fundamental_type
)
1192 || TYPE_CODE (target_type
) != TYPE_CODE (*fundamental_type
))
1198 case TYPE_CODE_ARRAY
:
1200 if (TYPE_VECTOR (type
))
1202 if (TYPE_LENGTH (type
) != 8 && TYPE_LENGTH (type
) != 16)
1205 if (*fundamental_type
== nullptr)
1206 *fundamental_type
= type
;
1207 else if (TYPE_LENGTH (type
) != TYPE_LENGTH (*fundamental_type
)
1208 || TYPE_CODE (type
) != TYPE_CODE (*fundamental_type
))
1215 struct type
*target_type
= TYPE_TARGET_TYPE (type
);
1216 int count
= aapcs_is_vfp_call_or_return_candidate_1
1217 (target_type
, fundamental_type
);
1222 count
*= TYPE_LENGTH (type
);
1227 case TYPE_CODE_STRUCT
:
1228 case TYPE_CODE_UNION
:
1232 for (int i
= 0; i
< TYPE_NFIELDS (type
); i
++)
1234 struct type
*member
= check_typedef (TYPE_FIELD_TYPE (type
, i
));
1236 int sub_count
= aapcs_is_vfp_call_or_return_candidate_1
1237 (member
, fundamental_type
);
1238 if (sub_count
== -1)
1252 /* Return true if an argument, whose type is described by TYPE, can be passed or
1253 returned in simd/fp registers, providing enough parameter passing registers
1254 are available. This is as described in the AAPCS64.
1256 Upon successful return, *COUNT returns the number of needed registers,
1257 *FUNDAMENTAL_TYPE contains the type of those registers.
1259 Candidate as per the AAPCS64 5.4.2.C is either a:
1262 - HFA (Homogeneous Floating-point Aggregate, 4.3.5.1). A Composite type where
1263 all the members are floats and has at most 4 members.
1264 - HVA (Homogeneous Short-vector Aggregate, 4.3.5.2). A Composite type where
1265 all the members are short vectors and has at most 4 members.
1268 Note that HFAs and HVAs can include nested structures and arrays. */
1271 aapcs_is_vfp_call_or_return_candidate (struct type
*type
, int *count
,
1272 struct type
**fundamental_type
)
1274 if (type
== nullptr)
1277 *fundamental_type
= nullptr;
1279 int ag_count
= aapcs_is_vfp_call_or_return_candidate_1 (type
,
1282 if (ag_count
> 0 && ag_count
<= HA_MAX_NUM_FLDS
)
1291 /* AArch64 function call information structure. */
1292 struct aarch64_call_info
1294 /* the current argument number. */
1297 /* The next general purpose register number, equivalent to NGRN as
1298 described in the AArch64 Procedure Call Standard. */
1301 /* The next SIMD and floating point register number, equivalent to
1302 NSRN as described in the AArch64 Procedure Call Standard. */
1305 /* The next stacked argument address, equivalent to NSAA as
1306 described in the AArch64 Procedure Call Standard. */
1309 /* Stack item vector. */
1310 VEC(stack_item_t
) *si
;
1313 /* Pass a value in a sequence of consecutive X registers. The caller
1314 is responsbile for ensuring sufficient registers are available. */
1317 pass_in_x (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
1318 struct aarch64_call_info
*info
, struct type
*type
,
1321 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1322 int len
= TYPE_LENGTH (type
);
1323 enum type_code typecode
= TYPE_CODE (type
);
1324 int regnum
= AARCH64_X0_REGNUM
+ info
->ngrn
;
1325 const bfd_byte
*buf
= value_contents (arg
);
1331 int partial_len
= len
< X_REGISTER_SIZE
? len
: X_REGISTER_SIZE
;
1332 CORE_ADDR regval
= extract_unsigned_integer (buf
, partial_len
,
1336 /* Adjust sub-word struct/union args when big-endian. */
1337 if (byte_order
== BFD_ENDIAN_BIG
1338 && partial_len
< X_REGISTER_SIZE
1339 && (typecode
== TYPE_CODE_STRUCT
|| typecode
== TYPE_CODE_UNION
))
1340 regval
<<= ((X_REGISTER_SIZE
- partial_len
) * TARGET_CHAR_BIT
);
1344 debug_printf ("arg %d in %s = 0x%s\n", info
->argnum
,
1345 gdbarch_register_name (gdbarch
, regnum
),
1346 phex (regval
, X_REGISTER_SIZE
));
1348 regcache_cooked_write_unsigned (regcache
, regnum
, regval
);
1355 /* Attempt to marshall a value in a V register. Return 1 if
1356 successful, or 0 if insufficient registers are available. This
1357 function, unlike the equivalent pass_in_x() function does not
1358 handle arguments spread across multiple registers. */
1361 pass_in_v (struct gdbarch
*gdbarch
,
1362 struct regcache
*regcache
,
1363 struct aarch64_call_info
*info
,
1364 int len
, const bfd_byte
*buf
)
1368 int regnum
= AARCH64_V0_REGNUM
+ info
->nsrn
;
1369 gdb_byte reg
[V_REGISTER_SIZE
];
1374 memset (reg
, 0, sizeof (reg
));
1375 /* PCS C.1, the argument is allocated to the least significant
1376 bits of V register. */
1377 memcpy (reg
, buf
, len
);
1378 regcache
->cooked_write (regnum
, reg
);
1382 debug_printf ("arg %d in %s\n", info
->argnum
,
1383 gdbarch_register_name (gdbarch
, regnum
));
1391 /* Marshall an argument onto the stack. */
1394 pass_on_stack (struct aarch64_call_info
*info
, struct type
*type
,
1397 const bfd_byte
*buf
= value_contents (arg
);
1398 int len
= TYPE_LENGTH (type
);
1404 align
= aarch64_type_align (type
);
1406 /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
1407 Natural alignment of the argument's type. */
1408 align
= align_up (align
, 8);
1410 /* The AArch64 PCS requires at most doubleword alignment. */
1416 debug_printf ("arg %d len=%d @ sp + %d\n", info
->argnum
, len
,
1422 VEC_safe_push (stack_item_t
, info
->si
, &item
);
1425 if (info
->nsaa
& (align
- 1))
1427 /* Push stack alignment padding. */
1428 int pad
= align
- (info
->nsaa
& (align
- 1));
1433 VEC_safe_push (stack_item_t
, info
->si
, &item
);
1438 /* Marshall an argument into a sequence of one or more consecutive X
1439 registers or, if insufficient X registers are available then onto
1443 pass_in_x_or_stack (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
1444 struct aarch64_call_info
*info
, struct type
*type
,
1447 int len
= TYPE_LENGTH (type
);
1448 int nregs
= (len
+ X_REGISTER_SIZE
- 1) / X_REGISTER_SIZE
;
1450 /* PCS C.13 - Pass in registers if we have enough spare */
1451 if (info
->ngrn
+ nregs
<= 8)
1453 pass_in_x (gdbarch
, regcache
, info
, type
, arg
);
1454 info
->ngrn
+= nregs
;
1459 pass_on_stack (info
, type
, arg
);
1463 /* Pass a value, which is of type arg_type, in a V register. Assumes value is a
1464 aapcs_is_vfp_call_or_return_candidate and there are enough spare V
1465 registers. A return value of false is an error state as the value will have
1466 been partially passed to the stack. */
1468 pass_in_v_vfp_candidate (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
1469 struct aarch64_call_info
*info
, struct type
*arg_type
,
1472 switch (TYPE_CODE (arg_type
))
1475 return pass_in_v (gdbarch
, regcache
, info
, TYPE_LENGTH (arg_type
),
1476 value_contents (arg
));
1479 case TYPE_CODE_COMPLEX
:
1481 const bfd_byte
*buf
= value_contents (arg
);
1482 struct type
*target_type
= check_typedef (TYPE_TARGET_TYPE (arg_type
));
1484 if (!pass_in_v (gdbarch
, regcache
, info
, TYPE_LENGTH (target_type
),
1488 return pass_in_v (gdbarch
, regcache
, info
, TYPE_LENGTH (target_type
),
1489 buf
+ TYPE_LENGTH (target_type
));
1492 case TYPE_CODE_ARRAY
:
1493 if (TYPE_VECTOR (arg_type
))
1494 return pass_in_v (gdbarch
, regcache
, info
, TYPE_LENGTH (arg_type
),
1495 value_contents (arg
));
1498 case TYPE_CODE_STRUCT
:
1499 case TYPE_CODE_UNION
:
1500 for (int i
= 0; i
< TYPE_NFIELDS (arg_type
); i
++)
1502 struct value
*field
= value_primitive_field (arg
, 0, i
, arg_type
);
1503 struct type
*field_type
= check_typedef (value_type (field
));
1505 if (!pass_in_v_vfp_candidate (gdbarch
, regcache
, info
, field_type
,
1516 /* Implement the "push_dummy_call" gdbarch method. */
1519 aarch64_push_dummy_call (struct gdbarch
*gdbarch
, struct value
*function
,
1520 struct regcache
*regcache
, CORE_ADDR bp_addr
,
1522 struct value
**args
, CORE_ADDR sp
, int struct_return
,
1523 CORE_ADDR struct_addr
)
1526 struct aarch64_call_info info
;
1527 struct type
*func_type
;
1528 struct type
*return_type
;
1529 int lang_struct_return
;
1531 memset (&info
, 0, sizeof (info
));
1533 /* We need to know what the type of the called function is in order
1534 to determine the number of named/anonymous arguments for the
1535 actual argument placement, and the return type in order to handle
1536 return value correctly.
1538 The generic code above us views the decision of return in memory
1539 or return in registers as a two stage processes. The language
1540 handler is consulted first and may decide to return in memory (eg
1541 class with copy constructor returned by value), this will cause
1542 the generic code to allocate space AND insert an initial leading
1545 If the language code does not decide to pass in memory then the
1546 target code is consulted.
1548 If the language code decides to pass in memory we want to move
1549 the pointer inserted as the initial argument from the argument
1550 list and into X8, the conventional AArch64 struct return pointer
1553 This is slightly awkward, ideally the flag "lang_struct_return"
1554 would be passed to the targets implementation of push_dummy_call.
1555 Rather that change the target interface we call the language code
1556 directly ourselves. */
1558 func_type
= check_typedef (value_type (function
));
1560 /* Dereference function pointer types. */
1561 if (TYPE_CODE (func_type
) == TYPE_CODE_PTR
)
1562 func_type
= TYPE_TARGET_TYPE (func_type
);
1564 gdb_assert (TYPE_CODE (func_type
) == TYPE_CODE_FUNC
1565 || TYPE_CODE (func_type
) == TYPE_CODE_METHOD
);
1567 /* If language_pass_by_reference () returned true we will have been
1568 given an additional initial argument, a hidden pointer to the
1569 return slot in memory. */
1570 return_type
= TYPE_TARGET_TYPE (func_type
);
1571 lang_struct_return
= language_pass_by_reference (return_type
);
1573 /* Set the return address. For the AArch64, the return breakpoint
1574 is always at BP_ADDR. */
1575 regcache_cooked_write_unsigned (regcache
, AARCH64_LR_REGNUM
, bp_addr
);
1577 /* If we were given an initial argument for the return slot because
1578 lang_struct_return was true, lose it. */
1579 if (lang_struct_return
)
1585 /* The struct_return pointer occupies X8. */
1586 if (struct_return
|| lang_struct_return
)
1590 debug_printf ("struct return in %s = 0x%s\n",
1591 gdbarch_register_name (gdbarch
,
1592 AARCH64_STRUCT_RETURN_REGNUM
),
1593 paddress (gdbarch
, struct_addr
));
1595 regcache_cooked_write_unsigned (regcache
, AARCH64_STRUCT_RETURN_REGNUM
,
1599 for (argnum
= 0; argnum
< nargs
; argnum
++)
1601 struct value
*arg
= args
[argnum
];
1602 struct type
*arg_type
, *fundamental_type
;
1605 arg_type
= check_typedef (value_type (arg
));
1606 len
= TYPE_LENGTH (arg_type
);
1608 /* If arg can be passed in v registers as per the AAPCS64, then do so if
1609 if there are enough spare registers. */
1610 if (aapcs_is_vfp_call_or_return_candidate (arg_type
, &elements
,
1613 if (info
.nsrn
+ elements
<= 8)
1615 /* We know that we have sufficient registers available therefore
1616 this will never need to fallback to the stack. */
1617 if (!pass_in_v_vfp_candidate (gdbarch
, regcache
, &info
, arg_type
,
1619 gdb_assert_not_reached ("Failed to push args");
1624 pass_on_stack (&info
, arg_type
, arg
);
1629 switch (TYPE_CODE (arg_type
))
1632 case TYPE_CODE_BOOL
:
1633 case TYPE_CODE_CHAR
:
1634 case TYPE_CODE_RANGE
:
1635 case TYPE_CODE_ENUM
:
1638 /* Promote to 32 bit integer. */
1639 if (TYPE_UNSIGNED (arg_type
))
1640 arg_type
= builtin_type (gdbarch
)->builtin_uint32
;
1642 arg_type
= builtin_type (gdbarch
)->builtin_int32
;
1643 arg
= value_cast (arg_type
, arg
);
1645 pass_in_x_or_stack (gdbarch
, regcache
, &info
, arg_type
, arg
);
1648 case TYPE_CODE_STRUCT
:
1649 case TYPE_CODE_ARRAY
:
1650 case TYPE_CODE_UNION
:
1653 /* PCS B.7 Aggregates larger than 16 bytes are passed by
1654 invisible reference. */
1656 /* Allocate aligned storage. */
1657 sp
= align_down (sp
- len
, 16);
1659 /* Write the real data into the stack. */
1660 write_memory (sp
, value_contents (arg
), len
);
1662 /* Construct the indirection. */
1663 arg_type
= lookup_pointer_type (arg_type
);
1664 arg
= value_from_pointer (arg_type
, sp
);
1665 pass_in_x_or_stack (gdbarch
, regcache
, &info
, arg_type
, arg
);
1668 /* PCS C.15 / C.18 multiple values pass. */
1669 pass_in_x_or_stack (gdbarch
, regcache
, &info
, arg_type
, arg
);
1673 pass_in_x_or_stack (gdbarch
, regcache
, &info
, arg_type
, arg
);
1678 /* Make sure stack retains 16 byte alignment. */
1680 sp
-= 16 - (info
.nsaa
& 15);
1682 while (!VEC_empty (stack_item_t
, info
.si
))
1684 stack_item_t
*si
= VEC_last (stack_item_t
, info
.si
);
1687 if (si
->data
!= NULL
)
1688 write_memory (sp
, si
->data
, si
->len
);
1689 VEC_pop (stack_item_t
, info
.si
);
1692 VEC_free (stack_item_t
, info
.si
);
1694 /* Finally, update the SP register. */
1695 regcache_cooked_write_unsigned (regcache
, AARCH64_SP_REGNUM
, sp
);
1700 /* Implement the "frame_align" gdbarch method. */
1703 aarch64_frame_align (struct gdbarch
*gdbarch
, CORE_ADDR sp
)
1705 /* Align the stack to sixteen bytes. */
1706 return sp
& ~(CORE_ADDR
) 15;
1709 /* Return the type for an AdvSISD Q register. */
1711 static struct type
*
1712 aarch64_vnq_type (struct gdbarch
*gdbarch
)
1714 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1716 if (tdep
->vnq_type
== NULL
)
1721 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnq",
1724 elem
= builtin_type (gdbarch
)->builtin_uint128
;
1725 append_composite_type_field (t
, "u", elem
);
1727 elem
= builtin_type (gdbarch
)->builtin_int128
;
1728 append_composite_type_field (t
, "s", elem
);
1733 return tdep
->vnq_type
;
1736 /* Return the type for an AdvSISD D register. */
1738 static struct type
*
1739 aarch64_vnd_type (struct gdbarch
*gdbarch
)
1741 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1743 if (tdep
->vnd_type
== NULL
)
1748 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnd",
1751 elem
= builtin_type (gdbarch
)->builtin_double
;
1752 append_composite_type_field (t
, "f", elem
);
1754 elem
= builtin_type (gdbarch
)->builtin_uint64
;
1755 append_composite_type_field (t
, "u", elem
);
1757 elem
= builtin_type (gdbarch
)->builtin_int64
;
1758 append_composite_type_field (t
, "s", elem
);
1763 return tdep
->vnd_type
;
1766 /* Return the type for an AdvSISD S register. */
1768 static struct type
*
1769 aarch64_vns_type (struct gdbarch
*gdbarch
)
1771 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1773 if (tdep
->vns_type
== NULL
)
1778 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vns",
1781 elem
= builtin_type (gdbarch
)->builtin_float
;
1782 append_composite_type_field (t
, "f", elem
);
1784 elem
= builtin_type (gdbarch
)->builtin_uint32
;
1785 append_composite_type_field (t
, "u", elem
);
1787 elem
= builtin_type (gdbarch
)->builtin_int32
;
1788 append_composite_type_field (t
, "s", elem
);
1793 return tdep
->vns_type
;
1796 /* Return the type for an AdvSISD H register. */
1798 static struct type
*
1799 aarch64_vnh_type (struct gdbarch
*gdbarch
)
1801 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1803 if (tdep
->vnh_type
== NULL
)
1808 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnh",
1811 elem
= builtin_type (gdbarch
)->builtin_uint16
;
1812 append_composite_type_field (t
, "u", elem
);
1814 elem
= builtin_type (gdbarch
)->builtin_int16
;
1815 append_composite_type_field (t
, "s", elem
);
1820 return tdep
->vnh_type
;
1823 /* Return the type for an AdvSISD B register. */
1825 static struct type
*
1826 aarch64_vnb_type (struct gdbarch
*gdbarch
)
1828 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1830 if (tdep
->vnb_type
== NULL
)
1835 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnb",
1838 elem
= builtin_type (gdbarch
)->builtin_uint8
;
1839 append_composite_type_field (t
, "u", elem
);
1841 elem
= builtin_type (gdbarch
)->builtin_int8
;
1842 append_composite_type_field (t
, "s", elem
);
1847 return tdep
->vnb_type
;
1850 /* Return the type for an AdvSISD V register. */
1852 static struct type
*
1853 aarch64_vnv_type (struct gdbarch
*gdbarch
)
1855 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1857 if (tdep
->vnv_type
== NULL
)
1859 struct type
*t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnv",
1862 append_composite_type_field (t
, "d", aarch64_vnd_type (gdbarch
));
1863 append_composite_type_field (t
, "s", aarch64_vns_type (gdbarch
));
1864 append_composite_type_field (t
, "h", aarch64_vnh_type (gdbarch
));
1865 append_composite_type_field (t
, "b", aarch64_vnb_type (gdbarch
));
1866 append_composite_type_field (t
, "q", aarch64_vnq_type (gdbarch
));
1871 return tdep
->vnv_type
;
1874 /* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
1877 aarch64_dwarf_reg_to_regnum (struct gdbarch
*gdbarch
, int reg
)
1879 if (reg
>= AARCH64_DWARF_X0
&& reg
<= AARCH64_DWARF_X0
+ 30)
1880 return AARCH64_X0_REGNUM
+ reg
- AARCH64_DWARF_X0
;
1882 if (reg
== AARCH64_DWARF_SP
)
1883 return AARCH64_SP_REGNUM
;
1885 if (reg
>= AARCH64_DWARF_V0
&& reg
<= AARCH64_DWARF_V0
+ 31)
1886 return AARCH64_V0_REGNUM
+ reg
- AARCH64_DWARF_V0
;
1888 if (reg
== AARCH64_DWARF_SVE_VG
)
1889 return AARCH64_SVE_VG_REGNUM
;
1891 if (reg
== AARCH64_DWARF_SVE_FFR
)
1892 return AARCH64_SVE_FFR_REGNUM
;
1894 if (reg
>= AARCH64_DWARF_SVE_P0
&& reg
<= AARCH64_DWARF_SVE_P0
+ 15)
1895 return AARCH64_SVE_P0_REGNUM
+ reg
- AARCH64_DWARF_SVE_P0
;
1897 if (reg
>= AARCH64_DWARF_SVE_Z0
&& reg
<= AARCH64_DWARF_SVE_Z0
+ 15)
1898 return AARCH64_SVE_Z0_REGNUM
+ reg
- AARCH64_DWARF_SVE_Z0
;
1903 /* Implement the "print_insn" gdbarch method. */
1906 aarch64_gdb_print_insn (bfd_vma memaddr
, disassemble_info
*info
)
1908 info
->symbols
= NULL
;
1909 return default_print_insn (memaddr
, info
);
1912 /* AArch64 BRK software debug mode instruction.
1913 Note that AArch64 code is always little-endian.
1914 1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000. */
1915 constexpr gdb_byte aarch64_default_breakpoint
[] = {0x00, 0x00, 0x20, 0xd4};
1917 typedef BP_MANIPULATION (aarch64_default_breakpoint
) aarch64_breakpoint
;
1919 /* Extract from an array REGS containing the (raw) register state a
1920 function return value of type TYPE, and copy that, in virtual
1921 format, into VALBUF. */
1924 aarch64_extract_return_value (struct type
*type
, struct regcache
*regs
,
1927 struct gdbarch
*gdbarch
= regs
->arch ();
1928 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1930 struct type
*fundamental_type
;
1932 if (aapcs_is_vfp_call_or_return_candidate (type
, &elements
,
1935 int len
= TYPE_LENGTH (fundamental_type
);
1937 for (int i
= 0; i
< elements
; i
++)
1939 int regno
= AARCH64_V0_REGNUM
+ i
;
1940 bfd_byte buf
[V_REGISTER_SIZE
];
1944 debug_printf ("read HFA or HVA return value element %d from %s\n",
1946 gdbarch_register_name (gdbarch
, regno
));
1948 regs
->cooked_read (regno
, buf
);
1950 memcpy (valbuf
, buf
, len
);
1954 else if (TYPE_CODE (type
) == TYPE_CODE_INT
1955 || TYPE_CODE (type
) == TYPE_CODE_CHAR
1956 || TYPE_CODE (type
) == TYPE_CODE_BOOL
1957 || TYPE_CODE (type
) == TYPE_CODE_PTR
1958 || TYPE_IS_REFERENCE (type
)
1959 || TYPE_CODE (type
) == TYPE_CODE_ENUM
)
1961 /* If the the type is a plain integer, then the access is
1962 straight-forward. Otherwise we have to play around a bit
1964 int len
= TYPE_LENGTH (type
);
1965 int regno
= AARCH64_X0_REGNUM
;
1970 /* By using store_unsigned_integer we avoid having to do
1971 anything special for small big-endian values. */
1972 regcache_cooked_read_unsigned (regs
, regno
++, &tmp
);
1973 store_unsigned_integer (valbuf
,
1974 (len
> X_REGISTER_SIZE
1975 ? X_REGISTER_SIZE
: len
), byte_order
, tmp
);
1976 len
-= X_REGISTER_SIZE
;
1977 valbuf
+= X_REGISTER_SIZE
;
1982 /* For a structure or union the behaviour is as if the value had
1983 been stored to word-aligned memory and then loaded into
1984 registers with 64-bit load instruction(s). */
1985 int len
= TYPE_LENGTH (type
);
1986 int regno
= AARCH64_X0_REGNUM
;
1987 bfd_byte buf
[X_REGISTER_SIZE
];
1991 regs
->cooked_read (regno
++, buf
);
1992 memcpy (valbuf
, buf
, len
> X_REGISTER_SIZE
? X_REGISTER_SIZE
: len
);
1993 len
-= X_REGISTER_SIZE
;
1994 valbuf
+= X_REGISTER_SIZE
;
2000 /* Will a function return an aggregate type in memory or in a
2001 register? Return 0 if an aggregate type can be returned in a
2002 register, 1 if it must be returned in memory. */
2005 aarch64_return_in_memory (struct gdbarch
*gdbarch
, struct type
*type
)
2007 type
= check_typedef (type
);
2009 struct type
*fundamental_type
;
2011 if (aapcs_is_vfp_call_or_return_candidate (type
, &elements
,
2014 /* v0-v7 are used to return values and one register is allocated
2015 for one member. However, HFA or HVA has at most four members. */
2019 if (TYPE_LENGTH (type
) > 16)
2021 /* PCS B.6 Aggregates larger than 16 bytes are passed by
2022 invisible reference. */
2030 /* Write into appropriate registers a function return value of type
2031 TYPE, given in virtual format. */
2034 aarch64_store_return_value (struct type
*type
, struct regcache
*regs
,
2035 const gdb_byte
*valbuf
)
2037 struct gdbarch
*gdbarch
= regs
->arch ();
2038 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
2040 struct type
*fundamental_type
;
2042 if (aapcs_is_vfp_call_or_return_candidate (type
, &elements
,
2045 int len
= TYPE_LENGTH (fundamental_type
);
2047 for (int i
= 0; i
< elements
; i
++)
2049 int regno
= AARCH64_V0_REGNUM
+ i
;
2050 bfd_byte tmpbuf
[V_REGISTER_SIZE
];
2054 debug_printf ("write HFA or HVA return value element %d to %s\n",
2056 gdbarch_register_name (gdbarch
, regno
));
2059 memcpy (tmpbuf
, valbuf
,
2060 len
> V_REGISTER_SIZE
? V_REGISTER_SIZE
: len
);
2061 regs
->cooked_write (regno
, tmpbuf
);
2065 else if (TYPE_CODE (type
) == TYPE_CODE_INT
2066 || TYPE_CODE (type
) == TYPE_CODE_CHAR
2067 || TYPE_CODE (type
) == TYPE_CODE_BOOL
2068 || TYPE_CODE (type
) == TYPE_CODE_PTR
2069 || TYPE_IS_REFERENCE (type
)
2070 || TYPE_CODE (type
) == TYPE_CODE_ENUM
)
2072 if (TYPE_LENGTH (type
) <= X_REGISTER_SIZE
)
2074 /* Values of one word or less are zero/sign-extended and
2076 bfd_byte tmpbuf
[X_REGISTER_SIZE
];
2077 LONGEST val
= unpack_long (type
, valbuf
);
2079 store_signed_integer (tmpbuf
, X_REGISTER_SIZE
, byte_order
, val
);
2080 regs
->cooked_write (AARCH64_X0_REGNUM
, tmpbuf
);
2084 /* Integral values greater than one word are stored in
2085 consecutive registers starting with r0. This will always
2086 be a multiple of the regiser size. */
2087 int len
= TYPE_LENGTH (type
);
2088 int regno
= AARCH64_X0_REGNUM
;
2092 regs
->cooked_write (regno
++, valbuf
);
2093 len
-= X_REGISTER_SIZE
;
2094 valbuf
+= X_REGISTER_SIZE
;
2100 /* For a structure or union the behaviour is as if the value had
2101 been stored to word-aligned memory and then loaded into
2102 registers with 64-bit load instruction(s). */
2103 int len
= TYPE_LENGTH (type
);
2104 int regno
= AARCH64_X0_REGNUM
;
2105 bfd_byte tmpbuf
[X_REGISTER_SIZE
];
2109 memcpy (tmpbuf
, valbuf
,
2110 len
> X_REGISTER_SIZE
? X_REGISTER_SIZE
: len
);
2111 regs
->cooked_write (regno
++, tmpbuf
);
2112 len
-= X_REGISTER_SIZE
;
2113 valbuf
+= X_REGISTER_SIZE
;
2118 /* Implement the "return_value" gdbarch method. */
2120 static enum return_value_convention
2121 aarch64_return_value (struct gdbarch
*gdbarch
, struct value
*func_value
,
2122 struct type
*valtype
, struct regcache
*regcache
,
2123 gdb_byte
*readbuf
, const gdb_byte
*writebuf
)
2126 if (TYPE_CODE (valtype
) == TYPE_CODE_STRUCT
2127 || TYPE_CODE (valtype
) == TYPE_CODE_UNION
2128 || TYPE_CODE (valtype
) == TYPE_CODE_ARRAY
)
2130 if (aarch64_return_in_memory (gdbarch
, valtype
))
2133 debug_printf ("return value in memory\n");
2134 return RETURN_VALUE_STRUCT_CONVENTION
;
2139 aarch64_store_return_value (valtype
, regcache
, writebuf
);
2142 aarch64_extract_return_value (valtype
, regcache
, readbuf
);
2145 debug_printf ("return value in registers\n");
2147 return RETURN_VALUE_REGISTER_CONVENTION
;
2150 /* Implement the "get_longjmp_target" gdbarch method. */
2153 aarch64_get_longjmp_target (struct frame_info
*frame
, CORE_ADDR
*pc
)
2156 gdb_byte buf
[X_REGISTER_SIZE
];
2157 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
2158 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2159 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
2161 jb_addr
= get_frame_register_unsigned (frame
, AARCH64_X0_REGNUM
);
2163 if (target_read_memory (jb_addr
+ tdep
->jb_pc
* tdep
->jb_elt_size
, buf
,
2167 *pc
= extract_unsigned_integer (buf
, X_REGISTER_SIZE
, byte_order
);
2171 /* Implement the "gen_return_address" gdbarch method. */
2174 aarch64_gen_return_address (struct gdbarch
*gdbarch
,
2175 struct agent_expr
*ax
, struct axs_value
*value
,
2178 value
->type
= register_type (gdbarch
, AARCH64_LR_REGNUM
);
2179 value
->kind
= axs_lvalue_register
;
2180 value
->u
.reg
= AARCH64_LR_REGNUM
;
2184 /* Return the pseudo register name corresponding to register regnum. */
2187 aarch64_pseudo_register_name (struct gdbarch
*gdbarch
, int regnum
)
2189 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2191 static const char *const q_name
[] =
2193 "q0", "q1", "q2", "q3",
2194 "q4", "q5", "q6", "q7",
2195 "q8", "q9", "q10", "q11",
2196 "q12", "q13", "q14", "q15",
2197 "q16", "q17", "q18", "q19",
2198 "q20", "q21", "q22", "q23",
2199 "q24", "q25", "q26", "q27",
2200 "q28", "q29", "q30", "q31",
2203 static const char *const d_name
[] =
2205 "d0", "d1", "d2", "d3",
2206 "d4", "d5", "d6", "d7",
2207 "d8", "d9", "d10", "d11",
2208 "d12", "d13", "d14", "d15",
2209 "d16", "d17", "d18", "d19",
2210 "d20", "d21", "d22", "d23",
2211 "d24", "d25", "d26", "d27",
2212 "d28", "d29", "d30", "d31",
2215 static const char *const s_name
[] =
2217 "s0", "s1", "s2", "s3",
2218 "s4", "s5", "s6", "s7",
2219 "s8", "s9", "s10", "s11",
2220 "s12", "s13", "s14", "s15",
2221 "s16", "s17", "s18", "s19",
2222 "s20", "s21", "s22", "s23",
2223 "s24", "s25", "s26", "s27",
2224 "s28", "s29", "s30", "s31",
2227 static const char *const h_name
[] =
2229 "h0", "h1", "h2", "h3",
2230 "h4", "h5", "h6", "h7",
2231 "h8", "h9", "h10", "h11",
2232 "h12", "h13", "h14", "h15",
2233 "h16", "h17", "h18", "h19",
2234 "h20", "h21", "h22", "h23",
2235 "h24", "h25", "h26", "h27",
2236 "h28", "h29", "h30", "h31",
2239 static const char *const b_name
[] =
2241 "b0", "b1", "b2", "b3",
2242 "b4", "b5", "b6", "b7",
2243 "b8", "b9", "b10", "b11",
2244 "b12", "b13", "b14", "b15",
2245 "b16", "b17", "b18", "b19",
2246 "b20", "b21", "b22", "b23",
2247 "b24", "b25", "b26", "b27",
2248 "b28", "b29", "b30", "b31",
2251 regnum
-= gdbarch_num_regs (gdbarch
);
2253 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
2254 return q_name
[regnum
- AARCH64_Q0_REGNUM
];
2256 if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
2257 return d_name
[regnum
- AARCH64_D0_REGNUM
];
2259 if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
2260 return s_name
[regnum
- AARCH64_S0_REGNUM
];
2262 if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
2263 return h_name
[regnum
- AARCH64_H0_REGNUM
];
2265 if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
2266 return b_name
[regnum
- AARCH64_B0_REGNUM
];
2268 if (tdep
->has_sve ())
2270 static const char *const sve_v_name
[] =
2272 "v0", "v1", "v2", "v3",
2273 "v4", "v5", "v6", "v7",
2274 "v8", "v9", "v10", "v11",
2275 "v12", "v13", "v14", "v15",
2276 "v16", "v17", "v18", "v19",
2277 "v20", "v21", "v22", "v23",
2278 "v24", "v25", "v26", "v27",
2279 "v28", "v29", "v30", "v31",
2282 if (regnum
>= AARCH64_SVE_V0_REGNUM
2283 && regnum
< AARCH64_SVE_V0_REGNUM
+ AARCH64_V_REGS_NUM
)
2284 return sve_v_name
[regnum
- AARCH64_SVE_V0_REGNUM
];
2287 internal_error (__FILE__
, __LINE__
,
2288 _("aarch64_pseudo_register_name: bad register number %d"),
2292 /* Implement the "pseudo_register_type" tdesc_arch_data method. */
2294 static struct type
*
2295 aarch64_pseudo_register_type (struct gdbarch
*gdbarch
, int regnum
)
2297 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2299 regnum
-= gdbarch_num_regs (gdbarch
);
2301 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
2302 return aarch64_vnq_type (gdbarch
);
2304 if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
2305 return aarch64_vnd_type (gdbarch
);
2307 if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
2308 return aarch64_vns_type (gdbarch
);
2310 if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
2311 return aarch64_vnh_type (gdbarch
);
2313 if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
2314 return aarch64_vnb_type (gdbarch
);
2316 if (tdep
->has_sve () && regnum
>= AARCH64_SVE_V0_REGNUM
2317 && regnum
< AARCH64_SVE_V0_REGNUM
+ AARCH64_V_REGS_NUM
)
2318 return aarch64_vnv_type (gdbarch
);
2320 internal_error (__FILE__
, __LINE__
,
2321 _("aarch64_pseudo_register_type: bad register number %d"),
2325 /* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2328 aarch64_pseudo_register_reggroup_p (struct gdbarch
*gdbarch
, int regnum
,
2329 struct reggroup
*group
)
2331 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2333 regnum
-= gdbarch_num_regs (gdbarch
);
2335 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
2336 return group
== all_reggroup
|| group
== vector_reggroup
;
2337 else if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
2338 return (group
== all_reggroup
|| group
== vector_reggroup
2339 || group
== float_reggroup
);
2340 else if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
2341 return (group
== all_reggroup
|| group
== vector_reggroup
2342 || group
== float_reggroup
);
2343 else if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
2344 return group
== all_reggroup
|| group
== vector_reggroup
;
2345 else if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
2346 return group
== all_reggroup
|| group
== vector_reggroup
;
2347 else if (tdep
->has_sve () && regnum
>= AARCH64_SVE_V0_REGNUM
2348 && regnum
< AARCH64_SVE_V0_REGNUM
+ AARCH64_V_REGS_NUM
)
2349 return group
== all_reggroup
|| group
== vector_reggroup
;
2351 return group
== all_reggroup
;
2354 /* Helper for aarch64_pseudo_read_value. */
2356 static struct value
*
2357 aarch64_pseudo_read_value_1 (struct gdbarch
*gdbarch
,
2358 readable_regcache
*regcache
, int regnum_offset
,
2359 int regsize
, struct value
*result_value
)
2361 unsigned v_regnum
= AARCH64_V0_REGNUM
+ regnum_offset
;
2363 /* Enough space for a full vector register. */
2364 gdb_byte reg_buf
[register_size (gdbarch
, AARCH64_V0_REGNUM
)];
2365 gdb_static_assert (AARCH64_V0_REGNUM
== AARCH64_SVE_Z0_REGNUM
);
2367 if (regcache
->raw_read (v_regnum
, reg_buf
) != REG_VALID
)
2368 mark_value_bytes_unavailable (result_value
, 0,
2369 TYPE_LENGTH (value_type (result_value
)));
2371 memcpy (value_contents_raw (result_value
), reg_buf
, regsize
);
2373 return result_value
;
2376 /* Implement the "pseudo_register_read_value" gdbarch method. */
2378 static struct value
*
2379 aarch64_pseudo_read_value (struct gdbarch
*gdbarch
, readable_regcache
*regcache
,
2382 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2383 struct value
*result_value
= allocate_value (register_type (gdbarch
, regnum
));
2385 VALUE_LVAL (result_value
) = lval_register
;
2386 VALUE_REGNUM (result_value
) = regnum
;
2388 regnum
-= gdbarch_num_regs (gdbarch
);
2390 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
2391 return aarch64_pseudo_read_value_1 (gdbarch
, regcache
,
2392 regnum
- AARCH64_Q0_REGNUM
,
2393 Q_REGISTER_SIZE
, result_value
);
2395 if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
2396 return aarch64_pseudo_read_value_1 (gdbarch
, regcache
,
2397 regnum
- AARCH64_D0_REGNUM
,
2398 D_REGISTER_SIZE
, result_value
);
2400 if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
2401 return aarch64_pseudo_read_value_1 (gdbarch
, regcache
,
2402 regnum
- AARCH64_S0_REGNUM
,
2403 S_REGISTER_SIZE
, result_value
);
2405 if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
2406 return aarch64_pseudo_read_value_1 (gdbarch
, regcache
,
2407 regnum
- AARCH64_H0_REGNUM
,
2408 H_REGISTER_SIZE
, result_value
);
2410 if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
2411 return aarch64_pseudo_read_value_1 (gdbarch
, regcache
,
2412 regnum
- AARCH64_B0_REGNUM
,
2413 B_REGISTER_SIZE
, result_value
);
2415 if (tdep
->has_sve () && regnum
>= AARCH64_SVE_V0_REGNUM
2416 && regnum
< AARCH64_SVE_V0_REGNUM
+ 32)
2417 return aarch64_pseudo_read_value_1 (gdbarch
, regcache
,
2418 regnum
- AARCH64_SVE_V0_REGNUM
,
2419 V_REGISTER_SIZE
, result_value
);
2421 gdb_assert_not_reached ("regnum out of bound");
2424 /* Helper for aarch64_pseudo_write. */
2427 aarch64_pseudo_write_1 (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
2428 int regnum_offset
, int regsize
, const gdb_byte
*buf
)
2430 unsigned v_regnum
= AARCH64_V0_REGNUM
+ regnum_offset
;
2432 /* Enough space for a full vector register. */
2433 gdb_byte reg_buf
[register_size (gdbarch
, AARCH64_V0_REGNUM
)];
2434 gdb_static_assert (AARCH64_V0_REGNUM
== AARCH64_SVE_Z0_REGNUM
);
2436 /* Ensure the register buffer is zero, we want gdb writes of the
2437 various 'scalar' pseudo registers to behavior like architectural
2438 writes, register width bytes are written the remainder are set to
2440 memset (reg_buf
, 0, register_size (gdbarch
, AARCH64_V0_REGNUM
));
2442 memcpy (reg_buf
, buf
, regsize
);
2443 regcache
->raw_write (v_regnum
, reg_buf
);
2446 /* Implement the "pseudo_register_write" gdbarch method. */
2449 aarch64_pseudo_write (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
2450 int regnum
, const gdb_byte
*buf
)
2452 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2453 regnum
-= gdbarch_num_regs (gdbarch
);
2455 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
2456 return aarch64_pseudo_write_1 (gdbarch
, regcache
,
2457 regnum
- AARCH64_Q0_REGNUM
, Q_REGISTER_SIZE
,
2460 if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
2461 return aarch64_pseudo_write_1 (gdbarch
, regcache
,
2462 regnum
- AARCH64_D0_REGNUM
, D_REGISTER_SIZE
,
2465 if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
2466 return aarch64_pseudo_write_1 (gdbarch
, regcache
,
2467 regnum
- AARCH64_S0_REGNUM
, S_REGISTER_SIZE
,
2470 if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
2471 return aarch64_pseudo_write_1 (gdbarch
, regcache
,
2472 regnum
- AARCH64_H0_REGNUM
, H_REGISTER_SIZE
,
2475 if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
2476 return aarch64_pseudo_write_1 (gdbarch
, regcache
,
2477 regnum
- AARCH64_B0_REGNUM
, B_REGISTER_SIZE
,
2480 if (tdep
->has_sve () && regnum
>= AARCH64_SVE_V0_REGNUM
2481 && regnum
< AARCH64_SVE_V0_REGNUM
+ 32)
2482 return aarch64_pseudo_write_1 (gdbarch
, regcache
,
2483 regnum
- AARCH64_SVE_V0_REGNUM
,
2484 V_REGISTER_SIZE
, buf
);
2486 gdb_assert_not_reached ("regnum out of bound");
2489 /* Callback function for user_reg_add. */
2491 static struct value
*
2492 value_of_aarch64_user_reg (struct frame_info
*frame
, const void *baton
)
2494 const int *reg_p
= (const int *) baton
;
2496 return value_of_register (*reg_p
, frame
);
2500 /* Implement the "software_single_step" gdbarch method, needed to
2501 single step through atomic sequences on AArch64. */
2503 static std::vector
<CORE_ADDR
>
2504 aarch64_software_single_step (struct regcache
*regcache
)
2506 struct gdbarch
*gdbarch
= regcache
->arch ();
2507 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
2508 const int insn_size
= 4;
2509 const int atomic_sequence_length
= 16; /* Instruction sequence length. */
2510 CORE_ADDR pc
= regcache_read_pc (regcache
);
2511 CORE_ADDR breaks
[2] = { CORE_ADDR_MAX
, CORE_ADDR_MAX
};
2513 CORE_ADDR closing_insn
= 0;
2514 uint32_t insn
= read_memory_unsigned_integer (loc
, insn_size
,
2515 byte_order_for_code
);
2518 int bc_insn_count
= 0; /* Conditional branch instruction count. */
2519 int last_breakpoint
= 0; /* Defaults to 0 (no breakpoints placed). */
2522 if (aarch64_decode_insn (insn
, &inst
, 1, NULL
) != 0)
2525 /* Look for a Load Exclusive instruction which begins the sequence. */
2526 if (inst
.opcode
->iclass
!= ldstexcl
|| bit (insn
, 22) == 0)
2529 for (insn_count
= 0; insn_count
< atomic_sequence_length
; ++insn_count
)
2532 insn
= read_memory_unsigned_integer (loc
, insn_size
,
2533 byte_order_for_code
);
2535 if (aarch64_decode_insn (insn
, &inst
, 1, NULL
) != 0)
2537 /* Check if the instruction is a conditional branch. */
2538 if (inst
.opcode
->iclass
== condbranch
)
2540 gdb_assert (inst
.operands
[0].type
== AARCH64_OPND_ADDR_PCREL19
);
2542 if (bc_insn_count
>= 1)
2545 /* It is, so we'll try to set a breakpoint at the destination. */
2546 breaks
[1] = loc
+ inst
.operands
[0].imm
.value
;
2552 /* Look for the Store Exclusive which closes the atomic sequence. */
2553 if (inst
.opcode
->iclass
== ldstexcl
&& bit (insn
, 22) == 0)
2560 /* We didn't find a closing Store Exclusive instruction, fall back. */
2564 /* Insert breakpoint after the end of the atomic sequence. */
2565 breaks
[0] = loc
+ insn_size
;
2567 /* Check for duplicated breakpoints, and also check that the second
2568 breakpoint is not within the atomic sequence. */
2570 && (breaks
[1] == breaks
[0]
2571 || (breaks
[1] >= pc
&& breaks
[1] <= closing_insn
)))
2572 last_breakpoint
= 0;
2574 std::vector
<CORE_ADDR
> next_pcs
;
2576 /* Insert the breakpoint at the end of the sequence, and one at the
2577 destination of the conditional branch, if it exists. */
2578 for (index
= 0; index
<= last_breakpoint
; index
++)
2579 next_pcs
.push_back (breaks
[index
]);
2584 struct aarch64_displaced_step_closure
: public displaced_step_closure
2586 /* It is true when condition instruction, such as B.CON, TBZ, etc,
2587 is being displaced stepping. */
2590 /* PC adjustment offset after displaced stepping. */
2591 int32_t pc_adjust
= 0;
2594 /* Data when visiting instructions for displaced stepping. */
2596 struct aarch64_displaced_step_data
2598 struct aarch64_insn_data base
;
2600 /* The address where the instruction will be executed at. */
2602 /* Buffer of instructions to be copied to NEW_ADDR to execute. */
2603 uint32_t insn_buf
[DISPLACED_MODIFIED_INSNS
];
2604 /* Number of instructions in INSN_BUF. */
2605 unsigned insn_count
;
2606 /* Registers when doing displaced stepping. */
2607 struct regcache
*regs
;
2609 aarch64_displaced_step_closure
*dsc
;
2612 /* Implementation of aarch64_insn_visitor method "b". */
2615 aarch64_displaced_step_b (const int is_bl
, const int32_t offset
,
2616 struct aarch64_insn_data
*data
)
2618 struct aarch64_displaced_step_data
*dsd
2619 = (struct aarch64_displaced_step_data
*) data
;
2620 int64_t new_offset
= data
->insn_addr
- dsd
->new_addr
+ offset
;
2622 if (can_encode_int32 (new_offset
, 28))
2624 /* Emit B rather than BL, because executing BL on a new address
2625 will get the wrong address into LR. In order to avoid this,
2626 we emit B, and update LR if the instruction is BL. */
2627 emit_b (dsd
->insn_buf
, 0, new_offset
);
2633 emit_nop (dsd
->insn_buf
);
2635 dsd
->dsc
->pc_adjust
= offset
;
2641 regcache_cooked_write_unsigned (dsd
->regs
, AARCH64_LR_REGNUM
,
2642 data
->insn_addr
+ 4);
2646 /* Implementation of aarch64_insn_visitor method "b_cond". */
2649 aarch64_displaced_step_b_cond (const unsigned cond
, const int32_t offset
,
2650 struct aarch64_insn_data
*data
)
2652 struct aarch64_displaced_step_data
*dsd
2653 = (struct aarch64_displaced_step_data
*) data
;
2655 /* GDB has to fix up PC after displaced step this instruction
2656 differently according to the condition is true or false. Instead
2657 of checking COND against conditional flags, we can use
2658 the following instructions, and GDB can tell how to fix up PC
2659 according to the PC value.
2661 B.COND TAKEN ; If cond is true, then jump to TAKEN.
2667 emit_bcond (dsd
->insn_buf
, cond
, 8);
2669 dsd
->dsc
->pc_adjust
= offset
;
2670 dsd
->insn_count
= 1;
2673 /* Dynamically allocate a new register. If we know the register
2674 statically, we should make it a global as above instead of using this
2677 static struct aarch64_register
2678 aarch64_register (unsigned num
, int is64
)
2680 return (struct aarch64_register
) { num
, is64
};
2683 /* Implementation of aarch64_insn_visitor method "cb". */
2686 aarch64_displaced_step_cb (const int32_t offset
, const int is_cbnz
,
2687 const unsigned rn
, int is64
,
2688 struct aarch64_insn_data
*data
)
2690 struct aarch64_displaced_step_data
*dsd
2691 = (struct aarch64_displaced_step_data
*) data
;
2693 /* The offset is out of range for a compare and branch
2694 instruction. We can use the following instructions instead:
2696 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
2701 emit_cb (dsd
->insn_buf
, is_cbnz
, aarch64_register (rn
, is64
), 8);
2702 dsd
->insn_count
= 1;
2704 dsd
->dsc
->pc_adjust
= offset
;
2707 /* Implementation of aarch64_insn_visitor method "tb". */
2710 aarch64_displaced_step_tb (const int32_t offset
, int is_tbnz
,
2711 const unsigned rt
, unsigned bit
,
2712 struct aarch64_insn_data
*data
)
2714 struct aarch64_displaced_step_data
*dsd
2715 = (struct aarch64_displaced_step_data
*) data
;
2717 /* The offset is out of range for a test bit and branch
2718 instruction We can use the following instructions instead:
2720 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
2726 emit_tb (dsd
->insn_buf
, is_tbnz
, bit
, aarch64_register (rt
, 1), 8);
2727 dsd
->insn_count
= 1;
2729 dsd
->dsc
->pc_adjust
= offset
;
2732 /* Implementation of aarch64_insn_visitor method "adr". */
2735 aarch64_displaced_step_adr (const int32_t offset
, const unsigned rd
,
2736 const int is_adrp
, struct aarch64_insn_data
*data
)
2738 struct aarch64_displaced_step_data
*dsd
2739 = (struct aarch64_displaced_step_data
*) data
;
2740 /* We know exactly the address the ADR{P,} instruction will compute.
2741 We can just write it to the destination register. */
2742 CORE_ADDR address
= data
->insn_addr
+ offset
;
2746 /* Clear the lower 12 bits of the offset to get the 4K page. */
2747 regcache_cooked_write_unsigned (dsd
->regs
, AARCH64_X0_REGNUM
+ rd
,
2751 regcache_cooked_write_unsigned (dsd
->regs
, AARCH64_X0_REGNUM
+ rd
,
2754 dsd
->dsc
->pc_adjust
= 4;
2755 emit_nop (dsd
->insn_buf
);
2756 dsd
->insn_count
= 1;
2759 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
2762 aarch64_displaced_step_ldr_literal (const int32_t offset
, const int is_sw
,
2763 const unsigned rt
, const int is64
,
2764 struct aarch64_insn_data
*data
)
2766 struct aarch64_displaced_step_data
*dsd
2767 = (struct aarch64_displaced_step_data
*) data
;
2768 CORE_ADDR address
= data
->insn_addr
+ offset
;
2769 struct aarch64_memory_operand zero
= { MEMORY_OPERAND_OFFSET
, 0 };
2771 regcache_cooked_write_unsigned (dsd
->regs
, AARCH64_X0_REGNUM
+ rt
,
2775 dsd
->insn_count
= emit_ldrsw (dsd
->insn_buf
, aarch64_register (rt
, 1),
2776 aarch64_register (rt
, 1), zero
);
2778 dsd
->insn_count
= emit_ldr (dsd
->insn_buf
, aarch64_register (rt
, is64
),
2779 aarch64_register (rt
, 1), zero
);
2781 dsd
->dsc
->pc_adjust
= 4;
2784 /* Implementation of aarch64_insn_visitor method "others". */
2787 aarch64_displaced_step_others (const uint32_t insn
,
2788 struct aarch64_insn_data
*data
)
2790 struct aarch64_displaced_step_data
*dsd
2791 = (struct aarch64_displaced_step_data
*) data
;
2793 aarch64_emit_insn (dsd
->insn_buf
, insn
);
2794 dsd
->insn_count
= 1;
2796 if ((insn
& 0xfffffc1f) == 0xd65f0000)
2799 dsd
->dsc
->pc_adjust
= 0;
2802 dsd
->dsc
->pc_adjust
= 4;
2805 static const struct aarch64_insn_visitor visitor
=
2807 aarch64_displaced_step_b
,
2808 aarch64_displaced_step_b_cond
,
2809 aarch64_displaced_step_cb
,
2810 aarch64_displaced_step_tb
,
2811 aarch64_displaced_step_adr
,
2812 aarch64_displaced_step_ldr_literal
,
2813 aarch64_displaced_step_others
,
2816 /* Implement the "displaced_step_copy_insn" gdbarch method. */
2818 struct displaced_step_closure
*
2819 aarch64_displaced_step_copy_insn (struct gdbarch
*gdbarch
,
2820 CORE_ADDR from
, CORE_ADDR to
,
2821 struct regcache
*regs
)
2823 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
2824 uint32_t insn
= read_memory_unsigned_integer (from
, 4, byte_order_for_code
);
2825 struct aarch64_displaced_step_data dsd
;
2828 if (aarch64_decode_insn (insn
, &inst
, 1, NULL
) != 0)
2831 /* Look for a Load Exclusive instruction which begins the sequence. */
2832 if (inst
.opcode
->iclass
== ldstexcl
&& bit (insn
, 22))
2834 /* We can't displaced step atomic sequences. */
2838 std::unique_ptr
<aarch64_displaced_step_closure
> dsc
2839 (new aarch64_displaced_step_closure
);
2840 dsd
.base
.insn_addr
= from
;
2843 dsd
.dsc
= dsc
.get ();
2845 aarch64_relocate_instruction (insn
, &visitor
,
2846 (struct aarch64_insn_data
*) &dsd
);
2847 gdb_assert (dsd
.insn_count
<= DISPLACED_MODIFIED_INSNS
);
2849 if (dsd
.insn_count
!= 0)
2853 /* Instruction can be relocated to scratch pad. Copy
2854 relocated instruction(s) there. */
2855 for (i
= 0; i
< dsd
.insn_count
; i
++)
2857 if (debug_displaced
)
2859 debug_printf ("displaced: writing insn ");
2860 debug_printf ("%.8x", dsd
.insn_buf
[i
]);
2861 debug_printf (" at %s\n", paddress (gdbarch
, to
+ i
* 4));
2863 write_memory_unsigned_integer (to
+ i
* 4, 4, byte_order_for_code
,
2864 (ULONGEST
) dsd
.insn_buf
[i
]);
2872 return dsc
.release ();
2875 /* Implement the "displaced_step_fixup" gdbarch method. */
2878 aarch64_displaced_step_fixup (struct gdbarch
*gdbarch
,
2879 struct displaced_step_closure
*dsc_
,
2880 CORE_ADDR from
, CORE_ADDR to
,
2881 struct regcache
*regs
)
2883 aarch64_displaced_step_closure
*dsc
= (aarch64_displaced_step_closure
*) dsc_
;
2889 regcache_cooked_read_unsigned (regs
, AARCH64_PC_REGNUM
, &pc
);
2892 /* Condition is true. */
2894 else if (pc
- to
== 4)
2896 /* Condition is false. */
2900 gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
2903 if (dsc
->pc_adjust
!= 0)
2905 if (debug_displaced
)
2907 debug_printf ("displaced: fixup: set PC to %s:%d\n",
2908 paddress (gdbarch
, from
), dsc
->pc_adjust
);
2910 regcache_cooked_write_unsigned (regs
, AARCH64_PC_REGNUM
,
2911 from
+ dsc
->pc_adjust
);
/* Implement the "displaced_step_hw_singlestep" gdbarch method.
   AArch64 always hardware single-steps the relocated instruction.  */

int
aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch,
				      struct displaced_step_closure *closure)
{
  return 1;
}
2924 /* Get the correct target description for the given VQ value.
2925 If VQ is zero then it is assumed SVE is not supported.
2926 (It is not possible to set VQ to zero on an SVE system). */
2929 aarch64_read_description (uint64_t vq
)
2931 if (vq
> AARCH64_MAX_SVE_VQ
)
2932 error (_("VQ is %" PRIu64
", maximum supported value is %d"), vq
,
2933 AARCH64_MAX_SVE_VQ
);
2935 struct target_desc
*tdesc
= tdesc_aarch64_list
[vq
];
2939 tdesc
= aarch64_create_target_description (vq
);
2940 tdesc_aarch64_list
[vq
] = tdesc
;
2946 /* Return the VQ used when creating the target description TDESC. */
2949 aarch64_get_tdesc_vq (const struct target_desc
*tdesc
)
2951 const struct tdesc_feature
*feature_sve
;
2953 if (!tdesc_has_registers (tdesc
))
2956 feature_sve
= tdesc_find_feature (tdesc
, "org.gnu.gdb.aarch64.sve");
2958 if (feature_sve
== nullptr)
2961 uint64_t vl
= tdesc_register_bitsize (feature_sve
,
2962 aarch64_sve_register_names
[0]) / 8;
2963 return sve_vq_from_vl (vl
);
2967 /* Initialize the current architecture based on INFO. If possible,
2968 re-use an architecture from ARCHES, which is a list of
2969 architectures already created during this debugging session.
2971 Called e.g. at program startup, when reading a core file, and when
2972 reading a binary file. */
2974 static struct gdbarch
*
2975 aarch64_gdbarch_init (struct gdbarch_info info
, struct gdbarch_list
*arches
)
2977 struct gdbarch_tdep
*tdep
;
2978 struct gdbarch
*gdbarch
;
2979 struct gdbarch_list
*best_arch
;
2980 struct tdesc_arch_data
*tdesc_data
= NULL
;
2981 const struct target_desc
*tdesc
= info
.target_desc
;
2984 const struct tdesc_feature
*feature_core
;
2985 const struct tdesc_feature
*feature_fpu
;
2986 const struct tdesc_feature
*feature_sve
;
2988 int num_pseudo_regs
= 0;
2990 /* Ensure we always have a target description. */
2991 if (!tdesc_has_registers (tdesc
))
2992 tdesc
= aarch64_read_description (0);
2995 feature_core
= tdesc_find_feature (tdesc
, "org.gnu.gdb.aarch64.core");
2996 feature_fpu
= tdesc_find_feature (tdesc
, "org.gnu.gdb.aarch64.fpu");
2997 feature_sve
= tdesc_find_feature (tdesc
, "org.gnu.gdb.aarch64.sve");
2999 if (feature_core
== NULL
)
3002 tdesc_data
= tdesc_data_alloc ();
3004 /* Validate the description provides the mandatory core R registers
3005 and allocate their numbers. */
3006 for (i
= 0; i
< ARRAY_SIZE (aarch64_r_register_names
); i
++)
3007 valid_p
&= tdesc_numbered_register (feature_core
, tdesc_data
,
3008 AARCH64_X0_REGNUM
+ i
,
3009 aarch64_r_register_names
[i
]);
3011 num_regs
= AARCH64_X0_REGNUM
+ i
;
3013 /* Add the V registers. */
3014 if (feature_fpu
!= NULL
)
3016 if (feature_sve
!= NULL
)
3017 error (_("Program contains both fpu and SVE features."));
3019 /* Validate the description provides the mandatory V registers
3020 and allocate their numbers. */
3021 for (i
= 0; i
< ARRAY_SIZE (aarch64_v_register_names
); i
++)
3022 valid_p
&= tdesc_numbered_register (feature_fpu
, tdesc_data
,
3023 AARCH64_V0_REGNUM
+ i
,
3024 aarch64_v_register_names
[i
]);
3026 num_regs
= AARCH64_V0_REGNUM
+ i
;
3029 /* Add the SVE registers. */
3030 if (feature_sve
!= NULL
)
3032 /* Validate the description provides the mandatory SVE registers
3033 and allocate their numbers. */
3034 for (i
= 0; i
< ARRAY_SIZE (aarch64_sve_register_names
); i
++)
3035 valid_p
&= tdesc_numbered_register (feature_sve
, tdesc_data
,
3036 AARCH64_SVE_Z0_REGNUM
+ i
,
3037 aarch64_sve_register_names
[i
]);
3039 num_regs
= AARCH64_SVE_Z0_REGNUM
+ i
;
3040 num_pseudo_regs
+= 32; /* add the Vn register pseudos. */
3043 if (feature_fpu
!= NULL
|| feature_sve
!= NULL
)
3045 num_pseudo_regs
+= 32; /* add the Qn scalar register pseudos */
3046 num_pseudo_regs
+= 32; /* add the Dn scalar register pseudos */
3047 num_pseudo_regs
+= 32; /* add the Sn scalar register pseudos */
3048 num_pseudo_regs
+= 32; /* add the Hn scalar register pseudos */
3049 num_pseudo_regs
+= 32; /* add the Bn scalar register pseudos */
3054 tdesc_data_cleanup (tdesc_data
);
3058 /* AArch64 code is always little-endian. */
3059 info
.byte_order_for_code
= BFD_ENDIAN_LITTLE
;
3061 /* If there is already a candidate, use it. */
3062 for (best_arch
= gdbarch_list_lookup_by_info (arches
, &info
);
3064 best_arch
= gdbarch_list_lookup_by_info (best_arch
->next
, &info
))
3066 /* Found a match. */
3070 if (best_arch
!= NULL
)
3072 if (tdesc_data
!= NULL
)
3073 tdesc_data_cleanup (tdesc_data
);
3074 return best_arch
->gdbarch
;
3077 tdep
= XCNEW (struct gdbarch_tdep
);
3078 gdbarch
= gdbarch_alloc (&info
, tdep
);
3080 /* This should be low enough for everything. */
3081 tdep
->lowest_pc
= 0x20;
3082 tdep
->jb_pc
= -1; /* Longjump support not enabled by default. */
3083 tdep
->jb_elt_size
= 8;
3084 tdep
->vq
= aarch64_get_tdesc_vq (tdesc
);
3086 set_gdbarch_push_dummy_call (gdbarch
, aarch64_push_dummy_call
);
3087 set_gdbarch_frame_align (gdbarch
, aarch64_frame_align
);
3089 /* Frame handling. */
3090 set_gdbarch_dummy_id (gdbarch
, aarch64_dummy_id
);
3091 set_gdbarch_unwind_pc (gdbarch
, aarch64_unwind_pc
);
3092 set_gdbarch_unwind_sp (gdbarch
, aarch64_unwind_sp
);
3094 /* Advance PC across function entry code. */
3095 set_gdbarch_skip_prologue (gdbarch
, aarch64_skip_prologue
);
3097 /* The stack grows downward. */
3098 set_gdbarch_inner_than (gdbarch
, core_addr_lessthan
);
3100 /* Breakpoint manipulation. */
3101 set_gdbarch_breakpoint_kind_from_pc (gdbarch
,
3102 aarch64_breakpoint::kind_from_pc
);
3103 set_gdbarch_sw_breakpoint_from_kind (gdbarch
,
3104 aarch64_breakpoint::bp_from_kind
);
3105 set_gdbarch_have_nonsteppable_watchpoint (gdbarch
, 1);
3106 set_gdbarch_software_single_step (gdbarch
, aarch64_software_single_step
);
3108 /* Information about registers, etc. */
3109 set_gdbarch_sp_regnum (gdbarch
, AARCH64_SP_REGNUM
);
3110 set_gdbarch_pc_regnum (gdbarch
, AARCH64_PC_REGNUM
);
3111 set_gdbarch_num_regs (gdbarch
, num_regs
);
3113 set_gdbarch_num_pseudo_regs (gdbarch
, num_pseudo_regs
);
3114 set_gdbarch_pseudo_register_read_value (gdbarch
, aarch64_pseudo_read_value
);
3115 set_gdbarch_pseudo_register_write (gdbarch
, aarch64_pseudo_write
);
3116 set_tdesc_pseudo_register_name (gdbarch
, aarch64_pseudo_register_name
);
3117 set_tdesc_pseudo_register_type (gdbarch
, aarch64_pseudo_register_type
);
3118 set_tdesc_pseudo_register_reggroup_p (gdbarch
,
3119 aarch64_pseudo_register_reggroup_p
);
3122 set_gdbarch_short_bit (gdbarch
, 16);
3123 set_gdbarch_int_bit (gdbarch
, 32);
3124 set_gdbarch_float_bit (gdbarch
, 32);
3125 set_gdbarch_double_bit (gdbarch
, 64);
3126 set_gdbarch_long_double_bit (gdbarch
, 128);
3127 set_gdbarch_long_bit (gdbarch
, 64);
3128 set_gdbarch_long_long_bit (gdbarch
, 64);
3129 set_gdbarch_ptr_bit (gdbarch
, 64);
3130 set_gdbarch_char_signed (gdbarch
, 0);
3131 set_gdbarch_wchar_signed (gdbarch
, 0);
3132 set_gdbarch_float_format (gdbarch
, floatformats_ieee_single
);
3133 set_gdbarch_double_format (gdbarch
, floatformats_ieee_double
);
3134 set_gdbarch_long_double_format (gdbarch
, floatformats_ia64_quad
);
3136 /* Internal <-> external register number maps. */
3137 set_gdbarch_dwarf2_reg_to_regnum (gdbarch
, aarch64_dwarf_reg_to_regnum
);
3139 /* Returning results. */
3140 set_gdbarch_return_value (gdbarch
, aarch64_return_value
);
3143 set_gdbarch_print_insn (gdbarch
, aarch64_gdb_print_insn
);
3145 /* Virtual tables. */
3146 set_gdbarch_vbit_in_delta (gdbarch
, 1);
3148 /* Hook in the ABI-specific overrides, if they have been registered. */
3149 info
.target_desc
= tdesc
;
3150 info
.tdesc_data
= tdesc_data
;
3151 gdbarch_init_osabi (info
, gdbarch
);
3153 dwarf2_frame_set_init_reg (gdbarch
, aarch64_dwarf2_frame_init_reg
);
3155 /* Add some default predicates. */
3156 frame_unwind_append_unwinder (gdbarch
, &aarch64_stub_unwind
);
3157 dwarf2_append_unwinders (gdbarch
);
3158 frame_unwind_append_unwinder (gdbarch
, &aarch64_prologue_unwind
);
3160 frame_base_set_default (gdbarch
, &aarch64_normal_base
);
3162 /* Now we have tuned the configuration, set a few final things,
3163 based on what the OS ABI has told us. */
3165 if (tdep
->jb_pc
>= 0)
3166 set_gdbarch_get_longjmp_target (gdbarch
, aarch64_get_longjmp_target
);
3168 set_gdbarch_gen_return_address (gdbarch
, aarch64_gen_return_address
);
3170 tdesc_use_registers (gdbarch
, tdesc
, tdesc_data
);
3172 /* Add standard register aliases. */
3173 for (i
= 0; i
< ARRAY_SIZE (aarch64_register_aliases
); i
++)
3174 user_reg_add (gdbarch
, aarch64_register_aliases
[i
].name
,
3175 value_of_aarch64_user_reg
,
3176 &aarch64_register_aliases
[i
].regnum
);
3182 aarch64_dump_tdep (struct gdbarch
*gdbarch
, struct ui_file
*file
)
3184 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
3189 fprintf_unfiltered (file
, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
3190 paddress (gdbarch
, tdep
->lowest_pc
));
#if GDB_SELF_TEST
namespace selftests
{
/* Forward declaration; defined with the record/replay code below.  */
static void aarch64_process_record_test (void);
}
#endif
3201 _initialize_aarch64_tdep (void)
3203 gdbarch_register (bfd_arch_aarch64
, aarch64_gdbarch_init
,
3206 /* Debug this file's internals. */
3207 add_setshow_boolean_cmd ("aarch64", class_maintenance
, &aarch64_debug
, _("\
3208 Set AArch64 debugging."), _("\
3209 Show AArch64 debugging."), _("\
3210 When on, AArch64 specific debugging is enabled."),
3213 &setdebuglist
, &showdebuglist
);
3216 selftests::register_test ("aarch64-analyze-prologue",
3217 selftests::aarch64_analyze_prologue_test
);
3218 selftests::register_test ("aarch64-process-record",
3219 selftests::aarch64_process_record_test
);
3220 selftests::record_xml_tdesc ("aarch64.xml",
3221 aarch64_create_target_description (0));
/* AArch64 process record-replay related structures, defines etc.  */

/* Allocate REGS (an array of LENGTH register numbers) and copy the
   contents of RECORD_BUF into it.  Does nothing when LENGTH is 0.  */
#define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
        do  \
          { \
            unsigned int reg_len = LENGTH; \
            if (reg_len) \
              { \
                REGS = XNEWVEC (uint32_t, reg_len); \
                memcpy(&REGS[0], &RECORD_BUF[0], sizeof(uint32_t)*LENGTH); \
              } \
          } \
        while (0)
/* Allocate MEMS (an array of LENGTH aarch64_mem_r records) and copy the
   contents of RECORD_BUF into it.  Does nothing when LENGTH is 0.  */
#define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
        do  \
          { \
            unsigned int mem_len = LENGTH; \
            if (mem_len) \
            { \
              MEMS =  XNEWVEC (struct aarch64_mem_r, mem_len);  \
              memcpy(&MEMS->len, &RECORD_BUF[0], \
                     sizeof(struct aarch64_mem_r) * LENGTH); \
            } \
          } \
          while (0)
/* AArch64 record/replay structures and enumerations.  */

/* One recorded memory write: ADDR is where the write happened, LEN how
   many bytes were written.  */
struct aarch64_mem_r
{
  uint64_t len;    /* Record length.  */
  uint64_t addr;   /* Memory address.  */
};
/* Result codes returned by the per-class record handlers.  */
enum aarch64_record_result
{
  AARCH64_RECORD_SUCCESS,
  AARCH64_RECORD_UNSUPPORTED,
  AARCH64_RECORD_UNKNOWN
};
3267 typedef struct insn_decode_record_t
3269 struct gdbarch
*gdbarch
;
3270 struct regcache
*regcache
;
3271 CORE_ADDR this_addr
; /* Address of insn to be recorded. */
3272 uint32_t aarch64_insn
; /* Insn to be recorded. */
3273 uint32_t mem_rec_count
; /* Count of memory records. */
3274 uint32_t reg_rec_count
; /* Count of register records. */
3275 uint32_t *aarch64_regs
; /* Registers to be recorded. */
3276 struct aarch64_mem_r
*aarch64_mems
; /* Memory locations to be recorded. */
3277 } insn_decode_record
;
3279 /* Record handler for data processing - register instructions. */
3282 aarch64_record_data_proc_reg (insn_decode_record
*aarch64_insn_r
)
3284 uint8_t reg_rd
, insn_bits24_27
, insn_bits21_23
;
3285 uint32_t record_buf
[4];
3287 reg_rd
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3288 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
3289 insn_bits21_23
= bits (aarch64_insn_r
->aarch64_insn
, 21, 23);
3291 if (!bit (aarch64_insn_r
->aarch64_insn
, 28))
3295 /* Logical (shifted register). */
3296 if (insn_bits24_27
== 0x0a)
3297 setflags
= (bits (aarch64_insn_r
->aarch64_insn
, 29, 30) == 0x03);
3299 else if (insn_bits24_27
== 0x0b)
3300 setflags
= bit (aarch64_insn_r
->aarch64_insn
, 29);
3302 return AARCH64_RECORD_UNKNOWN
;
3304 record_buf
[0] = reg_rd
;
3305 aarch64_insn_r
->reg_rec_count
= 1;
3307 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_CPSR_REGNUM
;
3311 if (insn_bits24_27
== 0x0b)
3313 /* Data-processing (3 source). */
3314 record_buf
[0] = reg_rd
;
3315 aarch64_insn_r
->reg_rec_count
= 1;
3317 else if (insn_bits24_27
== 0x0a)
3319 if (insn_bits21_23
== 0x00)
3321 /* Add/subtract (with carry). */
3322 record_buf
[0] = reg_rd
;
3323 aarch64_insn_r
->reg_rec_count
= 1;
3324 if (bit (aarch64_insn_r
->aarch64_insn
, 29))
3326 record_buf
[1] = AARCH64_CPSR_REGNUM
;
3327 aarch64_insn_r
->reg_rec_count
= 2;
3330 else if (insn_bits21_23
== 0x02)
3332 /* Conditional compare (register) and conditional compare
3333 (immediate) instructions. */
3334 record_buf
[0] = AARCH64_CPSR_REGNUM
;
3335 aarch64_insn_r
->reg_rec_count
= 1;
3337 else if (insn_bits21_23
== 0x04 || insn_bits21_23
== 0x06)
3339 /* CConditional select. */
3340 /* Data-processing (2 source). */
3341 /* Data-processing (1 source). */
3342 record_buf
[0] = reg_rd
;
3343 aarch64_insn_r
->reg_rec_count
= 1;
3346 return AARCH64_RECORD_UNKNOWN
;
3350 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3352 return AARCH64_RECORD_SUCCESS
;
3355 /* Record handler for data processing - immediate instructions. */
3358 aarch64_record_data_proc_imm (insn_decode_record
*aarch64_insn_r
)
3360 uint8_t reg_rd
, insn_bit23
, insn_bits24_27
, setflags
;
3361 uint32_t record_buf
[4];
3363 reg_rd
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3364 insn_bit23
= bit (aarch64_insn_r
->aarch64_insn
, 23);
3365 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
3367 if (insn_bits24_27
== 0x00 /* PC rel addressing. */
3368 || insn_bits24_27
== 0x03 /* Bitfield and Extract. */
3369 || (insn_bits24_27
== 0x02 && insn_bit23
)) /* Move wide (immediate). */
3371 record_buf
[0] = reg_rd
;
3372 aarch64_insn_r
->reg_rec_count
= 1;
3374 else if (insn_bits24_27
== 0x01)
3376 /* Add/Subtract (immediate). */
3377 setflags
= bit (aarch64_insn_r
->aarch64_insn
, 29);
3378 record_buf
[0] = reg_rd
;
3379 aarch64_insn_r
->reg_rec_count
= 1;
3381 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_CPSR_REGNUM
;
3383 else if (insn_bits24_27
== 0x02 && !insn_bit23
)
3385 /* Logical (immediate). */
3386 setflags
= bits (aarch64_insn_r
->aarch64_insn
, 29, 30) == 0x03;
3387 record_buf
[0] = reg_rd
;
3388 aarch64_insn_r
->reg_rec_count
= 1;
3390 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_CPSR_REGNUM
;
3393 return AARCH64_RECORD_UNKNOWN
;
3395 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3397 return AARCH64_RECORD_SUCCESS
;
3400 /* Record handler for branch, exception generation and system instructions. */
3403 aarch64_record_branch_except_sys (insn_decode_record
*aarch64_insn_r
)
3405 struct gdbarch_tdep
*tdep
= gdbarch_tdep (aarch64_insn_r
->gdbarch
);
3406 uint8_t insn_bits24_27
, insn_bits28_31
, insn_bits22_23
;
3407 uint32_t record_buf
[4];
3409 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
3410 insn_bits28_31
= bits (aarch64_insn_r
->aarch64_insn
, 28, 31);
3411 insn_bits22_23
= bits (aarch64_insn_r
->aarch64_insn
, 22, 23);
3413 if (insn_bits28_31
== 0x0d)
3415 /* Exception generation instructions. */
3416 if (insn_bits24_27
== 0x04)
3418 if (!bits (aarch64_insn_r
->aarch64_insn
, 2, 4)
3419 && !bits (aarch64_insn_r
->aarch64_insn
, 21, 23)
3420 && bits (aarch64_insn_r
->aarch64_insn
, 0, 1) == 0x01)
3422 ULONGEST svc_number
;
3424 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, 8,
3426 return tdep
->aarch64_syscall_record (aarch64_insn_r
->regcache
,
3430 return AARCH64_RECORD_UNSUPPORTED
;
3432 /* System instructions. */
3433 else if (insn_bits24_27
== 0x05 && insn_bits22_23
== 0x00)
3435 uint32_t reg_rt
, reg_crn
;
3437 reg_rt
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3438 reg_crn
= bits (aarch64_insn_r
->aarch64_insn
, 12, 15);
3440 /* Record rt in case of sysl and mrs instructions. */
3441 if (bit (aarch64_insn_r
->aarch64_insn
, 21))
3443 record_buf
[0] = reg_rt
;
3444 aarch64_insn_r
->reg_rec_count
= 1;
3446 /* Record cpsr for hint and msr(immediate) instructions. */
3447 else if (reg_crn
== 0x02 || reg_crn
== 0x04)
3449 record_buf
[0] = AARCH64_CPSR_REGNUM
;
3450 aarch64_insn_r
->reg_rec_count
= 1;
3453 /* Unconditional branch (register). */
3454 else if((insn_bits24_27
& 0x0e) == 0x06)
3456 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_PC_REGNUM
;
3457 if (bits (aarch64_insn_r
->aarch64_insn
, 21, 22) == 0x01)
3458 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_LR_REGNUM
;
3461 return AARCH64_RECORD_UNKNOWN
;
3463 /* Unconditional branch (immediate). */
3464 else if ((insn_bits28_31
& 0x07) == 0x01 && (insn_bits24_27
& 0x0c) == 0x04)
3466 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_PC_REGNUM
;
3467 if (bit (aarch64_insn_r
->aarch64_insn
, 31))
3468 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_LR_REGNUM
;
3471 /* Compare & branch (immediate), Test & branch (immediate) and
3472 Conditional branch (immediate). */
3473 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_PC_REGNUM
;
3475 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3477 return AARCH64_RECORD_SUCCESS
;
3480 /* Record handler for advanced SIMD load and store instructions. */
3483 aarch64_record_asimd_load_store (insn_decode_record
*aarch64_insn_r
)
3486 uint64_t addr_offset
= 0;
3487 uint32_t record_buf
[24];
3488 uint64_t record_buf_mem
[24];
3489 uint32_t reg_rn
, reg_rt
;
3490 uint32_t reg_index
= 0, mem_index
= 0;
3491 uint8_t opcode_bits
, size_bits
;
3493 reg_rt
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3494 reg_rn
= bits (aarch64_insn_r
->aarch64_insn
, 5, 9);
3495 size_bits
= bits (aarch64_insn_r
->aarch64_insn
, 10, 11);
3496 opcode_bits
= bits (aarch64_insn_r
->aarch64_insn
, 12, 15);
3497 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
, &address
);
3500 debug_printf ("Process record: Advanced SIMD load/store\n");
3502 /* Load/store single structure. */
3503 if (bit (aarch64_insn_r
->aarch64_insn
, 24))
3505 uint8_t sindex
, scale
, selem
, esize
, replicate
= 0;
3506 scale
= opcode_bits
>> 2;
3507 selem
= ((opcode_bits
& 0x02) |
3508 bit (aarch64_insn_r
->aarch64_insn
, 21)) + 1;
3512 if (size_bits
& 0x01)
3513 return AARCH64_RECORD_UNKNOWN
;
3516 if ((size_bits
>> 1) & 0x01)
3517 return AARCH64_RECORD_UNKNOWN
;
3518 if (size_bits
& 0x01)
3520 if (!((opcode_bits
>> 1) & 0x01))
3523 return AARCH64_RECORD_UNKNOWN
;
3527 if (bit (aarch64_insn_r
->aarch64_insn
, 22) && !(opcode_bits
& 0x01))
3534 return AARCH64_RECORD_UNKNOWN
;
3540 for (sindex
= 0; sindex
< selem
; sindex
++)
3542 record_buf
[reg_index
++] = reg_rt
+ AARCH64_V0_REGNUM
;
3543 reg_rt
= (reg_rt
+ 1) % 32;
3547 for (sindex
= 0; sindex
< selem
; sindex
++)
3549 if (bit (aarch64_insn_r
->aarch64_insn
, 22))
3550 record_buf
[reg_index
++] = reg_rt
+ AARCH64_V0_REGNUM
;
3553 record_buf_mem
[mem_index
++] = esize
/ 8;
3554 record_buf_mem
[mem_index
++] = address
+ addr_offset
;
3556 addr_offset
= addr_offset
+ (esize
/ 8);
3557 reg_rt
= (reg_rt
+ 1) % 32;
3561 /* Load/store multiple structure. */
3564 uint8_t selem
, esize
, rpt
, elements
;
3565 uint8_t eindex
, rindex
;
3567 esize
= 8 << size_bits
;
3568 if (bit (aarch64_insn_r
->aarch64_insn
, 30))
3569 elements
= 128 / esize
;
3571 elements
= 64 / esize
;
3573 switch (opcode_bits
)
3575 /*LD/ST4 (4 Registers). */
3580 /*LD/ST1 (4 Registers). */
3585 /*LD/ST3 (3 Registers). */
3590 /*LD/ST1 (3 Registers). */
3595 /*LD/ST1 (1 Register). */
3600 /*LD/ST2 (2 Registers). */
3605 /*LD/ST1 (2 Registers). */
3611 return AARCH64_RECORD_UNSUPPORTED
;
3614 for (rindex
= 0; rindex
< rpt
; rindex
++)
3615 for (eindex
= 0; eindex
< elements
; eindex
++)
3617 uint8_t reg_tt
, sindex
;
3618 reg_tt
= (reg_rt
+ rindex
) % 32;
3619 for (sindex
= 0; sindex
< selem
; sindex
++)
3621 if (bit (aarch64_insn_r
->aarch64_insn
, 22))
3622 record_buf
[reg_index
++] = reg_tt
+ AARCH64_V0_REGNUM
;
3625 record_buf_mem
[mem_index
++] = esize
/ 8;
3626 record_buf_mem
[mem_index
++] = address
+ addr_offset
;
3628 addr_offset
= addr_offset
+ (esize
/ 8);
3629 reg_tt
= (reg_tt
+ 1) % 32;
3634 if (bit (aarch64_insn_r
->aarch64_insn
, 23))
3635 record_buf
[reg_index
++] = reg_rn
;
3637 aarch64_insn_r
->reg_rec_count
= reg_index
;
3638 aarch64_insn_r
->mem_rec_count
= mem_index
/ 2;
3639 MEM_ALLOC (aarch64_insn_r
->aarch64_mems
, aarch64_insn_r
->mem_rec_count
,
3641 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3643 return AARCH64_RECORD_SUCCESS
;
3646 /* Record handler for load and store instructions. */
3649 aarch64_record_load_store (insn_decode_record
*aarch64_insn_r
)
3651 uint8_t insn_bits24_27
, insn_bits28_29
, insn_bits10_11
;
3652 uint8_t insn_bit23
, insn_bit21
;
3653 uint8_t opc
, size_bits
, ld_flag
, vector_flag
;
3654 uint32_t reg_rn
, reg_rt
, reg_rt2
;
3655 uint64_t datasize
, offset
;
3656 uint32_t record_buf
[8];
3657 uint64_t record_buf_mem
[8];
3660 insn_bits10_11
= bits (aarch64_insn_r
->aarch64_insn
, 10, 11);
3661 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
3662 insn_bits28_29
= bits (aarch64_insn_r
->aarch64_insn
, 28, 29);
3663 insn_bit21
= bit (aarch64_insn_r
->aarch64_insn
, 21);
3664 insn_bit23
= bit (aarch64_insn_r
->aarch64_insn
, 23);
3665 ld_flag
= bit (aarch64_insn_r
->aarch64_insn
, 22);
3666 vector_flag
= bit (aarch64_insn_r
->aarch64_insn
, 26);
3667 reg_rt
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3668 reg_rn
= bits (aarch64_insn_r
->aarch64_insn
, 5, 9);
3669 reg_rt2
= bits (aarch64_insn_r
->aarch64_insn
, 10, 14);
3670 size_bits
= bits (aarch64_insn_r
->aarch64_insn
, 30, 31);
3672 /* Load/store exclusive. */
3673 if (insn_bits24_27
== 0x08 && insn_bits28_29
== 0x00)
3676 debug_printf ("Process record: load/store exclusive\n");
3680 record_buf
[0] = reg_rt
;
3681 aarch64_insn_r
->reg_rec_count
= 1;
3684 record_buf
[1] = reg_rt2
;
3685 aarch64_insn_r
->reg_rec_count
= 2;
3691 datasize
= (8 << size_bits
) * 2;
3693 datasize
= (8 << size_bits
);
3694 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
3696 record_buf_mem
[0] = datasize
/ 8;
3697 record_buf_mem
[1] = address
;
3698 aarch64_insn_r
->mem_rec_count
= 1;
3701 /* Save register rs. */
3702 record_buf
[0] = bits (aarch64_insn_r
->aarch64_insn
, 16, 20);
3703 aarch64_insn_r
->reg_rec_count
= 1;
3707 /* Load register (literal) instructions decoding. */
3708 else if ((insn_bits24_27
& 0x0b) == 0x08 && insn_bits28_29
== 0x01)
3711 debug_printf ("Process record: load register (literal)\n");
3713 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
3715 record_buf
[0] = reg_rt
;
3716 aarch64_insn_r
->reg_rec_count
= 1;
3718 /* All types of load/store pair instructions decoding. */
3719 else if ((insn_bits24_27
& 0x0a) == 0x08 && insn_bits28_29
== 0x02)
3722 debug_printf ("Process record: load/store pair\n");
3728 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
3729 record_buf
[1] = reg_rt2
+ AARCH64_V0_REGNUM
;
3733 record_buf
[0] = reg_rt
;
3734 record_buf
[1] = reg_rt2
;
3736 aarch64_insn_r
->reg_rec_count
= 2;
3741 imm7_off
= bits (aarch64_insn_r
->aarch64_insn
, 15, 21);
3743 size_bits
= size_bits
>> 1;
3744 datasize
= 8 << (2 + size_bits
);
3745 offset
= (imm7_off
& 0x40) ? (~imm7_off
& 0x007f) + 1 : imm7_off
;
3746 offset
= offset
<< (2 + size_bits
);
3747 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
3749 if (!((insn_bits24_27
& 0x0b) == 0x08 && insn_bit23
))
3751 if (imm7_off
& 0x40)
3752 address
= address
- offset
;
3754 address
= address
+ offset
;
3757 record_buf_mem
[0] = datasize
/ 8;
3758 record_buf_mem
[1] = address
;
3759 record_buf_mem
[2] = datasize
/ 8;
3760 record_buf_mem
[3] = address
+ (datasize
/ 8);
3761 aarch64_insn_r
->mem_rec_count
= 2;
3763 if (bit (aarch64_insn_r
->aarch64_insn
, 23))
3764 record_buf
[aarch64_insn_r
->reg_rec_count
++] = reg_rn
;
3766 /* Load/store register (unsigned immediate) instructions. */
3767 else if ((insn_bits24_27
& 0x0b) == 0x09 && insn_bits28_29
== 0x03)
3769 opc
= bits (aarch64_insn_r
->aarch64_insn
, 22, 23);
3779 if (size_bits
== 0x3 && vector_flag
== 0x0 && opc
== 0x2)
3781 /* PRFM (immediate) */
3782 return AARCH64_RECORD_SUCCESS
;
3784 else if (size_bits
== 0x2 && vector_flag
== 0x0 && opc
== 0x2)
3786 /* LDRSW (immediate) */
3800 debug_printf ("Process record: load/store (unsigned immediate):"
3801 " size %x V %d opc %x\n", size_bits
, vector_flag
,
3807 offset
= bits (aarch64_insn_r
->aarch64_insn
, 10, 21);
3808 datasize
= 8 << size_bits
;
3809 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
3811 offset
= offset
<< size_bits
;
3812 address
= address
+ offset
;
3814 record_buf_mem
[0] = datasize
>> 3;
3815 record_buf_mem
[1] = address
;
3816 aarch64_insn_r
->mem_rec_count
= 1;
3821 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
3823 record_buf
[0] = reg_rt
;
3824 aarch64_insn_r
->reg_rec_count
= 1;
3827 /* Load/store register (register offset) instructions. */
3828 else if ((insn_bits24_27
& 0x0b) == 0x08 && insn_bits28_29
== 0x03
3829 && insn_bits10_11
== 0x02 && insn_bit21
)
3832 debug_printf ("Process record: load/store (register offset)\n");
3833 opc
= bits (aarch64_insn_r
->aarch64_insn
, 22, 23);
3840 if (size_bits
!= 0x03)
3843 return AARCH64_RECORD_UNKNOWN
;
3847 ULONGEST reg_rm_val
;
3849 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
,
3850 bits (aarch64_insn_r
->aarch64_insn
, 16, 20), ®_rm_val
);
3851 if (bit (aarch64_insn_r
->aarch64_insn
, 12))
3852 offset
= reg_rm_val
<< size_bits
;
3854 offset
= reg_rm_val
;
3855 datasize
= 8 << size_bits
;
3856 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
3858 address
= address
+ offset
;
3859 record_buf_mem
[0] = datasize
>> 3;
3860 record_buf_mem
[1] = address
;
3861 aarch64_insn_r
->mem_rec_count
= 1;
3866 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
3868 record_buf
[0] = reg_rt
;
3869 aarch64_insn_r
->reg_rec_count
= 1;
3872 /* Load/store register (immediate and unprivileged) instructions. */
3873 else if ((insn_bits24_27
& 0x0b) == 0x08 && insn_bits28_29
== 0x03
3878 debug_printf ("Process record: load/store "
3879 "(immediate and unprivileged)\n");
3881 opc
= bits (aarch64_insn_r
->aarch64_insn
, 22, 23);
3888 if (size_bits
!= 0x03)
3891 return AARCH64_RECORD_UNKNOWN
;
3896 imm9_off
= bits (aarch64_insn_r
->aarch64_insn
, 12, 20);
3897 offset
= (imm9_off
& 0x0100) ? (((~imm9_off
) & 0x01ff) + 1) : imm9_off
;
3898 datasize
= 8 << size_bits
;
3899 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
3901 if (insn_bits10_11
!= 0x01)
3903 if (imm9_off
& 0x0100)
3904 address
= address
- offset
;
3906 address
= address
+ offset
;
3908 record_buf_mem
[0] = datasize
>> 3;
3909 record_buf_mem
[1] = address
;
3910 aarch64_insn_r
->mem_rec_count
= 1;
3915 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
3917 record_buf
[0] = reg_rt
;
3918 aarch64_insn_r
->reg_rec_count
= 1;
3920 if (insn_bits10_11
== 0x01 || insn_bits10_11
== 0x03)
3921 record_buf
[aarch64_insn_r
->reg_rec_count
++] = reg_rn
;
3923 /* Advanced SIMD load/store instructions. */
3925 return aarch64_record_asimd_load_store (aarch64_insn_r
);
3927 MEM_ALLOC (aarch64_insn_r
->aarch64_mems
, aarch64_insn_r
->mem_rec_count
,
3929 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3931 return AARCH64_RECORD_SUCCESS
;
3934 /* Record handler for data processing SIMD and floating point instructions. */
3937 aarch64_record_data_proc_simd_fp (insn_decode_record
*aarch64_insn_r
)
3939 uint8_t insn_bit21
, opcode
, rmode
, reg_rd
;
3940 uint8_t insn_bits24_27
, insn_bits28_31
, insn_bits10_11
, insn_bits12_15
;
3941 uint8_t insn_bits11_14
;
3942 uint32_t record_buf
[2];
3944 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
3945 insn_bits28_31
= bits (aarch64_insn_r
->aarch64_insn
, 28, 31);
3946 insn_bits10_11
= bits (aarch64_insn_r
->aarch64_insn
, 10, 11);
3947 insn_bits12_15
= bits (aarch64_insn_r
->aarch64_insn
, 12, 15);
3948 insn_bits11_14
= bits (aarch64_insn_r
->aarch64_insn
, 11, 14);
3949 opcode
= bits (aarch64_insn_r
->aarch64_insn
, 16, 18);
3950 rmode
= bits (aarch64_insn_r
->aarch64_insn
, 19, 20);
3951 reg_rd
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3952 insn_bit21
= bit (aarch64_insn_r
->aarch64_insn
, 21);
3955 debug_printf ("Process record: data processing SIMD/FP: ");
3957 if ((insn_bits28_31
& 0x05) == 0x01 && insn_bits24_27
== 0x0e)
3959 /* Floating point - fixed point conversion instructions. */
3963 debug_printf ("FP - fixed point conversion");
3965 if ((opcode
>> 1) == 0x0 && rmode
== 0x03)
3966 record_buf
[0] = reg_rd
;
3968 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3970 /* Floating point - conditional compare instructions. */
3971 else if (insn_bits10_11
== 0x01)
3974 debug_printf ("FP - conditional compare");
3976 record_buf
[0] = AARCH64_CPSR_REGNUM
;
3978 /* Floating point - data processing (2-source) and
3979 conditional select instructions. */
3980 else if (insn_bits10_11
== 0x02 || insn_bits10_11
== 0x03)
3983 debug_printf ("FP - DP (2-source)");
3985 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3987 else if (insn_bits10_11
== 0x00)
3989 /* Floating point - immediate instructions. */
3990 if ((insn_bits12_15
& 0x01) == 0x01
3991 || (insn_bits12_15
& 0x07) == 0x04)
3994 debug_printf ("FP - immediate");
3995 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3997 /* Floating point - compare instructions. */
3998 else if ((insn_bits12_15
& 0x03) == 0x02)
4001 debug_printf ("FP - immediate");
4002 record_buf
[0] = AARCH64_CPSR_REGNUM
;
4004 /* Floating point - integer conversions instructions. */
4005 else if (insn_bits12_15
== 0x00)
4007 /* Convert float to integer instruction. */
4008 if (!(opcode
>> 1) || ((opcode
>> 1) == 0x02 && !rmode
))
4011 debug_printf ("float to int conversion");
4013 record_buf
[0] = reg_rd
+ AARCH64_X0_REGNUM
;
4015 /* Convert integer to float instruction. */
4016 else if ((opcode
>> 1) == 0x01 && !rmode
)
4019 debug_printf ("int to float conversion");
4021 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
4023 /* Move float to integer instruction. */
4024 else if ((opcode
>> 1) == 0x03)
4027 debug_printf ("move float to int");
4029 if (!(opcode
& 0x01))
4030 record_buf
[0] = reg_rd
+ AARCH64_X0_REGNUM
;
4032 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
4035 return AARCH64_RECORD_UNKNOWN
;
4038 return AARCH64_RECORD_UNKNOWN
;
4041 return AARCH64_RECORD_UNKNOWN
;
4043 else if ((insn_bits28_31
& 0x09) == 0x00 && insn_bits24_27
== 0x0e)
4046 debug_printf ("SIMD copy");
4048 /* Advanced SIMD copy instructions. */
4049 if (!bits (aarch64_insn_r
->aarch64_insn
, 21, 23)
4050 && !bit (aarch64_insn_r
->aarch64_insn
, 15)
4051 && bit (aarch64_insn_r
->aarch64_insn
, 10))
4053 if (insn_bits11_14
== 0x05 || insn_bits11_14
== 0x07)
4054 record_buf
[0] = reg_rd
+ AARCH64_X0_REGNUM
;
4056 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
4059 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
4061 /* All remaining floating point or advanced SIMD instructions. */
4065 debug_printf ("all remain");
4067 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
4071 debug_printf ("\n");
4073 aarch64_insn_r
->reg_rec_count
++;
4074 gdb_assert (aarch64_insn_r
->reg_rec_count
== 1);
4075 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
4077 return AARCH64_RECORD_SUCCESS
;
4080 /* Decodes insns type and invokes its record handler. */
4083 aarch64_record_decode_insn_handler (insn_decode_record
*aarch64_insn_r
)
4085 uint32_t ins_bit25
, ins_bit26
, ins_bit27
, ins_bit28
;
4087 ins_bit25
= bit (aarch64_insn_r
->aarch64_insn
, 25);
4088 ins_bit26
= bit (aarch64_insn_r
->aarch64_insn
, 26);
4089 ins_bit27
= bit (aarch64_insn_r
->aarch64_insn
, 27);
4090 ins_bit28
= bit (aarch64_insn_r
->aarch64_insn
, 28);
4092 /* Data processing - immediate instructions. */
4093 if (!ins_bit26
&& !ins_bit27
&& ins_bit28
)
4094 return aarch64_record_data_proc_imm (aarch64_insn_r
);
4096 /* Branch, exception generation and system instructions. */
4097 if (ins_bit26
&& !ins_bit27
&& ins_bit28
)
4098 return aarch64_record_branch_except_sys (aarch64_insn_r
);
4100 /* Load and store instructions. */
4101 if (!ins_bit25
&& ins_bit27
)
4102 return aarch64_record_load_store (aarch64_insn_r
);
4104 /* Data processing - register instructions. */
4105 if (ins_bit25
&& !ins_bit26
&& ins_bit27
)
4106 return aarch64_record_data_proc_reg (aarch64_insn_r
);
4108 /* Data processing - SIMD and floating point instructions. */
4109 if (ins_bit25
&& ins_bit26
&& ins_bit27
)
4110 return aarch64_record_data_proc_simd_fp (aarch64_insn_r
);
4112 return AARCH64_RECORD_UNSUPPORTED
;
4115 /* Cleans up local record registers and memory allocations. */
4118 deallocate_reg_mem (insn_decode_record
*record
)
4120 xfree (record
->aarch64_regs
);
4121 xfree (record
->aarch64_mems
);
#if GDB_SELF_TEST
namespace selftests {

/* Self-test for the AArch64 process-record machinery: decode a PRFM
   instruction (a prefetch, which changes no register or memory state)
   and check that nothing is recorded.

   NOTE(review): the `#if GDB_SELF_TEST` opener and the declaration of
   RET were lost in extraction and restored here; its matching `#endif`
   survives below.  */

static void
aarch64_process_record_test (void)
{
  struct gdbarch_info info;
  uint32_t ret;

  gdbarch_info_init (&info);
  info.bfd_arch_info = bfd_scan_arch ("aarch64");

  struct gdbarch *gdbarch = gdbarch_find_by_info (info);
  SELF_CHECK (gdbarch != NULL);

  insn_decode_record aarch64_record;

  memset (&aarch64_record, 0, sizeof (insn_decode_record));
  aarch64_record.regcache = NULL;
  aarch64_record.this_addr = 0;
  aarch64_record.gdbarch = gdbarch;

  /* 20 00 80 f9	prfm	pldl1keep, [x1] */
  aarch64_record.aarch64_insn = 0xf9800020;
  ret = aarch64_record_decode_insn_handler (&aarch64_record);
  SELF_CHECK (ret == AARCH64_RECORD_SUCCESS);
  /* A prefetch records no registers and no memory.  */
  SELF_CHECK (aarch64_record.reg_rec_count == 0);
  SELF_CHECK (aarch64_record.mem_rec_count == 0);

  deallocate_reg_mem (&aarch64_record);
}

} // namespace selftests
#endif /* GDB_SELF_TEST */
4159 /* Parse the current instruction and record the values of the registers and
4160 memory that will be changed in current instruction to record_arch_list
4161 return -1 if something is wrong. */
4164 aarch64_process_record (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
4165 CORE_ADDR insn_addr
)
4167 uint32_t rec_no
= 0;
4168 uint8_t insn_size
= 4;
4170 gdb_byte buf
[insn_size
];
4171 insn_decode_record aarch64_record
;
4173 memset (&buf
[0], 0, insn_size
);
4174 memset (&aarch64_record
, 0, sizeof (insn_decode_record
));
4175 target_read_memory (insn_addr
, &buf
[0], insn_size
);
4176 aarch64_record
.aarch64_insn
4177 = (uint32_t) extract_unsigned_integer (&buf
[0],
4179 gdbarch_byte_order (gdbarch
));
4180 aarch64_record
.regcache
= regcache
;
4181 aarch64_record
.this_addr
= insn_addr
;
4182 aarch64_record
.gdbarch
= gdbarch
;
4184 ret
= aarch64_record_decode_insn_handler (&aarch64_record
);
4185 if (ret
== AARCH64_RECORD_UNSUPPORTED
)
4187 printf_unfiltered (_("Process record does not support instruction "
4188 "0x%0x at address %s.\n"),
4189 aarch64_record
.aarch64_insn
,
4190 paddress (gdbarch
, insn_addr
));
4196 /* Record registers. */
4197 record_full_arch_list_add_reg (aarch64_record
.regcache
,
4199 /* Always record register CPSR. */
4200 record_full_arch_list_add_reg (aarch64_record
.regcache
,
4201 AARCH64_CPSR_REGNUM
);
4202 if (aarch64_record
.aarch64_regs
)
4203 for (rec_no
= 0; rec_no
< aarch64_record
.reg_rec_count
; rec_no
++)
4204 if (record_full_arch_list_add_reg (aarch64_record
.regcache
,
4205 aarch64_record
.aarch64_regs
[rec_no
]))
4208 /* Record memories. */
4209 if (aarch64_record
.aarch64_mems
)
4210 for (rec_no
= 0; rec_no
< aarch64_record
.mem_rec_count
; rec_no
++)
4211 if (record_full_arch_list_add_mem
4212 ((CORE_ADDR
)aarch64_record
.aarch64_mems
[rec_no
].addr
,
4213 aarch64_record
.aarch64_mems
[rec_no
].len
))
4216 if (record_full_arch_list_add_end ())
4220 deallocate_reg_mem (&aarch64_record
);