AArch64 pauth: Indicate unmasked addresses in backtrace
gdb/aarch64-tdep.c
/* Common target dependent code for GDB on AArch64 systems.

   Copyright (C) 2009-2019 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"

#include "frame.h"
#include "inferior.h"
#include "gdbcmd.h"
#include "gdbcore.h"
#include "dis-asm.h"
#include "regcache.h"
#include "reggroups.h"
#include "value.h"
#include "arch-utils.h"
#include "osabi.h"
#include "frame-unwind.h"
#include "frame-base.h"
#include "trad-frame.h"
#include "objfiles.h"
#include "dwarf2.h"
#include "dwarf2-frame.h"
#include "gdbtypes.h"
#include "prologue-value.h"
#include "target-descriptions.h"
#include "user-regs.h"
#include "language.h"
#include "infcall.h"
#include "ax.h"
#include "ax-gdb.h"
#include "gdbsupport/selftest.h"

#include "aarch64-tdep.h"
#include "aarch64-ravenscar-thread.h"

#include "elf-bfd.h"
#include "elf/aarch64.h"

#include "gdbsupport/vec.h"

#include "record.h"
#include "record-full.h"
#include "arch/aarch64-insn.h"
#include "gdbarch.h"

#include "opcode/aarch64.h"
#include <algorithm>

#define submask(x) ((1L << ((x) + 1)) - 1)
#define bit(obj,st) (((obj) >> (st)) & 1)
#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
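
/* Example (illustrative): bits (insn, 5, 9) extracts the five-bit
   field insn[9:5]; submask (9 - 5) evaluates to 0x1f, so the result
   is (insn >> 5) & 0x1f.  */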

/* A Homogeneous Floating-Point or Short-Vector Aggregate may have at most
   four members.  */
#define HA_MAX_NUM_FLDS 4

/* All possible aarch64 target descriptors.  */
struct target_desc *tdesc_aarch64_list[AARCH64_MAX_SVE_VQ + 1][2/*pauth*/];

/* The standard register names, and all the valid aliases for them.  */
static const struct
{
  const char *const name;
  int regnum;
} aarch64_register_aliases[] =
{
  /* 64-bit register names.  */
  {"fp", AARCH64_FP_REGNUM},
  {"lr", AARCH64_LR_REGNUM},
  {"sp", AARCH64_SP_REGNUM},

  /* 32-bit register names.  */
  {"w0", AARCH64_X0_REGNUM + 0},
  {"w1", AARCH64_X0_REGNUM + 1},
  {"w2", AARCH64_X0_REGNUM + 2},
  {"w3", AARCH64_X0_REGNUM + 3},
  {"w4", AARCH64_X0_REGNUM + 4},
  {"w5", AARCH64_X0_REGNUM + 5},
  {"w6", AARCH64_X0_REGNUM + 6},
  {"w7", AARCH64_X0_REGNUM + 7},
  {"w8", AARCH64_X0_REGNUM + 8},
  {"w9", AARCH64_X0_REGNUM + 9},
  {"w10", AARCH64_X0_REGNUM + 10},
  {"w11", AARCH64_X0_REGNUM + 11},
  {"w12", AARCH64_X0_REGNUM + 12},
  {"w13", AARCH64_X0_REGNUM + 13},
  {"w14", AARCH64_X0_REGNUM + 14},
  {"w15", AARCH64_X0_REGNUM + 15},
  {"w16", AARCH64_X0_REGNUM + 16},
  {"w17", AARCH64_X0_REGNUM + 17},
  {"w18", AARCH64_X0_REGNUM + 18},
  {"w19", AARCH64_X0_REGNUM + 19},
  {"w20", AARCH64_X0_REGNUM + 20},
  {"w21", AARCH64_X0_REGNUM + 21},
  {"w22", AARCH64_X0_REGNUM + 22},
  {"w23", AARCH64_X0_REGNUM + 23},
  {"w24", AARCH64_X0_REGNUM + 24},
  {"w25", AARCH64_X0_REGNUM + 25},
  {"w26", AARCH64_X0_REGNUM + 26},
  {"w27", AARCH64_X0_REGNUM + 27},
  {"w28", AARCH64_X0_REGNUM + 28},
  {"w29", AARCH64_X0_REGNUM + 29},
  {"w30", AARCH64_X0_REGNUM + 30},

  /* specials */
  {"ip0", AARCH64_X0_REGNUM + 16},
  {"ip1", AARCH64_X0_REGNUM + 17}
};

/* The required core 'R' registers.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM!  */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  "pc", "cpsr"
};

/* The FP/SIMD 'V' registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM!  */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  "fpsr",
  "fpcr"
};

/* The SVE 'Z' and 'P' registers.  */
static const char *const aarch64_sve_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_SVE_Z0_REGNUM!  */
  "z0", "z1", "z2", "z3",
  "z4", "z5", "z6", "z7",
  "z8", "z9", "z10", "z11",
  "z12", "z13", "z14", "z15",
  "z16", "z17", "z18", "z19",
  "z20", "z21", "z22", "z23",
  "z24", "z25", "z26", "z27",
  "z28", "z29", "z30", "z31",
  "fpsr", "fpcr",
  "p0", "p1", "p2", "p3",
  "p4", "p5", "p6", "p7",
  "p8", "p9", "p10", "p11",
  "p12", "p13", "p14", "p15",
  "ffr", "vg"
};

static const char *const aarch64_pauth_register_names[] =
{
  /* Authentication mask for data pointer.  */
  "pauth_dmask",
  /* Authentication mask for code pointer.  */
  "pauth_cmask"
};

/* AArch64 prologue cache structure.  */
struct aarch64_prologue_cache
{
  /* The program counter at the start of the function.  It is used to
     identify this frame as a prologue frame.  */
  CORE_ADDR func;

  /* The program counter at the time this frame was created; i.e. where
     this function was called from.  It is used to identify this frame as a
     stub frame.  */
  CORE_ADDR prev_pc;

  /* The stack pointer at the time this frame was created; i.e. the
     caller's stack pointer when this function was called.  It is used
     to identify this frame.  */
  CORE_ADDR prev_sp;

  /* Is the target available to read from?  */
  int available_p;

  /* The frame base for this frame is just prev_sp - frame size.
     FRAMESIZE is the distance from the frame pointer to the
     initial stack pointer.  */
  int framesize;

  /* The register used to hold the frame pointer for this frame.  */
  int framereg;

  /* Saved register offsets.  */
  struct trad_frame_saved_reg *saved_regs;
};

static void
show_aarch64_debug (struct ui_file *file, int from_tty,
		    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
}

namespace {

/* Abstract instruction reader.  */

class abstract_instruction_reader
{
public:
  /* Read in one instruction.  */
  virtual ULONGEST read (CORE_ADDR memaddr, int len,
			 enum bfd_endian byte_order) = 0;
};

/* Instruction reader from real target.  */

class instruction_reader : public abstract_instruction_reader
{
 public:
  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    override
  {
    return read_code_unsigned_integer (memaddr, len, byte_order);
  }
};

} // namespace

/* If address signing is enabled, mask off the signature bits from the link
   register, which is passed by value in ADDR, using the register values in
   THIS_FRAME.  */

static CORE_ADDR
aarch64_frame_unmask_lr (struct gdbarch_tdep *tdep,
			 struct frame_info *this_frame, CORE_ADDR addr)
{
  if (tdep->has_pauth ()
      && frame_unwind_register_unsigned (this_frame,
					 tdep->pauth_ra_state_regnum))
    {
      int cmask_num = AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base);
      CORE_ADDR cmask = frame_unwind_register_unsigned (this_frame, cmask_num);
      addr = addr & ~cmask;

      /* Record in the frame that the link register required unmasking.  */
      set_frame_previous_pc_masked (this_frame);
    }

  return addr;
}
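
/* Example (illustrative): on a Linux target with 48-bit virtual
   addresses and top-byte-ignore enabled, pauth_cmask typically reads
   as 0x007f000000000000 (bits 54:48), so a signed LR value of
   0x007a0000004005c4 unmasks to 0x00000000004005c4.  */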

/* Analyze a prologue, looking for a recognizable stack frame
   and frame pointer.  Scan until we encounter a store that could
   clobber the stack frame unexpectedly, or an unknown instruction.  */

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
			  CORE_ADDR start, CORE_ADDR limit,
			  struct aarch64_prologue_cache *cache,
			  abstract_instruction_reader& reader)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  int i;
  /* Track X registers and D registers in prologue.  */
  pv_t regs[AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT];

  for (i = 0; i < AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT; i++)
    regs[i] = pv_register (i, 0);
  pv_area stack (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));

  for (; start < limit; start += 4)
    {
      uint32_t insn;
      aarch64_inst inst;

      insn = reader.read (start, 4, byte_order_for_code);

      if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
	break;

      if (inst.opcode->iclass == addsub_imm
	  && (inst.opcode->op == OP_ADD
	      || strcmp ("sub", inst.opcode->name) == 0))
	{
	  unsigned rd = inst.operands[0].reg.regno;
	  unsigned rn = inst.operands[1].reg.regno;

	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
	  gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM);

	  if (inst.opcode->op == OP_ADD)
	    {
	      regs[rd] = pv_add_constant (regs[rn],
					  inst.operands[2].imm.value);
	    }
	  else
	    {
	      regs[rd] = pv_add_constant (regs[rn],
					  -inst.operands[2].imm.value);
	    }
	}
      else if (inst.opcode->iclass == pcreladdr
	       && inst.operands[1].type == AARCH64_OPND_ADDR_ADRP)
	{
	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);

	  regs[inst.operands[0].reg.regno] = pv_unknown ();
	}
      else if (inst.opcode->iclass == branch_imm)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->iclass == condbranch)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->iclass == branch_reg)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->iclass == compbranch)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->op == OP_MOVZ)
	{
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
	  regs[inst.operands[0].reg.regno] = pv_unknown ();
	}
      else if (inst.opcode->iclass == log_shift
	       && strcmp (inst.opcode->name, "orr") == 0)
	{
	  unsigned rd = inst.operands[0].reg.regno;
	  unsigned rn = inst.operands[1].reg.regno;
	  unsigned rm = inst.operands[2].reg.regno;

	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn);
	  gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT);

	  if (inst.operands[2].shifter.amount == 0
	      && rn == AARCH64_SP_REGNUM)
	    regs[rd] = regs[rm];
	  else
	    {
	      if (aarch64_debug)
		{
		  debug_printf ("aarch64: prologue analysis gave up "
				"addr=%s opcode=0x%x (orr x register)\n",
				core_addr_to_string_nz (start), insn);
		}
	      break;
	    }
	}
      else if (inst.opcode->op == OP_STUR)
	{
	  unsigned rt = inst.operands[0].reg.regno;
	  unsigned rn = inst.operands[1].addr.base_regno;
	  int is64
	    = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);

	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
	  gdb_assert (!inst.operands[1].addr.offset.is_reg);

	  stack.store (pv_add_constant (regs[rn],
					inst.operands[1].addr.offset.imm),
		       is64 ? 8 : 4, regs[rt]);
	}
      else if ((inst.opcode->iclass == ldstpair_off
		|| (inst.opcode->iclass == ldstpair_indexed
		    && inst.operands[2].addr.preind))
	       && strcmp ("stp", inst.opcode->name) == 0)
	{
	  /* STP with addressing mode Pre-indexed and Base register.  */
	  unsigned rt1;
	  unsigned rt2;
	  unsigned rn = inst.operands[2].addr.base_regno;
	  int32_t imm = inst.operands[2].addr.offset.imm;

	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
		      || inst.operands[0].type == AARCH64_OPND_Ft);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2
		      || inst.operands[1].type == AARCH64_OPND_Ft2);
	  gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7);
	  gdb_assert (!inst.operands[2].addr.offset.is_reg);

	  /* If recording this store would invalidate the store area
	     (perhaps because rn is not known) then we should abandon
	     further prologue analysis.  */
	  if (stack.store_would_trash (pv_add_constant (regs[rn], imm)))
	    break;

	  if (stack.store_would_trash (pv_add_constant (regs[rn], imm + 8)))
	    break;

	  rt1 = inst.operands[0].reg.regno;
	  rt2 = inst.operands[1].reg.regno;
	  if (inst.operands[0].type == AARCH64_OPND_Ft)
	    {
	      /* Only bottom 64-bit of each V register (D register) need
		 to be preserved.  */
	      gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
	      rt1 += AARCH64_X_REGISTER_COUNT;
	      rt2 += AARCH64_X_REGISTER_COUNT;
	    }

	  stack.store (pv_add_constant (regs[rn], imm), 8,
		       regs[rt1]);
	  stack.store (pv_add_constant (regs[rn], imm + 8), 8,
		       regs[rt2]);

	  if (inst.operands[2].addr.writeback)
	    regs[rn] = pv_add_constant (regs[rn], imm);
	}
      else if ((inst.opcode->iclass == ldst_imm9 /* Signed immediate.  */
		|| (inst.opcode->iclass == ldst_pos /* Unsigned immediate.  */
		    && (inst.opcode->op == OP_STR_POS
			|| inst.opcode->op == OP_STRF_POS)))
	       && inst.operands[1].addr.base_regno == AARCH64_SP_REGNUM
	       && strcmp ("str", inst.opcode->name) == 0)
	{
	  /* STR (immediate) */
	  unsigned int rt = inst.operands[0].reg.regno;
	  int32_t imm = inst.operands[1].addr.offset.imm;
	  unsigned int rn = inst.operands[1].addr.base_regno;
	  bool is64
	    = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
		      || inst.operands[0].type == AARCH64_OPND_Ft);

	  if (inst.operands[0].type == AARCH64_OPND_Ft)
	    {
	      /* Only bottom 64-bit of each V register (D register) need
		 to be preserved.  */
	      gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
	      rt += AARCH64_X_REGISTER_COUNT;
	    }

	  stack.store (pv_add_constant (regs[rn], imm),
		       is64 ? 8 : 4, regs[rt]);
	  if (inst.operands[1].addr.writeback)
	    regs[rn] = pv_add_constant (regs[rn], imm);
	}
      else if (inst.opcode->iclass == testbranch)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->iclass == ic_system)
	{
	  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
	  int ra_state_val = 0;

	  if (insn == 0xd503233f /* paciasp.  */
	      || insn == 0xd503237f /* pacibsp.  */)
	    {
	      /* Return addresses are mangled.  */
	      ra_state_val = 1;
	    }
	  else if (insn == 0xd50323bf /* autiasp.  */
		   || insn == 0xd50323ff /* autibsp.  */)
	    {
	      /* Return addresses are not mangled.  */
	      ra_state_val = 0;
	    }
	  else
	    {
	      if (aarch64_debug)
		debug_printf ("aarch64: prologue analysis gave up addr=%s"
			      " opcode=0x%x (iclass)\n",
			      core_addr_to_string_nz (start), insn);
	      break;
	    }

	  if (tdep->has_pauth () && cache != nullptr)
	    trad_frame_set_value (cache->saved_regs,
				  tdep->pauth_ra_state_regnum,
				  ra_state_val);
	}
      else
	{
	  if (aarch64_debug)
	    {
	      debug_printf ("aarch64: prologue analysis gave up addr=%s"
			    " opcode=0x%x\n",
			    core_addr_to_string_nz (start), insn);
	    }
	  break;
	}
    }

  if (cache == NULL)
    return start;

  if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Frame pointer is fp.  Frame size is constant.  */
      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = -regs[AARCH64_FP_REGNUM].k;
    }
  else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Try the stack pointer.  */
      cache->framesize = -regs[AARCH64_SP_REGNUM].k;
      cache->framereg = AARCH64_SP_REGNUM;
    }
  else
    {
      /* We're just out of luck.  We don't know where the frame is.  */
      cache->framereg = -1;
      cache->framesize = 0;
    }

  for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
    {
      CORE_ADDR offset;

      if (stack.find_reg (gdbarch, i, &offset))
	cache->saved_regs[i].addr = offset;
    }

  for (i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
    {
      int regnum = gdbarch_num_regs (gdbarch);
      CORE_ADDR offset;

      if (stack.find_reg (gdbarch, i + AARCH64_X_REGISTER_COUNT,
			  &offset))
	cache->saved_regs[i + regnum + AARCH64_D0_REGNUM].addr = offset;
    }

  return start;
}
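
/* Example (illustrative): given a non-NULL CACHE, the canonical
   frame-setup prologue

     stp x29, x30, [sp, #-32]!
     mov x29, sp

   leaves CACHE->framereg == AARCH64_FP_REGNUM and CACHE->framesize
   == 32, with x29 and x30 recorded at offsets -32 and -24 from the
   previous SP (see the self tests below).  */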

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
			  CORE_ADDR start, CORE_ADDR limit,
			  struct aarch64_prologue_cache *cache)
{
  instruction_reader reader;

  return aarch64_analyze_prologue (gdbarch, start, limit, cache,
				   reader);
}

#if GDB_SELF_TEST

namespace selftests {

/* Instruction reader from manually cooked instruction sequences.  */

class instruction_reader_test : public abstract_instruction_reader
{
public:
  template<size_t SIZE>
  explicit instruction_reader_test (const uint32_t (&insns)[SIZE])
  : m_insns (insns), m_insns_size (SIZE)
  {}

  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    override
  {
    SELF_CHECK (len == 4);
    SELF_CHECK (memaddr % 4 == 0);
    SELF_CHECK (memaddr / 4 < m_insns_size);

    return m_insns[memaddr / 4];
  }

private:
  const uint32_t *m_insns;
  size_t m_insns_size;
};

static void
aarch64_analyze_prologue_test (void)
{
  struct gdbarch_info info;

  gdbarch_info_init (&info);
  info.bfd_arch_info = bfd_scan_arch ("aarch64");

  struct gdbarch *gdbarch = gdbarch_find_by_info (info);
  SELF_CHECK (gdbarch != NULL);

  struct aarch64_prologue_cache cache;
  cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);

  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  /* Test the simple prologue in which frame pointer is used.  */
  {
    static const uint32_t insns[] = {
      0xa9af7bfd, /* stp     x29, x30, [sp,#-272]! */
      0x910003fd, /* mov     x29, sp */
      0x97ffffe6, /* bl      0x400580 */
    };
    instruction_reader_test reader (insns);

    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
    SELF_CHECK (end == 4 * 2);

    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 272);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
	if (i == AARCH64_FP_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr == -272);
	else if (i == AARCH64_LR_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr == -264);
	else
	  SELF_CHECK (cache.saved_regs[i].addr == -1);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
	int regnum = gdbarch_num_regs (gdbarch);

	SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
		    == -1);
      }
  }

  /* Test a prologue in which STR is used and frame pointer is not
     used.  */
  {
    static const uint32_t insns[] = {
      0xf81d0ff3, /* str     x19, [sp, #-48]! */
      0xb9002fe0, /* str     w0, [sp, #44] */
      0xf90013e1, /* str     x1, [sp, #32]*/
      0xfd000fe0, /* str     d0, [sp, #24] */
      0xaa0203f3, /* mov     x19, x2 */
      0xf94013e0, /* ldr     x0, [sp, #32] */
    };
    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    SELF_CHECK (end == 4 * 5);

    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 48);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
	if (i == 1)
	  SELF_CHECK (cache.saved_regs[i].addr == -16);
	else if (i == 19)
	  SELF_CHECK (cache.saved_regs[i].addr == -48);
	else
	  SELF_CHECK (cache.saved_regs[i].addr == -1);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
	int regnum = gdbarch_num_regs (gdbarch);

	if (i == 0)
	  SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
		      == -24);
	else
	  SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
		      == -1);
      }
  }

  /* Test a prologue in which there is a return address signing
     instruction.  */
  if (tdep->has_pauth ())
    {
      static const uint32_t insns[] = {
	0xd503233f, /* paciasp */
	0xa9bd7bfd, /* stp     x29, x30, [sp, #-48]! */
	0x910003fd, /* mov     x29, sp */
	0xf801c3f3, /* str     x19, [sp, #28] */
	0xb9401fa0, /* ldr     x19, [x29, #28] */
      };
      instruction_reader_test reader (insns);

      trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
      CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache,
						reader);

      SELF_CHECK (end == 4 * 4);
      SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
      SELF_CHECK (cache.framesize == 48);

      for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
	{
	  if (i == 19)
	    SELF_CHECK (cache.saved_regs[i].addr == -20);
	  else if (i == AARCH64_FP_REGNUM)
	    SELF_CHECK (cache.saved_regs[i].addr == -48);
	  else if (i == AARCH64_LR_REGNUM)
	    SELF_CHECK (cache.saved_regs[i].addr == -40);
	  else
	    SELF_CHECK (cache.saved_regs[i].addr == -1);
	}

      if (tdep->has_pauth ())
	{
	  SELF_CHECK (trad_frame_value_p (cache.saved_regs,
					  tdep->pauth_ra_state_regnum));
	  SELF_CHECK (cache.saved_regs[tdep->pauth_ra_state_regnum].addr == 1);
	}
    }
}
} // namespace selftests
#endif /* GDB_SELF_TEST */

/* Implement the "skip_prologue" gdbarch method.  */

static CORE_ADDR
aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  CORE_ADDR func_addr, limit_pc;

  /* See if we can determine the end of the prologue via the symbol
     table.  If so, then return either PC, or the PC after the
     prologue, whichever is greater.  */
  if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
    {
      CORE_ADDR post_prologue_pc
	= skip_prologue_using_sal (gdbarch, func_addr);

      if (post_prologue_pc != 0)
	return std::max (pc, post_prologue_pc);
    }

  /* Can't determine prologue from the symbol table, need to examine
     instructions.  */

  /* Find an upper limit on the function prologue using the debug
     information.  If the debug information could not be used to
     provide that bound, then use an arbitrary large number as the
     upper bound.  */
  limit_pc = skip_prologue_using_sal (gdbarch, pc);
  if (limit_pc == 0)
    limit_pc = pc + 128;	/* Magic.  */

  /* Try disassembling prologue.  */
  return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
}

/* Scan the function prologue for THIS_FRAME and populate the prologue
   cache CACHE.  */

static void
aarch64_scan_prologue (struct frame_info *this_frame,
		       struct aarch64_prologue_cache *cache)
{
  CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
  CORE_ADDR prologue_start;
  CORE_ADDR prologue_end;
  CORE_ADDR prev_pc = get_frame_pc (this_frame);
  struct gdbarch *gdbarch = get_frame_arch (this_frame);

  cache->prev_pc = prev_pc;

  /* Assume we do not find a frame.  */
  cache->framereg = -1;
  cache->framesize = 0;

  if (find_pc_partial_function (block_addr, NULL, &prologue_start,
				&prologue_end))
    {
      struct symtab_and_line sal = find_pc_line (prologue_start, 0);

      if (sal.line == 0)
	{
	  /* No line info so use the current PC.  */
	  prologue_end = prev_pc;
	}
      else if (sal.end < prologue_end)
	{
	  /* The next line begins after the function end.  */
	  prologue_end = sal.end;
	}

      prologue_end = std::min (prologue_end, prev_pc);
      aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
    }
  else
    {
      CORE_ADDR frame_loc;

      frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
      if (frame_loc == 0)
	return;

      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = 16;
      cache->saved_regs[29].addr = 0;
      cache->saved_regs[30].addr = 8;
    }
}

/* Fill in *CACHE with information about the prologue of *THIS_FRAME.  This
   function may throw an exception if the inferior's registers or memory is
   not available.  */

static void
aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
			       struct aarch64_prologue_cache *cache)
{
  CORE_ADDR unwound_fp;
  int reg;

  aarch64_scan_prologue (this_frame, cache);

  if (cache->framereg == -1)
    return;

  unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
  if (unwound_fp == 0)
    return;

  cache->prev_sp = unwound_fp + cache->framesize;

  /* Calculate actual addresses of saved registers using offsets
     determined by aarch64_analyze_prologue.  */
  for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
    if (trad_frame_addr_p (cache->saved_regs, reg))
      cache->saved_regs[reg].addr += cache->prev_sp;

  cache->func = get_frame_func (this_frame);

  cache->available_p = 1;
}

/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  try
    {
      aarch64_make_prologue_cache_1 (this_frame, cache);
    }
  catch (const gdb_exception_error &ex)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
	throw;
    }

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
					   void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  /* Halt the backtrace at "_start".  */
  if (cache->prev_pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
    return UNWIND_OUTERMOST;

  /* We've hit a wall, stop.  */
  if (cache->prev_sp == 0)
    return UNWIND_OUTERMOST;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a normal frame is the current function's starting
   PC and the caller's SP when we were called.  */

static void
aarch64_prologue_this_id (struct frame_info *this_frame,
			  void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    *this_id = frame_id_build_unavailable_stack (cache->func);
  else
    *this_id = frame_id_build (cache->prev_sp, cache->func);
}

/* Implement the "prev_register" frame_unwind method.  */

static struct value *
aarch64_prologue_prev_register (struct frame_info *this_frame,
				void **this_cache, int prev_regnum)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  /* If we are asked to unwind the PC, then we need to return the LR
     instead.  The prologue may save PC, but it will point into this
     frame's prologue, not the next frame's resume location.  */
  if (prev_regnum == AARCH64_PC_REGNUM)
    {
      CORE_ADDR lr;
      struct gdbarch *gdbarch = get_frame_arch (this_frame);
      struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);

      if (tdep->has_pauth ()
	  && trad_frame_value_p (cache->saved_regs,
				 tdep->pauth_ra_state_regnum))
	lr = aarch64_frame_unmask_lr (tdep, this_frame, lr);

      return frame_unwind_got_constant (this_frame, prev_regnum, lr);
    }

  /* SP is generally not saved to the stack, but this frame is
     identified by the next frame's stack pointer at the time of the
     call.  The value was already reconstructed into PREV_SP.  */
  /*
         +----------+  ^
         | saved lr |  |
      +->| saved fp |--+
      |  |          |
      |  |          | <- Previous SP
      |  +----------+
      |  | saved lr |
      +--| saved fp |<- FP
         |          |
         |          |<- SP
         +----------+  */
  if (prev_regnum == AARCH64_SP_REGNUM)
    return frame_unwind_got_constant (this_frame, prev_regnum,
				      cache->prev_sp);

  return trad_frame_get_prev_register (this_frame, cache->saved_regs,
				       prev_regnum);
}

/* AArch64 prologue unwinder.  */
struct frame_unwind aarch64_prologue_unwind =
{
  NORMAL_FRAME,
  aarch64_prologue_frame_unwind_stop_reason,
  aarch64_prologue_this_id,
  aarch64_prologue_prev_register,
  NULL,
  default_frame_sniffer
};

/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  try
    {
      cache->prev_sp = get_frame_register_unsigned (this_frame,
						    AARCH64_SP_REGNUM);
      cache->prev_pc = get_frame_pc (this_frame);
      cache->available_p = 1;
    }
  catch (const gdb_exception_error &ex)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
	throw;
    }

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame,
				       void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a stub frame is the current SP and LR.  */

static void
aarch64_stub_this_id (struct frame_info *this_frame,
		      void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (cache->available_p)
    *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
  else
    *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
}

/* Implement the "sniffer" frame_unwind method.  */

static int
aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
			     struct frame_info *this_frame,
			     void **this_prologue_cache)
{
  CORE_ADDR addr_in_block;
  gdb_byte dummy[4];

  addr_in_block = get_frame_address_in_block (this_frame);
  if (in_plt_section (addr_in_block)
      /* We also use the stub unwinder if the target memory is unreadable
	 to avoid having the prologue unwinder trying to read it.  */
      || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
    return 1;

  return 0;
}

/* AArch64 stub unwinder.  */
struct frame_unwind aarch64_stub_unwind =
{
  NORMAL_FRAME,
  aarch64_stub_frame_unwind_stop_reason,
  aarch64_stub_this_id,
  aarch64_prologue_prev_register,
  NULL,
  aarch64_stub_unwind_sniffer
};

/* Return the frame base address of *THIS_FRAME.  */

static CORE_ADDR
aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  return cache->prev_sp - cache->framesize;
}

/* AArch64 default frame base information.  */
struct frame_base aarch64_normal_base =
{
  &aarch64_prologue_unwind,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base
};

/* Return the value of the REGNUM register in the previous frame of
   *THIS_FRAME.  */

static struct value *
aarch64_dwarf2_prev_register (struct frame_info *this_frame,
			      void **this_cache, int regnum)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (get_frame_arch (this_frame));
  CORE_ADDR lr;

  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      lr = aarch64_frame_unmask_lr (tdep, this_frame, lr);
      return frame_unwind_got_constant (this_frame, regnum, lr);

    default:
      internal_error (__FILE__, __LINE__,
		      _("Unexpected register %d"), regnum);
    }
}
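
/* One-byte DWARF expressions used as canned "saved value" locations for
   the return-address signing state: DW_OP_lit0 pushes 0 (LR not
   currently signed), DW_OP_lit1 pushes 1 (LR signed).  The init_reg and
   vendor op handlers below point RA_STATE's frame-state expression at
   one of these.  */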
static const unsigned char op_lit0 = DW_OP_lit0;
static const unsigned char op_lit1 = DW_OP_lit1;

/* Implement the "init_reg" dwarf2_frame_ops method.  */

static void
aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
			       struct dwarf2_frame_state_reg *reg,
			       struct frame_info *this_frame)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      reg->how = DWARF2_FRAME_REG_FN;
      reg->loc.fn = aarch64_dwarf2_prev_register;
      return;

    case AARCH64_SP_REGNUM:
      reg->how = DWARF2_FRAME_REG_CFA;
      return;
    }

  /* Init pauth registers.  */
  if (tdep->has_pauth ())
    {
      if (regnum == tdep->pauth_ra_state_regnum)
	{
	  /* Initialize RA_STATE to zero.  */
	  reg->how = DWARF2_FRAME_REG_SAVED_VAL_EXP;
	  reg->loc.exp.start = &op_lit0;
	  reg->loc.exp.len = 1;
	  return;
	}
      else if (regnum == AARCH64_PAUTH_DMASK_REGNUM (tdep->pauth_reg_base)
	       || regnum == AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base))
	{
	  reg->how = DWARF2_FRAME_REG_SAME_VALUE;
	  return;
	}
    }
}

/* Implement the execute_dwarf_cfa_vendor_op method.  */

static bool
aarch64_execute_dwarf_cfa_vendor_op (struct gdbarch *gdbarch, gdb_byte op,
				     struct dwarf2_frame_state *fs)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  struct dwarf2_frame_state_reg *ra_state;

  if (op == DW_CFA_AARCH64_negate_ra_state)
    {
      /* On systems without pauth, treat as a nop.  */
      if (!tdep->has_pauth ())
	return true;

      /* Allocate RA_STATE column if it's not allocated yet.  */
      fs->regs.alloc_regs (AARCH64_DWARF_PAUTH_RA_STATE + 1);

      /* Toggle the status of RA_STATE between 0 and 1.  */
      ra_state = &(fs->regs.reg[AARCH64_DWARF_PAUTH_RA_STATE]);
      ra_state->how = DWARF2_FRAME_REG_SAVED_VAL_EXP;

      if (ra_state->loc.exp.start == nullptr
	  || ra_state->loc.exp.start == &op_lit0)
	ra_state->loc.exp.start = &op_lit1;
      else
	ra_state->loc.exp.start = &op_lit0;

      ra_state->loc.exp.len = 1;

      return true;
    }

  return false;
}
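
/* Example (illustrative): for a function built with return-address
   signing, compilers typically emit

     paciasp                    // sign LR on entry
     .cfi_negate_ra_state       // emits DW_CFA_AARCH64_negate_ra_state
     stp x29, x30, [sp, #-32]!

   so when unwinding across the prologue the toggle above flips
   RA_STATE to 1 and aarch64_frame_unmask_lr strips the signature
   bits from the saved LR.  */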

/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

struct stack_item_t
{
  /* Value to pass on stack.  It can be NULL if this item is for stack
     padding.  */
  const gdb_byte *data;

  /* Size in bytes of value to pass on stack.  */
  int len;
};

/* Implement the gdbarch type alignment method, overrides the generic
   alignment algorithm for anything that is aarch64 specific.  */

static ULONGEST
aarch64_type_align (gdbarch *gdbarch, struct type *t)
{
  t = check_typedef (t);
  if (TYPE_CODE (t) == TYPE_CODE_ARRAY && TYPE_VECTOR (t))
    {
      /* Use the natural alignment for vector types (the same for
	 scalar type), but the maximum alignment is 128-bit.  */
      if (TYPE_LENGTH (t) > 16)
	return 16;
      else
	return TYPE_LENGTH (t);
    }

  /* Allow the common code to calculate the alignment.  */
  return 0;
}
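
/* Example (illustrative): a vector of four floats is 16 bytes, so it
   aligns to 16; a 32-byte vector is still capped at the AArch64
   maximum of 16.  Returning 0 for all other types defers to GDB's
   generic type_align code.  */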

/* Worker function for aapcs_is_vfp_call_or_return_candidate.

   Return the number of registers required, or -1 on failure.

   When encountering a base element, if FUNDAMENTAL_TYPE is not set then set it
   to the element, else fail if the type of this element does not match the
   existing value.  */

static int
aapcs_is_vfp_call_or_return_candidate_1 (struct type *type,
					 struct type **fundamental_type)
{
  if (type == nullptr)
    return -1;

  switch (TYPE_CODE (type))
    {
    case TYPE_CODE_FLT:
      if (TYPE_LENGTH (type) > 16)
	return -1;

      if (*fundamental_type == nullptr)
	*fundamental_type = type;
      else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
	       || TYPE_CODE (type) != TYPE_CODE (*fundamental_type))
	return -1;

      return 1;

    case TYPE_CODE_COMPLEX:
      {
	struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
	if (TYPE_LENGTH (target_type) > 16)
	  return -1;

	if (*fundamental_type == nullptr)
	  *fundamental_type = target_type;
	else if (TYPE_LENGTH (target_type) != TYPE_LENGTH (*fundamental_type)
		 || TYPE_CODE (target_type) != TYPE_CODE (*fundamental_type))
	  return -1;

	return 2;
      }

    case TYPE_CODE_ARRAY:
      {
	if (TYPE_VECTOR (type))
	  {
	    if (TYPE_LENGTH (type) != 8 && TYPE_LENGTH (type) != 16)
	      return -1;

	    if (*fundamental_type == nullptr)
	      *fundamental_type = type;
	    else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
		     || TYPE_CODE (type) != TYPE_CODE (*fundamental_type))
	      return -1;

	    return 1;
	  }
	else
	  {
	    struct type *target_type = TYPE_TARGET_TYPE (type);
	    int count = aapcs_is_vfp_call_or_return_candidate_1
			  (target_type, fundamental_type);

	    if (count == -1)
	      return count;

	    count *= (TYPE_LENGTH (type) / TYPE_LENGTH (target_type));
	    return count;
	  }
      }

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      {
	int count = 0;

	for (int i = 0; i < TYPE_NFIELDS (type); i++)
	  {
	    /* Ignore any static fields.  */
	    if (field_is_static (&TYPE_FIELD (type, i)))
	      continue;

	    struct type *member = check_typedef (TYPE_FIELD_TYPE (type, i));

	    int sub_count = aapcs_is_vfp_call_or_return_candidate_1
			      (member, fundamental_type);
	    if (sub_count == -1)
	      return -1;
	    count += sub_count;
	  }

	/* Ensure there is no padding between the fields (allowing for empty
	   zero length structs).  */
	int ftype_length = (*fundamental_type == nullptr)
			   ? 0 : TYPE_LENGTH (*fundamental_type);
	if (count * ftype_length != TYPE_LENGTH (type))
	  return -1;

	return count;
      }

    default:
      break;
    }

  return -1;
}

/* Return true if an argument, whose type is described by TYPE, can be passed or
   returned in simd/fp registers, providing enough parameter passing registers
   are available.  This is as described in the AAPCS64.

   Upon successful return, *COUNT returns the number of needed registers,
   *FUNDAMENTAL_TYPE contains the type of those registers.

   Candidate as per the AAPCS64 5.4.2.C is either a:
   - float.
   - short-vector.
   - HFA (Homogeneous Floating-point Aggregate, 4.3.5.1).  A Composite type
     where all the members are floats and has at most 4 members.
   - HVA (Homogeneous Short-vector Aggregate, 4.3.5.2).  A Composite type
     where all the members are short vectors and has at most 4 members.
   - Complex (7.1.1)

   Note that HFAs and HVAs can include nested structures and arrays.  */

static bool
aapcs_is_vfp_call_or_return_candidate (struct type *type, int *count,
				       struct type **fundamental_type)
{
  if (type == nullptr)
    return false;

  *fundamental_type = nullptr;

  int ag_count = aapcs_is_vfp_call_or_return_candidate_1 (type,
							  fundamental_type);

  if (ag_count > 0 && ag_count <= HA_MAX_NUM_FLDS)
    {
      *count = ag_count;
      return true;
    }
  else
    return false;
}
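
/* Example (illustrative): for

     struct point { float x; float y; };

   the worker returns 2 with *FUNDAMENTAL_TYPE set to float, making the
   struct an HFA passed in two consecutive V registers.  A struct of
   five floats would exceed HA_MAX_NUM_FLDS and be rejected, and mixing
   a float with a double fails the fundamental-type match.  */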

/* AArch64 function call information structure.  */
struct aarch64_call_info
{
  /* the current argument number.  */
  unsigned argnum = 0;

  /* The next general purpose register number, equivalent to NGRN as
     described in the AArch64 Procedure Call Standard.  */
  unsigned ngrn = 0;

  /* The next SIMD and floating point register number, equivalent to
     NSRN as described in the AArch64 Procedure Call Standard.  */
  unsigned nsrn = 0;

  /* The next stacked argument address, equivalent to NSAA as
     described in the AArch64 Procedure Call Standard.  */
  unsigned nsaa = 0;

  /* Stack item vector.  */
  std::vector<stack_item_t> si;
};

/* Pass a value in a sequence of consecutive X registers.  The caller
   is responsible for ensuring sufficient registers are available.  */

static void
pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
	   struct aarch64_call_info *info, struct type *type,
	   struct value *arg)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int len = TYPE_LENGTH (type);
  enum type_code typecode = TYPE_CODE (type);
  int regnum = AARCH64_X0_REGNUM + info->ngrn;
  const bfd_byte *buf = value_contents (arg);

  info->argnum++;

  while (len > 0)
    {
      int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
      CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
						   byte_order);

      /* Adjust sub-word struct/union args when big-endian.  */
      if (byte_order == BFD_ENDIAN_BIG
	  && partial_len < X_REGISTER_SIZE
	  && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
	regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);

      if (aarch64_debug)
	{
	  debug_printf ("arg %d in %s = 0x%s\n", info->argnum,
			gdbarch_register_name (gdbarch, regnum),
			phex (regval, X_REGISTER_SIZE));
	}
      regcache_cooked_write_unsigned (regcache, regnum, regval);
      len -= partial_len;
      buf += partial_len;
      regnum++;
    }
}
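
/* Example (illustrative): a 16-byte struct passed by value with
   INFO->ngrn == 0 is written to x0 (first eight bytes) and x1 (second
   eight bytes); the caller (pass_in_x_or_stack) then advances NGRN
   past the consumed registers.  */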

/* Attempt to marshal a value in a V register.  Return 1 if
   successful, or 0 if insufficient registers are available.  This
   function, unlike the equivalent pass_in_x() function, does not
   handle arguments spread across multiple registers.  */

static int
pass_in_v (struct gdbarch *gdbarch,
	   struct regcache *regcache,
	   struct aarch64_call_info *info,
	   int len, const bfd_byte *buf)
{
  if (info->nsrn < 8)
    {
      int regnum = AARCH64_V0_REGNUM + info->nsrn;
      /* Enough space for a full vector register.  */
      gdb_byte reg[register_size (gdbarch, regnum)];
      gdb_assert (len <= sizeof (reg));

      info->argnum++;
      info->nsrn++;

      memset (reg, 0, sizeof (reg));
      /* PCS C.1, the argument is allocated to the least significant
	 bits of V register.  */
      memcpy (reg, buf, len);
      regcache->cooked_write (regnum, reg);

      if (aarch64_debug)
	{
	  debug_printf ("arg %d in %s\n", info->argnum,
			gdbarch_register_name (gdbarch, regnum));
	}
      return 1;
    }
  info->nsrn = 8;
  return 0;
}

/* Marshal an argument onto the stack.  */

static void
pass_on_stack (struct aarch64_call_info *info, struct type *type,
	       struct value *arg)
{
  const bfd_byte *buf = value_contents (arg);
  int len = TYPE_LENGTH (type);
  int align;
  stack_item_t item;

  info->argnum++;

  align = type_align (type);

  /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
     Natural alignment of the argument's type.  */
  align = align_up (align, 8);

  /* The AArch64 PCS requires at most doubleword alignment.  */
  if (align > 16)
    align = 16;

  if (aarch64_debug)
    {
      debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
		    info->nsaa);
    }

  item.len = len;
  item.data = buf;
  info->si.push_back (item);

  info->nsaa += len;
  if (info->nsaa & (align - 1))
    {
      /* Push stack alignment padding.  */
      int pad = align - (info->nsaa & (align - 1));

      item.len = pad;
      item.data = NULL;

      info->si.push_back (item);
      info->nsaa += pad;
    }
}
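
/* Example (illustrative): with NSAA == 0, a 12-byte struct whose
   natural alignment is 4 gets the minimum 8-byte stack alignment, so
   the 12-byte item is queued followed by a 4-byte padding item,
   leaving NSAA == 16.  */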
1545
1546/* Marshall an argument into a sequence of one or more consecutive X
1547 registers or, if insufficient X registers are available then onto
1548 the stack. */
1549
1550static void
1551pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
1552 struct aarch64_call_info *info, struct type *type,
8e80f9d1 1553 struct value *arg)
07b287a0
MS
1554{
1555 int len = TYPE_LENGTH (type);
1556 int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;
1557
1558 /* PCS C.13 - Pass in registers if we have enough spare */
1559 if (info->ngrn + nregs <= 8)
1560 {
8e80f9d1 1561 pass_in_x (gdbarch, regcache, info, type, arg);
07b287a0
MS
1562 info->ngrn += nregs;
1563 }
1564 else
1565 {
1566 info->ngrn = 8;
8e80f9d1 1567 pass_on_stack (info, type, arg);
07b287a0
MS
1568 }
1569}
1570
0e745c60
AH
1571/* Pass a value, which is of type arg_type, in a V register. Assumes value is a
1572 aapcs_is_vfp_call_or_return_candidate and there are enough spare V
1573 registers. A return value of false is an error state as the value will have
1574 been partially passed to the stack. */
1575static bool
1576pass_in_v_vfp_candidate (struct gdbarch *gdbarch, struct regcache *regcache,
1577 struct aarch64_call_info *info, struct type *arg_type,
1578 struct value *arg)
07b287a0 1579{
0e745c60
AH
1580 switch (TYPE_CODE (arg_type))
1581 {
1582 case TYPE_CODE_FLT:
1583 return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type),
1584 value_contents (arg));
1585 break;
1586
1587 case TYPE_CODE_COMPLEX:
1588 {
1589 const bfd_byte *buf = value_contents (arg);
1590 struct type *target_type = check_typedef (TYPE_TARGET_TYPE (arg_type));
1591
1592 if (!pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type),
1593 buf))
1594 return false;
1595
1596 return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type),
1597 buf + TYPE_LENGTH (target_type));
1598 }
1599
1600 case TYPE_CODE_ARRAY:
1601 if (TYPE_VECTOR (arg_type))
1602 return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type),
1603 value_contents (arg));
1604 /* fall through. */
1605
1606 case TYPE_CODE_STRUCT:
1607 case TYPE_CODE_UNION:
1608 for (int i = 0; i < TYPE_NFIELDS (arg_type); i++)
1609 {
353229bf
AH
1610 /* Don't include static fields. */
1611 if (field_is_static (&TYPE_FIELD (arg_type, i)))
1612 continue;
1613
0e745c60
AH
1614 struct value *field = value_primitive_field (arg, 0, i, arg_type);
1615 struct type *field_type = check_typedef (value_type (field));
1616
1617 if (!pass_in_v_vfp_candidate (gdbarch, regcache, info, field_type,
1618 field))
1619 return false;
1620 }
1621 return true;
1622
1623 default:
1624 return false;
1625 }
07b287a0
MS
1626}
1627
1628/* Implement the "push_dummy_call" gdbarch method. */
1629
1630static CORE_ADDR
1631aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
1632 struct regcache *regcache, CORE_ADDR bp_addr,
1633 int nargs,
cf84fa6b
AH
1634 struct value **args, CORE_ADDR sp,
1635 function_call_return_method return_method,
07b287a0
MS
1636 CORE_ADDR struct_addr)
1637{
07b287a0 1638 int argnum;
07b287a0 1639 struct aarch64_call_info info;
07b287a0 1640
07b287a0
MS
1641 /* We need to know what the type of the called function is in order
1642 to determine the number of named/anonymous arguments for the
1643 actual argument placement, and the return type in order to handle
1644 return value correctly.
1645
1646 The generic code above us views the decision of return in memory
1647 or return in registers as a two stage processes. The language
1648 handler is consulted first and may decide to return in memory (eg
1649 class with copy constructor returned by value), this will cause
1650 the generic code to allocate space AND insert an initial leading
1651 argument.
1652
1653 If the language code does not decide to pass in memory then the
1654 target code is consulted.
1655
1656 If the language code decides to pass in memory we want to move
1657 the pointer inserted as the initial argument from the argument
1658 list and into X8, the conventional AArch64 struct return pointer
38a72da0 1659 register. */
07b287a0
MS
1660
1661 /* Set the return address. For the AArch64, the return breakpoint
1662 is always at BP_ADDR. */
1663 regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);
1664
38a72da0
AH
1665 /* If we were given an initial argument for the return slot, lose it. */
1666 if (return_method == return_method_hidden_param)
07b287a0
MS
1667 {
1668 args++;
1669 nargs--;
1670 }
1671
1672 /* The struct_return pointer occupies X8. */
38a72da0 1673 if (return_method != return_method_normal)
07b287a0
MS
1674 {
1675 if (aarch64_debug)
b277c936
PL
1676 {
1677 debug_printf ("struct return in %s = 0x%s\n",
1678 gdbarch_register_name (gdbarch,
1679 AARCH64_STRUCT_RETURN_REGNUM),
1680 paddress (gdbarch, struct_addr));
1681 }
07b287a0
MS
1682 regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
1683 struct_addr);
1684 }
1685
1686 for (argnum = 0; argnum < nargs; argnum++)
1687 {
1688 struct value *arg = args[argnum];
0e745c60
AH
1689 struct type *arg_type, *fundamental_type;
1690 int len, elements;
07b287a0
MS
1691
1692 arg_type = check_typedef (value_type (arg));
1693 len = TYPE_LENGTH (arg_type);
1694
0e745c60
AH
1695 /* If arg can be passed in v registers as per the AAPCS64, then do so if
1696 if there are enough spare registers. */
1697 if (aapcs_is_vfp_call_or_return_candidate (arg_type, &elements,
1698 &fundamental_type))
1699 {
1700 if (info.nsrn + elements <= 8)
1701 {
1702 /* We know that we have sufficient registers available therefore
1703 this will never need to fallback to the stack. */
1704 if (!pass_in_v_vfp_candidate (gdbarch, regcache, &info, arg_type,
1705 arg))
1706 gdb_assert_not_reached ("Failed to push args");
1707 }
1708 else
1709 {
1710 info.nsrn = 8;
1711 pass_on_stack (&info, arg_type, arg);
1712 }
1713 continue;
1714 }
1715
07b287a0
MS
1716 switch (TYPE_CODE (arg_type))
1717 {
1718 case TYPE_CODE_INT:
1719 case TYPE_CODE_BOOL:
1720 case TYPE_CODE_CHAR:
1721 case TYPE_CODE_RANGE:
1722 case TYPE_CODE_ENUM:
1723 if (len < 4)
1724 {
1725 /* Promote to 32 bit integer. */
1726 if (TYPE_UNSIGNED (arg_type))
1727 arg_type = builtin_type (gdbarch)->builtin_uint32;
1728 else
1729 arg_type = builtin_type (gdbarch)->builtin_int32;
1730 arg = value_cast (arg_type, arg);
1731 }
8e80f9d1 1732 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
07b287a0
MS
1733 break;
1734
07b287a0
MS
1735 case TYPE_CODE_STRUCT:
1736 case TYPE_CODE_ARRAY:
1737 case TYPE_CODE_UNION:
0e745c60 1738 if (len > 16)
07b287a0
MS
1739 {
1740 /* PCS B.7 Aggregates larger than 16 bytes are passed by
1741 invisible reference. */
1742
1743 /* Allocate aligned storage. */
1744 sp = align_down (sp - len, 16);
1745
1746 /* Write the real data into the stack. */
1747 write_memory (sp, value_contents (arg), len);
1748
1749 /* Construct the indirection. */
1750 arg_type = lookup_pointer_type (arg_type);
1751 arg = value_from_pointer (arg_type, sp);
8e80f9d1 1752 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
07b287a0
MS
1753 }
1754 else
1755 /* PCS C.15 / C.18 multiple values pass. */
8e80f9d1 1756 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
07b287a0
MS
1757 break;
1758
1759 default:
8e80f9d1 1760 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
07b287a0
MS
1761 break;
1762 }
1763 }
1764
1765 /* Make sure stack retains 16 byte alignment. */
1766 if (info.nsaa & 15)
1767 sp -= 16 - (info.nsaa & 15);
1768
89055eaa 1769 while (!info.si.empty ())
07b287a0 1770 {
89055eaa 1771 const stack_item_t &si = info.si.back ();
07b287a0 1772
89055eaa
TT
1773 sp -= si.len;
1774 if (si.data != NULL)
1775 write_memory (sp, si.data, si.len);
1776 info.si.pop_back ();
1777 }
1778
1779 /* Finally, update the SP register. */
1780 regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);
1781
1782 return sp;
1783}
1784
1785/* Implement the "frame_align" gdbarch method. */
1786
1787static CORE_ADDR
1788aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
1789{
1790 /* Align the stack to sixteen bytes. */
1791 return sp & ~(CORE_ADDR) 15;
1792}
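/* A quick worked example (value hypothetical): an incoming SP of
   0x7ffffff9 is rounded down to 0x7ffffff0; clearing the low four bits
   selects the highest 16-byte boundary at or below the original SP. */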
1793
1794/* Return the type for an AdvSISD Q register. */
1795
1796static struct type *
1797aarch64_vnq_type (struct gdbarch *gdbarch)
1798{
1799 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1800
1801 if (tdep->vnq_type == NULL)
1802 {
1803 struct type *t;
1804 struct type *elem;
1805
1806 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
1807 TYPE_CODE_UNION);
1808
1809 elem = builtin_type (gdbarch)->builtin_uint128;
1810 append_composite_type_field (t, "u", elem);
1811
1812 elem = builtin_type (gdbarch)->builtin_int128;
1813 append_composite_type_field (t, "s", elem);
1814
1815 tdep->vnq_type = t;
1816 }
1817
1818 return tdep->vnq_type;
1819}
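/* With this union layout a Q register can be inspected from the CLI in
   either representation, e.g. "print $q0.u" for the unsigned 128-bit
   view or "print $q0.s" for the signed one (illustrative commands). */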
1820
1821/* Return the type for an AdvSISD D register. */
1822
1823static struct type *
1824aarch64_vnd_type (struct gdbarch *gdbarch)
1825{
1826 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1827
1828 if (tdep->vnd_type == NULL)
1829 {
1830 struct type *t;
1831 struct type *elem;
1832
1833 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
1834 TYPE_CODE_UNION);
1835
1836 elem = builtin_type (gdbarch)->builtin_double;
1837 append_composite_type_field (t, "f", elem);
1838
1839 elem = builtin_type (gdbarch)->builtin_uint64;
1840 append_composite_type_field (t, "u", elem);
1841
1842 elem = builtin_type (gdbarch)->builtin_int64;
1843 append_composite_type_field (t, "s", elem);
1844
1845 tdep->vnd_type = t;
1846 }
1847
1848 return tdep->vnd_type;
1849}
1850
1851/* Return the type for an AdvSISD S register. */
1852
1853static struct type *
1854aarch64_vns_type (struct gdbarch *gdbarch)
1855{
1856 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1857
1858 if (tdep->vns_type == NULL)
1859 {
1860 struct type *t;
1861 struct type *elem;
1862
1863 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
1864 TYPE_CODE_UNION);
1865
1866 elem = builtin_type (gdbarch)->builtin_float;
1867 append_composite_type_field (t, "f", elem);
1868
1869 elem = builtin_type (gdbarch)->builtin_uint32;
1870 append_composite_type_field (t, "u", elem);
1871
1872 elem = builtin_type (gdbarch)->builtin_int32;
1873 append_composite_type_field (t, "s", elem);
1874
1875 tdep->vns_type = t;
1876 }
1877
1878 return tdep->vns_type;
1879}
1880
1881/* Return the type for an AdvSISD H register. */
1882
1883static struct type *
1884aarch64_vnh_type (struct gdbarch *gdbarch)
1885{
1886 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1887
1888 if (tdep->vnh_type == NULL)
1889 {
1890 struct type *t;
1891 struct type *elem;
1892
1893 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
1894 TYPE_CODE_UNION);
1895
1896 elem = builtin_type (gdbarch)->builtin_half;
1897 append_composite_type_field (t, "f", elem);
1898
1899 elem = builtin_type (gdbarch)->builtin_uint16;
1900 append_composite_type_field (t, "u", elem);
1901
1902 elem = builtin_type (gdbarch)->builtin_int16;
1903 append_composite_type_field (t, "s", elem);
1904
1905 tdep->vnh_type = t;
1906 }
1907
1908 return tdep->vnh_type;
1909}
1910
1911/* Return the type for an AdvSISD B register. */
1912
1913static struct type *
1914aarch64_vnb_type (struct gdbarch *gdbarch)
1915{
1916 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1917
1918 if (tdep->vnb_type == NULL)
1919 {
1920 struct type *t;
1921 struct type *elem;
1922
1923 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
1924 TYPE_CODE_UNION);
1925
1926 elem = builtin_type (gdbarch)->builtin_uint8;
1927 append_composite_type_field (t, "u", elem);
1928
1929 elem = builtin_type (gdbarch)->builtin_int8;
1930 append_composite_type_field (t, "s", elem);
1931
1932 tdep->vnb_type = t;
1933 }
1934
1935 return tdep->vnb_type;
1936}
1937
1938/* Return the type for an AdvSISD V register. */
1939
1940static struct type *
1941aarch64_vnv_type (struct gdbarch *gdbarch)
1942{
1943 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1944
1945 if (tdep->vnv_type == NULL)
1946 {
1947 /* The other AArch64 pseudo registers (Q, D, H, S, B) refer to a single
1948 value slice of the underlying vector registers. However, NEON V
1949 registers are always full vector registers and need constructing as such. */
1950 const struct builtin_type *bt = builtin_type (gdbarch);
1951
1952 struct type *t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnv",
1953 TYPE_CODE_UNION);
1954
1955 struct type *sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
1956 TYPE_CODE_UNION);
1957 append_composite_type_field (sub, "f",
1958 init_vector_type (bt->builtin_double, 2));
1959 append_composite_type_field (sub, "u",
1960 init_vector_type (bt->builtin_uint64, 2));
1961 append_composite_type_field (sub, "s",
1962 init_vector_type (bt->builtin_int64, 2));
1963 append_composite_type_field (t, "d", sub);
1964
1965 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
1966 TYPE_CODE_UNION);
1967 append_composite_type_field (sub, "f",
1968 init_vector_type (bt->builtin_float, 4));
1969 append_composite_type_field (sub, "u",
1970 init_vector_type (bt->builtin_uint32, 4));
1971 append_composite_type_field (sub, "s",
1972 init_vector_type (bt->builtin_int32, 4));
1973 append_composite_type_field (t, "s", sub);
1974
1975 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
1976 TYPE_CODE_UNION);
1977 append_composite_type_field (sub, "f",
1978 init_vector_type (bt->builtin_half, 8));
1979 append_composite_type_field (sub, "u",
1980 init_vector_type (bt->builtin_uint16, 8));
1981 append_composite_type_field (sub, "s",
1982 init_vector_type (bt->builtin_int16, 8));
1983 append_composite_type_field (t, "h", sub);
1984
1985 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
1986 TYPE_CODE_UNION);
1987 append_composite_type_field (sub, "u",
1988 init_vector_type (bt->builtin_uint8, 16));
1989 append_composite_type_field (sub, "s",
1990 init_vector_type (bt->builtin_int8, 16));
1991 append_composite_type_field (t, "b", sub);
1992
1993 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
1994 TYPE_CODE_UNION);
1995 append_composite_type_field (sub, "u",
1996 init_vector_type (bt->builtin_uint128, 1));
1997 append_composite_type_field (sub, "s",
1998 init_vector_type (bt->builtin_int128, 1));
1999 append_composite_type_field (t, "q", sub);
2000
2001 tdep->vnv_type = t;
2002 }
2003
2004 return tdep->vnv_type;
2005}
2006
2007/* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
2008
2009static int
2010aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
2011{
2012 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2013
2014 if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
2015 return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;
2016
2017 if (reg == AARCH64_DWARF_SP)
2018 return AARCH64_SP_REGNUM;
2019
2020 if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
2021 return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;
2022
2023 if (reg == AARCH64_DWARF_SVE_VG)
2024 return AARCH64_SVE_VG_REGNUM;
2025
2026 if (reg == AARCH64_DWARF_SVE_FFR)
2027 return AARCH64_SVE_FFR_REGNUM;
2028
2029 if (reg >= AARCH64_DWARF_SVE_P0 && reg <= AARCH64_DWARF_SVE_P0 + 15)
2030 return AARCH64_SVE_P0_REGNUM + reg - AARCH64_DWARF_SVE_P0;
2031
2032 if (reg >= AARCH64_DWARF_SVE_Z0 && reg <= AARCH64_DWARF_SVE_Z0 + 15)
2033 return AARCH64_SVE_Z0_REGNUM + reg - AARCH64_DWARF_SVE_Z0;
2034
2035 if (tdep->has_pauth ())
2036 {
2037 if (reg >= AARCH64_DWARF_PAUTH_DMASK && reg <= AARCH64_DWARF_PAUTH_CMASK)
2038 return tdep->pauth_reg_base + reg - AARCH64_DWARF_PAUTH_DMASK;
2039
2040 if (reg == AARCH64_DWARF_PAUTH_RA_STATE)
2041 return tdep->pauth_ra_state_regnum;
2042 }
2043
2044 return -1;
2045}
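/* For example, DWARF register number AARCH64_DWARF_X0 + 5 maps to
   AARCH64_X0_REGNUM + 5 (x5); any number outside the ranges handled
   above yields -1 so the caller can diagnose the unknown register. */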
2046
2047/* Implement the "print_insn" gdbarch method. */
2048
2049static int
2050aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
2051{
2052 info->symbols = NULL;
2053 return default_print_insn (memaddr, info);
2054}
2055
2056/* AArch64 BRK software debug mode instruction.
2057 Note that AArch64 code is always little-endian.
2058 1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000. */
2059constexpr gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
2060
2061typedef BP_MANIPULATION (aarch64_default_breakpoint) aarch64_breakpoint;
2062
2063/* Extract from an array REGS containing the (raw) register state a
2064 function return value of type TYPE, and copy that, in virtual
2065 format, into VALBUF. */
2066
2067static void
2068aarch64_extract_return_value (struct type *type, struct regcache *regs,
2069 gdb_byte *valbuf)
2070{
2071 struct gdbarch *gdbarch = regs->arch ();
2072 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2073 int elements;
2074 struct type *fundamental_type;
2075
2076 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2077 &fundamental_type))
2078 {
2079 int len = TYPE_LENGTH (fundamental_type);
2080
2081 for (int i = 0; i < elements; i++)
2082 {
2083 int regno = AARCH64_V0_REGNUM + i;
2084 /* Enough space for a full vector register. */
2085 gdb_byte buf[register_size (gdbarch, regno)];
2086 gdb_assert (len <= sizeof (buf));
2087
2088 if (aarch64_debug)
2089 {
2090 debug_printf ("read HFA or HVA return value element %d from %s\n",
2091 i + 1,
2092 gdbarch_register_name (gdbarch, regno));
2093 }
2094 regs->cooked_read (regno, buf);
2095
2096 memcpy (valbuf, buf, len);
2097 valbuf += len;
2098 }
2099 }
2100 else if (TYPE_CODE (type) == TYPE_CODE_INT
2101 || TYPE_CODE (type) == TYPE_CODE_CHAR
2102 || TYPE_CODE (type) == TYPE_CODE_BOOL
2103 || TYPE_CODE (type) == TYPE_CODE_PTR
2104 || TYPE_IS_REFERENCE (type)
2105 || TYPE_CODE (type) == TYPE_CODE_ENUM)
2106 {
2107 /* If the type is a plain integer, then the access is
2108 straightforward. Otherwise we have to work a little
2109 harder. */
2110 int len = TYPE_LENGTH (type);
2111 int regno = AARCH64_X0_REGNUM;
2112 ULONGEST tmp;
2113
2114 while (len > 0)
2115 {
2116 /* By using store_unsigned_integer we avoid having to do
2117 anything special for small big-endian values. */
2118 regcache_cooked_read_unsigned (regs, regno++, &tmp);
2119 store_unsigned_integer (valbuf,
2120 (len > X_REGISTER_SIZE
2121 ? X_REGISTER_SIZE : len), byte_order, tmp);
2122 len -= X_REGISTER_SIZE;
2123 valbuf += X_REGISTER_SIZE;
2124 }
2125 }
2126 else
2127 {
2128 /* For a structure or union the behaviour is as if the value had
2129 been stored to word-aligned memory and then loaded into
2130 registers with 64-bit load instruction(s). */
2131 int len = TYPE_LENGTH (type);
2132 int regno = AARCH64_X0_REGNUM;
2133 bfd_byte buf[X_REGISTER_SIZE];
2134
2135 while (len > 0)
2136 {
2137 regs->cooked_read (regno++, buf);
2138 memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2139 len -= X_REGISTER_SIZE;
2140 valbuf += X_REGISTER_SIZE;
2141 }
2142 }
2143}
2144
2145
2146/* Will a function return an aggregate type in memory or in a
2147 register? Return 0 if an aggregate type can be returned in a
2148 register, 1 if it must be returned in memory. */
2149
2150static int
2151aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
2152{
2153 type = check_typedef (type);
2154 int elements;
2155 struct type *fundamental_type;
2156
2157 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2158 &fundamental_type))
2159 {
2160 /* v0-v7 are used to return values and one register is allocated
2161 for one member. However, HFA or HVA has at most four members. */
2162 return 0;
2163 }
2164
2165 if (TYPE_LENGTH (type) > 16)
2166 {
2167 /* PCS B.6 Aggregates larger than 16 bytes are passed by
2168 invisible reference. */
2169
2170 return 1;
2171 }
2172
2173 return 0;
2174}
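/* As an illustration: a struct of four doubles is an HFA, fits in
   v0-v3 and so is returned in registers, while a plain 24-byte struct
   exceeds the 16-byte limit and is returned in memory. */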
2175
2176/* Write into appropriate registers a function return value of type
2177 TYPE, given in virtual format. */
2178
2179static void
2180aarch64_store_return_value (struct type *type, struct regcache *regs,
2181 const gdb_byte *valbuf)
2182{
2183 struct gdbarch *gdbarch = regs->arch ();
2184 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2185 int elements;
2186 struct type *fundamental_type;
2187
2188 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2189 &fundamental_type))
2190 {
2191 int len = TYPE_LENGTH (fundamental_type);
2192
2193 for (int i = 0; i < elements; i++)
2194 {
2195 int regno = AARCH64_V0_REGNUM + i;
2196 /* Enough space for a full vector register. */
2197 gdb_byte tmpbuf[register_size (gdbarch, regno)];
2198 gdb_assert (len <= sizeof (tmpbuf));
2199
2200 if (aarch64_debug)
2201 {
2202 debug_printf ("write HFA or HVA return value element %d to %s\n",
2203 i + 1,
2204 gdbarch_register_name (gdbarch, regno));
2205 }
2206
2207 memcpy (tmpbuf, valbuf,
2208 len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
2209 regs->cooked_write (regno, tmpbuf);
2210 valbuf += len;
2211 }
2212 }
2213 else if (TYPE_CODE (type) == TYPE_CODE_INT
2214 || TYPE_CODE (type) == TYPE_CODE_CHAR
2215 || TYPE_CODE (type) == TYPE_CODE_BOOL
2216 || TYPE_CODE (type) == TYPE_CODE_PTR
2217 || TYPE_IS_REFERENCE (type)
2218 || TYPE_CODE (type) == TYPE_CODE_ENUM)
2219 {
2220 if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
2221 {
2222 /* Values of one word or less are zero/sign-extended and
2223 returned in X0. */
2224 bfd_byte tmpbuf[X_REGISTER_SIZE];
2225 LONGEST val = unpack_long (type, valbuf);
2226
2227 store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
2228 regs->cooked_write (AARCH64_X0_REGNUM, tmpbuf);
2229 }
2230 else
2231 {
2232 /* Integral values greater than one word are stored in
2233 consecutive registers starting with X0. This will always
2234 be a multiple of the register size. */
2235 int len = TYPE_LENGTH (type);
2236 int regno = AARCH64_X0_REGNUM;
2237
2238 while (len > 0)
2239 {
2240 regs->cooked_write (regno++, valbuf);
2241 len -= X_REGISTER_SIZE;
2242 valbuf += X_REGISTER_SIZE;
2243 }
2244 }
2245 }
2246 else
2247 {
2248 /* For a structure or union the behaviour is as if the value had
2249 been stored to word-aligned memory and then loaded into
2250 registers with 64-bit load instruction(s). */
2251 int len = TYPE_LENGTH (type);
2252 int regno = AARCH64_X0_REGNUM;
2253 bfd_byte tmpbuf[X_REGISTER_SIZE];
2254
2255 while (len > 0)
2256 {
2257 memcpy (tmpbuf, valbuf,
2258 len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2259 regs->cooked_write (regno++, tmpbuf);
2260 len -= X_REGISTER_SIZE;
2261 valbuf += X_REGISTER_SIZE;
2262 }
2263 }
2264}
2265
2266/* Implement the "return_value" gdbarch method. */
2267
2268static enum return_value_convention
2269aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
2270 struct type *valtype, struct regcache *regcache,
2271 gdb_byte *readbuf, const gdb_byte *writebuf)
2272{
2273
2274 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
2275 || TYPE_CODE (valtype) == TYPE_CODE_UNION
2276 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
2277 {
2278 if (aarch64_return_in_memory (gdbarch, valtype))
2279 {
2280 if (aarch64_debug)
2281 debug_printf ("return value in memory\n");
2282 return RETURN_VALUE_STRUCT_CONVENTION;
2283 }
2284 }
2285
2286 if (writebuf)
2287 aarch64_store_return_value (valtype, regcache, writebuf);
2288
2289 if (readbuf)
2290 aarch64_extract_return_value (valtype, regcache, readbuf);
2291
2292 if (aarch64_debug)
2293 debug_printf ("return value in registers\n");
2294
2295 return RETURN_VALUE_REGISTER_CONVENTION;
2296}
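/* For example, a function returning a 32-byte struct by value takes the
   struct convention: the caller passes the result buffer's address in
   X8 and the callee writes the value there, so neither readbuf nor
   writebuf is touched above. */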
2297
2298/* Implement the "get_longjmp_target" gdbarch method. */
2299
2300static int
2301aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2302{
2303 CORE_ADDR jb_addr;
2304 gdb_byte buf[X_REGISTER_SIZE];
2305 struct gdbarch *gdbarch = get_frame_arch (frame);
2306 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2307 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2308
2309 jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
2310
2311 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
2312 X_REGISTER_SIZE))
2313 return 0;
2314
2315 *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
2316 return 1;
2317}
2318
2319/* Implement the "gen_return_address" gdbarch method. */
2320
2321static void
2322aarch64_gen_return_address (struct gdbarch *gdbarch,
2323 struct agent_expr *ax, struct axs_value *value,
2324 CORE_ADDR scope)
2325{
2326 value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
2327 value->kind = axs_lvalue_register;
2328 value->u.reg = AARCH64_LR_REGNUM;
2329}
2330\f
2331
2332/* Return the pseudo register name corresponding to register regnum. */
2333
2334static const char *
2335aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
2336{
2337 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2338
2339 static const char *const q_name[] =
2340 {
2341 "q0", "q1", "q2", "q3",
2342 "q4", "q5", "q6", "q7",
2343 "q8", "q9", "q10", "q11",
2344 "q12", "q13", "q14", "q15",
2345 "q16", "q17", "q18", "q19",
2346 "q20", "q21", "q22", "q23",
2347 "q24", "q25", "q26", "q27",
2348 "q28", "q29", "q30", "q31",
2349 };
2350
2351 static const char *const d_name[] =
2352 {
2353 "d0", "d1", "d2", "d3",
2354 "d4", "d5", "d6", "d7",
2355 "d8", "d9", "d10", "d11",
2356 "d12", "d13", "d14", "d15",
2357 "d16", "d17", "d18", "d19",
2358 "d20", "d21", "d22", "d23",
2359 "d24", "d25", "d26", "d27",
2360 "d28", "d29", "d30", "d31",
2361 };
2362
2363 static const char *const s_name[] =
2364 {
2365 "s0", "s1", "s2", "s3",
2366 "s4", "s5", "s6", "s7",
2367 "s8", "s9", "s10", "s11",
2368 "s12", "s13", "s14", "s15",
2369 "s16", "s17", "s18", "s19",
2370 "s20", "s21", "s22", "s23",
2371 "s24", "s25", "s26", "s27",
2372 "s28", "s29", "s30", "s31",
2373 };
2374
2375 static const char *const h_name[] =
2376 {
2377 "h0", "h1", "h2", "h3",
2378 "h4", "h5", "h6", "h7",
2379 "h8", "h9", "h10", "h11",
2380 "h12", "h13", "h14", "h15",
2381 "h16", "h17", "h18", "h19",
2382 "h20", "h21", "h22", "h23",
2383 "h24", "h25", "h26", "h27",
2384 "h28", "h29", "h30", "h31",
2385 };
2386
2387 static const char *const b_name[] =
2388 {
2389 "b0", "b1", "b2", "b3",
2390 "b4", "b5", "b6", "b7",
2391 "b8", "b9", "b10", "b11",
2392 "b12", "b13", "b14", "b15",
2393 "b16", "b17", "b18", "b19",
2394 "b20", "b21", "b22", "b23",
2395 "b24", "b25", "b26", "b27",
2396 "b28", "b29", "b30", "b31",
2397 };
2398
2399 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
2400
2401 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
2402 return q_name[p_regnum - AARCH64_Q0_REGNUM];
2403
2404 if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
2405 return d_name[p_regnum - AARCH64_D0_REGNUM];
2406
2407 if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
2408 return s_name[p_regnum - AARCH64_S0_REGNUM];
2409
2410 if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
2411 return h_name[p_regnum - AARCH64_H0_REGNUM];
2412
2413 if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
2414 return b_name[p_regnum - AARCH64_B0_REGNUM];
2415
2416 if (tdep->has_sve ())
2417 {
2418 static const char *const sve_v_name[] =
2419 {
2420 "v0", "v1", "v2", "v3",
2421 "v4", "v5", "v6", "v7",
2422 "v8", "v9", "v10", "v11",
2423 "v12", "v13", "v14", "v15",
2424 "v16", "v17", "v18", "v19",
2425 "v20", "v21", "v22", "v23",
2426 "v24", "v25", "v26", "v27",
2427 "v28", "v29", "v30", "v31",
2428 };
2429
2430 if (p_regnum >= AARCH64_SVE_V0_REGNUM
2431 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2432 return sve_v_name[p_regnum - AARCH64_SVE_V0_REGNUM];
2433 }
2434
2435 /* RA_STATE is used for unwinding only. Do not assign it a name - this
2436 prevents it from being read by methods such as
2437 mi_cmd_trace_frame_collected. */
2438 if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
2439 return "";
2440
2441 internal_error (__FILE__, __LINE__,
2442 _("aarch64_pseudo_register_name: bad register number %d"),
2443 p_regnum);
2444}
2445
2446/* Implement the "pseudo_register_type" tdesc_arch_data method. */
2447
2448static struct type *
2449aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
2450{
2451 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2452
2453 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
2454
2455 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
2456 return aarch64_vnq_type (gdbarch);
2457
2458 if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
2459 return aarch64_vnd_type (gdbarch);
2460
2461 if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
2462 return aarch64_vns_type (gdbarch);
2463
2464 if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
2465 return aarch64_vnh_type (gdbarch);
2466
2467 if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
2468 return aarch64_vnb_type (gdbarch);
2469
2470 if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
2471 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2472 return aarch64_vnv_type (gdbarch);
2473
2474 if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
2475 return builtin_type (gdbarch)->builtin_uint64;
2476
2477 internal_error (__FILE__, __LINE__,
2478 _("aarch64_pseudo_register_type: bad register number %d"),
2479 p_regnum);
2480}
2481
2482/* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2483
2484static int
2485aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
2486 struct reggroup *group)
2487{
2488 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2489
2490 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
2491
2492 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
2493 return group == all_reggroup || group == vector_reggroup;
2494 else if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
2495 return (group == all_reggroup || group == vector_reggroup
2496 || group == float_reggroup);
2497 else if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
2498 return (group == all_reggroup || group == vector_reggroup
2499 || group == float_reggroup);
2500 else if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
2501 return group == all_reggroup || group == vector_reggroup;
2502 else if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
2503 return group == all_reggroup || group == vector_reggroup;
2504 else if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
2505 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2506 return group == all_reggroup || group == vector_reggroup;
2507 /* RA_STATE is used for unwinding only. Do not assign it to any groups. */
2508 if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
2509 return 0;
2510
2511 return group == all_reggroup;
2512}
2513
2514/* Helper for aarch64_pseudo_read_value. */
2515
2516static struct value *
2517aarch64_pseudo_read_value_1 (struct gdbarch *gdbarch,
2518 readable_regcache *regcache, int regnum_offset,
2519 int regsize, struct value *result_value)
2520{
2521 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
2522
2523 /* Enough space for a full vector register. */
2524 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2525 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2526
2527 if (regcache->raw_read (v_regnum, reg_buf) != REG_VALID)
2528 mark_value_bytes_unavailable (result_value, 0,
2529 TYPE_LENGTH (value_type (result_value)));
2530 else
2531 memcpy (value_contents_raw (result_value), reg_buf, regsize);
2532
2533 return result_value;
2534 }
2535
2536/* Implement the "pseudo_register_read_value" gdbarch method. */
2537
2538static struct value *
2539aarch64_pseudo_read_value (struct gdbarch *gdbarch, readable_regcache *regcache,
2540 int regnum)
2541{
2542 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2543 struct value *result_value = allocate_value (register_type (gdbarch, regnum));
2544
2545 VALUE_LVAL (result_value) = lval_register;
2546 VALUE_REGNUM (result_value) = regnum;
2547
2548 regnum -= gdbarch_num_regs (gdbarch);
2549
2550 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2551 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2552 regnum - AARCH64_Q0_REGNUM,
2553 Q_REGISTER_SIZE, result_value);
2554
2555 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2556 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2557 regnum - AARCH64_D0_REGNUM,
2558 D_REGISTER_SIZE, result_value);
2559
2560 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2561 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2562 regnum - AARCH64_S0_REGNUM,
2563 S_REGISTER_SIZE, result_value);
2564
2565 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2566 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2567 regnum - AARCH64_H0_REGNUM,
2568 H_REGISTER_SIZE, result_value);
2569
2570 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2571 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2572 regnum - AARCH64_B0_REGNUM,
2573 B_REGISTER_SIZE, result_value);
2574
2575 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2576 && regnum < AARCH64_SVE_V0_REGNUM + 32)
2577 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2578 regnum - AARCH64_SVE_V0_REGNUM,
2579 V_REGISTER_SIZE, result_value);
2580
2581 gdb_assert_not_reached ("regnum out of bounds");
2582}
2583
2584/* Helper for aarch64_pseudo_write. */
2585
2586static void
2587aarch64_pseudo_write_1 (struct gdbarch *gdbarch, struct regcache *regcache,
2588 int regnum_offset, int regsize, const gdb_byte *buf)
2589{
2590 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
2591
2592 /* Enough space for a full vector register. */
2593 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2594 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2595
2596 /* Ensure the register buffer is zero. We want gdb writes of the
2597 various 'scalar' pseudo registers to behave like architectural
2598 writes: register-width bytes are written and the remainder is
2599 set to zero. */
2600 memset (reg_buf, 0, register_size (gdbarch, AARCH64_V0_REGNUM));
2601
2602 memcpy (reg_buf, buf, regsize);
2603 regcache->raw_write (v_regnum, reg_buf);
2604}
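/* For instance, writing a hypothetical 4-byte value to $s3 through this
   helper stores those 4 bytes at the start of v3 and zeroes the rest of
   the vector register, matching an architectural 32-bit FP write. */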
2605
2606/* Implement the "pseudo_register_write" gdbarch method. */
2607
2608static void
2609aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2610 int regnum, const gdb_byte *buf)
2611{
2612 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2613 regnum -= gdbarch_num_regs (gdbarch);
2614
2615 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2616 return aarch64_pseudo_write_1 (gdbarch, regcache,
2617 regnum - AARCH64_Q0_REGNUM, Q_REGISTER_SIZE,
2618 buf);
2619
2620 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2621 return aarch64_pseudo_write_1 (gdbarch, regcache,
2622 regnum - AARCH64_D0_REGNUM, D_REGISTER_SIZE,
2623 buf);
2624
2625 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2626 return aarch64_pseudo_write_1 (gdbarch, regcache,
2627 regnum - AARCH64_S0_REGNUM, S_REGISTER_SIZE,
2628 buf);
2629
2630 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2631 return aarch64_pseudo_write_1 (gdbarch, regcache,
2632 regnum - AARCH64_H0_REGNUM, H_REGISTER_SIZE,
2633 buf);
2634
2635 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2636 return aarch64_pseudo_write_1 (gdbarch, regcache,
2637 regnum - AARCH64_B0_REGNUM, B_REGISTER_SIZE,
2638 buf);
2639
2640 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2641 && regnum < AARCH64_SVE_V0_REGNUM + 32)
2642 return aarch64_pseudo_write_1 (gdbarch, regcache,
2643 regnum - AARCH64_SVE_V0_REGNUM,
2644 V_REGISTER_SIZE, buf);
2645
2646 gdb_assert_not_reached ("regnum out of bounds");
2647}
2648
2649/* Callback function for user_reg_add. */
2650
2651static struct value *
2652value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
2653{
2654 const int *reg_p = (const int *) baton;
2655
2656 return value_of_register (*reg_p, frame);
2657}
2658\f
2659
2660/* Implement the "software_single_step" gdbarch method, needed to
2661 single step through atomic sequences on AArch64. */
2662
2663static std::vector<CORE_ADDR>
2664aarch64_software_single_step (struct regcache *regcache)
2665{
2666 struct gdbarch *gdbarch = regcache->arch ();
2667 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2668 const int insn_size = 4;
2669 const int atomic_sequence_length = 16; /* Instruction sequence length. */
2670 CORE_ADDR pc = regcache_read_pc (regcache);
2671 CORE_ADDR breaks[2] = { CORE_ADDR_MAX, CORE_ADDR_MAX };
2672 CORE_ADDR loc = pc;
2673 CORE_ADDR closing_insn = 0;
2674 uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
2675 byte_order_for_code);
2676 int index;
2677 int insn_count;
2678 int bc_insn_count = 0; /* Conditional branch instruction count. */
2679 int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed). */
2680 aarch64_inst inst;
2681
2682 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2683 return {};
2684
2685 /* Look for a Load Exclusive instruction which begins the sequence. */
2686 if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
2687 return {};
2688
2689 for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
2690 {
2691 loc += insn_size;
2692 insn = read_memory_unsigned_integer (loc, insn_size,
2693 byte_order_for_code);
2694
2695 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2696 return {};
2697 /* Check if the instruction is a conditional branch. */
2698 if (inst.opcode->iclass == condbranch)
2699 {
2700 gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);
2701
2702 if (bc_insn_count >= 1)
2703 return {};
2704
2705 /* It is, so we'll try to set a breakpoint at the destination. */
2706 breaks[1] = loc + inst.operands[0].imm.value;
2707
2708 bc_insn_count++;
2709 last_breakpoint++;
2710 }
2711
2712 /* Look for the Store Exclusive which closes the atomic sequence. */
2713 if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
2714 {
2715 closing_insn = loc;
2716 break;
2717 }
2718 }
2719
2720 /* We didn't find a closing Store Exclusive instruction, fall back. */
2721 if (!closing_insn)
2722 return {};
2723
2724 /* Insert breakpoint after the end of the atomic sequence. */
2725 breaks[0] = loc + insn_size;
2726
2727 /* Check for duplicated breakpoints, and also check that the second
2728 breakpoint is not within the atomic sequence. */
2729 if (last_breakpoint
2730 && (breaks[1] == breaks[0]
2731 || (breaks[1] >= pc && breaks[1] <= closing_insn)))
2732 last_breakpoint = 0;
2733
2734 std::vector<CORE_ADDR> next_pcs;
2735
2736 /* Insert the breakpoint at the end of the sequence, and one at the
2737 destination of the conditional branch, if it exists. */
2738 for (index = 0; index <= last_breakpoint; index++)
2739 next_pcs.push_back (breaks[index]);
2740
2741 return next_pcs;
2742}
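/* Sketch of the idea on a hypothetical atomic sequence:
     ldaxr w1, [x0]      ; load exclusive opens the sequence
     add   w1, w1, #1
     stlxr w2, w1, [x0]  ; store exclusive closes it
   The step breakpoint goes on the instruction after the stlxr, plus one
   at any conditional branch target seen inside, so we never stop within
   the sequence and the exclusive monitor is left undisturbed. */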
2743
2744struct aarch64_displaced_step_closure : public displaced_step_closure
2745{
2746 /* True when a conditional instruction, such as B.COND or TBZ, is
2747 being displaced-stepped. */
2748 int cond = 0;
2749
2750 /* PC adjustment offset after displaced stepping. */
2751 int32_t pc_adjust = 0;
2752};
2753
2754/* Data when visiting instructions for displaced stepping. */
2755
2756struct aarch64_displaced_step_data
2757{
2758 struct aarch64_insn_data base;
2759
2760 /* The address where the instruction will be executed at. */
2761 CORE_ADDR new_addr;
2762 /* Buffer of instructions to be copied to NEW_ADDR to execute. */
2763 uint32_t insn_buf[AARCH64_DISPLACED_MODIFIED_INSNS];
2764 /* Number of instructions in INSN_BUF. */
2765 unsigned insn_count;
2766 /* Registers when doing displaced stepping. */
2767 struct regcache *regs;
2768
2769 aarch64_displaced_step_closure *dsc;
2770};
2771
2772/* Implementation of aarch64_insn_visitor method "b". */
2773
2774static void
2775aarch64_displaced_step_b (const int is_bl, const int32_t offset,
2776 struct aarch64_insn_data *data)
2777{
2778 struct aarch64_displaced_step_data *dsd
2779 = (struct aarch64_displaced_step_data *) data;
2780 int64_t new_offset = data->insn_addr - dsd->new_addr + offset;
2781
2782 if (can_encode_int32 (new_offset, 28))
2783 {
2784 /* Emit B rather than BL, because executing BL on a new address
2785 will get the wrong address into LR. In order to avoid this,
2786 we emit B, and update LR if the instruction is BL. */
2787 emit_b (dsd->insn_buf, 0, new_offset);
2788 dsd->insn_count++;
2789 }
2790 else
2791 {
2792 /* Write NOP. */
2793 emit_nop (dsd->insn_buf);
2794 dsd->insn_count++;
2795 dsd->dsc->pc_adjust = offset;
2796 }
2797
2798 if (is_bl)
2799 {
2800 /* Update LR. */
2801 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
2802 data->insn_addr + 4);
2803 }
2804}
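/* Worked example (addresses hypothetical): stepping a BL located at
   0x400100 from a scratch pad at 0x500000 gives new_offset = 0x400100 -
   0x500000 + offset. If that still fits the 28-bit branch range a plain
   B is emitted and LR is patched by hand; otherwise a NOP is emitted
   and the PC is corrected after the step via pc_adjust. */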
2805
2806/* Implementation of aarch64_insn_visitor method "b_cond". */
2807
2808static void
2809aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
2810 struct aarch64_insn_data *data)
2811{
2812 struct aarch64_displaced_step_data *dsd
2813 = (struct aarch64_displaced_step_data *) data;
2814
2815 /* GDB has to fix up PC after displaced step this instruction
2816 differently according to the condition is true or false. Instead
2817 of checking COND against conditional flags, we can use
2818 the following instructions, and GDB can tell how to fix up PC
2819 according to the PC value.
2820
2821 B.COND TAKEN ; If cond is true, then jump to TAKEN.
2822 INSN1 ;
2823 TAKEN:
2824 INSN2
2825 */
2826
2827 emit_bcond (dsd->insn_buf, cond, 8);
2828 dsd->dsc->cond = 1;
2829 dsd->dsc->pc_adjust = offset;
2830 dsd->insn_count = 1;
2831}
2832
2833/* Dynamically allocate a new register. If we know the register
2834 statically, we should make it a global as above instead of using this
2835 helper function. */
2836
2837static struct aarch64_register
2838aarch64_register (unsigned num, int is64)
2839{
2840 return (struct aarch64_register) { num, is64 };
2841}
2842
2843/* Implementation of aarch64_insn_visitor method "cb". */
2844
2845static void
2846aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
2847 const unsigned rn, int is64,
2848 struct aarch64_insn_data *data)
2849{
2850 struct aarch64_displaced_step_data *dsd
2851 = (struct aarch64_displaced_step_data *) data;
2852
2853 /* The offset is out of range for a compare and branch
2854 instruction. We can use the following instructions instead:
2855
2856 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
2857 INSN1 ;
2858 TAKEN:
2859 INSN2
2860 */
2861 emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
2862 dsd->insn_count = 1;
2863 dsd->dsc->cond = 1;
2864 dsd->dsc->pc_adjust = offset;
2865}
2866
2867/* Implementation of aarch64_insn_visitor method "tb". */
2868
2869static void
2870aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
2871 const unsigned rt, unsigned bit,
2872 struct aarch64_insn_data *data)
2873{
2874 struct aarch64_displaced_step_data *dsd
2875 = (struct aarch64_displaced_step_data *) data;
2876
2877 /* The offset is out of range for a test bit and branch
2878 instruction. We can use the following instructions instead:
2879
2880 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
2881 INSN1 ;
2882 TAKEN:
2883 INSN2
2884
2885 */
2886 emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
2887 dsd->insn_count = 1;
2888 dsd->dsc->cond = 1;
2889 dsd->dsc->pc_adjust = offset;
2890}
2891
2892/* Implementation of aarch64_insn_visitor method "adr". */
2893
2894static void
2895aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
2896 const int is_adrp, struct aarch64_insn_data *data)
2897{
2898 struct aarch64_displaced_step_data *dsd
2899 = (struct aarch64_displaced_step_data *) data;
2900 /* We know exactly the address the ADR{P,} instruction will compute.
2901 We can just write it to the destination register. */
2902 CORE_ADDR address = data->insn_addr + offset;
2903
2904 if (is_adrp)
2905 {
2906 /* Clear the lower 12 bits of the offset to get the 4K page. */
2907 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2908 address & ~0xfff);
2909 }
2910 else
2911 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2912 address);
2913
2914 dsd->dsc->pc_adjust = 4;
2915 emit_nop (dsd->insn_buf);
2916 dsd->insn_count = 1;
2917}
2918
2919/* Implementation of aarch64_insn_visitor method "ldr_literal". */
2920
2921static void
2922aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
2923 const unsigned rt, const int is64,
2924 struct aarch64_insn_data *data)
2925{
2926 struct aarch64_displaced_step_data *dsd
2927 = (struct aarch64_displaced_step_data *) data;
2928 CORE_ADDR address = data->insn_addr + offset;
2929 struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };
2930
2931 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
2932 address);
2933
2934 if (is_sw)
2935 dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
2936 aarch64_register (rt, 1), zero);
2937 else
2938 dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
2939 aarch64_register (rt, 1), zero);
2940
2941 dsd->dsc->pc_adjust = 4;
2942}
2943
2944/* Implementation of aarch64_insn_visitor method "others". */
2945
2946static void
2947aarch64_displaced_step_others (const uint32_t insn,
2948 struct aarch64_insn_data *data)
2949{
2950 struct aarch64_displaced_step_data *dsd
2951 = (struct aarch64_displaced_step_data *) data;
2952
2953 aarch64_emit_insn (dsd->insn_buf, insn);
2954 dsd->insn_count = 1;
2955
2956 if ((insn & 0xfffffc1f) == 0xd65f0000)
2957 {
2958 /* RET */
2959 dsd->dsc->pc_adjust = 0;
2960 }
2961 else
2962 dsd->dsc->pc_adjust = 4;
2963}
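/* The mask 0xfffffc1f with value 0xd65f0000 matches RET <Xn> for any Rn
   field (bits 5-9). RET supplies the new PC itself from the register,
   so no adjustment is needed; every other relocated instruction falls
   through and the PC must advance by 4. */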
2964
2965static const struct aarch64_insn_visitor visitor =
2966{
2967 aarch64_displaced_step_b,
2968 aarch64_displaced_step_b_cond,
2969 aarch64_displaced_step_cb,
2970 aarch64_displaced_step_tb,
2971 aarch64_displaced_step_adr,
2972 aarch64_displaced_step_ldr_literal,
2973 aarch64_displaced_step_others,
2974};
2975
2976/* Implement the "displaced_step_copy_insn" gdbarch method. */
2977
2978struct displaced_step_closure *
2979aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
2980 CORE_ADDR from, CORE_ADDR to,
2981 struct regcache *regs)
2982{
2983 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2984 uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
2985 struct aarch64_displaced_step_data dsd;
2986 aarch64_inst inst;
2987
2988 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2989 return NULL;
2990
2991 /* Look for a Load Exclusive instruction which begins the sequence. */
2992 if (inst.opcode->iclass == ldstexcl && bit (insn, 22))
2993 {
2994 /* We can't displaced step atomic sequences. */
2995 return NULL;
2996 }
2997
2998 std::unique_ptr<aarch64_displaced_step_closure> dsc
2999 (new aarch64_displaced_step_closure);
3000 dsd.base.insn_addr = from;
3001 dsd.new_addr = to;
3002 dsd.regs = regs;
3003 dsd.dsc = dsc.get ();
3004 dsd.insn_count = 0;
3005 aarch64_relocate_instruction (insn, &visitor,
3006 (struct aarch64_insn_data *) &dsd);
3007 gdb_assert (dsd.insn_count <= AARCH64_DISPLACED_MODIFIED_INSNS);
3008
3009 if (dsd.insn_count != 0)
3010 {
3011 int i;
3012
3013 /* Instruction can be relocated to scratch pad. Copy
3014 relocated instruction(s) there. */
3015 for (i = 0; i < dsd.insn_count; i++)
3016 {
3017 if (debug_displaced)
3018 {
3019 debug_printf ("displaced: writing insn ");
3020 debug_printf ("%.8x", dsd.insn_buf[i]);
3021 debug_printf (" at %s\n", paddress (gdbarch, to + i * 4));
3022 }
3023 write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
3024 (ULONGEST) dsd.insn_buf[i]);
3025 }
3026 }
3027 else
3028 {
3029 dsc = NULL;
3030 }
3031
3032 return dsc.release ();
3033}
3034
3035/* Implement the "displaced_step_fixup" gdbarch method. */
3036
3037void
3038aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
3039 struct displaced_step_closure *dsc_,
3040 CORE_ADDR from, CORE_ADDR to,
3041 struct regcache *regs)
3042{
3043 aarch64_displaced_step_closure *dsc = (aarch64_displaced_step_closure *) dsc_;
3044
3045 if (dsc->cond)
3046 {
3047 ULONGEST pc;
3048
3049 regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc);
3050 if (pc - to == 8)
3051 {
3052 /* Condition is true. */
3053 }
3054 else if (pc - to == 4)
3055 {
3056 /* Condition is false. */
3057 dsc->pc_adjust = 4;
3058 }
3059 else
3060 gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
3061 }
3062
3063 if (dsc->pc_adjust != 0)
3064 {
3065 if (debug_displaced)
3066 {
3067 debug_printf ("displaced: fixup: set PC to %s:%d\n",
3068 paddress (gdbarch, from), dsc->pc_adjust);
3069 }
3070 regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
3071 from + dsc->pc_adjust);
3072 }
3073}
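/* Sketch with hypothetical addresses: for a copied conditional branch
   the scratch pad holds the branch plus one slot, so stopping at TO + 8
   means the branch was taken (the PC becomes FROM + pc_adjust, i.e. the
   original target) while TO + 4 means it fell through (the PC becomes
   FROM + 4). */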
3074
3075/* Implement the "displaced_step_hw_singlestep" gdbarch method. */
3076
3077int
3078aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch,
3079 struct displaced_step_closure *closure)
3080{
3081 return 1;
3082}
3083
3084/* Get the correct target description for the given VQ value.
3085 If VQ is zero then it is assumed SVE is not supported.
3086 (It is not possible to set VQ to zero on an SVE system). */
3087
3088const target_desc *
3089aarch64_read_description (uint64_t vq, bool pauth_p)
3090{
3091 if (vq > AARCH64_MAX_SVE_VQ)
3092 error (_("VQ is %" PRIu64 ", maximum supported value is %d"), vq,
3093 AARCH64_MAX_SVE_VQ);
3094
3095 struct target_desc *tdesc = tdesc_aarch64_list[vq][pauth_p];
3096
3097 if (tdesc == NULL)
3098 {
3099 tdesc = aarch64_create_target_description (vq, pauth_p);
3100 tdesc_aarch64_list[vq][pauth_p] = tdesc;
3101 }
3102
3103 return tdesc;
3104}
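/* For example, aarch64_read_description (2, true) returns (and caches
   in tdesc_aarch64_list) a description with 256-bit SVE vectors -- a VQ
   of 2 means two 128-bit quadwords -- plus the pauth feature. */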
3105
3106/* Return the VQ used when creating the target description TDESC. */
3107
3108static uint64_t
3109aarch64_get_tdesc_vq (const struct target_desc *tdesc)
3110{
3111 const struct tdesc_feature *feature_sve;
3112
3113 if (!tdesc_has_registers (tdesc))
3114 return 0;
3115
3116 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
3117
3118 if (feature_sve == nullptr)
3119 return 0;
3120
3121 uint64_t vl = tdesc_register_bitsize (feature_sve,
3122 aarch64_sve_register_names[0]) / 8;
3123 return sve_vq_from_vl (vl);
3124}
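/* E.g. if the description reports z0 as 256 bits wide, VL is 32 bytes
   and the resulting VQ is 2: the vector length counted in 128-bit
   quadwords. */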
3125
3126/* Add all the expected register sets into GDBARCH. */
3127
3128static void
3129aarch64_add_reggroups (struct gdbarch *gdbarch)
3130{
3131 reggroup_add (gdbarch, general_reggroup);
3132 reggroup_add (gdbarch, float_reggroup);
3133 reggroup_add (gdbarch, system_reggroup);
3134 reggroup_add (gdbarch, vector_reggroup);
3135 reggroup_add (gdbarch, all_reggroup);
3136 reggroup_add (gdbarch, save_reggroup);
3137 reggroup_add (gdbarch, restore_reggroup);
3138}
3139
3140/* Implement the "cannot_store_register" gdbarch method. */
3141
3142static int
3143aarch64_cannot_store_register (struct gdbarch *gdbarch, int regnum)
3144{
3145 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3146
3147 if (!tdep->has_pauth ())
3148 return 0;
3149
3150 /* Pointer authentication registers are read-only. */
3151 return (regnum == AARCH64_PAUTH_DMASK_REGNUM (tdep->pauth_reg_base)
3152 || regnum == AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base));
3153}
3154
3155/* Initialize the current architecture based on INFO. If possible,
3156 re-use an architecture from ARCHES, which is a list of
3157 architectures already created during this debugging session.
3158
3159 Called e.g. at program startup, when reading a core file, and when
3160 reading a binary file. */
3161
3162static struct gdbarch *
3163aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
3164{
3165 const struct tdesc_feature *feature_core, *feature_fpu, *feature_sve;
3166 const struct tdesc_feature *feature_pauth;
3167 bool valid_p = true;
3168 int i, num_regs = 0, num_pseudo_regs = 0;
3169 int first_pauth_regnum = -1, pauth_ra_state_offset = -1;
3170
3171 /* Use the vector length passed via the target info. Here -1 is used for no
3172 SVE, and 0 is unset. If unset then use the vector length from the existing
3173 tdesc. */
3174 uint64_t vq = 0;
3175 if (info.id == (int *) -1)
3176 vq = 0;
3177 else if (info.id != 0)
3178 vq = (uint64_t) info.id;
3179 else
3180 vq = aarch64_get_tdesc_vq (info.target_desc);
3181
3182 if (vq > AARCH64_MAX_SVE_VQ)
3183 internal_error (__FILE__, __LINE__, _("VQ out of bounds: %s (max %d)"),
3184 pulongest (vq), AARCH64_MAX_SVE_VQ);
3185
3186 /* If there is already a candidate, use it. */
3187 for (gdbarch_list *best_arch = gdbarch_list_lookup_by_info (arches, &info);
3188 best_arch != nullptr;
3189 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
3190 {
3191 struct gdbarch_tdep *tdep = gdbarch_tdep (best_arch->gdbarch);
3192 if (tdep && tdep->vq == vq)
3193 return best_arch->gdbarch;
3194 }
3195
3196 /* Ensure we always have a target descriptor, and that it is for the given VQ
3197 value. */
3198 const struct target_desc *tdesc = info.target_desc;
3199 if (!tdesc_has_registers (tdesc) || vq != aarch64_get_tdesc_vq (tdesc))
3200 tdesc = aarch64_read_description (vq, false);
3201 gdb_assert (tdesc);
3202
3203 feature_core = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");
3204 feature_fpu = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
3205 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
3206 feature_pauth = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.pauth");
3207
3208 if (feature_core == nullptr)
3209 return nullptr;
3210
3211 struct tdesc_arch_data *tdesc_data = tdesc_data_alloc ();
3212
3213 /* Validate the description provides the mandatory core R registers
3214 and allocate their numbers. */
3215 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
3216 valid_p &= tdesc_numbered_register (feature_core, tdesc_data,
3217 AARCH64_X0_REGNUM + i,
3218 aarch64_r_register_names[i]);
3219
3220 num_regs = AARCH64_X0_REGNUM + i;
3221
3222 /* Add the V registers. */
3223 if (feature_fpu != nullptr)
3224 {
3225 if (feature_sve != nullptr)
3226 error (_("Program contains both fpu and SVE features."));
3227
3228 /* Validate the description provides the mandatory V registers
3229 and allocate their numbers. */
3230 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
3231 valid_p &= tdesc_numbered_register (feature_fpu, tdesc_data,
3232 AARCH64_V0_REGNUM + i,
3233 aarch64_v_register_names[i]);
3234
3235 num_regs = AARCH64_V0_REGNUM + i;
3236 }
3237
3238 /* Add the SVE registers. */
3239 if (feature_sve != nullptr)
3240 {
3241 /* Validate the description provides the mandatory SVE registers
3242 and allocate their numbers. */
3243 for (i = 0; i < ARRAY_SIZE (aarch64_sve_register_names); i++)
3244 valid_p &= tdesc_numbered_register (feature_sve, tdesc_data,
3245 AARCH64_SVE_Z0_REGNUM + i,
3246 aarch64_sve_register_names[i]);
3247
3248 num_regs = AARCH64_SVE_Z0_REGNUM + i;
3249 num_pseudo_regs += 32; /* add the Vn register pseudos. */
3250 }
3251
3252 if (feature_fpu != nullptr || feature_sve != nullptr)
3253 {
3254 num_pseudo_regs += 32; /* add the Qn scalar register pseudos */
3255 num_pseudo_regs += 32; /* add the Dn scalar register pseudos */
3256 num_pseudo_regs += 32; /* add the Sn scalar register pseudos */
3257 num_pseudo_regs += 32; /* add the Hn scalar register pseudos */
3258 num_pseudo_regs += 32; /* add the Bn scalar register pseudos */
3259 }
3260
3261 /* Add the pauth registers. */
3262 if (feature_pauth != NULL)
3263 {
3264 first_pauth_regnum = num_regs;
3265 pauth_ra_state_offset = num_pseudo_regs;
3266 /* Validate the descriptor provides the mandatory PAUTH registers and
3267 allocate their numbers. */
3268 for (i = 0; i < ARRAY_SIZE (aarch64_pauth_register_names); i++)
3269 valid_p &= tdesc_numbered_register (feature_pauth, tdesc_data,
3270 first_pauth_regnum + i,
3271 aarch64_pauth_register_names[i]);
3272
3273 num_regs += i;
3274 num_pseudo_regs += 1; /* Count RA_STATE pseudo register. */
3275 }
3276
3277 if (!valid_p)
3278 {
3279 tdesc_data_cleanup (tdesc_data);
3280 return nullptr;
3281 }
3282
3283 /* AArch64 code is always little-endian. */
3284 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
3285
3286 struct gdbarch_tdep *tdep = XCNEW (struct gdbarch_tdep);
3287 struct gdbarch *gdbarch = gdbarch_alloc (&info, tdep);
3288
3289 /* This should be low enough for everything. */
3290 tdep->lowest_pc = 0x20;
3291 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
3292 tdep->jb_elt_size = 8;
3293 tdep->vq = vq;
3294 tdep->pauth_reg_base = first_pauth_regnum;
3295 tdep->pauth_ra_state_regnum = (feature_pauth == NULL) ? -1
3296 : pauth_ra_state_offset + num_regs;
3297
3298 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
3299 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
3300
3301 /* Advance PC across function entry code. */
3302 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
3303
3304 /* The stack grows downward. */
3305 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
3306
3307 /* Breakpoint manipulation. */
3308 set_gdbarch_breakpoint_kind_from_pc (gdbarch,
3309 aarch64_breakpoint::kind_from_pc);
3310 set_gdbarch_sw_breakpoint_from_kind (gdbarch,
3311 aarch64_breakpoint::bp_from_kind);
3312 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
3313 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
3314
3315 /* Information about registers, etc. */
3316 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
3317 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
3318 set_gdbarch_num_regs (gdbarch, num_regs);
3319
3320 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
3321 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
3322 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
3323 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
3324 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
3325 set_tdesc_pseudo_register_reggroup_p (gdbarch,
3326 aarch64_pseudo_register_reggroup_p);
3327 set_gdbarch_cannot_store_register (gdbarch, aarch64_cannot_store_register);
3328
3329 /* ABI */
3330 set_gdbarch_short_bit (gdbarch, 16);
3331 set_gdbarch_int_bit (gdbarch, 32);
3332 set_gdbarch_float_bit (gdbarch, 32);
3333 set_gdbarch_double_bit (gdbarch, 64);
3334 set_gdbarch_long_double_bit (gdbarch, 128);
3335 set_gdbarch_long_bit (gdbarch, 64);
3336 set_gdbarch_long_long_bit (gdbarch, 64);
3337 set_gdbarch_ptr_bit (gdbarch, 64);
3338 set_gdbarch_char_signed (gdbarch, 0);
3339 set_gdbarch_wchar_signed (gdbarch, 0);
3340 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
3341 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
3342 set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);
3343 set_gdbarch_type_align (gdbarch, aarch64_type_align);
3344
3345 /* Internal <-> external register number maps. */
3346 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
3347
3348 /* Returning results. */
3349 set_gdbarch_return_value (gdbarch, aarch64_return_value);
3350
3351 /* Disassembly. */
3352 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
3353
3354 /* Virtual tables. */
3355 set_gdbarch_vbit_in_delta (gdbarch, 1);
3356
3357 /* Register architecture. */
3358 aarch64_add_reggroups (gdbarch);
3359
3360 /* Hook in the ABI-specific overrides, if they have been registered. */
3361 info.target_desc = tdesc;
3362 info.tdesc_data = tdesc_data;
3363 gdbarch_init_osabi (info, gdbarch);
3364
3365 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
3366 /* Register DWARF CFA vendor handler. */
3367 set_gdbarch_execute_dwarf_cfa_vendor_op (gdbarch,
3368 aarch64_execute_dwarf_cfa_vendor_op);
3369
3370 /* Add some default predicates. */
3371 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
3372 dwarf2_append_unwinders (gdbarch);
3373 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
3374
3375 frame_base_set_default (gdbarch, &aarch64_normal_base);
3376
3377 /* Now we have tuned the configuration, set a few final things,
3378 based on what the OS ABI has told us. */
3379
3380 if (tdep->jb_pc >= 0)
3381 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
3382
3383 set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);
3384
3385 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
3386
3387 /* Add standard register aliases. */
3388 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
3389 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
3390 value_of_aarch64_user_reg,
3391 &aarch64_register_aliases[i].regnum);
3392
3393 register_aarch64_ravenscar_ops (gdbarch);
3394
3395 return gdbarch;
3396}
3397
3398static void
3399aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
3400{
3401 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3402
3403 if (tdep == NULL)
3404 return;
3405
3406  fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = %s\n"),
3407 paddress (gdbarch, tdep->lowest_pc));
3408}
3409
0d4c07af 3410#if GDB_SELF_TEST
3411namespace selftests
3412{
3413static void aarch64_process_record_test (void);
3414}
0d4c07af 3415#endif
3416
3417void
3418_initialize_aarch64_tdep (void)
3419{
3420 gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
3421 aarch64_dump_tdep);
3422
3423 /* Debug this file's internals. */
3424 add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
3425Set AArch64 debugging."), _("\
3426Show AArch64 debugging."), _("\
3427When on, AArch64 specific debugging is enabled."),
3428 NULL,
3429 show_aarch64_debug,
3430 &setdebuglist, &showdebuglist);
3431
3432#if GDB_SELF_TEST
3433 selftests::register_test ("aarch64-analyze-prologue",
3434 selftests::aarch64_analyze_prologue_test);
3435 selftests::register_test ("aarch64-process-record",
3436 selftests::aarch64_process_record_test);
4d9a9006 3437#endif
07b287a0 3438}
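
/* Usage sketch (illustrative, not part of the original source): the
   maintenance switch registered above is driven from the CLI, e.g.

     (gdb) set debug aarch64 on
     (gdb) show debug aarch64

   which toggles the aarch64_debug flag tested throughout this file.  The
   record handlers below additionally honour record_debug, controlled by
   "set debug record".  */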
3439
3440/* Structures, defines, etc., related to AArch64 process record-replay.  */
3441
3442#define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
3443 do \
3444 { \
3445 unsigned int reg_len = LENGTH; \
3446 if (reg_len) \
3447 { \
3448 REGS = XNEWVEC (uint32_t, reg_len); \
3449	  memcpy (&REGS[0], &RECORD_BUF[0], sizeof (uint32_t) * LENGTH); \
3450 } \
3451 } \
3452 while (0)
3453
3454#define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
3455 do \
3456 { \
3457 unsigned int mem_len = LENGTH; \
3458 if (mem_len) \
3459 { \
3460 MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
3461	  memcpy (&MEMS->len, &RECORD_BUF[0], \
3462		  sizeof (struct aarch64_mem_r) * LENGTH); \
3463 } \
3464 } \
3465 while (0)
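
/* Illustrative sketch (assumed, not from the original source): a record
   handler stages its results in local buffers and publishes them through
   the macros above, e.g.

     uint32_t record_buf[2];
     record_buf[0] = reg_rd;
     record_buf[1] = AARCH64_CPSR_REGNUM;
     aarch64_insn_r->reg_rec_count = 2;
     REG_ALLOC (aarch64_insn_r->aarch64_regs,
		aarch64_insn_r->reg_rec_count, record_buf);

   Both macros are no-ops when LENGTH is zero, so a zero-initialized
   insn_decode_record keeps aarch64_regs/aarch64_mems NULL and remains
   safe to pass to xfree via deallocate_reg_mem.  */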
3466
3467/* AArch64 record/replay structures and enumerations. */
3468
3469struct aarch64_mem_r
3470{
3471 uint64_t len; /* Record length. */
3472 uint64_t addr; /* Memory address. */
3473};
3474
3475enum aarch64_record_result
3476{
3477 AARCH64_RECORD_SUCCESS,
3478 AARCH64_RECORD_UNSUPPORTED,
3479 AARCH64_RECORD_UNKNOWN
3480};
3481
3482typedef struct insn_decode_record_t
3483{
3484 struct gdbarch *gdbarch;
3485 struct regcache *regcache;
3486 CORE_ADDR this_addr; /* Address of insn to be recorded. */
3487 uint32_t aarch64_insn; /* Insn to be recorded. */
3488 uint32_t mem_rec_count; /* Count of memory records. */
3489 uint32_t reg_rec_count; /* Count of register records. */
3490 uint32_t *aarch64_regs; /* Registers to be recorded. */
3491 struct aarch64_mem_r *aarch64_mems; /* Memory locations to be recorded. */
3492} insn_decode_record;
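
/* For illustration (values assumed, not from the original source): after
   aarch64_record_decode_insn_handler processes "adds x0, x1, x2"
   (encoding 0xab020020), the record holds

     aarch64_insn  = 0xab020020
     reg_rec_count = 2		(x0 and CPSR, in aarch64_regs)
     mem_rec_count = 0

   The caller then merges these with the always-recorded PC and CPSR in
   aarch64_process_record below.  */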
3493
3494/* Record handler for data processing - register instructions. */
3495
3496static unsigned int
3497aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
3498{
3499 uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
3500 uint32_t record_buf[4];
3501
3502 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3503 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3504 insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);
3505
3506 if (!bit (aarch64_insn_r->aarch64_insn, 28))
3507 {
3508 uint8_t setflags;
3509
3510 /* Logical (shifted register). */
3511 if (insn_bits24_27 == 0x0a)
3512 setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
3513 /* Add/subtract. */
3514 else if (insn_bits24_27 == 0x0b)
3515 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3516 else
3517 return AARCH64_RECORD_UNKNOWN;
3518
3519 record_buf[0] = reg_rd;
3520 aarch64_insn_r->reg_rec_count = 1;
3521 if (setflags)
3522 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3523 }
3524 else
3525 {
3526 if (insn_bits24_27 == 0x0b)
3527 {
3528 /* Data-processing (3 source). */
3529 record_buf[0] = reg_rd;
3530 aarch64_insn_r->reg_rec_count = 1;
3531 }
3532 else if (insn_bits24_27 == 0x0a)
3533 {
3534 if (insn_bits21_23 == 0x00)
3535 {
3536 /* Add/subtract (with carry). */
3537 record_buf[0] = reg_rd;
3538 aarch64_insn_r->reg_rec_count = 1;
3539 if (bit (aarch64_insn_r->aarch64_insn, 29))
3540 {
3541 record_buf[1] = AARCH64_CPSR_REGNUM;
3542 aarch64_insn_r->reg_rec_count = 2;
3543 }
3544 }
3545 else if (insn_bits21_23 == 0x02)
3546 {
3547 /* Conditional compare (register) and conditional compare
3548 (immediate) instructions. */
3549 record_buf[0] = AARCH64_CPSR_REGNUM;
3550 aarch64_insn_r->reg_rec_count = 1;
3551 }
3552 else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
3553 {
3554	      /* Conditional select.  */
3555 /* Data-processing (2 source). */
3556 /* Data-processing (1 source). */
3557 record_buf[0] = reg_rd;
3558 aarch64_insn_r->reg_rec_count = 1;
3559 }
3560 else
3561 return AARCH64_RECORD_UNKNOWN;
3562 }
3563 }
3564
3565 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3566 record_buf);
3567 return AARCH64_RECORD_SUCCESS;
3568}
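
/* Worked example (assumed encoding, not from the original source):
   "ccmp x1, x2, #0x0, eq" encodes as 0xfa420020; bit 28 is set,
   bits 24-27 are 0x0a and bits 21-23 are 0x02, so the handler above
   takes the conditional-compare arm and records only CPSR.  */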
3569
3570/* Record handler for data processing - immediate instructions. */
3571
3572static unsigned int
3573aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
3574{
78cc6c2d 3575 uint8_t reg_rd, insn_bit23, insn_bits24_27, setflags;
3576 uint32_t record_buf[4];
3577
3578 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3579 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3580 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3581
3582 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
3583 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
3584 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
3585 {
3586 record_buf[0] = reg_rd;
3587 aarch64_insn_r->reg_rec_count = 1;
3588 }
3589 else if (insn_bits24_27 == 0x01)
3590 {
3591 /* Add/Subtract (immediate). */
3592 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3593 record_buf[0] = reg_rd;
3594 aarch64_insn_r->reg_rec_count = 1;
3595 if (setflags)
3596 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3597 }
3598 else if (insn_bits24_27 == 0x02 && !insn_bit23)
3599 {
3600 /* Logical (immediate). */
3601 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
3602 record_buf[0] = reg_rd;
3603 aarch64_insn_r->reg_rec_count = 1;
3604 if (setflags)
3605 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3606 }
3607 else
3608 return AARCH64_RECORD_UNKNOWN;
3609
3610 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3611 record_buf);
3612 return AARCH64_RECORD_SUCCESS;
3613}
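
/* Worked example (assumed encoding, not from the original source):
   "add x0, x1, #1" encodes as 0x91000420; bits 24-27 are 0x01, so the
   add/subtract (immediate) arm applies, and with bit 29 (S) clear only
   x0 is recorded.  The flag-setting form "adds x0, x1, #1" (0xb1000420)
   would additionally record CPSR.  */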
3614
3615/* Record handler for branch, exception generation and system instructions. */
3616
3617static unsigned int
3618aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
3619{
3620 struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
3621 uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
3622 uint32_t record_buf[4];
3623
3624 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3625 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3626 insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3627
3628 if (insn_bits28_31 == 0x0d)
3629 {
3630 /* Exception generation instructions. */
3631 if (insn_bits24_27 == 0x04)
3632 {
3633 if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
3634 && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
3635 && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
3636 {
3637 ULONGEST svc_number;
3638
3639 regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
3640 &svc_number);
3641 return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
3642 svc_number);
3643 }
3644 else
3645 return AARCH64_RECORD_UNSUPPORTED;
3646 }
3647 /* System instructions. */
3648 else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
3649 {
3650 uint32_t reg_rt, reg_crn;
3651
3652 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3653 reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3654
3655	  /* Record Rt for SYSL and MRS instructions.  */
3656 if (bit (aarch64_insn_r->aarch64_insn, 21))
3657 {
3658 record_buf[0] = reg_rt;
3659 aarch64_insn_r->reg_rec_count = 1;
3660 }
3661	  /* Record CPSR for HINT and MSR (immediate) instructions.  */
3662 else if (reg_crn == 0x02 || reg_crn == 0x04)
3663 {
3664 record_buf[0] = AARCH64_CPSR_REGNUM;
3665 aarch64_insn_r->reg_rec_count = 1;
3666 }
3667 }
3668 /* Unconditional branch (register). */
3669      else if ((insn_bits24_27 & 0x0e) == 0x06)
3670 {
3671 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3672 if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
3673 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3674 }
3675 else
3676 return AARCH64_RECORD_UNKNOWN;
3677 }
3678 /* Unconditional branch (immediate). */
3679 else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
3680 {
3681 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3682 if (bit (aarch64_insn_r->aarch64_insn, 31))
3683 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3684 }
3685 else
3686 /* Compare & branch (immediate), Test & branch (immediate) and
3687 Conditional branch (immediate). */
3688 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3689
3690 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3691 record_buf);
3692 return AARCH64_RECORD_SUCCESS;
3693}
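
/* Worked examples (assumed encodings, not from the original source):
   "bl" with a zero offset encodes as 0x94000000; bits 28-31 are 0x9 and
   bits 24-27 are 0x4, matching the unconditional branch (immediate)
   test, and since bit 31 is set both PC and LR are recorded.  "svc #0"
   (0xd4000001) instead reaches the exception-generation arm, which
   delegates to the OS-specific tdep->aarch64_syscall_record with the
   syscall number read from x8.  */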
3694
3695/* Record handler for advanced SIMD load and store instructions. */
3696
3697static unsigned int
3698aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
3699{
3700 CORE_ADDR address;
3701 uint64_t addr_offset = 0;
  /* Sized for the worst case below: LD/ST1 (4 registers) with 16 elements
     yields 64 register records (plus the writeback base register), and
     each memory record occupies two entries.  */
3702  uint32_t record_buf[68];
3703  uint64_t record_buf_mem[128];
3704 uint32_t reg_rn, reg_rt;
3705 uint32_t reg_index = 0, mem_index = 0;
3706 uint8_t opcode_bits, size_bits;
3707
3708 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3709 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3710 size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3711 opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3712 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);
3713
3714 if (record_debug)
b277c936 3715 debug_printf ("Process record: Advanced SIMD load/store\n");
3716
3717 /* Load/store single structure. */
3718 if (bit (aarch64_insn_r->aarch64_insn, 24))
3719 {
3720 uint8_t sindex, scale, selem, esize, replicate = 0;
3721 scale = opcode_bits >> 2;
3722      selem = ((opcode_bits & 0x02)
3723		| bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
3724 switch (scale)
3725 {
3726 case 1:
3727 if (size_bits & 0x01)
3728 return AARCH64_RECORD_UNKNOWN;
3729 break;
3730 case 2:
3731 if ((size_bits >> 1) & 0x01)
3732 return AARCH64_RECORD_UNKNOWN;
3733 if (size_bits & 0x01)
3734 {
3735 if (!((opcode_bits >> 1) & 0x01))
3736 scale = 3;
3737 else
3738 return AARCH64_RECORD_UNKNOWN;
3739 }
3740 break;
3741 case 3:
3742 if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
3743 {
3744 scale = size_bits;
3745 replicate = 1;
3746 break;
3747 }
3748 else
3749 return AARCH64_RECORD_UNKNOWN;
3750 default:
3751 break;
3752 }
3753 esize = 8 << scale;
3754 if (replicate)
3755 for (sindex = 0; sindex < selem; sindex++)
3756 {
3757 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3758 reg_rt = (reg_rt + 1) % 32;
3759 }
3760 else
3761 {
3762 for (sindex = 0; sindex < selem; sindex++)
3763 {
3764 if (bit (aarch64_insn_r->aarch64_insn, 22))
3765 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3766 else
3767 {
3768 record_buf_mem[mem_index++] = esize / 8;
3769 record_buf_mem[mem_index++] = address + addr_offset;
3770 }
3771 addr_offset = addr_offset + (esize / 8);
3772 reg_rt = (reg_rt + 1) % 32;
3773 }
3774 }
3775 }
3776 /* Load/store multiple structure. */
3777 else
3778 {
3779 uint8_t selem, esize, rpt, elements;
3780 uint8_t eindex, rindex;
3781
3782 esize = 8 << size_bits;
3783 if (bit (aarch64_insn_r->aarch64_insn, 30))
3784 elements = 128 / esize;
3785 else
3786 elements = 64 / esize;
3787
3788 switch (opcode_bits)
3789 {
3790	  /* LD/ST4 (4 registers).  */
3791 case 0:
3792 rpt = 1;
3793 selem = 4;
3794 break;
3795	  /* LD/ST1 (4 registers).  */
3796 case 2:
3797 rpt = 4;
3798 selem = 1;
3799 break;
3800	  /* LD/ST3 (3 registers).  */
3801 case 4:
3802 rpt = 1;
3803 selem = 3;
3804 break;
3805	  /* LD/ST1 (3 registers).  */
3806 case 6:
3807 rpt = 3;
3808 selem = 1;
3809 break;
3810	  /* LD/ST1 (1 register).  */
3811 case 7:
3812 rpt = 1;
3813 selem = 1;
3814 break;
3815	  /* LD/ST2 (2 registers).  */
3816 case 8:
3817 rpt = 1;
3818 selem = 2;
3819 break;
3820	  /* LD/ST1 (2 registers).  */
3821 case 10:
3822 rpt = 2;
3823 selem = 1;
3824 break;
3825	default:
3826	  return AARCH64_RECORD_UNSUPPORTED;
3828 }
3829 for (rindex = 0; rindex < rpt; rindex++)
3830 for (eindex = 0; eindex < elements; eindex++)
3831 {
3832 uint8_t reg_tt, sindex;
3833 reg_tt = (reg_rt + rindex) % 32;
3834 for (sindex = 0; sindex < selem; sindex++)
3835 {
3836 if (bit (aarch64_insn_r->aarch64_insn, 22))
3837 record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
3838 else
3839 {
3840 record_buf_mem[mem_index++] = esize / 8;
3841 record_buf_mem[mem_index++] = address + addr_offset;
3842 }
3843 addr_offset = addr_offset + (esize / 8);
3844 reg_tt = (reg_tt + 1) % 32;
3845 }
3846 }
3847 }
3848
3849 if (bit (aarch64_insn_r->aarch64_insn, 23))
3850 record_buf[reg_index++] = reg_rn;
3851
3852 aarch64_insn_r->reg_rec_count = reg_index;
3853 aarch64_insn_r->mem_rec_count = mem_index / 2;
3854 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3855 record_buf_mem);
3856 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3857 record_buf);
3858 return AARCH64_RECORD_SUCCESS;
3859}
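
/* Worked example (assumed encoding, not from the original source):
   "st1 {v0.8b}, [x0]" encodes as 0x0c007000: a store of one register
   with opcode_bits 0x7 (rpt = 1, selem = 1), esize = 8 and eight
   elements, so the loop above emits eight one-byte memory records
   covering x0 .. x0 + 7, and no register records since bit 23
   (writeback) is clear.  */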
3860
3861/* Record handler for load and store instructions. */
3862
3863static unsigned int
3864aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
3865{
3866 uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
3867 uint8_t insn_bit23, insn_bit21;
3868 uint8_t opc, size_bits, ld_flag, vector_flag;
3869 uint32_t reg_rn, reg_rt, reg_rt2;
3870 uint64_t datasize, offset;
3871 uint32_t record_buf[8];
3872 uint64_t record_buf_mem[8];
3873 CORE_ADDR address;
3874
3875 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3876 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3877 insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
3878 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3879 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3880 ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
3881 vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
3882 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3883 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3884 reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
3885 size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);
3886
3887 /* Load/store exclusive. */
3888 if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
3889 {
3890 if (record_debug)
b277c936 3891 debug_printf ("Process record: load/store exclusive\n");
3892
3893 if (ld_flag)
3894 {
3895 record_buf[0] = reg_rt;
3896 aarch64_insn_r->reg_rec_count = 1;
3897 if (insn_bit21)
3898 {
3899 record_buf[1] = reg_rt2;
3900 aarch64_insn_r->reg_rec_count = 2;
3901 }
3902 }
3903 else
3904 {
3905 if (insn_bit21)
3906 datasize = (8 << size_bits) * 2;
3907 else
3908 datasize = (8 << size_bits);
3909 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3910 &address);
3911 record_buf_mem[0] = datasize / 8;
3912 record_buf_mem[1] = address;
3913 aarch64_insn_r->mem_rec_count = 1;
3914 if (!insn_bit23)
3915 {
3916 /* Save register rs. */
3917 record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
3918 aarch64_insn_r->reg_rec_count = 1;
3919 }
3920 }
3921 }
3922 /* Load register (literal) instructions decoding. */
3923 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
3924 {
3925 if (record_debug)
b277c936 3926 debug_printf ("Process record: load register (literal)\n");
3927 if (vector_flag)
3928 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3929 else
3930 record_buf[0] = reg_rt;
3931 aarch64_insn_r->reg_rec_count = 1;
3932 }
3933 /* All types of load/store pair instructions decoding. */
3934 else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
3935 {
3936 if (record_debug)
b277c936 3937 debug_printf ("Process record: load/store pair\n");
3938
3939 if (ld_flag)
3940 {
3941 if (vector_flag)
3942 {
3943 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3944 record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
3945 }
3946 else
3947 {
3948 record_buf[0] = reg_rt;
3949 record_buf[1] = reg_rt2;
3950 }
3951 aarch64_insn_r->reg_rec_count = 2;
3952 }
3953 else
3954 {
3955 uint16_t imm7_off;
3956 imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
3957 if (!vector_flag)
3958 size_bits = size_bits >> 1;
3959 datasize = 8 << (2 + size_bits);
3960 offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
3961 offset = offset << (2 + size_bits);
3962 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3963 &address);
3964 if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
3965 {
3966 if (imm7_off & 0x40)
3967 address = address - offset;
3968 else
3969 address = address + offset;
3970 }
3971
3972 record_buf_mem[0] = datasize / 8;
3973 record_buf_mem[1] = address;
3974 record_buf_mem[2] = datasize / 8;
3975 record_buf_mem[3] = address + (datasize / 8);
3976 aarch64_insn_r->mem_rec_count = 2;
3977 }
3978 if (bit (aarch64_insn_r->aarch64_insn, 23))
3979 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3980 }
3981 /* Load/store register (unsigned immediate) instructions. */
3982 else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
3983 {
3984 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3985 if (!(opc >> 1))
3986 {
3987 if (opc & 0x01)
3988 ld_flag = 0x01;
3989 else
3990 ld_flag = 0x0;
3991 }
99afc88b 3992 else
33877125 3993 {
3994 if (size_bits == 0x3 && vector_flag == 0x0 && opc == 0x2)
3995 {
3996 /* PRFM (immediate) */
3997 return AARCH64_RECORD_SUCCESS;
3998 }
3999 else if (size_bits == 0x2 && vector_flag == 0x0 && opc == 0x2)
4000 {
4001 /* LDRSW (immediate) */
4002 ld_flag = 0x1;
4003 }
33877125 4004 else
4005 {
4006 if (opc & 0x01)
4007 ld_flag = 0x01;
4008 else
4009 ld_flag = 0x0;
4010 }
33877125 4011 }
4012
4013 if (record_debug)
4014 {
4015 debug_printf ("Process record: load/store (unsigned immediate):"
4016 " size %x V %d opc %x\n", size_bits, vector_flag,
4017 opc);
4018 }
4019
4020 if (!ld_flag)
4021 {
4022 offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
4023 datasize = 8 << size_bits;
4024 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4025 &address);
4026 offset = offset << size_bits;
4027 address = address + offset;
4028
4029 record_buf_mem[0] = datasize >> 3;
4030 record_buf_mem[1] = address;
4031 aarch64_insn_r->mem_rec_count = 1;
4032 }
4033 else
4034 {
4035 if (vector_flag)
4036 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4037 else
4038 record_buf[0] = reg_rt;
4039 aarch64_insn_r->reg_rec_count = 1;
4040 }
4041 }
4042 /* Load/store register (register offset) instructions. */
4043 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
4044 && insn_bits10_11 == 0x02 && insn_bit21)
4045 {
4046 if (record_debug)
b277c936 4047 debug_printf ("Process record: load/store (register offset)\n");
4048 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
4049 if (!(opc >> 1))
4050 if (opc & 0x01)
4051 ld_flag = 0x01;
4052 else
4053 ld_flag = 0x0;
4054 else
4055 if (size_bits != 0x03)
4056 ld_flag = 0x01;
4057 else
4058 return AARCH64_RECORD_UNKNOWN;
4059
4060 if (!ld_flag)
4061 {
4062 ULONGEST reg_rm_val;
4063
4064 regcache_raw_read_unsigned (aarch64_insn_r->regcache,
4065 bits (aarch64_insn_r->aarch64_insn, 16, 20), &reg_rm_val);
4066 if (bit (aarch64_insn_r->aarch64_insn, 12))
4067 offset = reg_rm_val << size_bits;
4068 else
4069 offset = reg_rm_val;
4070 datasize = 8 << size_bits;
4071 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4072 &address);
4073 address = address + offset;
4074 record_buf_mem[0] = datasize >> 3;
4075 record_buf_mem[1] = address;
4076 aarch64_insn_r->mem_rec_count = 1;
4077 }
4078 else
4079 {
4080 if (vector_flag)
4081 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4082 else
4083 record_buf[0] = reg_rt;
4084 aarch64_insn_r->reg_rec_count = 1;
4085 }
4086 }
4087 /* Load/store register (immediate and unprivileged) instructions. */
4088 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
4089 && !insn_bit21)
4090 {
4091 if (record_debug)
4092 {
4093 debug_printf ("Process record: load/store "
4094 "(immediate and unprivileged)\n");
4095 }
4096 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
4097 if (!(opc >> 1))
4098 if (opc & 0x01)
4099 ld_flag = 0x01;
4100 else
4101 ld_flag = 0x0;
4102 else
4103 if (size_bits != 0x03)
4104 ld_flag = 0x01;
4105 else
4106 return AARCH64_RECORD_UNKNOWN;
4107
4108 if (!ld_flag)
4109 {
4110 uint16_t imm9_off;
4111 imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
4112 offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
4113 datasize = 8 << size_bits;
4114 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4115 &address);
4116 if (insn_bits10_11 != 0x01)
4117 {
4118 if (imm9_off & 0x0100)
4119 address = address - offset;
4120 else
4121 address = address + offset;
4122 }
4123 record_buf_mem[0] = datasize >> 3;
4124 record_buf_mem[1] = address;
4125 aarch64_insn_r->mem_rec_count = 1;
4126 }
4127 else
4128 {
4129 if (vector_flag)
4130 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4131 else
4132 record_buf[0] = reg_rt;
4133 aarch64_insn_r->reg_rec_count = 1;
4134 }
4135 if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
4136 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
4137 }
4138 /* Advanced SIMD load/store instructions. */
4139 else
4140 return aarch64_record_asimd_load_store (aarch64_insn_r);
4141
4142 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
4143 record_buf_mem);
4144 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4145 record_buf);
4146 return AARCH64_RECORD_SUCCESS;
4147}
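
/* Worked example (assumed encoding, not from the original source):
   "str x1, [x2, #8]" encodes as 0xf9000441 and lands in the unsigned
   immediate arm above: opc is 0, so ld_flag is clear; the scaled
   offset is 1 << 3 = 8 and datasize is 64, producing a single
   eight-byte memory record at x2 + 8.  */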
4148
4149/* Record handler for data processing SIMD and floating point instructions. */
4150
4151static unsigned int
4152aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
4153{
4154 uint8_t insn_bit21, opcode, rmode, reg_rd;
4155 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
4156 uint8_t insn_bits11_14;
4157 uint32_t record_buf[2];
4158
4159 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
4160 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
4161 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
4162 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
4163 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
4164 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
4165 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
4166 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
4167 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
4168
4169 if (record_debug)
b277c936 4170 debug_printf ("Process record: data processing SIMD/FP: ");
4171
4172 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
4173 {
4174 /* Floating point - fixed point conversion instructions. */
4175 if (!insn_bit21)
4176 {
4177 if (record_debug)
b277c936 4178 debug_printf ("FP - fixed point conversion");
4179
4180 if ((opcode >> 1) == 0x0 && rmode == 0x03)
4181 record_buf[0] = reg_rd;
4182 else
4183 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4184 }
4185 /* Floating point - conditional compare instructions. */
4186 else if (insn_bits10_11 == 0x01)
4187 {
4188 if (record_debug)
b277c936 4189 debug_printf ("FP - conditional compare");
4190
4191 record_buf[0] = AARCH64_CPSR_REGNUM;
4192 }
4193 /* Floating point - data processing (2-source) and
4194 conditional select instructions. */
4195 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
4196 {
4197 if (record_debug)
b277c936 4198 debug_printf ("FP - DP (2-source)");
4199
4200 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4201 }
4202 else if (insn_bits10_11 == 0x00)
4203 {
4204 /* Floating point - immediate instructions. */
4205 if ((insn_bits12_15 & 0x01) == 0x01
4206 || (insn_bits12_15 & 0x07) == 0x04)
4207 {
4208 if (record_debug)
b277c936 4209 debug_printf ("FP - immediate");
4210 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4211 }
4212 /* Floating point - compare instructions. */
4213 else if ((insn_bits12_15 & 0x03) == 0x02)
4214 {
4215 if (record_debug)
b277c936 4216 debug_printf ("FP - immediate");
4217 record_buf[0] = AARCH64_CPSR_REGNUM;
4218 }
4219 /* Floating point - integer conversions instructions. */
f62fce35 4220 else if (insn_bits12_15 == 0x00)
4221 {
4222 /* Convert float to integer instruction. */
4223 if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
4224 {
4225 if (record_debug)
b277c936 4226 debug_printf ("float to int conversion");
4227
4228 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4229 }
4230 /* Convert integer to float instruction. */
4231 else if ((opcode >> 1) == 0x01 && !rmode)
4232 {
4233 if (record_debug)
b277c936 4234 debug_printf ("int to float conversion");
4235
4236 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4237 }
4238 /* Move float to integer instruction. */
4239 else if ((opcode >> 1) == 0x03)
4240 {
4241 if (record_debug)
b277c936 4242 debug_printf ("move float to int");
4243
4244 if (!(opcode & 0x01))
4245 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4246 else
4247 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4248 }
4249 else
4250 return AARCH64_RECORD_UNKNOWN;
99afc88b 4251 }
4252 else
4253 return AARCH64_RECORD_UNKNOWN;
99afc88b 4254 }
4255 else
4256 return AARCH64_RECORD_UNKNOWN;
4257 }
4258 else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
4259 {
4260 if (record_debug)
b277c936 4261 debug_printf ("SIMD copy");
4262
4263 /* Advanced SIMD copy instructions. */
4264 if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
4265 && !bit (aarch64_insn_r->aarch64_insn, 15)
4266 && bit (aarch64_insn_r->aarch64_insn, 10))
4267 {
4268 if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
4269 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4270 else
4271 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4272 }
4273 else
4274 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4275 }
4276 /* All remaining floating point or advanced SIMD instructions. */
4277 else
4278 {
4279 if (record_debug)
b277c936 4280 debug_printf ("all remain");
4281
4282 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4283 }
4284
4285 if (record_debug)
b277c936 4286 debug_printf ("\n");
4287
4288 aarch64_insn_r->reg_rec_count++;
4289 gdb_assert (aarch64_insn_r->reg_rec_count == 1);
4290 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4291 record_buf);
4292 return AARCH64_RECORD_SUCCESS;
4293}
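
/* Worked example (assumed encoding, not from the original source):
   "scvtf d0, x1" encodes as 0x9e620020; bit 21 is set and bits 10-15
   are zero, so the integer-conversion arm matches with opcode 0x2 and
   rmode 0, i.e. an int-to-float conversion, and the destination V
   register (v0) is recorded.  */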
4294
4295/* Decode the instruction's top-level type and invoke its record handler.  */
4296
4297static unsigned int
4298aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
4299{
4300 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
4301
4302 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
4303 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
4304 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
4305 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
4306
4307 /* Data processing - immediate instructions. */
4308 if (!ins_bit26 && !ins_bit27 && ins_bit28)
4309 return aarch64_record_data_proc_imm (aarch64_insn_r);
4310
4311 /* Branch, exception generation and system instructions. */
4312 if (ins_bit26 && !ins_bit27 && ins_bit28)
4313 return aarch64_record_branch_except_sys (aarch64_insn_r);
4314
4315 /* Load and store instructions. */
4316 if (!ins_bit25 && ins_bit27)
4317 return aarch64_record_load_store (aarch64_insn_r);
4318
4319 /* Data processing - register instructions. */
4320 if (ins_bit25 && !ins_bit26 && ins_bit27)
4321 return aarch64_record_data_proc_reg (aarch64_insn_r);
4322
4323 /* Data processing - SIMD and floating point instructions. */
4324 if (ins_bit25 && ins_bit26 && ins_bit27)
4325 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
4326
4327 return AARCH64_RECORD_UNSUPPORTED;
4328}
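
/* For reference (a reading of the tests above, not from the original
   source): the bit-25..28 dispatch mirrors the top-level encoding groups
   of the A64 ISA:

     bits 28..26 = 100	      -> data processing (immediate)
     bits 28..26 = 101	      -> branch, exception, system
     bit 27 = 1, bit 25 = 0   -> loads and stores
     bits 27..25 = 101	      -> data processing (register)
     bits 27..25 = 111	      -> SIMD and floating point  */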
4329
4330/* Free the register and memory record buffers allocated by the handlers.  */
4331
4332static void
4333deallocate_reg_mem (insn_decode_record *record)
4334{
4335 xfree (record->aarch64_regs);
4336 xfree (record->aarch64_mems);
4337}
4338
4339#if GDB_SELF_TEST
4340namespace selftests {
4341
4342static void
4343aarch64_process_record_test (void)
4344{
4345 struct gdbarch_info info;
4346 uint32_t ret;
4347
4348 gdbarch_info_init (&info);
4349 info.bfd_arch_info = bfd_scan_arch ("aarch64");
4350
4351 struct gdbarch *gdbarch = gdbarch_find_by_info (info);
4352 SELF_CHECK (gdbarch != NULL);
4353
4354 insn_decode_record aarch64_record;
4355
4356 memset (&aarch64_record, 0, sizeof (insn_decode_record));
4357 aarch64_record.regcache = NULL;
4358 aarch64_record.this_addr = 0;
4359 aarch64_record.gdbarch = gdbarch;
4360
4361 /* 20 00 80 f9 prfm pldl1keep, [x1] */
4362 aarch64_record.aarch64_insn = 0xf9800020;
4363 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4364 SELF_CHECK (ret == AARCH64_RECORD_SUCCESS);
4365 SELF_CHECK (aarch64_record.reg_rec_count == 0);
4366 SELF_CHECK (aarch64_record.mem_rec_count == 0);
4367
4368 deallocate_reg_mem (&aarch64_record);
4369}
4370
4371} // namespace selftests
4372#endif /* GDB_SELF_TEST */
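
/* Usage sketch (illustrative): the test above is run through GDB's
   selftest machinery, e.g.

     (gdb) maintenance selftest aarch64-process-record

   which runs every registered test whose name contains the given
   filter string.  */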
4373
4374/* Parse the current instruction and record, in record_arch_list, the
4375   registers and memory locations that it will change.  Return -1 if
4376   something goes wrong.  */
4377
4378int
4379aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
4380 CORE_ADDR insn_addr)
4381{
4382 uint32_t rec_no = 0;
4383 uint8_t insn_size = 4;
4384 uint32_t ret = 0;
4385 gdb_byte buf[insn_size];
4386 insn_decode_record aarch64_record;
4387
4388 memset (&buf[0], 0, insn_size);
4389 memset (&aarch64_record, 0, sizeof (insn_decode_record));
4390 target_read_memory (insn_addr, &buf[0], insn_size);
4391 aarch64_record.aarch64_insn
4392 = (uint32_t) extract_unsigned_integer (&buf[0],
4393 insn_size,
4394 gdbarch_byte_order (gdbarch));
4395 aarch64_record.regcache = regcache;
4396 aarch64_record.this_addr = insn_addr;
4397 aarch64_record.gdbarch = gdbarch;
4398
4399 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4400 if (ret == AARCH64_RECORD_UNSUPPORTED)
4401 {
4402 printf_unfiltered (_("Process record does not support instruction "
4403 "0x%0x at address %s.\n"),
4404 aarch64_record.aarch64_insn,
4405 paddress (gdbarch, insn_addr));
4406 ret = -1;
4407 }
4408
4409 if (0 == ret)
4410 {
4411 /* Record registers. */
4412 record_full_arch_list_add_reg (aarch64_record.regcache,
4413 AARCH64_PC_REGNUM);
4414 /* Always record register CPSR. */
4415 record_full_arch_list_add_reg (aarch64_record.regcache,
4416 AARCH64_CPSR_REGNUM);
4417 if (aarch64_record.aarch64_regs)
4418 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
4419 if (record_full_arch_list_add_reg (aarch64_record.regcache,
4420 aarch64_record.aarch64_regs[rec_no]))
4421 ret = -1;
4422
4423 /* Record memories. */
4424 if (aarch64_record.aarch64_mems)
4425 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
4426 if (record_full_arch_list_add_mem
4427 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
4428 aarch64_record.aarch64_mems[rec_no].len))
4429 ret = -1;
4430
4431 if (record_full_arch_list_add_end ())
4432 ret = -1;
4433 }
4434
4435 deallocate_reg_mem (&aarch64_record);
4436 return ret;
4437}
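
/* Note (context, not part of the original source): this function is
   installed as the gdbarch process_record hook by the OS-specific code
   (set_gdbarch_process_record in aarch64-linux-tdep.c), so it runs once
   per executed instruction while "record full" is active.  */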