AArch64: DWARF unwinder support for signed return addresses
gdb/aarch64-tdep.c (binutils-gdb.git)
/* Common target dependent code for GDB on AArch64 systems.

   Copyright (C) 2009-2019 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"

#include "frame.h"
#include "inferior.h"
#include "gdbcmd.h"
#include "gdbcore.h"
#include "dis-asm.h"
#include "regcache.h"
#include "reggroups.h"
#include "value.h"
#include "arch-utils.h"
#include "osabi.h"
#include "frame-unwind.h"
#include "frame-base.h"
#include "trad-frame.h"
#include "objfiles.h"
#include "dwarf2.h"
#include "dwarf2-frame.h"
#include "gdbtypes.h"
#include "prologue-value.h"
#include "target-descriptions.h"
#include "user-regs.h"
#include "language.h"
#include "infcall.h"
#include "ax.h"
#include "ax-gdb.h"
#include "common/selftest.h"

#include "aarch64-tdep.h"
#include "aarch64-ravenscar-thread.h"

#include "elf-bfd.h"
#include "elf/aarch64.h"

#include "common/vec.h"

#include "record.h"
#include "record-full.h"
#include "arch/aarch64-insn.h"

#include "opcode/aarch64.h"
#include <algorithm>

#define submask(x) ((1L << ((x) + 1)) - 1)
#define bit(obj,st) (((obj) >> (st)) & 1)
#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
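
/* For example, bits (insn, 5, 9) extracts the 5-bit field occupying bit
   positions 5 through 9 of INSN: submask (9 - 5) builds the mask 0x1f,
   which is applied after INSN has been shifted right by 5.  Likewise
   bit (insn, 31) extracts the top bit of the instruction word.  */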

/* A Homogeneous Floating-Point or Short-Vector Aggregate may have at most
   four members.  */
#define HA_MAX_NUM_FLDS 4

/* All possible aarch64 target descriptors.  */
struct target_desc *tdesc_aarch64_list[AARCH64_MAX_SVE_VQ + 1][2/*pauth*/];

/* The standard register names, and all the valid aliases for them.  */
static const struct
{
  const char *const name;
  int regnum;
} aarch64_register_aliases[] =
{
  /* 64-bit register names.  */
  {"fp", AARCH64_FP_REGNUM},
  {"lr", AARCH64_LR_REGNUM},
  {"sp", AARCH64_SP_REGNUM},

  /* 32-bit register names.  */
  {"w0", AARCH64_X0_REGNUM + 0},
  {"w1", AARCH64_X0_REGNUM + 1},
  {"w2", AARCH64_X0_REGNUM + 2},
  {"w3", AARCH64_X0_REGNUM + 3},
  {"w4", AARCH64_X0_REGNUM + 4},
  {"w5", AARCH64_X0_REGNUM + 5},
  {"w6", AARCH64_X0_REGNUM + 6},
  {"w7", AARCH64_X0_REGNUM + 7},
  {"w8", AARCH64_X0_REGNUM + 8},
  {"w9", AARCH64_X0_REGNUM + 9},
  {"w10", AARCH64_X0_REGNUM + 10},
  {"w11", AARCH64_X0_REGNUM + 11},
  {"w12", AARCH64_X0_REGNUM + 12},
  {"w13", AARCH64_X0_REGNUM + 13},
  {"w14", AARCH64_X0_REGNUM + 14},
  {"w15", AARCH64_X0_REGNUM + 15},
  {"w16", AARCH64_X0_REGNUM + 16},
  {"w17", AARCH64_X0_REGNUM + 17},
  {"w18", AARCH64_X0_REGNUM + 18},
  {"w19", AARCH64_X0_REGNUM + 19},
  {"w20", AARCH64_X0_REGNUM + 20},
  {"w21", AARCH64_X0_REGNUM + 21},
  {"w22", AARCH64_X0_REGNUM + 22},
  {"w23", AARCH64_X0_REGNUM + 23},
  {"w24", AARCH64_X0_REGNUM + 24},
  {"w25", AARCH64_X0_REGNUM + 25},
  {"w26", AARCH64_X0_REGNUM + 26},
  {"w27", AARCH64_X0_REGNUM + 27},
  {"w28", AARCH64_X0_REGNUM + 28},
  {"w29", AARCH64_X0_REGNUM + 29},
  {"w30", AARCH64_X0_REGNUM + 30},

  /* specials */
  {"ip0", AARCH64_X0_REGNUM + 16},
  {"ip1", AARCH64_X0_REGNUM + 17}
};

/* The required core 'R' registers.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM! */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  "pc", "cpsr"
};

/* The FP/SIMD 'V' registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM! */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  "fpsr",
  "fpcr"
};

/* The SVE 'Z' and 'P' registers.  */
static const char *const aarch64_sve_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_SVE_Z0_REGNUM! */
  "z0", "z1", "z2", "z3",
  "z4", "z5", "z6", "z7",
  "z8", "z9", "z10", "z11",
  "z12", "z13", "z14", "z15",
  "z16", "z17", "z18", "z19",
  "z20", "z21", "z22", "z23",
  "z24", "z25", "z26", "z27",
  "z28", "z29", "z30", "z31",
  "fpsr", "fpcr",
  "p0", "p1", "p2", "p3",
  "p4", "p5", "p6", "p7",
  "p8", "p9", "p10", "p11",
  "p12", "p13", "p14", "p15",
  "ffr", "vg"
};

static const char *const aarch64_pauth_register_names[] =
{
  /* Authentication mask for data pointer.  */
  "pauth_dmask",
  /* Authentication mask for code pointer.  */
  "pauth_cmask"
};

/* AArch64 prologue cache structure.  */
struct aarch64_prologue_cache
{
  /* The program counter at the start of the function.  It is used to
     identify this frame as a prologue frame.  */
  CORE_ADDR func;

  /* The program counter at the time this frame was created; i.e. where
     this function was called from.  It is used to identify this frame as a
     stub frame.  */
  CORE_ADDR prev_pc;

  /* The stack pointer at the time this frame was created; i.e. the
     caller's stack pointer when this function was called.  It is used
     to identify this frame.  */
  CORE_ADDR prev_sp;

  /* Is the target available to read from?  */
  int available_p;

  /* The frame base for this frame is just prev_sp - frame size.
     FRAMESIZE is the distance from the frame pointer to the
     initial stack pointer.  */
  int framesize;

  /* The register used to hold the frame pointer for this frame.  */
  int framereg;

  /* Saved register offsets.  */
  struct trad_frame_saved_reg *saved_regs;
};

static void
show_aarch64_debug (struct ui_file *file, int from_tty,
                    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
}

namespace {

/* Abstract instruction reader.  */

class abstract_instruction_reader
{
public:
  /* Read in one instruction.  */
  virtual ULONGEST read (CORE_ADDR memaddr, int len,
                         enum bfd_endian byte_order) = 0;
};

/* Instruction reader from real target.  */

class instruction_reader : public abstract_instruction_reader
{
 public:
  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    override
  {
    return read_code_unsigned_integer (memaddr, len, byte_order);
  }
};

} // namespace

/* If address signing is enabled, mask off the signature bits from ADDR, using
   the register values in THIS_FRAME.  */

static CORE_ADDR
aarch64_frame_unmask_address (struct gdbarch_tdep *tdep,
                              struct frame_info *this_frame,
                              CORE_ADDR addr)
{
  if (tdep->has_pauth ()
      && frame_unwind_register_unsigned (this_frame,
                                         tdep->pauth_ra_state_regnum))
    {
      int cmask_num = AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base);
      CORE_ADDR cmask = frame_unwind_register_unsigned (this_frame, cmask_num);
      addr = addr & ~cmask;
    }

  return addr;
}
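
/* For instance, on a target with 48-bit virtual addresses the kernel may
   report a code mask of 0x007f000000000000, meaning the PAC signature
   occupies bits 48-54 of a signed return address; clearing those bits
   recovers the real address.  The exact mask is target-specific and is
   read from the pauth_cmask pseudo register above.  */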

/* Analyze a prologue, looking for a recognizable stack frame
   and frame pointer.  Scan until we encounter a store that could
   clobber the stack frame unexpectedly, or an unknown instruction.  */

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
                          CORE_ADDR start, CORE_ADDR limit,
                          struct aarch64_prologue_cache *cache,
                          abstract_instruction_reader& reader)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  int i;
  /* Track X registers and D registers in prologue.  */
  pv_t regs[AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT];

  for (i = 0; i < AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT; i++)
    regs[i] = pv_register (i, 0);
  pv_area stack (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));

  for (; start < limit; start += 4)
    {
      uint32_t insn;
      aarch64_inst inst;

      insn = reader.read (start, 4, byte_order_for_code);

      if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
        break;

      if (inst.opcode->iclass == addsub_imm
          && (inst.opcode->op == OP_ADD
              || strcmp ("sub", inst.opcode->name) == 0))
        {
          unsigned rd = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].reg.regno;

          gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM);

          if (inst.opcode->op == OP_ADD)
            {
              regs[rd] = pv_add_constant (regs[rn],
                                          inst.operands[2].imm.value);
            }
          else
            {
              regs[rd] = pv_add_constant (regs[rn],
                                          -inst.operands[2].imm.value);
            }
        }
      else if (inst.opcode->iclass == pcreladdr
               && inst.operands[1].type == AARCH64_OPND_ADDR_ADRP)
        {
          gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);

          regs[inst.operands[0].reg.regno] = pv_unknown ();
        }
      else if (inst.opcode->iclass == branch_imm)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == condbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == branch_reg)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == compbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->op == OP_MOVZ)
        {
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
          regs[inst.operands[0].reg.regno] = pv_unknown ();
        }
      else if (inst.opcode->iclass == log_shift
               && strcmp (inst.opcode->name, "orr") == 0)
        {
          unsigned rd = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].reg.regno;
          unsigned rm = inst.operands[2].reg.regno;

          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT);

          if (inst.operands[2].shifter.amount == 0
              && rn == AARCH64_SP_REGNUM)
            regs[rd] = regs[rm];
          else
            {
              if (aarch64_debug)
                {
                  debug_printf ("aarch64: prologue analysis gave up "
                                "addr=%s opcode=0x%x (orr x register)\n",
                                core_addr_to_string_nz (start), insn);
                }
              break;
            }
        }
      else if (inst.opcode->op == OP_STUR)
        {
          unsigned rt = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].addr.base_regno;
          int is64
            = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);

          gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
          gdb_assert (!inst.operands[1].addr.offset.is_reg);

          stack.store (pv_add_constant (regs[rn],
                                        inst.operands[1].addr.offset.imm),
                       is64 ? 8 : 4, regs[rt]);
        }
      else if ((inst.opcode->iclass == ldstpair_off
                || (inst.opcode->iclass == ldstpair_indexed
                    && inst.operands[2].addr.preind))
               && strcmp ("stp", inst.opcode->name) == 0)
        {
          /* STP with addressing mode Pre-indexed and Base register.  */
          unsigned rt1;
          unsigned rt2;
          unsigned rn = inst.operands[2].addr.base_regno;
          int32_t imm = inst.operands[2].addr.offset.imm;

          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
                      || inst.operands[0].type == AARCH64_OPND_Ft);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2
                      || inst.operands[1].type == AARCH64_OPND_Ft2);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7);
          gdb_assert (!inst.operands[2].addr.offset.is_reg);

          /* If recording this store would invalidate the store area
             (perhaps because rn is not known) then we should abandon
             further prologue analysis.  */
          if (stack.store_would_trash (pv_add_constant (regs[rn], imm)))
            break;

          if (stack.store_would_trash (pv_add_constant (regs[rn], imm + 8)))
            break;

          rt1 = inst.operands[0].reg.regno;
          rt2 = inst.operands[1].reg.regno;
          if (inst.operands[0].type == AARCH64_OPND_Ft)
            {
              /* Only bottom 64-bit of each V register (D register) need
                 to be preserved.  */
              gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
              rt1 += AARCH64_X_REGISTER_COUNT;
              rt2 += AARCH64_X_REGISTER_COUNT;
            }

          stack.store (pv_add_constant (regs[rn], imm), 8,
                       regs[rt1]);
          stack.store (pv_add_constant (regs[rn], imm + 8), 8,
                       regs[rt2]);

          if (inst.operands[2].addr.writeback)
            regs[rn] = pv_add_constant (regs[rn], imm);
        }
      else if ((inst.opcode->iclass == ldst_imm9 /* Signed immediate.  */
                || (inst.opcode->iclass == ldst_pos /* Unsigned immediate.  */
                    && (inst.opcode->op == OP_STR_POS
                        || inst.opcode->op == OP_STRF_POS)))
               && inst.operands[1].addr.base_regno == AARCH64_SP_REGNUM
               && strcmp ("str", inst.opcode->name) == 0)
        {
          /* STR (immediate) */
          unsigned int rt = inst.operands[0].reg.regno;
          int32_t imm = inst.operands[1].addr.offset.imm;
          unsigned int rn = inst.operands[1].addr.base_regno;
          bool is64
            = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
                      || inst.operands[0].type == AARCH64_OPND_Ft);

          if (inst.operands[0].type == AARCH64_OPND_Ft)
            {
              /* Only bottom 64-bit of each V register (D register) need
                 to be preserved.  */
              gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
              rt += AARCH64_X_REGISTER_COUNT;
            }

          stack.store (pv_add_constant (regs[rn], imm),
                       is64 ? 8 : 4, regs[rt]);
          if (inst.operands[1].addr.writeback)
            regs[rn] = pv_add_constant (regs[rn], imm);
        }
      else if (inst.opcode->iclass == testbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else
        {
          if (aarch64_debug)
            {
              debug_printf ("aarch64: prologue analysis gave up addr=%s"
                            " opcode=0x%x\n",
                            core_addr_to_string_nz (start), insn);
            }
          break;
        }
    }

  if (cache == NULL)
    return start;

  if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Frame pointer is fp.  Frame size is constant.  */
      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = -regs[AARCH64_FP_REGNUM].k;
    }
  else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Try the stack pointer.  */
      cache->framesize = -regs[AARCH64_SP_REGNUM].k;
      cache->framereg = AARCH64_SP_REGNUM;
    }
  else
    {
      /* We're just out of luck.  We don't know where the frame is.  */
      cache->framereg = -1;
      cache->framesize = 0;
    }

  for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
    {
      CORE_ADDR offset;

      if (stack.find_reg (gdbarch, i, &offset))
        cache->saved_regs[i].addr = offset;
    }

  for (i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
    {
      int regnum = gdbarch_num_regs (gdbarch);
      CORE_ADDR offset;

      if (stack.find_reg (gdbarch, i + AARCH64_X_REGISTER_COUNT,
                          &offset))
        cache->saved_regs[i + regnum + AARCH64_D0_REGNUM].addr = offset;
    }

  return start;
}
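
/* A typical prologue recognized by the analysis above looks like:

     stp x29, x30, [sp, #-32]!   ; allocate frame, save FP and LR
     mov x29, sp                 ; establish the frame pointer
     str x19, [sp, #16]          ; save a callee-saved register

   which leaves framereg = AARCH64_FP_REGNUM, framesize = 32, and x29,
   x30 and x19 recorded at offsets -32, -24 and -16 from the caller's
   SP.  The self tests below exercise similar hand-assembled
   sequences.  */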

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
                          CORE_ADDR start, CORE_ADDR limit,
                          struct aarch64_prologue_cache *cache)
{
  instruction_reader reader;

  return aarch64_analyze_prologue (gdbarch, start, limit, cache,
                                   reader);
}

#if GDB_SELF_TEST

namespace selftests {

/* Instruction reader from manually cooked instruction sequences.  */

class instruction_reader_test : public abstract_instruction_reader
{
public:
  template<size_t SIZE>
  explicit instruction_reader_test (const uint32_t (&insns)[SIZE])
  : m_insns (insns), m_insns_size (SIZE)
  {}

  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    override
  {
    SELF_CHECK (len == 4);
    SELF_CHECK (memaddr % 4 == 0);
    SELF_CHECK (memaddr / 4 < m_insns_size);

    return m_insns[memaddr / 4];
  }

private:
  const uint32_t *m_insns;
  size_t m_insns_size;
};

static void
aarch64_analyze_prologue_test (void)
{
  struct gdbarch_info info;

  gdbarch_info_init (&info);
  info.bfd_arch_info = bfd_scan_arch ("aarch64");

  struct gdbarch *gdbarch = gdbarch_find_by_info (info);
  SELF_CHECK (gdbarch != NULL);

  /* Test the simple prologue in which frame pointer is used.  */
  {
    struct aarch64_prologue_cache cache;
    cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);

    static const uint32_t insns[] = {
      0xa9af7bfd, /* stp     x29, x30, [sp,#-272]! */
      0x910003fd, /* mov     x29, sp */
      0x97ffffe6, /* bl      0x400580 */
    };
    instruction_reader_test reader (insns);

    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
    SELF_CHECK (end == 4 * 2);

    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 272);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
        if (i == AARCH64_FP_REGNUM)
          SELF_CHECK (cache.saved_regs[i].addr == -272);
        else if (i == AARCH64_LR_REGNUM)
          SELF_CHECK (cache.saved_regs[i].addr == -264);
        else
          SELF_CHECK (cache.saved_regs[i].addr == -1);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
        int regnum = gdbarch_num_regs (gdbarch);

        SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
                    == -1);
      }
  }

  /* Test a prologue in which STR is used and frame pointer is not
     used.  */
  {
    struct aarch64_prologue_cache cache;
    cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);

    static const uint32_t insns[] = {
      0xf81d0ff3, /* str     x19, [sp, #-48]! */
      0xb9002fe0, /* str     w0, [sp, #44] */
      0xf90013e1, /* str     x1, [sp, #32] */
      0xfd000fe0, /* str     d0, [sp, #24] */
      0xaa0203f3, /* mov     x19, x2 */
      0xf94013e0, /* ldr     x0, [sp, #32] */
    };
    instruction_reader_test reader (insns);

    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    SELF_CHECK (end == 4 * 5);

    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 48);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
        if (i == 1)
          SELF_CHECK (cache.saved_regs[i].addr == -16);
        else if (i == 19)
          SELF_CHECK (cache.saved_regs[i].addr == -48);
        else
          SELF_CHECK (cache.saved_regs[i].addr == -1);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
        int regnum = gdbarch_num_regs (gdbarch);

        if (i == 0)
          SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
                      == -24);
        else
          SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
                      == -1);
      }
  }
}
} // namespace selftests
#endif /* GDB_SELF_TEST */

/* Implement the "skip_prologue" gdbarch method.  */

static CORE_ADDR
aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  CORE_ADDR func_addr, limit_pc;

  /* See if we can determine the end of the prologue via the symbol
     table.  If so, then return either PC, or the PC after the
     prologue, whichever is greater.  */
  if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
    {
      CORE_ADDR post_prologue_pc
        = skip_prologue_using_sal (gdbarch, func_addr);

      if (post_prologue_pc != 0)
        return std::max (pc, post_prologue_pc);
    }

  /* Can't determine prologue from the symbol table, need to examine
     instructions.  */

  /* Find an upper limit on the function prologue using the debug
     information.  If the debug information could not be used to
     provide that bound, then use an arbitrary large number as the
     upper bound.  */
  limit_pc = skip_prologue_using_sal (gdbarch, pc);
  if (limit_pc == 0)
    limit_pc = pc + 128;  /* Magic.  */

  /* Try disassembling prologue.  */
  return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
}

/* Scan the function prologue for THIS_FRAME and populate the prologue
   cache CACHE.  */

static void
aarch64_scan_prologue (struct frame_info *this_frame,
                       struct aarch64_prologue_cache *cache)
{
  CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
  CORE_ADDR prologue_start;
  CORE_ADDR prologue_end;
  CORE_ADDR prev_pc = get_frame_pc (this_frame);
  struct gdbarch *gdbarch = get_frame_arch (this_frame);

  cache->prev_pc = prev_pc;

  /* Assume we do not find a frame.  */
  cache->framereg = -1;
  cache->framesize = 0;

  if (find_pc_partial_function (block_addr, NULL, &prologue_start,
                                &prologue_end))
    {
      struct symtab_and_line sal = find_pc_line (prologue_start, 0);

      if (sal.line == 0)
        {
          /* No line info so use the current PC.  */
          prologue_end = prev_pc;
        }
      else if (sal.end < prologue_end)
        {
          /* The next line begins after the function end.  */
          prologue_end = sal.end;
        }

      prologue_end = std::min (prologue_end, prev_pc);
      aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
    }
  else
    {
      CORE_ADDR frame_loc;

      frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
      if (frame_loc == 0)
        return;

      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = 16;
      cache->saved_regs[29].addr = 0;
      cache->saved_regs[30].addr = 8;
    }
}

/* Fill in *CACHE with information about the prologue of *THIS_FRAME.  This
   function may throw an exception if the inferior's registers or memory is
   not available.  */

static void
aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
                               struct aarch64_prologue_cache *cache)
{
  CORE_ADDR unwound_fp;
  int reg;

  aarch64_scan_prologue (this_frame, cache);

  if (cache->framereg == -1)
    return;

  unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
  if (unwound_fp == 0)
    return;

  cache->prev_sp = unwound_fp + cache->framesize;

  /* Calculate actual addresses of saved registers using offsets
     determined by aarch64_analyze_prologue.  */
  for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
    if (trad_frame_addr_p (cache->saved_regs, reg))
      cache->saved_regs[reg].addr += cache->prev_sp;

  cache->func = get_frame_func (this_frame);

  cache->available_p = 1;
}

/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  TRY
    {
      aarch64_make_prologue_cache_1 (this_frame, cache);
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
        throw_exception (ex);
    }
  END_CATCH

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
                                           void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  /* Halt the backtrace at "_start".  */
  if (cache->prev_pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
    return UNWIND_OUTERMOST;

  /* We've hit a wall, stop.  */
  if (cache->prev_sp == 0)
    return UNWIND_OUTERMOST;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a normal frame is the current function's starting
   PC and the caller's SP when we were called.  */

static void
aarch64_prologue_this_id (struct frame_info *this_frame,
                          void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    *this_id = frame_id_build_unavailable_stack (cache->func);
  else
    *this_id = frame_id_build (cache->prev_sp, cache->func);
}

/* Implement the "prev_register" frame_unwind method.  */

static struct value *
aarch64_prologue_prev_register (struct frame_info *this_frame,
                                void **this_cache, int prev_regnum)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  /* If we are asked to unwind the PC, then we need to return the LR
     instead.  The prologue may save PC, but it will point into this
     frame's prologue, not the next frame's resume location.  */
  if (prev_regnum == AARCH64_PC_REGNUM)
    {
      CORE_ADDR lr;

      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      return frame_unwind_got_constant (this_frame, prev_regnum, lr);
    }

  /* SP is generally not saved to the stack, but this frame is
     identified by the next frame's stack pointer at the time of the
     call.  The value was already reconstructed into PREV_SP.  */
  /*
         +----------+  ^
         | saved lr |  |
      +->| saved fp |--+
      |  |          |
      |  |          |     <- Previous SP
      |  +----------+
      |  | saved lr |
      +--| saved fp |<- FP
         |          |
         |          |<- SP
         +----------+  */
  if (prev_regnum == AARCH64_SP_REGNUM)
    return frame_unwind_got_constant (this_frame, prev_regnum,
                                      cache->prev_sp);

  return trad_frame_get_prev_register (this_frame, cache->saved_regs,
                                       prev_regnum);
}

/* AArch64 prologue unwinder.  */
struct frame_unwind aarch64_prologue_unwind =
{
  NORMAL_FRAME,
  aarch64_prologue_frame_unwind_stop_reason,
  aarch64_prologue_this_id,
  aarch64_prologue_prev_register,
  NULL,
  default_frame_sniffer
};

/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  TRY
    {
      cache->prev_sp = get_frame_register_unsigned (this_frame,
                                                    AARCH64_SP_REGNUM);
      cache->prev_pc = get_frame_pc (this_frame);
      cache->available_p = 1;
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
        throw_exception (ex);
    }
  END_CATCH

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame,
                                       void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a stub frame is the current SP and LR.  */

static void
aarch64_stub_this_id (struct frame_info *this_frame,
                      void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (cache->available_p)
    *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
  else
    *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
}

/* Implement the "sniffer" frame_unwind method.  */

static int
aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
                             struct frame_info *this_frame,
                             void **this_prologue_cache)
{
  CORE_ADDR addr_in_block;
  gdb_byte dummy[4];

  addr_in_block = get_frame_address_in_block (this_frame);
  if (in_plt_section (addr_in_block)
      /* We also use the stub unwinder if the target memory is unreadable
         to avoid having the prologue unwinder trying to read it.  */
      || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
    return 1;

  return 0;
}

/* AArch64 stub unwinder.  */
struct frame_unwind aarch64_stub_unwind =
{
  NORMAL_FRAME,
  aarch64_stub_frame_unwind_stop_reason,
  aarch64_stub_this_id,
  aarch64_prologue_prev_register,
  NULL,
  aarch64_stub_unwind_sniffer
};

/* Return the frame base address of *THIS_FRAME.  */

static CORE_ADDR
aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  return cache->prev_sp - cache->framesize;
}

/* AArch64 default frame base information.  */
struct frame_base aarch64_normal_base =
{
  &aarch64_prologue_unwind,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base
};

/* Return the value of the REGNUM register in the previous frame of
   *THIS_FRAME.  */

static struct value *
aarch64_dwarf2_prev_register (struct frame_info *this_frame,
                              void **this_cache, int regnum)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (get_frame_arch (this_frame));
  CORE_ADDR lr;

  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      lr = aarch64_frame_unmask_address (tdep, this_frame, lr);
      return frame_unwind_got_constant (this_frame, regnum, lr);

    default:
      internal_error (__FILE__, __LINE__,
                      _("Unexpected register %d"), regnum);
    }
}

static const unsigned char op_lit0 = DW_OP_lit0;
static const unsigned char op_lit1 = DW_OP_lit1;

/* Implement the "init_reg" dwarf2_frame_ops method.  */

static void
aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
                               struct dwarf2_frame_state_reg *reg,
                               struct frame_info *this_frame)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      reg->how = DWARF2_FRAME_REG_FN;
      reg->loc.fn = aarch64_dwarf2_prev_register;
      return;

    case AARCH64_SP_REGNUM:
      reg->how = DWARF2_FRAME_REG_CFA;
      return;
    }

  /* Init pauth registers.  */
  if (tdep->has_pauth ())
    {
      if (regnum == tdep->pauth_ra_state_regnum)
        {
          /* Initialize RA_STATE to zero.  */
          reg->how = DWARF2_FRAME_REG_SAVED_VAL_EXP;
          reg->loc.exp.start = &op_lit0;
          reg->loc.exp.len = 1;
          return;
        }
      else if (regnum == AARCH64_PAUTH_DMASK_REGNUM (tdep->pauth_reg_base)
               || regnum == AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base))
        {
          reg->how = DWARF2_FRAME_REG_SAME_VALUE;
          return;
        }
    }
}

/* Implement the execute_dwarf_cfa_vendor_op method.  */

static bool
aarch64_execute_dwarf_cfa_vendor_op (struct gdbarch *gdbarch, gdb_byte op,
                                     struct dwarf2_frame_state *fs)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  struct dwarf2_frame_state_reg *ra_state;

  if (tdep->has_pauth () && op == DW_CFA_AARCH64_negate_ra_state)
    {
      /* Allocate RA_STATE column if it's not allocated yet.  */
      fs->regs.alloc_regs (AARCH64_DWARF_PAUTH_RA_STATE + 1);

      /* Toggle the status of RA_STATE between 0 and 1.  */
      ra_state = &(fs->regs.reg[AARCH64_DWARF_PAUTH_RA_STATE]);
      ra_state->how = DWARF2_FRAME_REG_SAVED_VAL_EXP;

      if (ra_state->loc.exp.start == nullptr
          || ra_state->loc.exp.start == &op_lit0)
        ra_state->loc.exp.start = &op_lit1;
      else
        ra_state->loc.exp.start = &op_lit0;

      ra_state->loc.exp.len = 1;

      return true;
    }

  return false;
}
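
/* A function that signs its return address typically emits CFI like:

     paciasp     ; DW_CFA_AARCH64_negate_ra_state -> RA_STATE = 1
     stp x29, x30, [sp, #-16]!
     ...
     autiasp     ; DW_CFA_AARCH64_negate_ra_state -> RA_STATE = 0
     ret

   Each occurrence of the vendor op flips the RA_STATE column, so at any
   PC the unwinder knows whether the saved LR still carries a PAC
   signature that aarch64_frame_unmask_address must strip.  */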

/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

typedef struct
{
  /* Value to pass on stack.  It can be NULL if this item is for stack
     padding.  */
  const gdb_byte *data;

  /* Size in bytes of value to pass on stack.  */
  int len;
} stack_item_t;

DEF_VEC_O (stack_item_t);

/* Return the alignment (in bytes) of the given type.  */

static int
aarch64_type_align (struct type *t)
{
  int n;
  int align;
  int falign;

  t = check_typedef (t);
  switch (TYPE_CODE (t))
    {
    default:
      /* Should never happen.  */
      internal_error (__FILE__, __LINE__, _("unknown type alignment"));
      return 4;

    case TYPE_CODE_PTR:
    case TYPE_CODE_ENUM:
    case TYPE_CODE_INT:
    case TYPE_CODE_FLT:
    case TYPE_CODE_SET:
    case TYPE_CODE_RANGE:
    case TYPE_CODE_BITSTRING:
    case TYPE_CODE_REF:
    case TYPE_CODE_RVALUE_REF:
    case TYPE_CODE_CHAR:
    case TYPE_CODE_BOOL:
      return TYPE_LENGTH (t);

    case TYPE_CODE_ARRAY:
      if (TYPE_VECTOR (t))
        {
          /* Use the natural alignment for vector types (the same for
             scalar type), but the maximum alignment is 128-bit.  */
          if (TYPE_LENGTH (t) > 16)
            return 16;
          else
            return TYPE_LENGTH (t);
        }
      else
        return aarch64_type_align (TYPE_TARGET_TYPE (t));
    case TYPE_CODE_COMPLEX:
      return aarch64_type_align (TYPE_TARGET_TYPE (t));

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      align = 1;
      for (n = 0; n < TYPE_NFIELDS (t); n++)
        {
          falign = aarch64_type_align (TYPE_FIELD_TYPE (t, n));
          if (falign > align)
            align = falign;
        }
      return align;
    }
}
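
/* For example, struct { int32_t a; double b; } aligns to 8 bytes here:
   the struct case above takes the maximum alignment of its fields, 4
   for the int and 8 for the double.  A 32-byte vector type would be
   capped at the 16-byte maximum.  */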

/* Worker function for aapcs_is_vfp_call_or_return_candidate.

   Return the number of registers required, or -1 on failure.

   When encountering a base element, if FUNDAMENTAL_TYPE is not set then set it
   to the element, else fail if the type of this element does not match the
   existing value.  */

static int
aapcs_is_vfp_call_or_return_candidate_1 (struct type *type,
                                         struct type **fundamental_type)
{
  if (type == nullptr)
    return -1;

  switch (TYPE_CODE (type))
    {
    case TYPE_CODE_FLT:
      if (TYPE_LENGTH (type) > 16)
        return -1;

      if (*fundamental_type == nullptr)
        *fundamental_type = type;
      else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
               || TYPE_CODE (type) != TYPE_CODE (*fundamental_type))
        return -1;

      return 1;

    case TYPE_CODE_COMPLEX:
      {
        struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
        if (TYPE_LENGTH (target_type) > 16)
          return -1;

        if (*fundamental_type == nullptr)
          *fundamental_type = target_type;
        else if (TYPE_LENGTH (target_type) != TYPE_LENGTH (*fundamental_type)
                 || TYPE_CODE (target_type) != TYPE_CODE (*fundamental_type))
          return -1;

        return 2;
      }

    case TYPE_CODE_ARRAY:
      {
        if (TYPE_VECTOR (type))
          {
            if (TYPE_LENGTH (type) != 8 && TYPE_LENGTH (type) != 16)
              return -1;

            if (*fundamental_type == nullptr)
              *fundamental_type = type;
            else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
                     || TYPE_CODE (type) != TYPE_CODE (*fundamental_type))
              return -1;

            return 1;
          }
        else
          {
            struct type *target_type = TYPE_TARGET_TYPE (type);
            int count = aapcs_is_vfp_call_or_return_candidate_1
                          (target_type, fundamental_type);

            if (count == -1)
              return count;

            count *= (TYPE_LENGTH (type) / TYPE_LENGTH (target_type));
            return count;
          }
      }

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      {
        int count = 0;

        for (int i = 0; i < TYPE_NFIELDS (type); i++)
          {
            /* Ignore any static fields.  */
            if (field_is_static (&TYPE_FIELD (type, i)))
              continue;

            struct type *member = check_typedef (TYPE_FIELD_TYPE (type, i));

            int sub_count = aapcs_is_vfp_call_or_return_candidate_1
                              (member, fundamental_type);
            if (sub_count == -1)
              return -1;
            count += sub_count;
          }

        /* Ensure there is no padding between the fields (allowing for empty
           zero length structs).  */
        int ftype_length = (*fundamental_type == nullptr)
                           ? 0 : TYPE_LENGTH (*fundamental_type);
        if (count * ftype_length != TYPE_LENGTH (type))
          return -1;

        return count;
      }

    default:
      break;
    }

  return -1;
}

/* Return true if an argument, whose type is described by TYPE, can be passed or
   returned in simd/fp registers, providing enough parameter passing registers
   are available.  This is as described in the AAPCS64.

   Upon successful return, *COUNT returns the number of needed registers,
   *FUNDAMENTAL_TYPE contains the type of those registers.

   Candidate as per the AAPCS64 5.4.2.C is either a:
   - float.
   - short-vector.
   - HFA (Homogeneous Floating-point Aggregate, 4.3.5.1).  A Composite type
     where all the members are floats and has at most 4 members.
   - HVA (Homogeneous Short-vector Aggregate, 4.3.5.2).  A Composite type
     where all the members are short vectors and has at most 4 members.
   - Complex (7.1.1)

   Note that HFAs and HVAs can include nested structures and arrays.  */

static bool
aapcs_is_vfp_call_or_return_candidate (struct type *type, int *count,
                                       struct type **fundamental_type)
{
  if (type == nullptr)
    return false;

  *fundamental_type = nullptr;

  int ag_count = aapcs_is_vfp_call_or_return_candidate_1 (type,
                                                          fundamental_type);

  if (ag_count > 0 && ag_count <= HA_MAX_NUM_FLDS)
    {
      *count = ag_count;
      return true;
    }
  else
    return false;
}
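
/* For example, struct { double x; double y; } is an HFA: the worker
   returns count = 2 with *FUNDAMENTAL_TYPE set to double, so the value
   travels in two consecutive V registers.  struct { float f; double d; }
   fails the candidate test because the member types differ, and a
   struct of five doubles fails because it exceeds HA_MAX_NUM_FLDS.  */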

/* AArch64 function call information structure.  */
struct aarch64_call_info
{
  /* the current argument number.  */
  unsigned argnum;

  /* The next general purpose register number, equivalent to NGRN as
     described in the AArch64 Procedure Call Standard.  */
  unsigned ngrn;

  /* The next SIMD and floating point register number, equivalent to
     NSRN as described in the AArch64 Procedure Call Standard.  */
  unsigned nsrn;

  /* The next stacked argument address, equivalent to NSAA as
     described in the AArch64 Procedure Call Standard.  */
  unsigned nsaa;

  /* Stack item vector.  */
  VEC(stack_item_t) *si;
};

/* Pass a value in a sequence of consecutive X registers.  The caller
   is responsible for ensuring sufficient registers are available.  */

static void
pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
           struct aarch64_call_info *info, struct type *type,
           struct value *arg)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int len = TYPE_LENGTH (type);
  enum type_code typecode = TYPE_CODE (type);
  int regnum = AARCH64_X0_REGNUM + info->ngrn;
  const bfd_byte *buf = value_contents (arg);

  info->argnum++;

  while (len > 0)
    {
      int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
      CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
                                                   byte_order);

      /* Adjust sub-word struct/union args when big-endian.  */
      if (byte_order == BFD_ENDIAN_BIG
          && partial_len < X_REGISTER_SIZE
          && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
        regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);

      if (aarch64_debug)
        {
          debug_printf ("arg %d in %s = 0x%s\n", info->argnum,
                        gdbarch_register_name (gdbarch, regnum),
                        phex (regval, X_REGISTER_SIZE));
        }
      regcache_cooked_write_unsigned (regcache, regnum, regval);
      len -= partial_len;
      buf += partial_len;
      regnum++;
    }
}

/* Attempt to marshall a value in a V register.  Return 1 if
   successful, or 0 if insufficient registers are available.  This
   function, unlike the equivalent pass_in_x() function, does not
   handle arguments spread across multiple registers.  */

static int
pass_in_v (struct gdbarch *gdbarch,
           struct regcache *regcache,
           struct aarch64_call_info *info,
           int len, const bfd_byte *buf)
{
  if (info->nsrn < 8)
    {
      int regnum = AARCH64_V0_REGNUM + info->nsrn;
      /* Enough space for a full vector register.  */
      gdb_byte reg[register_size (gdbarch, regnum)];
      gdb_assert (len <= sizeof (reg));

      info->argnum++;
      info->nsrn++;

      memset (reg, 0, sizeof (reg));
      /* PCS C.1, the argument is allocated to the least significant
         bits of V register.  */
      memcpy (reg, buf, len);
      regcache->cooked_write (regnum, reg);

      if (aarch64_debug)
        {
          debug_printf ("arg %d in %s\n", info->argnum,
                        gdbarch_register_name (gdbarch, regnum));
        }
      return 1;
    }
  info->nsrn = 8;
  return 0;
}
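
/* Note the asymmetry between the two helpers above: pass_in_x splits a
   large value across several consecutive X registers, while pass_in_v
   writes exactly one V register per call.  HFA/HVA members are
   therefore passed one field at a time by pass_in_v_vfp_candidate
   below, which calls pass_in_v once per base element.  */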

/* Marshall an argument onto the stack.  */

static void
pass_on_stack (struct aarch64_call_info *info, struct type *type,
               struct value *arg)
{
  const bfd_byte *buf = value_contents (arg);
  int len = TYPE_LENGTH (type);
  int align;
  stack_item_t item;

  info->argnum++;

  align = aarch64_type_align (type);

  /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
     natural alignment of the argument's type.  */
  align = align_up (align, 8);

  /* The AArch64 PCS requires at most doubleword alignment.  */
  if (align > 16)
    align = 16;

  if (aarch64_debug)
    {
      debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
                    info->nsaa);
    }

  item.len = len;
  item.data = buf;
  VEC_safe_push (stack_item_t, info->si, &item);

  info->nsaa += len;
  if (info->nsaa & (align - 1))
    {
      /* Push stack alignment padding.  */
      int pad = align - (info->nsaa & (align - 1));

      item.len = pad;
      item.data = NULL;

      VEC_safe_push (stack_item_t, info->si, &item);
      info->nsaa += pad;
    }
}

/* Marshall an argument into a sequence of one or more consecutive X
   registers or, if insufficient X registers are available, then onto
   the stack.  */

static void
pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
                    struct aarch64_call_info *info, struct type *type,
                    struct value *arg)
{
  int len = TYPE_LENGTH (type);
  int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;

  /* PCS C.13 - Pass in registers if we have enough spare.  */
  if (info->ngrn + nregs <= 8)
    {
      pass_in_x (gdbarch, regcache, info, type, arg);
      info->ngrn += nregs;
    }
  else
    {
      info->ngrn = 8;
      pass_on_stack (info, type, arg);
    }
}
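
/* For example, a 16-byte struct needs nregs = 2; if only x7 remains it
   is not split between register and stack but goes entirely onto the
   stack, and ngrn is set to 8 so that no later argument can use the
   remaining X registers, as PCS rule C.13 requires.  */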

/* Pass a value, which is of type ARG_TYPE, in a V register.  Assumes the
   value is an AAPCS64 VFP call or return candidate (see
   aapcs_is_vfp_call_or_return_candidate) and there are enough spare V
   registers.  A return value of false is an error state, as the value will
   have been partially passed to the stack.  */

static bool
pass_in_v_vfp_candidate (struct gdbarch *gdbarch, struct regcache *regcache,
                         struct aarch64_call_info *info, struct type *arg_type,
                         struct value *arg)
{
  switch (TYPE_CODE (arg_type))
    {
    case TYPE_CODE_FLT:
      return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type),
                        value_contents (arg));
      break;

    case TYPE_CODE_COMPLEX:
      {
        const bfd_byte *buf = value_contents (arg);
        struct type *target_type = check_typedef (TYPE_TARGET_TYPE (arg_type));

        if (!pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type),
                        buf))
          return false;

        return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type),
                          buf + TYPE_LENGTH (target_type));
      }

    case TYPE_CODE_ARRAY:
      if (TYPE_VECTOR (arg_type))
        return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type),
                          value_contents (arg));
      /* fall through.  */

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      for (int i = 0; i < TYPE_NFIELDS (arg_type); i++)
        {
          /* Don't include static fields.  */
          if (field_is_static (&TYPE_FIELD (arg_type, i)))
            continue;

          struct value *field = value_primitive_field (arg, 0, i, arg_type);
          struct type *field_type = check_typedef (value_type (field));

          if (!pass_in_v_vfp_candidate (gdbarch, regcache, info, field_type,
                                        field))
            return false;
        }
      return true;

    default:
      return false;
    }
}

/* Implement the "push_dummy_call" gdbarch method.  */

static CORE_ADDR
aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
                         struct regcache *regcache, CORE_ADDR bp_addr,
                         int nargs,
                         struct value **args, CORE_ADDR sp,
                         function_call_return_method return_method,
                         CORE_ADDR struct_addr)
{
  int argnum;
  struct aarch64_call_info info;

  memset (&info, 0, sizeof (info));

  /* We need to know what the type of the called function is in order
     to determine the number of named/anonymous arguments for the
     actual argument placement, and the return type in order to handle
     return value correctly.

     The generic code above us views the decision of return in memory
     or return in registers as a two stage process.  The language
     handler is consulted first and may decide to return in memory (eg
     class with copy constructor returned by value), this will cause
     the generic code to allocate space AND insert an initial leading
     argument.

     If the language code does not decide to pass in memory then the
     target code is consulted.

     If the language code decides to pass in memory we want to move
     the pointer inserted as the initial argument from the argument
     list and into X8, the conventional AArch64 struct return pointer
     register.  */

  /* Set the return address.  For the AArch64, the return breakpoint
     is always at BP_ADDR.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);

  /* If we were given an initial argument for the return slot, lose it.  */
  if (return_method == return_method_hidden_param)
    {
      args++;
      nargs--;
    }

  /* The struct_return pointer occupies X8.  */
  if (return_method != return_method_normal)
    {
      if (aarch64_debug)
        {
          debug_printf ("struct return in %s = 0x%s\n",
                        gdbarch_register_name (gdbarch,
                                               AARCH64_STRUCT_RETURN_REGNUM),
                        paddress (gdbarch, struct_addr));
        }
      regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
                                      struct_addr);
    }

  for (argnum = 0; argnum < nargs; argnum++)
    {
      struct value *arg = args[argnum];
      struct type *arg_type, *fundamental_type;
      int len, elements;

      arg_type = check_typedef (value_type (arg));
      len = TYPE_LENGTH (arg_type);

      /* If ARG can be passed in V registers as per the AAPCS64, then do so
         if there are enough spare registers.  */
      if (aapcs_is_vfp_call_or_return_candidate (arg_type, &elements,
                                                 &fundamental_type))
        {
          if (info.nsrn + elements <= 8)
            {
              /* We know that we have sufficient registers available therefore
                 this will never need to fall back to the stack.  */
              if (!pass_in_v_vfp_candidate (gdbarch, regcache, &info, arg_type,
                                            arg))
                gdb_assert_not_reached ("Failed to push args");
            }
          else
            {
              info.nsrn = 8;
              pass_on_stack (&info, arg_type, arg);
            }
          continue;
        }

      switch (TYPE_CODE (arg_type))
        {
        case TYPE_CODE_INT:
        case TYPE_CODE_BOOL:
        case TYPE_CODE_CHAR:
        case TYPE_CODE_RANGE:
        case TYPE_CODE_ENUM:
          if (len < 4)
            {
              /* Promote to 32 bit integer.  */
              if (TYPE_UNSIGNED (arg_type))
                arg_type = builtin_type (gdbarch)->builtin_uint32;
              else
                arg_type = builtin_type (gdbarch)->builtin_int32;
              arg = value_cast (arg_type, arg);
            }
          pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;

        case TYPE_CODE_STRUCT:
        case TYPE_CODE_ARRAY:
        case TYPE_CODE_UNION:
          if (len > 16)
            {
              /* PCS B.7 Aggregates larger than 16 bytes are passed by
                 invisible reference.  */

              /* Allocate aligned storage.  */
              sp = align_down (sp - len, 16);

              /* Write the real data into the stack.  */
              write_memory (sp, value_contents (arg), len);

              /* Construct the indirection.  */
              arg_type = lookup_pointer_type (arg_type);
              arg = value_from_pointer (arg_type, sp);
              pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
            }
          else
            /* PCS C.15 / C.18 multiple values pass.  */
            pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;

        default:
          pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;
        }
    }

  /* Make sure stack retains 16 byte alignment.  */
  if (info.nsaa & 15)
    sp -= 16 - (info.nsaa & 15);

  while (!VEC_empty (stack_item_t, info.si))
    {
      stack_item_t *si = VEC_last (stack_item_t, info.si);

      sp -= si->len;
      if (si->data != NULL)
        write_memory (sp, si->data, si->len);
      VEC_pop (stack_item_t, info.si);
    }

  VEC_free (stack_item_t, info.si);

  /* Finally, update the SP register.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);

  return sp;
}

/* Implement the "frame_align" gdbarch method.  */

static CORE_ADDR
aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
{
  /* Align the stack to sixteen bytes.  */
  return sp & ~(CORE_ADDR) 15;
}
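
/* For example, aarch64_frame_align (gdbarch, 0x7ffffffff9) yields
   0x7ffffffff0: clearing the low four bits rounds the stack pointer
   down to the 16-byte boundary the PCS requires.  */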
1751
1752/* Return the type for an AdvSISD Q register. */
1753
1754static struct type *
1755aarch64_vnq_type (struct gdbarch *gdbarch)
1756{
1757 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1758
1759 if (tdep->vnq_type == NULL)
1760 {
1761 struct type *t;
1762 struct type *elem;
1763
1764 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
1765 TYPE_CODE_UNION);
1766
1767 elem = builtin_type (gdbarch)->builtin_uint128;
1768 append_composite_type_field (t, "u", elem);
1769
1770 elem = builtin_type (gdbarch)->builtin_int128;
1771 append_composite_type_field (t, "s", elem);
1772
1773 tdep->vnq_type = t;
1774 }
1775
1776 return tdep->vnq_type;
1777}
1778
1779/* Return the type for an AdvSISD D register. */
1780
1781static struct type *
1782aarch64_vnd_type (struct gdbarch *gdbarch)
1783{
1784 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1785
1786 if (tdep->vnd_type == NULL)
1787 {
1788 struct type *t;
1789 struct type *elem;
1790
1791 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
1792 TYPE_CODE_UNION);
1793
1794 elem = builtin_type (gdbarch)->builtin_double;
1795 append_composite_type_field (t, "f", elem);
1796
1797 elem = builtin_type (gdbarch)->builtin_uint64;
1798 append_composite_type_field (t, "u", elem);
1799
1800 elem = builtin_type (gdbarch)->builtin_int64;
1801 append_composite_type_field (t, "s", elem);
1802
1803 tdep->vnd_type = t;
1804 }
1805
1806 return tdep->vnd_type;
1807}
1808
1809/* Return the type for an AdvSISD S register. */
1810
1811static struct type *
1812aarch64_vns_type (struct gdbarch *gdbarch)
1813{
1814 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1815
1816 if (tdep->vns_type == NULL)
1817 {
1818 struct type *t;
1819 struct type *elem;
1820
1821 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
1822 TYPE_CODE_UNION);
1823
1824 elem = builtin_type (gdbarch)->builtin_float;
1825 append_composite_type_field (t, "f", elem);
1826
1827 elem = builtin_type (gdbarch)->builtin_uint32;
1828 append_composite_type_field (t, "u", elem);
1829
1830 elem = builtin_type (gdbarch)->builtin_int32;
1831 append_composite_type_field (t, "s", elem);
1832
1833 tdep->vns_type = t;
1834 }
1835
1836 return tdep->vns_type;
1837}
1838
1839/* Return the type for an AdvSISD H register. */
1840
1841static struct type *
1842aarch64_vnh_type (struct gdbarch *gdbarch)
1843{
1844 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1845
1846 if (tdep->vnh_type == NULL)
1847 {
1848 struct type *t;
1849 struct type *elem;
1850
1851 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
1852 TYPE_CODE_UNION);
1853
1854 elem = builtin_type (gdbarch)->builtin_uint16;
1855 append_composite_type_field (t, "u", elem);
1856
1857 elem = builtin_type (gdbarch)->builtin_int16;
1858 append_composite_type_field (t, "s", elem);
1859
1860 tdep->vnh_type = t;
1861 }
1862
1863 return tdep->vnh_type;
1864}
1865
1866/* Return the type for an AdvSISD B register. */
1867
1868static struct type *
1869aarch64_vnb_type (struct gdbarch *gdbarch)
1870{
1871 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1872
1873 if (tdep->vnb_type == NULL)
1874 {
1875 struct type *t;
1876 struct type *elem;
1877
1878 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
1879 TYPE_CODE_UNION);
1880
1881 elem = builtin_type (gdbarch)->builtin_uint8;
1882 append_composite_type_field (t, "u", elem);
1883
1884 elem = builtin_type (gdbarch)->builtin_int8;
1885 append_composite_type_field (t, "s", elem);
1886
1887 tdep->vnb_type = t;
1888 }
1889
1890 return tdep->vnb_type;
1891}
1892
63bad7b6
AH
1893/* Return the type for an AdvSISD V register. */
1894
1895static struct type *
1896aarch64_vnv_type (struct gdbarch *gdbarch)
1897{
1898 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1899
1900 if (tdep->vnv_type == NULL)
1901 {
1902 struct type *t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnv",
1903 TYPE_CODE_UNION);
1904
1905 append_composite_type_field (t, "d", aarch64_vnd_type (gdbarch));
1906 append_composite_type_field (t, "s", aarch64_vns_type (gdbarch));
1907 append_composite_type_field (t, "h", aarch64_vnh_type (gdbarch));
1908 append_composite_type_field (t, "b", aarch64_vnb_type (gdbarch));
1909 append_composite_type_field (t, "q", aarch64_vnq_type (gdbarch));
1910
1911 tdep->vnv_type = t;
1912 }
1913
1914 return tdep->vnv_type;
1915}
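/* Illustrative only: these union types are what make expressions such
   as the following work in a GDB session (assuming the target supplies
   the corresponding registers):

     (gdb) print $d0.f     (d0 viewed as an IEEE double)
     (gdb) print $s0.u     (s0 viewed as a 32-bit unsigned)  */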
1916
07b287a0
MS
1917/* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
1918
1919static int
1920aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
1921{
34dcc7cf
AH
1922 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1923
07b287a0
MS
1924 if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
1925 return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;
1926
1927 if (reg == AARCH64_DWARF_SP)
1928 return AARCH64_SP_REGNUM;
1929
1930 if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
1931 return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;
1932
65d4cada
AH
1933 if (reg == AARCH64_DWARF_SVE_VG)
1934 return AARCH64_SVE_VG_REGNUM;
1935
1936 if (reg == AARCH64_DWARF_SVE_FFR)
1937 return AARCH64_SVE_FFR_REGNUM;
1938
1939 if (reg >= AARCH64_DWARF_SVE_P0 && reg <= AARCH64_DWARF_SVE_P0 + 15)
1940 return AARCH64_SVE_P0_REGNUM + reg - AARCH64_DWARF_SVE_P0;
1941
 1942 if (reg >= AARCH64_DWARF_SVE_Z0 && reg <= AARCH64_DWARF_SVE_Z0 + 31)
1943 return AARCH64_SVE_Z0_REGNUM + reg - AARCH64_DWARF_SVE_Z0;
1944
34dcc7cf
AH
1945 if (tdep->has_pauth ())
1946 {
1947 if (reg >= AARCH64_DWARF_PAUTH_DMASK && reg <= AARCH64_DWARF_PAUTH_CMASK)
1948 return tdep->pauth_reg_base + reg - AARCH64_DWARF_PAUTH_DMASK;
1949
1950 if (reg == AARCH64_DWARF_PAUTH_RA_STATE)
1951 return tdep->pauth_ra_state_regnum;
1952 }
1953
07b287a0
MS
1954 return -1;
1955}
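/* Worked examples of the mapping above (DWARF numbers follow the
   AArch64 DWARF ABI): DWARF register 0 maps to x0, 31 to SP, and 64 to
   v0; an unhandled number such as 33 (ELR_mode) yields -1 so callers
   can reject it. */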
07b287a0
MS
1956
1957/* Implement the "print_insn" gdbarch method. */
1958
1959static int
1960aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
1961{
1962 info->symbols = NULL;
6394c606 1963 return default_print_insn (memaddr, info);
07b287a0
MS
1964}
1965
1966/* AArch64 BRK software debug mode instruction.
1967 Note that AArch64 code is always little-endian.
1968 1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000. */
04180708 1969constexpr gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
07b287a0 1970
04180708 1971typedef BP_MANIPULATION (aarch64_default_breakpoint) aarch64_breakpoint;
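/* Decoded, those bytes are BRK #0: the BRK encoding is
   1101 0100 001 <imm16> 00000, and the imm16 field of 0xd4200000 is
   zero. */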
07b287a0
MS
1972
1973/* Extract from an array REGS containing the (raw) register state a
1974 function return value of type TYPE, and copy that, in virtual
1975 format, into VALBUF. */
1976
1977static void
1978aarch64_extract_return_value (struct type *type, struct regcache *regs,
1979 gdb_byte *valbuf)
1980{
ac7936df 1981 struct gdbarch *gdbarch = regs->arch ();
07b287a0 1982 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
4f4aedeb
AH
1983 int elements;
1984 struct type *fundamental_type;
07b287a0 1985
4f4aedeb
AH
1986 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
1987 &fundamental_type))
07b287a0 1988 {
4f4aedeb
AH
1989 int len = TYPE_LENGTH (fundamental_type);
1990
1991 for (int i = 0; i < elements; i++)
1992 {
1993 int regno = AARCH64_V0_REGNUM + i;
3ff2c72e
AH
1994 /* Enough space for a full vector register. */
1995 gdb_byte buf[register_size (gdbarch, regno)];
1996 gdb_assert (len <= sizeof (buf));
4f4aedeb
AH
1997
1998 if (aarch64_debug)
1999 {
2000 debug_printf ("read HFA or HVA return value element %d from %s\n",
2001 i + 1,
2002 gdbarch_register_name (gdbarch, regno));
2003 }
2004 regs->cooked_read (regno, buf);
07b287a0 2005
4f4aedeb
AH
2006 memcpy (valbuf, buf, len);
2007 valbuf += len;
2008 }
07b287a0
MS
2009 }
2010 else if (TYPE_CODE (type) == TYPE_CODE_INT
2011 || TYPE_CODE (type) == TYPE_CODE_CHAR
2012 || TYPE_CODE (type) == TYPE_CODE_BOOL
2013 || TYPE_CODE (type) == TYPE_CODE_PTR
aa006118 2014 || TYPE_IS_REFERENCE (type)
07b287a0
MS
2015 || TYPE_CODE (type) == TYPE_CODE_ENUM)
2016 {
6471e7d2 2017 /* If the type is a plain integer, then the access is
07b287a0
MS
 2018 straightforward. Otherwise the value is assembled from
 2019 consecutive X registers. */
2020 int len = TYPE_LENGTH (type);
2021 int regno = AARCH64_X0_REGNUM;
2022 ULONGEST tmp;
2023
2024 while (len > 0)
2025 {
2026 /* By using store_unsigned_integer we avoid having to do
2027 anything special for small big-endian values. */
2028 regcache_cooked_read_unsigned (regs, regno++, &tmp);
2029 store_unsigned_integer (valbuf,
2030 (len > X_REGISTER_SIZE
2031 ? X_REGISTER_SIZE : len), byte_order, tmp);
2032 len -= X_REGISTER_SIZE;
2033 valbuf += X_REGISTER_SIZE;
2034 }
2035 }
07b287a0
MS
2036 else
2037 {
2038 /* For a structure or union the behaviour is as if the value had
2039 been stored to word-aligned memory and then loaded into
2040 registers with 64-bit load instruction(s). */
2041 int len = TYPE_LENGTH (type);
2042 int regno = AARCH64_X0_REGNUM;
2043 bfd_byte buf[X_REGISTER_SIZE];
2044
2045 while (len > 0)
2046 {
dca08e1f 2047 regs->cooked_read (regno++, buf);
07b287a0
MS
2048 memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2049 len -= X_REGISTER_SIZE;
2050 valbuf += X_REGISTER_SIZE;
2051 }
2052 }
2053}
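/* Worked example for the struct path above: a 12-byte structure
   returned in registers is assembled from x0 and x1; the first
   iteration copies all 8 bytes of x0, the second copies the low 4
   bytes of x1, and the loop then terminates with len at -4. */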
2054
2055
2056/* Will a function return an aggregate type in memory or in a
2057 register? Return 0 if an aggregate type can be returned in a
2058 register, 1 if it must be returned in memory. */
2059
2060static int
2061aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
2062{
f168693b 2063 type = check_typedef (type);
4f4aedeb
AH
2064 int elements;
2065 struct type *fundamental_type;
07b287a0 2066
4f4aedeb
AH
2067 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2068 &fundamental_type))
07b287a0 2069 {
cd635f74
YQ
 2070 /* v0-v7 are used to return values, and one register is allocated
 2071 per member. However, an HFA or HVA has at most four members. */
07b287a0
MS
2072 return 0;
2073 }
2074
2075 if (TYPE_LENGTH (type) > 16)
2076 {
2077 /* PCS B.6 Aggregates larger than 16 bytes are passed by
2078 invisible reference. */
2079
2080 return 1;
2081 }
2082
2083 return 0;
2084}
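/* Illustrative examples of the rules above, as user-level types
   (hypothetical, for exposition only):

     struct hfa  { double a, b, c, d; };  32 bytes, but an HFA with
       four members, so it is returned in v0-v3;
     struct pair { long x, y; };          16 bytes, returned in x0/x1;
     struct big  { char c[24]; };         larger than 16 bytes and not
       an HFA, so it is returned in memory.  */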
2085
2086/* Write into appropriate registers a function return value of type
2087 TYPE, given in virtual format. */
2088
2089static void
2090aarch64_store_return_value (struct type *type, struct regcache *regs,
2091 const gdb_byte *valbuf)
2092{
ac7936df 2093 struct gdbarch *gdbarch = regs->arch ();
07b287a0 2094 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
4f4aedeb
AH
2095 int elements;
2096 struct type *fundamental_type;
07b287a0 2097
4f4aedeb
AH
2098 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2099 &fundamental_type))
07b287a0 2100 {
4f4aedeb
AH
2101 int len = TYPE_LENGTH (fundamental_type);
2102
2103 for (int i = 0; i < elements; i++)
2104 {
2105 int regno = AARCH64_V0_REGNUM + i;
3ff2c72e
AH
2106 /* Enough space for a full vector register. */
2107 gdb_byte tmpbuf[register_size (gdbarch, regno)];
2108 gdb_assert (len <= sizeof (tmpbuf));
4f4aedeb
AH
2109
2110 if (aarch64_debug)
2111 {
2112 debug_printf ("write HFA or HVA return value element %d to %s\n",
2113 i + 1,
2114 gdbarch_register_name (gdbarch, regno));
2115 }
07b287a0 2116
4f4aedeb
AH
2117 memcpy (tmpbuf, valbuf,
2118 len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
2119 regs->cooked_write (regno, tmpbuf);
2120 valbuf += len;
2121 }
07b287a0
MS
2122 }
2123 else if (TYPE_CODE (type) == TYPE_CODE_INT
2124 || TYPE_CODE (type) == TYPE_CODE_CHAR
2125 || TYPE_CODE (type) == TYPE_CODE_BOOL
2126 || TYPE_CODE (type) == TYPE_CODE_PTR
aa006118 2127 || TYPE_IS_REFERENCE (type)
07b287a0
MS
2128 || TYPE_CODE (type) == TYPE_CODE_ENUM)
2129 {
2130 if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
2131 {
2132 /* Values of one word or less are zero/sign-extended and
 2133 returned in x0. */
2134 bfd_byte tmpbuf[X_REGISTER_SIZE];
2135 LONGEST val = unpack_long (type, valbuf);
2136
2137 store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
b66f5587 2138 regs->cooked_write (AARCH64_X0_REGNUM, tmpbuf);
07b287a0
MS
2139 }
2140 else
2141 {
2142 /* Integral values greater than one word are stored in
 2143 consecutive registers starting with x0. This will always
 2144 be a multiple of the register size. */
2145 int len = TYPE_LENGTH (type);
2146 int regno = AARCH64_X0_REGNUM;
2147
2148 while (len > 0)
2149 {
b66f5587 2150 regs->cooked_write (regno++, valbuf);
07b287a0
MS
2151 len -= X_REGISTER_SIZE;
2152 valbuf += X_REGISTER_SIZE;
2153 }
2154 }
2155 }
07b287a0
MS
2156 else
2157 {
2158 /* For a structure or union the behaviour is as if the value had
2159 been stored to word-aligned memory and then loaded into
2160 registers with 64-bit load instruction(s). */
2161 int len = TYPE_LENGTH (type);
2162 int regno = AARCH64_X0_REGNUM;
2163 bfd_byte tmpbuf[X_REGISTER_SIZE];
2164
2165 while (len > 0)
2166 {
2167 memcpy (tmpbuf, valbuf,
2168 len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
b66f5587 2169 regs->cooked_write (regno++, tmpbuf);
07b287a0
MS
2170 len -= X_REGISTER_SIZE;
2171 valbuf += X_REGISTER_SIZE;
2172 }
2173 }
2174}
2175
2176/* Implement the "return_value" gdbarch method. */
2177
2178static enum return_value_convention
2179aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
2180 struct type *valtype, struct regcache *regcache,
2181 gdb_byte *readbuf, const gdb_byte *writebuf)
2182{
07b287a0
MS
2183
2184 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
2185 || TYPE_CODE (valtype) == TYPE_CODE_UNION
2186 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
2187 {
2188 if (aarch64_return_in_memory (gdbarch, valtype))
2189 {
2190 if (aarch64_debug)
b277c936 2191 debug_printf ("return value in memory\n");
07b287a0
MS
2192 return RETURN_VALUE_STRUCT_CONVENTION;
2193 }
2194 }
2195
2196 if (writebuf)
2197 aarch64_store_return_value (valtype, regcache, writebuf);
2198
2199 if (readbuf)
2200 aarch64_extract_return_value (valtype, regcache, readbuf);
2201
2202 if (aarch64_debug)
b277c936 2203 debug_printf ("return value in registers\n");
07b287a0
MS
2204
2205 return RETURN_VALUE_REGISTER_CONVENTION;
2206}
2207
2208/* Implement the "get_longjmp_target" gdbarch method. */
2209
2210static int
2211aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2212{
2213 CORE_ADDR jb_addr;
2214 gdb_byte buf[X_REGISTER_SIZE];
2215 struct gdbarch *gdbarch = get_frame_arch (frame);
2216 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2217 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2218
2219 jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
2220
2221 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
2222 X_REGISTER_SIZE))
2223 return 0;
2224
2225 *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
2226 return 1;
2227}
ea873d8e
PL
2228
2229/* Implement the "gen_return_address" gdbarch method. */
2230
2231static void
2232aarch64_gen_return_address (struct gdbarch *gdbarch,
2233 struct agent_expr *ax, struct axs_value *value,
2234 CORE_ADDR scope)
2235{
2236 value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
2237 value->kind = axs_lvalue_register;
2238 value->u.reg = AARCH64_LR_REGNUM;
2239}
07b287a0
MS
2240\f
2241
2242/* Return the pseudo register name corresponding to register regnum. */
2243
2244static const char *
2245aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
2246{
63bad7b6
AH
2247 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2248
07b287a0
MS
2249 static const char *const q_name[] =
2250 {
2251 "q0", "q1", "q2", "q3",
2252 "q4", "q5", "q6", "q7",
2253 "q8", "q9", "q10", "q11",
2254 "q12", "q13", "q14", "q15",
2255 "q16", "q17", "q18", "q19",
2256 "q20", "q21", "q22", "q23",
2257 "q24", "q25", "q26", "q27",
2258 "q28", "q29", "q30", "q31",
2259 };
2260
2261 static const char *const d_name[] =
2262 {
2263 "d0", "d1", "d2", "d3",
2264 "d4", "d5", "d6", "d7",
2265 "d8", "d9", "d10", "d11",
2266 "d12", "d13", "d14", "d15",
2267 "d16", "d17", "d18", "d19",
2268 "d20", "d21", "d22", "d23",
2269 "d24", "d25", "d26", "d27",
2270 "d28", "d29", "d30", "d31",
2271 };
2272
2273 static const char *const s_name[] =
2274 {
2275 "s0", "s1", "s2", "s3",
2276 "s4", "s5", "s6", "s7",
2277 "s8", "s9", "s10", "s11",
2278 "s12", "s13", "s14", "s15",
2279 "s16", "s17", "s18", "s19",
2280 "s20", "s21", "s22", "s23",
2281 "s24", "s25", "s26", "s27",
2282 "s28", "s29", "s30", "s31",
2283 };
2284
2285 static const char *const h_name[] =
2286 {
2287 "h0", "h1", "h2", "h3",
2288 "h4", "h5", "h6", "h7",
2289 "h8", "h9", "h10", "h11",
2290 "h12", "h13", "h14", "h15",
2291 "h16", "h17", "h18", "h19",
2292 "h20", "h21", "h22", "h23",
2293 "h24", "h25", "h26", "h27",
2294 "h28", "h29", "h30", "h31",
2295 };
2296
2297 static const char *const b_name[] =
2298 {
2299 "b0", "b1", "b2", "b3",
2300 "b4", "b5", "b6", "b7",
2301 "b8", "b9", "b10", "b11",
2302 "b12", "b13", "b14", "b15",
2303 "b16", "b17", "b18", "b19",
2304 "b20", "b21", "b22", "b23",
2305 "b24", "b25", "b26", "b27",
2306 "b28", "b29", "b30", "b31",
2307 };
2308
34dcc7cf 2309 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
07b287a0 2310
34dcc7cf
AH
2311 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
2312 return q_name[p_regnum - AARCH64_Q0_REGNUM];
07b287a0 2313
34dcc7cf
AH
2314 if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
2315 return d_name[p_regnum - AARCH64_D0_REGNUM];
07b287a0 2316
34dcc7cf
AH
2317 if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
2318 return s_name[p_regnum - AARCH64_S0_REGNUM];
07b287a0 2319
34dcc7cf
AH
2320 if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
2321 return h_name[p_regnum - AARCH64_H0_REGNUM];
07b287a0 2322
34dcc7cf
AH
2323 if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
2324 return b_name[p_regnum - AARCH64_B0_REGNUM];
07b287a0 2325
63bad7b6
AH
2326 if (tdep->has_sve ())
2327 {
2328 static const char *const sve_v_name[] =
2329 {
2330 "v0", "v1", "v2", "v3",
2331 "v4", "v5", "v6", "v7",
2332 "v8", "v9", "v10", "v11",
2333 "v12", "v13", "v14", "v15",
2334 "v16", "v17", "v18", "v19",
2335 "v20", "v21", "v22", "v23",
2336 "v24", "v25", "v26", "v27",
2337 "v28", "v29", "v30", "v31",
2338 };
2339
34dcc7cf
AH
2340 if (p_regnum >= AARCH64_SVE_V0_REGNUM
2341 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2342 return sve_v_name[p_regnum - AARCH64_SVE_V0_REGNUM];
63bad7b6
AH
2343 }
2344
34dcc7cf
AH
2345 /* RA_STATE is used for unwinding only. Do not assign it a name - this
2346 prevents it from being read by methods such as
2347 mi_cmd_trace_frame_collected. */
2348 if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
2349 return "";
2350
07b287a0
MS
2351 internal_error (__FILE__, __LINE__,
2352 _("aarch64_pseudo_register_name: bad register number %d"),
34dcc7cf 2353 p_regnum);
07b287a0
MS
2354}
2355
2356/* Implement the "pseudo_register_type" tdesc_arch_data method. */
2357
2358static struct type *
2359aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
2360{
63bad7b6
AH
2361 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2362
34dcc7cf 2363 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
07b287a0 2364
34dcc7cf 2365 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
07b287a0
MS
2366 return aarch64_vnq_type (gdbarch);
2367
34dcc7cf 2368 if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
07b287a0
MS
2369 return aarch64_vnd_type (gdbarch);
2370
34dcc7cf 2371 if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
07b287a0
MS
2372 return aarch64_vns_type (gdbarch);
2373
34dcc7cf 2374 if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
07b287a0
MS
2375 return aarch64_vnh_type (gdbarch);
2376
34dcc7cf 2377 if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
07b287a0
MS
2378 return aarch64_vnb_type (gdbarch);
2379
34dcc7cf
AH
2380 if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
2381 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
63bad7b6
AH
2382 return aarch64_vnv_type (gdbarch);
2383
34dcc7cf
AH
2384 if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
2385 return builtin_type (gdbarch)->builtin_uint64;
2386
07b287a0
MS
2387 internal_error (__FILE__, __LINE__,
2388 _("aarch64_pseudo_register_type: bad register number %d"),
34dcc7cf 2389 p_regnum);
07b287a0
MS
2390}
2391
2392/* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2393
2394static int
2395aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
2396 struct reggroup *group)
2397{
63bad7b6
AH
2398 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2399
34dcc7cf 2400 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
07b287a0 2401
34dcc7cf 2402 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
07b287a0 2403 return group == all_reggroup || group == vector_reggroup;
34dcc7cf 2404 else if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
07b287a0
MS
2405 return (group == all_reggroup || group == vector_reggroup
2406 || group == float_reggroup);
34dcc7cf 2407 else if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
07b287a0
MS
2408 return (group == all_reggroup || group == vector_reggroup
2409 || group == float_reggroup);
34dcc7cf 2410 else if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
07b287a0 2411 return group == all_reggroup || group == vector_reggroup;
34dcc7cf 2412 else if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
07b287a0 2413 return group == all_reggroup || group == vector_reggroup;
34dcc7cf
AH
2414 else if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
2415 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
63bad7b6 2416 return group == all_reggroup || group == vector_reggroup;
34dcc7cf
AH
2417 /* RA_STATE is used for unwinding only. Do not assign it to any groups. */
2418 if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
2419 return 0;
07b287a0
MS
2420
2421 return group == all_reggroup;
2422}
2423
3c5cd5c3
AH
2424/* Helper for aarch64_pseudo_read_value. */
2425
2426static struct value *
63bad7b6
AH
2427aarch64_pseudo_read_value_1 (struct gdbarch *gdbarch,
2428 readable_regcache *regcache, int regnum_offset,
3c5cd5c3
AH
2429 int regsize, struct value *result_value)
2430{
3c5cd5c3
AH
2431 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
2432
63bad7b6
AH
2433 /* Enough space for a full vector register. */
2434 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2435 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2436
3c5cd5c3
AH
2437 if (regcache->raw_read (v_regnum, reg_buf) != REG_VALID)
2438 mark_value_bytes_unavailable (result_value, 0,
2439 TYPE_LENGTH (value_type (result_value)));
2440 else
2441 memcpy (value_contents_raw (result_value), reg_buf, regsize);
63bad7b6 2442
3c5cd5c3
AH
2443 return result_value;
2444 }
2445
07b287a0
MS
2446/* Implement the "pseudo_register_read_value" gdbarch method. */
2447
2448static struct value *
3c5cd5c3 2449aarch64_pseudo_read_value (struct gdbarch *gdbarch, readable_regcache *regcache,
07b287a0
MS
2450 int regnum)
2451{
63bad7b6 2452 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3c5cd5c3 2453 struct value *result_value = allocate_value (register_type (gdbarch, regnum));
07b287a0 2454
07b287a0
MS
2455 VALUE_LVAL (result_value) = lval_register;
2456 VALUE_REGNUM (result_value) = regnum;
07b287a0
MS
2457
2458 regnum -= gdbarch_num_regs (gdbarch);
2459
2460 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
63bad7b6
AH
2461 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2462 regnum - AARCH64_Q0_REGNUM,
3c5cd5c3 2463 Q_REGISTER_SIZE, result_value);
07b287a0
MS
2464
2465 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
63bad7b6
AH
2466 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2467 regnum - AARCH64_D0_REGNUM,
3c5cd5c3 2468 D_REGISTER_SIZE, result_value);
07b287a0
MS
2469
2470 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
63bad7b6
AH
2471 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2472 regnum - AARCH64_S0_REGNUM,
3c5cd5c3 2473 S_REGISTER_SIZE, result_value);
07b287a0
MS
2474
2475 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
63bad7b6
AH
2476 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2477 regnum - AARCH64_H0_REGNUM,
3c5cd5c3 2478 H_REGISTER_SIZE, result_value);
07b287a0
MS
2479
2480 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
63bad7b6
AH
2481 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2482 regnum - AARCH64_B0_REGNUM,
3c5cd5c3 2483 B_REGISTER_SIZE, result_value);
07b287a0 2484
63bad7b6
AH
2485 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2486 && regnum < AARCH64_SVE_V0_REGNUM + 32)
2487 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2488 regnum - AARCH64_SVE_V0_REGNUM,
2489 V_REGISTER_SIZE, result_value);
2490
07b287a0
MS
2491 gdb_assert_not_reached ("regnum out of bound");
2492}
2493
3c5cd5c3 2494/* Helper for aarch64_pseudo_write. */
07b287a0
MS
2495
2496static void
63bad7b6
AH
2497aarch64_pseudo_write_1 (struct gdbarch *gdbarch, struct regcache *regcache,
2498 int regnum_offset, int regsize, const gdb_byte *buf)
07b287a0 2499{
3c5cd5c3 2500 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
07b287a0 2501
63bad7b6
AH
2502 /* Enough space for a full vector register. */
2503 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2504 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2505
07b287a0
MS
 2506 /* Ensure the register buffer is zero. We want GDB writes of the
 2507 various 'scalar' pseudo registers to behave like architectural
 2508 writes: register-width bytes are written and the remainder is
 2509 set to zero. */
63bad7b6 2510 memset (reg_buf, 0, register_size (gdbarch, AARCH64_V0_REGNUM));
07b287a0 2511
3c5cd5c3
AH
2512 memcpy (reg_buf, buf, regsize);
2513 regcache->raw_write (v_regnum, reg_buf);
2514}
2515
2516/* Implement the "pseudo_register_write" gdbarch method. */
2517
2518static void
2519aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2520 int regnum, const gdb_byte *buf)
2521{
63bad7b6 2522 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
07b287a0
MS
2523 regnum -= gdbarch_num_regs (gdbarch);
2524
2525 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
63bad7b6
AH
2526 return aarch64_pseudo_write_1 (gdbarch, regcache,
2527 regnum - AARCH64_Q0_REGNUM, Q_REGISTER_SIZE,
2528 buf);
07b287a0
MS
2529
2530 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
63bad7b6
AH
2531 return aarch64_pseudo_write_1 (gdbarch, regcache,
2532 regnum - AARCH64_D0_REGNUM, D_REGISTER_SIZE,
2533 buf);
07b287a0
MS
2534
2535 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
63bad7b6
AH
2536 return aarch64_pseudo_write_1 (gdbarch, regcache,
2537 regnum - AARCH64_S0_REGNUM, S_REGISTER_SIZE,
2538 buf);
07b287a0
MS
2539
2540 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
63bad7b6
AH
2541 return aarch64_pseudo_write_1 (gdbarch, regcache,
2542 regnum - AARCH64_H0_REGNUM, H_REGISTER_SIZE,
2543 buf);
07b287a0
MS
2544
2545 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
63bad7b6
AH
2546 return aarch64_pseudo_write_1 (gdbarch, regcache,
2547 regnum - AARCH64_B0_REGNUM, B_REGISTER_SIZE,
2548 buf);
2549
2550 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2551 && regnum < AARCH64_SVE_V0_REGNUM + 32)
2552 return aarch64_pseudo_write_1 (gdbarch, regcache,
2553 regnum - AARCH64_SVE_V0_REGNUM,
2554 V_REGISTER_SIZE, buf);
07b287a0
MS
2555
2556 gdb_assert_not_reached ("regnum out of bound");
2557}
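/* Sketch of the effect (hypothetical session): writing a scalar pseudo
   register zero-extends into the underlying V register, so

     (gdb) set $s0.u = 1

   writes four bytes of v0 and clears the remaining twelve, mirroring
   an architectural scalar write. */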
2558
07b287a0
MS
2559/* Callback function for user_reg_add. */
2560
2561static struct value *
2562value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
2563{
9a3c8263 2564 const int *reg_p = (const int *) baton;
07b287a0
MS
2565
2566 return value_of_register (*reg_p, frame);
2567}
2568\f
2569
9404b58f
KM
2570/* Implement the "software_single_step" gdbarch method, needed to
2571 single step through atomic sequences on AArch64. */
2572
a0ff9e1a 2573static std::vector<CORE_ADDR>
f5ea389a 2574aarch64_software_single_step (struct regcache *regcache)
9404b58f 2575{
ac7936df 2576 struct gdbarch *gdbarch = regcache->arch ();
9404b58f
KM
2577 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2578 const int insn_size = 4;
2579 const int atomic_sequence_length = 16; /* Instruction sequence length. */
0187a92f 2580 CORE_ADDR pc = regcache_read_pc (regcache);
70ab8ccd 2581 CORE_ADDR breaks[2] = { CORE_ADDR_MAX, CORE_ADDR_MAX };
9404b58f
KM
2582 CORE_ADDR loc = pc;
2583 CORE_ADDR closing_insn = 0;
2584 uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
2585 byte_order_for_code);
2586 int index;
2587 int insn_count;
2588 int bc_insn_count = 0; /* Conditional branch instruction count. */
2589 int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed). */
f77ee802
YQ
2590 aarch64_inst inst;
2591
561a72d4 2592 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
a0ff9e1a 2593 return {};
9404b58f
KM
2594
2595 /* Look for a Load Exclusive instruction which begins the sequence. */
f77ee802 2596 if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
a0ff9e1a 2597 return {};
9404b58f
KM
2598
2599 for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
2600 {
9404b58f
KM
2601 loc += insn_size;
2602 insn = read_memory_unsigned_integer (loc, insn_size,
2603 byte_order_for_code);
2604
561a72d4 2605 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
a0ff9e1a 2606 return {};
9404b58f 2607 /* Check if the instruction is a conditional branch. */
f77ee802 2608 if (inst.opcode->iclass == condbranch)
9404b58f 2609 {
f77ee802
YQ
2610 gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);
2611
9404b58f 2612 if (bc_insn_count >= 1)
a0ff9e1a 2613 return {};
9404b58f
KM
2614
2615 /* It is, so we'll try to set a breakpoint at the destination. */
f77ee802 2616 breaks[1] = loc + inst.operands[0].imm.value;
9404b58f
KM
2617
2618 bc_insn_count++;
2619 last_breakpoint++;
2620 }
2621
2622 /* Look for the Store Exclusive which closes the atomic sequence. */
f77ee802 2623 if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
9404b58f
KM
2624 {
2625 closing_insn = loc;
2626 break;
2627 }
2628 }
2629
2630 /* We didn't find a closing Store Exclusive instruction, fall back. */
2631 if (!closing_insn)
a0ff9e1a 2632 return {};
9404b58f
KM
2633
2634 /* Insert breakpoint after the end of the atomic sequence. */
2635 breaks[0] = loc + insn_size;
2636
2637 /* Check for duplicated breakpoints, and also check that the second
2638 breakpoint is not within the atomic sequence. */
2639 if (last_breakpoint
2640 && (breaks[1] == breaks[0]
2641 || (breaks[1] >= pc && breaks[1] <= closing_insn)))
2642 last_breakpoint = 0;
2643
a0ff9e1a
SM
2644 std::vector<CORE_ADDR> next_pcs;
2645
9404b58f
KM
2646 /* Insert the breakpoint at the end of the sequence, and one at the
2647 destination of the conditional branch, if it exists. */
2648 for (index = 0; index <= last_breakpoint; index++)
a0ff9e1a 2649 next_pcs.push_back (breaks[index]);
9404b58f 2650
93f9a11f 2651 return next_pcs;
9404b58f
KM
2652}
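/* An illustrative sequence this handles:

     1: ldaxr w1, [x0]       load exclusive opens the sequence
        add   w1, w1, #1
        stlxr w2, w1, [x0]   store exclusive closes it
        cbnz  w2, 1b         breaks[0] is placed here, just past the close

   Stepping instruction-by-instruction inside the sequence could clear
   the exclusive monitor and livelock the inferior, so the breakpoint is
   placed after the closing store (and at the target of any B.cond seen
   inside the sequence) instead. */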
2653
cfba9872 2654struct aarch64_displaced_step_closure : public displaced_step_closure
b6542f81
YQ
2655{
 2656 /* True when a conditional instruction, such as B.COND or TBZ, is
 2657 being displaced stepped. */
cfba9872 2658 int cond = 0;
b6542f81
YQ
2659
2660 /* PC adjustment offset after displaced stepping. */
cfba9872 2661 int32_t pc_adjust = 0;
b6542f81
YQ
2662};
2663
2664/* Data when visiting instructions for displaced stepping. */
2665
2666struct aarch64_displaced_step_data
2667{
2668 struct aarch64_insn_data base;
2669
 2670 /* The address at which the instruction will be executed. */
2671 CORE_ADDR new_addr;
2672 /* Buffer of instructions to be copied to NEW_ADDR to execute. */
2673 uint32_t insn_buf[DISPLACED_MODIFIED_INSNS];
2674 /* Number of instructions in INSN_BUF. */
2675 unsigned insn_count;
2676 /* Registers when doing displaced stepping. */
2677 struct regcache *regs;
2678
cfba9872 2679 aarch64_displaced_step_closure *dsc;
b6542f81
YQ
2680};
2681
2682/* Implementation of aarch64_insn_visitor method "b". */
2683
2684static void
2685aarch64_displaced_step_b (const int is_bl, const int32_t offset,
2686 struct aarch64_insn_data *data)
2687{
2688 struct aarch64_displaced_step_data *dsd
2689 = (struct aarch64_displaced_step_data *) data;
2ac09a5b 2690 int64_t new_offset = data->insn_addr - dsd->new_addr + offset;
b6542f81
YQ
2691
2692 if (can_encode_int32 (new_offset, 28))
2693 {
2694 /* Emit B rather than BL, because executing BL on a new address
2695 will get the wrong address into LR. In order to avoid this,
2696 we emit B, and update LR if the instruction is BL. */
2697 emit_b (dsd->insn_buf, 0, new_offset);
2698 dsd->insn_count++;
2699 }
2700 else
2701 {
2702 /* Write NOP. */
2703 emit_nop (dsd->insn_buf);
2704 dsd->insn_count++;
2705 dsd->dsc->pc_adjust = offset;
2706 }
2707
2708 if (is_bl)
2709 {
2710 /* Update LR. */
2711 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
2712 data->insn_addr + 4);
2713 }
2714}
2715
2716/* Implementation of aarch64_insn_visitor method "b_cond". */
2717
2718static void
2719aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
2720 struct aarch64_insn_data *data)
2721{
2722 struct aarch64_displaced_step_data *dsd
2723 = (struct aarch64_displaced_step_data *) data;
b6542f81
YQ
2724
 2725 /* GDB has to fix up the PC after displaced stepping this instruction
 2726 differently, according to whether the condition is true or false.
 2727 Instead of checking COND against the condition flags, we can emit
 2728 the following instructions, and GDB can then tell how to fix up the
 2729 PC from the value the PC ends up with.
2730
2731 B.COND TAKEN ; If cond is true, then jump to TAKEN.
2732 INSN1 ;
2733 TAKEN:
2734 INSN2
2735 */
2736
2737 emit_bcond (dsd->insn_buf, cond, 8);
2738 dsd->dsc->cond = 1;
2739 dsd->dsc->pc_adjust = offset;
2740 dsd->insn_count = 1;
2741}
2742
 2743 /* Construct an aarch64_register operand at run time. If we know the
 2744 register statically, we should make it a global as above instead of
 2745 using this helper function. */
2746
2747static struct aarch64_register
2748aarch64_register (unsigned num, int is64)
2749{
2750 return (struct aarch64_register) { num, is64 };
2751}
2752
2753/* Implementation of aarch64_insn_visitor method "cb". */
2754
2755static void
2756aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
2757 const unsigned rn, int is64,
2758 struct aarch64_insn_data *data)
2759{
2760 struct aarch64_displaced_step_data *dsd
2761 = (struct aarch64_displaced_step_data *) data;
b6542f81
YQ
2762
2763 /* The offset is out of range for a compare and branch
2764 instruction. We can use the following instructions instead:
2765
2766 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
2767 INSN1 ;
2768 TAKEN:
2769 INSN2
2770 */
2771 emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
2772 dsd->insn_count = 1;
2773 dsd->dsc->cond = 1;
2774 dsd->dsc->pc_adjust = offset;
2775}
2776
2777/* Implementation of aarch64_insn_visitor method "tb". */
2778
2779static void
2780aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
2781 const unsigned rt, unsigned bit,
2782 struct aarch64_insn_data *data)
2783{
2784 struct aarch64_displaced_step_data *dsd
2785 = (struct aarch64_displaced_step_data *) data;
b6542f81
YQ
2786
2787 /* The offset is out of range for a test bit and branch
 2788 instruction. We can use the following instructions instead:
2789
2790 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
2791 INSN1 ;
2792 TAKEN:
2793 INSN2
2794
2795 */
2796 emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
2797 dsd->insn_count = 1;
2798 dsd->dsc->cond = 1;
2799 dsd->dsc->pc_adjust = offset;
2800}
2801
2802/* Implementation of aarch64_insn_visitor method "adr". */
2803
2804static void
2805aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
2806 const int is_adrp, struct aarch64_insn_data *data)
2807{
2808 struct aarch64_displaced_step_data *dsd
2809 = (struct aarch64_displaced_step_data *) data;
2810 /* We know exactly the address the ADR{P,} instruction will compute.
2811 We can just write it to the destination register. */
2812 CORE_ADDR address = data->insn_addr + offset;
2813
2814 if (is_adrp)
2815 {
2816 /* Clear the lower 12 bits of the offset to get the 4K page. */
2817 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2818 address & ~0xfff);
2819 }
2820 else
2821 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2822 address);
2823
2824 dsd->dsc->pc_adjust = 4;
2825 emit_nop (dsd->insn_buf);
2826 dsd->insn_count = 1;
2827}
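/* Worked example (illustrative numbers): for "adrp x3, #0x2000" at
   insn_addr 0x400123, address is 0x402123 and the value written to x3
   is (0x402123 & ~0xfff) == 0x402000, exactly what the original ADRP
   would have produced, while a NOP runs in the scratch pad. */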
2828
2829/* Implementation of aarch64_insn_visitor method "ldr_literal". */
2830
2831static void
2832aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
2833 const unsigned rt, const int is64,
2834 struct aarch64_insn_data *data)
2835{
2836 struct aarch64_displaced_step_data *dsd
2837 = (struct aarch64_displaced_step_data *) data;
2838 CORE_ADDR address = data->insn_addr + offset;
2839 struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };
2840
2841 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
2842 address);
2843
2844 if (is_sw)
2845 dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
2846 aarch64_register (rt, 1), zero);
2847 else
2848 dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
2849 aarch64_register (rt, 1), zero);
2850
2851 dsd->dsc->pc_adjust = 4;
2852}
2853
2854/* Implementation of aarch64_insn_visitor method "others". */
2855
2856static void
2857aarch64_displaced_step_others (const uint32_t insn,
2858 struct aarch64_insn_data *data)
2859{
2860 struct aarch64_displaced_step_data *dsd
2861 = (struct aarch64_displaced_step_data *) data;
2862
e1c587c3 2863 aarch64_emit_insn (dsd->insn_buf, insn);
b6542f81
YQ
2864 dsd->insn_count = 1;
2865
2866 if ((insn & 0xfffffc1f) == 0xd65f0000)
2867 {
2868 /* RET */
2869 dsd->dsc->pc_adjust = 0;
2870 }
2871 else
2872 dsd->dsc->pc_adjust = 4;
2873}
2874
2875static const struct aarch64_insn_visitor visitor =
2876{
2877 aarch64_displaced_step_b,
2878 aarch64_displaced_step_b_cond,
2879 aarch64_displaced_step_cb,
2880 aarch64_displaced_step_tb,
2881 aarch64_displaced_step_adr,
2882 aarch64_displaced_step_ldr_literal,
2883 aarch64_displaced_step_others,
2884};
2885
2886/* Implement the "displaced_step_copy_insn" gdbarch method. */
2887
2888struct displaced_step_closure *
2889aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
2890 CORE_ADDR from, CORE_ADDR to,
2891 struct regcache *regs)
2892{
b6542f81
YQ
2893 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2894 uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
2895 struct aarch64_displaced_step_data dsd;
c86a40c6
YQ
2896 aarch64_inst inst;
2897
561a72d4 2898 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
c86a40c6 2899 return NULL;
b6542f81
YQ
2900
2901 /* Look for a Load Exclusive instruction which begins the sequence. */
c86a40c6 2902 if (inst.opcode->iclass == ldstexcl && bit (insn, 22))
b6542f81
YQ
2903 {
2904 /* We can't displaced step atomic sequences. */
2905 return NULL;
2906 }
2907
cfba9872
SM
2908 std::unique_ptr<aarch64_displaced_step_closure> dsc
2909 (new aarch64_displaced_step_closure);
b6542f81
YQ
2910 dsd.base.insn_addr = from;
2911 dsd.new_addr = to;
2912 dsd.regs = regs;
cfba9872 2913 dsd.dsc = dsc.get ();
034f1a81 2914 dsd.insn_count = 0;
b6542f81
YQ
2915 aarch64_relocate_instruction (insn, &visitor,
2916 (struct aarch64_insn_data *) &dsd);
2917 gdb_assert (dsd.insn_count <= DISPLACED_MODIFIED_INSNS);
2918
2919 if (dsd.insn_count != 0)
2920 {
2921 int i;
2922
2923 /* Instruction can be relocated to scratch pad. Copy
2924 relocated instruction(s) there. */
2925 for (i = 0; i < dsd.insn_count; i++)
2926 {
2927 if (debug_displaced)
2928 {
2929 debug_printf ("displaced: writing insn ");
2930 debug_printf ("%.8x", dsd.insn_buf[i]);
2931 debug_printf (" at %s\n", paddress (gdbarch, to + i * 4));
2932 }
2933 write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
2934 (ULONGEST) dsd.insn_buf[i]);
2935 }
2936 }
2937 else
2938 {
b6542f81
YQ
2939 dsc = NULL;
2940 }
2941
cfba9872 2942 return dsc.release ();
b6542f81
YQ
2943}
2944
2945/* Implement the "displaced_step_fixup" gdbarch method. */
2946
2947void
2948aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
cfba9872 2949 struct displaced_step_closure *dsc_,
b6542f81
YQ
2950 CORE_ADDR from, CORE_ADDR to,
2951 struct regcache *regs)
2952{
cfba9872
SM
2953 aarch64_displaced_step_closure *dsc = (aarch64_displaced_step_closure *) dsc_;
2954
b6542f81
YQ
2955 if (dsc->cond)
2956 {
2957 ULONGEST pc;
2958
2959 regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc);
2960 if (pc - to == 8)
2961 {
2962 /* Condition is true. */
2963 }
2964 else if (pc - to == 4)
2965 {
2966 /* Condition is false. */
2967 dsc->pc_adjust = 4;
2968 }
2969 else
2970 gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
2971 }
2972
2973 if (dsc->pc_adjust != 0)
2974 {
2975 if (debug_displaced)
2976 {
2977 debug_printf ("displaced: fixup: set PC to %s:%d\n",
2978 paddress (gdbarch, from), dsc->pc_adjust);
2979 }
2980 regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
2981 from + dsc->pc_adjust);
2982 }
2983}
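/* Worked example: a displaced "b.eq" is rewritten as "b.eq +8" at the
   scratch address TO. If the condition holds, the copied branch is
   taken and the PC stops at TO + 8, so pc_adjust keeps the original
   branch offset; otherwise the PC stops at TO + 4 and pc_adjust is set
   to 4, resuming at the instruction after FROM. */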
2984
2985/* Implement the "displaced_step_hw_singlestep" gdbarch method. */
2986
2987int
2988aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch,
2989 struct displaced_step_closure *closure)
2990{
2991 return 1;
2992}
2993
95228a0d
AH
2994/* Get the correct target description for the given VQ value.
2995 If VQ is zero then it is assumed SVE is not supported.
2996 (It is not possible to set VQ to zero on an SVE system). */
da434ccb
AH
2997
2998const target_desc *
6dc0ebde 2999aarch64_read_description (uint64_t vq, bool pauth_p)
da434ccb 3000{
95228a0d 3001 if (vq > AARCH64_MAX_SVE_VQ)
39bfb937 3002 error (_("VQ is %" PRIu64 ", maximum supported value is %d"), vq,
95228a0d
AH
3003 AARCH64_MAX_SVE_VQ);
3004
6dc0ebde 3005 struct target_desc *tdesc = tdesc_aarch64_list[vq][pauth_p];
da434ccb 3006
95228a0d
AH
3007 if (tdesc == NULL)
3008 {
6dc0ebde
AH
3009 tdesc = aarch64_create_target_description (vq, pauth_p);
3010 tdesc_aarch64_list[vq][pauth_p] = tdesc;
95228a0d 3011 }
da434ccb 3012
95228a0d 3013 return tdesc;
da434ccb
AH
3014}
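/* Typical uses (illustrative): aarch64_read_description (0, false)
   returns the base description, with neither SVE nor pauth;
   aarch64_read_description (2, true) returns one describing 256-bit
   SVE vectors (VQ of 2) plus the pauth feature. Descriptions are
   cached in tdesc_aarch64_list, so repeated calls are cheap. */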
3015
ba2d2bb2
AH
3016/* Return the VQ used when creating the target description TDESC. */
3017
1332a140 3018static uint64_t
ba2d2bb2
AH
3019aarch64_get_tdesc_vq (const struct target_desc *tdesc)
3020{
3021 const struct tdesc_feature *feature_sve;
3022
3023 if (!tdesc_has_registers (tdesc))
3024 return 0;
3025
3026 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
3027
3028 if (feature_sve == nullptr)
3029 return 0;
3030
12863263
AH
3031 uint64_t vl = tdesc_register_bitsize (feature_sve,
3032 aarch64_sve_register_names[0]) / 8;
ba2d2bb2
AH
3033 return sve_vq_from_vl (vl);
3034}
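/* E.g. a description whose z0 is 256 bits wide gives vl = 256 / 8 = 32
   bytes, which sve_vq_from_vl maps to VQ == 2 (the number of 128-bit
   quadwords per vector). */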
3035
0ef8a082
AH
3036/* Add all the expected register sets into GDBARCH. */
3037
3038static void
3039aarch64_add_reggroups (struct gdbarch *gdbarch)
3040{
3041 reggroup_add (gdbarch, general_reggroup);
3042 reggroup_add (gdbarch, float_reggroup);
3043 reggroup_add (gdbarch, system_reggroup);
3044 reggroup_add (gdbarch, vector_reggroup);
3045 reggroup_add (gdbarch, all_reggroup);
3046 reggroup_add (gdbarch, save_reggroup);
3047 reggroup_add (gdbarch, restore_reggroup);
3048}
ba2d2bb2 3049
76bed0fd
AH
3050/* Implement the "cannot_store_register" gdbarch method. */
3051
3052static int
3053aarch64_cannot_store_register (struct gdbarch *gdbarch, int regnum)
3054{
3055 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3056
3057 if (!tdep->has_pauth ())
3058 return 0;
3059
3060 /* Pointer authentication registers are read-only. */
3061 return (regnum == AARCH64_PAUTH_DMASK_REGNUM (tdep->pauth_reg_base)
3062 || regnum == AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base));
3063}
3064
07b287a0
MS
3065/* Initialize the current architecture based on INFO. If possible,
3066 re-use an architecture from ARCHES, which is a list of
3067 architectures already created during this debugging session.
3068
3069 Called e.g. at program startup, when reading a core file, and when
3070 reading a binary file. */
3071
3072static struct gdbarch *
3073aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
3074{
3075 struct gdbarch_tdep *tdep;
3076 struct gdbarch *gdbarch;
3077 struct gdbarch_list *best_arch;
3078 struct tdesc_arch_data *tdesc_data = NULL;
3079 const struct target_desc *tdesc = info.target_desc;
3080 int i;
07b287a0 3081 int valid_p = 1;
ba2d2bb2
AH
3082 const struct tdesc_feature *feature_core;
3083 const struct tdesc_feature *feature_fpu;
3084 const struct tdesc_feature *feature_sve;
76bed0fd 3085 const struct tdesc_feature *feature_pauth;
07b287a0
MS
3086 int num_regs = 0;
3087 int num_pseudo_regs = 0;
76bed0fd 3088 int first_pauth_regnum = -1;
34dcc7cf 3089 int pauth_ra_state_offset = -1;
07b287a0 3090
ba2d2bb2 3091 /* Ensure we always have a target description. */
07b287a0 3092 if (!tdesc_has_registers (tdesc))
6dc0ebde 3093 tdesc = aarch64_read_description (0, false);
07b287a0
MS
3094 gdb_assert (tdesc);
3095
ba2d2bb2
AH
3096 feature_core = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");
3097 feature_fpu = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
3098 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
76bed0fd 3099 feature_pauth = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.pauth");
07b287a0 3100
ba2d2bb2 3101 if (feature_core == NULL)
07b287a0
MS
3102 return NULL;
3103
3104 tdesc_data = tdesc_data_alloc ();
3105
ba2d2bb2 3106 /* Validate the description provides the mandatory core R registers
07b287a0
MS
3107 and allocate their numbers. */
3108 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
ba2d2bb2
AH
3109 valid_p &= tdesc_numbered_register (feature_core, tdesc_data,
3110 AARCH64_X0_REGNUM + i,
3111 aarch64_r_register_names[i]);
07b287a0
MS
3112
3113 num_regs = AARCH64_X0_REGNUM + i;
3114
ba2d2bb2
AH
3115 /* Add the V registers. */
3116 if (feature_fpu != NULL)
07b287a0 3117 {
ba2d2bb2
AH
3118 if (feature_sve != NULL)
3119 error (_("Program contains both fpu and SVE features."));
3120
3121 /* Validate the description provides the mandatory V registers
3122 and allocate their numbers. */
07b287a0 3123 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
ba2d2bb2
AH
3124 valid_p &= tdesc_numbered_register (feature_fpu, tdesc_data,
3125 AARCH64_V0_REGNUM + i,
3126 aarch64_v_register_names[i]);
07b287a0
MS
3127
3128 num_regs = AARCH64_V0_REGNUM + i;
ba2d2bb2 3129 }
07b287a0 3130
ba2d2bb2
AH
3131 /* Add the SVE registers. */
3132 if (feature_sve != NULL)
3133 {
3134 /* Validate the description provides the mandatory SVE registers
3135 and allocate their numbers. */
3136 for (i = 0; i < ARRAY_SIZE (aarch64_sve_register_names); i++)
3137 valid_p &= tdesc_numbered_register (feature_sve, tdesc_data,
3138 AARCH64_SVE_Z0_REGNUM + i,
3139 aarch64_sve_register_names[i]);
3140
3141 num_regs = AARCH64_SVE_Z0_REGNUM + i;
3142 num_pseudo_regs += 32; /* add the Vn register pseudos. */
3143 }
3144
3145 if (feature_fpu != NULL || feature_sve != NULL)
3146 {
07b287a0
MS
3147 num_pseudo_regs += 32; /* add the Qn scalar register pseudos */
3148 num_pseudo_regs += 32; /* add the Dn scalar register pseudos */
3149 num_pseudo_regs += 32; /* add the Sn scalar register pseudos */
3150 num_pseudo_regs += 32; /* add the Hn scalar register pseudos */
3151 num_pseudo_regs += 32; /* add the Bn scalar register pseudos */
3152 }
3153
76bed0fd
AH
3154 /* Add the pauth registers. */
3155 if (feature_pauth != NULL)
3156 {
3157 first_pauth_regnum = num_regs;
34dcc7cf 3158 pauth_ra_state_offset = num_pseudo_regs;
76bed0fd
AH
3159 /* Validate the descriptor provides the mandatory PAUTH registers and
3160 allocate their numbers. */
3161 for (i = 0; i < ARRAY_SIZE (aarch64_pauth_register_names); i++)
3162 valid_p &= tdesc_numbered_register (feature_pauth, tdesc_data,
3163 first_pauth_regnum + i,
3164 aarch64_pauth_register_names[i]);
3165
3166 num_regs += i;
34dcc7cf 3167 num_pseudo_regs += 1; /* Count RA_STATE pseudo register. */
76bed0fd
AH
3168 }
3169
07b287a0
MS
3170 if (!valid_p)
3171 {
3172 tdesc_data_cleanup (tdesc_data);
3173 return NULL;
3174 }
3175
3176 /* AArch64 code is always little-endian. */
3177 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
3178
3179 /* If there is already a candidate, use it. */
3180 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
3181 best_arch != NULL;
3182 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
3183 {
3184 /* Found a match. */
3185 break;
3186 }
3187
3188 if (best_arch != NULL)
3189 {
3190 if (tdesc_data != NULL)
3191 tdesc_data_cleanup (tdesc_data);
3192 return best_arch->gdbarch;
3193 }
3194
8d749320 3195 tdep = XCNEW (struct gdbarch_tdep);
07b287a0
MS
3196 gdbarch = gdbarch_alloc (&info, tdep);
3197
3198 /* This should be low enough for everything. */
3199 tdep->lowest_pc = 0x20;
3200 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
3201 tdep->jb_elt_size = 8;
ba2d2bb2 3202 tdep->vq = aarch64_get_tdesc_vq (tdesc);
76bed0fd 3203 tdep->pauth_reg_base = first_pauth_regnum;
34dcc7cf
AH
3204 tdep->pauth_ra_state_regnum = (feature_pauth == NULL) ? -1
3205 : pauth_ra_state_offset + num_regs;
3206
07b287a0
MS
3207
3208 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
3209 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
3210
07b287a0
MS
3211 /* Advance PC across function entry code. */
3212 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
3213
3214 /* The stack grows downward. */
3215 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
3216
3217 /* Breakpoint manipulation. */
04180708
YQ
3218 set_gdbarch_breakpoint_kind_from_pc (gdbarch,
3219 aarch64_breakpoint::kind_from_pc);
3220 set_gdbarch_sw_breakpoint_from_kind (gdbarch,
3221 aarch64_breakpoint::bp_from_kind);
07b287a0 3222 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
9404b58f 3223 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
07b287a0
MS
3224
3225 /* Information about registers, etc. */
3226 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
3227 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
3228 set_gdbarch_num_regs (gdbarch, num_regs);
3229
3230 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
3231 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
3232 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
3233 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
3234 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
3235 set_tdesc_pseudo_register_reggroup_p (gdbarch,
3236 aarch64_pseudo_register_reggroup_p);
76bed0fd 3237 set_gdbarch_cannot_store_register (gdbarch, aarch64_cannot_store_register);
07b287a0
MS
3238
3239 /* ABI */
3240 set_gdbarch_short_bit (gdbarch, 16);
3241 set_gdbarch_int_bit (gdbarch, 32);
3242 set_gdbarch_float_bit (gdbarch, 32);
3243 set_gdbarch_double_bit (gdbarch, 64);
3244 set_gdbarch_long_double_bit (gdbarch, 128);
3245 set_gdbarch_long_bit (gdbarch, 64);
3246 set_gdbarch_long_long_bit (gdbarch, 64);
3247 set_gdbarch_ptr_bit (gdbarch, 64);
3248 set_gdbarch_char_signed (gdbarch, 0);
53375380 3249 set_gdbarch_wchar_signed (gdbarch, 0);
07b287a0
MS
3250 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
3251 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
3252 set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);
3253
3254 /* Internal <-> external register number maps. */
3255 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
3256
3257 /* Returning results. */
3258 set_gdbarch_return_value (gdbarch, aarch64_return_value);
3259
3260 /* Disassembly. */
3261 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
3262
3263 /* Virtual tables. */
3264 set_gdbarch_vbit_in_delta (gdbarch, 1);
3265
0ef8a082
AH
3266 /* Register architecture. */
3267 aarch64_add_reggroups (gdbarch);
3268
07b287a0
MS
3269 /* Hook in the ABI-specific overrides, if they have been registered. */
3270 info.target_desc = tdesc;
0dba2a6c 3271 info.tdesc_data = tdesc_data;
07b287a0
MS
3272 gdbarch_init_osabi (info, gdbarch);
3273
3274 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
11e1b75f
AH
3275 /* Register DWARF CFA vendor handler. */
3276 set_gdbarch_execute_dwarf_cfa_vendor_op (gdbarch,
3277 aarch64_execute_dwarf_cfa_vendor_op);
07b287a0
MS
3278
3279 /* Add some default predicates. */
3280 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
3281 dwarf2_append_unwinders (gdbarch);
3282 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
3283
3284 frame_base_set_default (gdbarch, &aarch64_normal_base);
3285
3286 /* Now we have tuned the configuration, set a few final things,
3287 based on what the OS ABI has told us. */
3288
3289 if (tdep->jb_pc >= 0)
3290 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
3291
ea873d8e
PL
3292 set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);
3293
07b287a0
MS
3294 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
3295
3296 /* Add standard register aliases. */
3297 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
3298 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
3299 value_of_aarch64_user_reg,
3300 &aarch64_register_aliases[i].regnum);
3301
e8bf1ce4
JB
3302 register_aarch64_ravenscar_ops (gdbarch);
3303
07b287a0
MS
3304 return gdbarch;
3305}
3306
3307static void
3308aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
3309{
3310 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3311
3312 if (tdep == NULL)
3313 return;
3314
3315 fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
3316 paddress (gdbarch, tdep->lowest_pc));
3317}
3318
0d4c07af 3319#if GDB_SELF_TEST
1e2b521d
YQ
3320namespace selftests
3321{
3322static void aarch64_process_record_test (void);
3323}
0d4c07af 3324#endif
1e2b521d 3325
07b287a0
MS
3326void
3327_initialize_aarch64_tdep (void)
3328{
3329 gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
3330 aarch64_dump_tdep);
3331
07b287a0
MS
3332 /* Debug this file's internals. */
3333 add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
3334Set AArch64 debugging."), _("\
3335Show AArch64 debugging."), _("\
3336When on, AArch64 specific debugging is enabled."),
3337 NULL,
3338 show_aarch64_debug,
3339 &setdebuglist, &showdebuglist);
4d9a9006
YQ
3340
3341#if GDB_SELF_TEST
1526853e
SM
3342 selftests::register_test ("aarch64-analyze-prologue",
3343 selftests::aarch64_analyze_prologue_test);
3344 selftests::register_test ("aarch64-process-record",
3345 selftests::aarch64_process_record_test);
6654d750 3346 selftests::record_xml_tdesc ("aarch64.xml",
6dc0ebde 3347 aarch64_create_target_description (0, false));
4d9a9006 3348#endif
07b287a0 3349}
99afc88b
OJ
3350
3351/* AArch64 process record-replay related structures, defines etc. */
3352
99afc88b
OJ
3353#define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
3354 do \
3355 { \
3356 unsigned int reg_len = LENGTH; \
3357 if (reg_len) \
3358 { \
3359 REGS = XNEWVEC (uint32_t, reg_len); \
3360 memcpy(&REGS[0], &RECORD_BUF[0], sizeof(uint32_t)*LENGTH); \
3361 } \
3362 } \
3363 while (0)
3364
3365#define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
3366 do \
3367 { \
3368 unsigned int mem_len = LENGTH; \
3369 if (mem_len) \
3370 { \
3371 MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
3372 memcpy(&MEMS->len, &RECORD_BUF[0], \
3373 sizeof(struct aarch64_mem_r) * LENGTH); \
3374 } \
3375 } \
3376 while (0)
3377
3378/* AArch64 record/replay structures and enumerations. */
3379
3380struct aarch64_mem_r
3381{
3382 uint64_t len; /* Record length. */
3383 uint64_t addr; /* Memory address. */
3384};
3385
3386enum aarch64_record_result
3387{
3388 AARCH64_RECORD_SUCCESS,
99afc88b
OJ
3389 AARCH64_RECORD_UNSUPPORTED,
3390 AARCH64_RECORD_UNKNOWN
3391};
3392
3393typedef struct insn_decode_record_t
3394{
3395 struct gdbarch *gdbarch;
3396 struct regcache *regcache;
3397 CORE_ADDR this_addr; /* Address of insn to be recorded. */
3398 uint32_t aarch64_insn; /* Insn to be recorded. */
3399 uint32_t mem_rec_count; /* Count of memory records. */
3400 uint32_t reg_rec_count; /* Count of register records. */
3401 uint32_t *aarch64_regs; /* Registers to be recorded. */
3402 struct aarch64_mem_r *aarch64_mems; /* Memory locations to be recorded. */
3403} insn_decode_record;
3404
3405/* Record handler for data processing - register instructions. */
3406
3407static unsigned int
3408aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
3409{
3410 uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
3411 uint32_t record_buf[4];
3412
3413 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3414 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3415 insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);
3416
3417 if (!bit (aarch64_insn_r->aarch64_insn, 28))
3418 {
3419 uint8_t setflags;
3420
3421 /* Logical (shifted register). */
3422 if (insn_bits24_27 == 0x0a)
3423 setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
3424 /* Add/subtract. */
3425 else if (insn_bits24_27 == 0x0b)
3426 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3427 else
3428 return AARCH64_RECORD_UNKNOWN;
3429
3430 record_buf[0] = reg_rd;
3431 aarch64_insn_r->reg_rec_count = 1;
3432 if (setflags)
3433 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3434 }
3435 else
3436 {
3437 if (insn_bits24_27 == 0x0b)
3438 {
3439 /* Data-processing (3 source). */
3440 record_buf[0] = reg_rd;
3441 aarch64_insn_r->reg_rec_count = 1;
3442 }
3443 else if (insn_bits24_27 == 0x0a)
3444 {
3445 if (insn_bits21_23 == 0x00)
3446 {
3447 /* Add/subtract (with carry). */
3448 record_buf[0] = reg_rd;
3449 aarch64_insn_r->reg_rec_count = 1;
3450 if (bit (aarch64_insn_r->aarch64_insn, 29))
3451 {
3452 record_buf[1] = AARCH64_CPSR_REGNUM;
3453 aarch64_insn_r->reg_rec_count = 2;
3454 }
3455 }
3456 else if (insn_bits21_23 == 0x02)
3457 {
3458 /* Conditional compare (register) and conditional compare
3459 (immediate) instructions. */
3460 record_buf[0] = AARCH64_CPSR_REGNUM;
3461 aarch64_insn_r->reg_rec_count = 1;
3462 }
3463 else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
3464 {
 3465 /* Conditional select. */
3466 /* Data-processing (2 source). */
3467 /* Data-processing (1 source). */
3468 record_buf[0] = reg_rd;
3469 aarch64_insn_r->reg_rec_count = 1;
3470 }
3471 else
3472 return AARCH64_RECORD_UNKNOWN;
3473 }
3474 }
3475
3476 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3477 record_buf);
3478 return AARCH64_RECORD_SUCCESS;
3479}
3480
3481/* Record handler for data processing - immediate instructions. */
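/* For example, "subs x0, x1, #4" (add/subtract immediate with setflags)
   records X0 and the CPSR, while "movz x0, #1" (move wide immediate)
   records only X0.  */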
3482
3483static unsigned int
3484aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
3485{
3486  uint8_t reg_rd, insn_bit23, insn_bits24_27, setflags;
3487 uint32_t record_buf[4];
3488
3489 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3490 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3491 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3492
3493 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
3494 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
3495 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
3496 {
3497 record_buf[0] = reg_rd;
3498 aarch64_insn_r->reg_rec_count = 1;
3499 }
3500 else if (insn_bits24_27 == 0x01)
3501 {
3502 /* Add/Subtract (immediate). */
3503 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3504 record_buf[0] = reg_rd;
3505 aarch64_insn_r->reg_rec_count = 1;
3506 if (setflags)
3507 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3508 }
3509 else if (insn_bits24_27 == 0x02 && !insn_bit23)
3510 {
3511 /* Logical (immediate). */
3512 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
3513 record_buf[0] = reg_rd;
3514 aarch64_insn_r->reg_rec_count = 1;
3515 if (setflags)
3516 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3517 }
3518 else
3519 return AARCH64_RECORD_UNKNOWN;
3520
3521 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3522 record_buf);
3523 return AARCH64_RECORD_SUCCESS;
3524}
3525
3526/* Record handler for branch, exception generation and system instructions. */
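/* For example, "bl" records both the PC and the LR, while "b" and the
   conditional branches record only the PC; "svc" is handed off to the
   target's syscall record hook through the tdep.  */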
3527
3528static unsigned int
3529aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
3530{
3531 struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
3532 uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
3533 uint32_t record_buf[4];
3534
3535 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3536 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3537 insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3538
3539 if (insn_bits28_31 == 0x0d)
3540 {
3541 /* Exception generation instructions. */
3542 if (insn_bits24_27 == 0x04)
3543 {
3544 if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
3545 && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
3546 && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
3547 {
3548 ULONGEST svc_number;
3549
3550 regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
3551 &svc_number);
3552 return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
3553 svc_number);
3554 }
3555 else
3556 return AARCH64_RECORD_UNSUPPORTED;
3557 }
3558 /* System instructions. */
3559 else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
3560 {
3561 uint32_t reg_rt, reg_crn;
3562
3563 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3564 reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3565
3566	  /* Record Rt for SYSL and MRS instructions.  */
3567 if (bit (aarch64_insn_r->aarch64_insn, 21))
3568 {
3569 record_buf[0] = reg_rt;
3570 aarch64_insn_r->reg_rec_count = 1;
3571 }
3572	  /* Record the CPSR for HINT and MSR (immediate) instructions.  */
3573 else if (reg_crn == 0x02 || reg_crn == 0x04)
3574 {
3575 record_buf[0] = AARCH64_CPSR_REGNUM;
3576 aarch64_insn_r->reg_rec_count = 1;
3577 }
3578 }
3579 /* Unconditional branch (register). */
3580      else if ((insn_bits24_27 & 0x0e) == 0x06)
3581 {
3582 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3583 if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
3584 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3585 }
3586 else
3587 return AARCH64_RECORD_UNKNOWN;
3588 }
3589 /* Unconditional branch (immediate). */
3590 else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
3591 {
3592 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3593 if (bit (aarch64_insn_r->aarch64_insn, 31))
3594 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3595 }
3596 else
3597 /* Compare & branch (immediate), Test & branch (immediate) and
3598 Conditional branch (immediate). */
3599 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3600
3601 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3602 record_buf);
3603 return AARCH64_RECORD_SUCCESS;
3604}
3605
3606/* Record handler for advanced SIMD load and store instructions. */
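/* For example, "ld1 {v0.16b}, [x0]" records V0, an "st1" logs the
   memory range about to be written instead, and the post-index forms
   (bit 23 set) additionally record the base register.  */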
3607
3608static unsigned int
3609aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
3610{
3611 CORE_ADDR address;
3612 uint64_t addr_offset = 0;
3613 uint32_t record_buf[24];
3614 uint64_t record_buf_mem[24];
3615 uint32_t reg_rn, reg_rt;
3616 uint32_t reg_index = 0, mem_index = 0;
3617 uint8_t opcode_bits, size_bits;
3618
3619 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3620 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3621 size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3622 opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3623 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);
3624
3625 if (record_debug)
3626    debug_printf ("Process record: Advanced SIMD load/store\n");
3627
3628 /* Load/store single structure. */
3629 if (bit (aarch64_insn_r->aarch64_insn, 24))
3630 {
3631 uint8_t sindex, scale, selem, esize, replicate = 0;
3632 scale = opcode_bits >> 2;
3633      selem = ((opcode_bits & 0x02)
3634	       | bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
3635 switch (scale)
3636 {
3637 case 1:
3638 if (size_bits & 0x01)
3639 return AARCH64_RECORD_UNKNOWN;
3640 break;
3641 case 2:
3642 if ((size_bits >> 1) & 0x01)
3643 return AARCH64_RECORD_UNKNOWN;
3644 if (size_bits & 0x01)
3645 {
3646 if (!((opcode_bits >> 1) & 0x01))
3647 scale = 3;
3648 else
3649 return AARCH64_RECORD_UNKNOWN;
3650 }
3651 break;
3652 case 3:
3653 if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
3654 {
3655 scale = size_bits;
3656 replicate = 1;
3657 break;
3658 }
3659 else
3660 return AARCH64_RECORD_UNKNOWN;
3661 default:
3662 break;
3663 }
3664 esize = 8 << scale;
3665 if (replicate)
3666 for (sindex = 0; sindex < selem; sindex++)
3667 {
3668 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3669 reg_rt = (reg_rt + 1) % 32;
3670 }
3671 else
3672 {
3673 for (sindex = 0; sindex < selem; sindex++)
3674 {
3675 if (bit (aarch64_insn_r->aarch64_insn, 22))
3676 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3677 else
3678 {
3679 record_buf_mem[mem_index++] = esize / 8;
3680 record_buf_mem[mem_index++] = address + addr_offset;
3681 }
3682 addr_offset = addr_offset + (esize / 8);
3683 reg_rt = (reg_rt + 1) % 32;
3684 }
3685 }
3686 }
3687 /* Load/store multiple structure. */
3688 else
3689 {
3690 uint8_t selem, esize, rpt, elements;
3691 uint8_t eindex, rindex;
3692
3693 esize = 8 << size_bits;
3694 if (bit (aarch64_insn_r->aarch64_insn, 30))
3695 elements = 128 / esize;
3696 else
3697 elements = 64 / esize;
3698
3699 switch (opcode_bits)
3700 {
3701	/* LD/ST4 (4 registers).  */
3702 case 0:
3703 rpt = 1;
3704 selem = 4;
3705 break;
3706	/* LD/ST1 (4 registers).  */
3707 case 2:
3708 rpt = 4;
3709 selem = 1;
3710 break;
3711	/* LD/ST3 (3 registers).  */
3712 case 4:
3713 rpt = 1;
3714 selem = 3;
3715 break;
3716	/* LD/ST1 (3 registers).  */
3717 case 6:
3718 rpt = 3;
3719 selem = 1;
3720 break;
3721	/* LD/ST1 (1 register).  */
3722 case 7:
3723 rpt = 1;
3724 selem = 1;
3725 break;
3726	/* LD/ST2 (2 registers).  */
3727 case 8:
3728 rpt = 1;
3729 selem = 2;
3730 break;
3731	/* LD/ST1 (2 registers).  */
3732 case 10:
3733 rpt = 2;
3734 selem = 1;
3735 break;
3736 default:
3737 return AARCH64_RECORD_UNSUPPORTED;
3738 break;
3739 }
3740 for (rindex = 0; rindex < rpt; rindex++)
3741 for (eindex = 0; eindex < elements; eindex++)
3742 {
3743 uint8_t reg_tt, sindex;
3744 reg_tt = (reg_rt + rindex) % 32;
3745 for (sindex = 0; sindex < selem; sindex++)
3746 {
3747 if (bit (aarch64_insn_r->aarch64_insn, 22))
3748 record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
3749 else
3750 {
3751 record_buf_mem[mem_index++] = esize / 8;
3752 record_buf_mem[mem_index++] = address + addr_offset;
3753 }
3754 addr_offset = addr_offset + (esize / 8);
3755 reg_tt = (reg_tt + 1) % 32;
3756 }
3757 }
3758 }
3759
3760 if (bit (aarch64_insn_r->aarch64_insn, 23))
3761 record_buf[reg_index++] = reg_rn;
3762
3763 aarch64_insn_r->reg_rec_count = reg_index;
3764 aarch64_insn_r->mem_rec_count = mem_index / 2;
3765 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3766 record_buf_mem);
3767 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3768 record_buf);
3769 return AARCH64_RECORD_SUCCESS;
3770}
3771
3772/* Record handler for load and store instructions. */
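/* For example, "ldr x0, [x1, #8]" records X0, while "str x0, [x1, #8]"
   records the eight bytes about to be overwritten at X1 + 8.  */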
3773
3774static unsigned int
3775aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
3776{
3777 uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
3778 uint8_t insn_bit23, insn_bit21;
3779 uint8_t opc, size_bits, ld_flag, vector_flag;
3780 uint32_t reg_rn, reg_rt, reg_rt2;
3781 uint64_t datasize, offset;
3782 uint32_t record_buf[8];
3783 uint64_t record_buf_mem[8];
3784 CORE_ADDR address;
3785
3786 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3787 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3788 insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
3789 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3790 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3791 ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
3792 vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
3793 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3794 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3795 reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
3796 size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);
3797
3798 /* Load/store exclusive. */
3799 if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
3800 {
3801 if (record_debug)
3802    debug_printf ("Process record: load/store exclusive\n");
3803
3804 if (ld_flag)
3805 {
3806 record_buf[0] = reg_rt;
3807 aarch64_insn_r->reg_rec_count = 1;
3808 if (insn_bit21)
3809 {
3810 record_buf[1] = reg_rt2;
3811 aarch64_insn_r->reg_rec_count = 2;
3812 }
3813 }
3814 else
3815 {
3816 if (insn_bit21)
3817 datasize = (8 << size_bits) * 2;
3818 else
3819 datasize = (8 << size_bits);
3820 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3821 &address);
3822 record_buf_mem[0] = datasize / 8;
3823 record_buf_mem[1] = address;
3824 aarch64_insn_r->mem_rec_count = 1;
3825 if (!insn_bit23)
3826 {
3827 /* Save register rs. */
3828 record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
3829 aarch64_insn_r->reg_rec_count = 1;
3830 }
3831 }
3832 }
3833 /* Load register (literal) instructions decoding. */
3834 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
3835 {
3836 if (record_debug)
3837    debug_printf ("Process record: load register (literal)\n");
3838 if (vector_flag)
3839 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3840 else
3841 record_buf[0] = reg_rt;
3842 aarch64_insn_r->reg_rec_count = 1;
3843 }
3844 /* All types of load/store pair instructions decoding. */
3845 else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
3846 {
3847 if (record_debug)
3848    debug_printf ("Process record: load/store pair\n");
3849
3850 if (ld_flag)
3851 {
3852 if (vector_flag)
3853 {
3854 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3855 record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
3856 }
3857 else
3858 {
3859 record_buf[0] = reg_rt;
3860 record_buf[1] = reg_rt2;
3861 }
3862 aarch64_insn_r->reg_rec_count = 2;
3863 }
3864 else
3865 {
3866 uint16_t imm7_off;
3867 imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
3868 if (!vector_flag)
3869 size_bits = size_bits >> 1;
3870 datasize = 8 << (2 + size_bits);
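	      /* imm7 is a signed, scaled offset: bit 6 is its sign bit, so
		 compute the magnitude here and subtract it from the base
		 address below when the offset is negative.  */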
3871 offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
3872 offset = offset << (2 + size_bits);
3873 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3874 &address);
3875 if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
3876 {
3877 if (imm7_off & 0x40)
3878 address = address - offset;
3879 else
3880 address = address + offset;
3881 }
3882
3883 record_buf_mem[0] = datasize / 8;
3884 record_buf_mem[1] = address;
3885 record_buf_mem[2] = datasize / 8;
3886 record_buf_mem[3] = address + (datasize / 8);
3887 aarch64_insn_r->mem_rec_count = 2;
3888 }
3889 if (bit (aarch64_insn_r->aarch64_insn, 23))
3890 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3891 }
3892 /* Load/store register (unsigned immediate) instructions. */
3893 else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
3894 {
3895 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3896 if (!(opc >> 1))
3897 {
3898 if (opc & 0x01)
3899 ld_flag = 0x01;
3900 else
3901 ld_flag = 0x0;
3902 }
3903      else
3904	{
3905 if (size_bits == 0x3 && vector_flag == 0x0 && opc == 0x2)
3906 {
3907 /* PRFM (immediate) */
3908 return AARCH64_RECORD_SUCCESS;
3909 }
3910 else if (size_bits == 0x2 && vector_flag == 0x0 && opc == 0x2)
3911 {
3912 /* LDRSW (immediate) */
3913 ld_flag = 0x1;
3914 }
3915	  else
3916 {
3917 if (opc & 0x01)
3918 ld_flag = 0x01;
3919 else
3920 ld_flag = 0x0;
3921 }
3922	}
3923
3924 if (record_debug)
3925 {
3926 debug_printf ("Process record: load/store (unsigned immediate):"
3927 " size %x V %d opc %x\n", size_bits, vector_flag,
3928 opc);
3929 }
3930
3931 if (!ld_flag)
3932 {
3933 offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
3934 datasize = 8 << size_bits;
3935 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3936 &address);
3937 offset = offset << size_bits;
3938 address = address + offset;
3939
3940 record_buf_mem[0] = datasize >> 3;
3941 record_buf_mem[1] = address;
3942 aarch64_insn_r->mem_rec_count = 1;
3943 }
3944 else
3945 {
3946 if (vector_flag)
3947 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3948 else
3949 record_buf[0] = reg_rt;
3950 aarch64_insn_r->reg_rec_count = 1;
3951 }
3952 }
3953 /* Load/store register (register offset) instructions. */
3954 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3955 && insn_bits10_11 == 0x02 && insn_bit21)
3956 {
3957 if (record_debug)
3958	debug_printf ("Process record: load/store (register offset)\n");
3959 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3960 if (!(opc >> 1))
3961 if (opc & 0x01)
3962 ld_flag = 0x01;
3963 else
3964 ld_flag = 0x0;
3965 else
3966 if (size_bits != 0x03)
3967 ld_flag = 0x01;
3968 else
3969 return AARCH64_RECORD_UNKNOWN;
3970
3971 if (!ld_flag)
3972 {
3973	  ULONGEST reg_rm_val;
3974
3975 regcache_raw_read_unsigned (aarch64_insn_r->regcache,
3976 bits (aarch64_insn_r->aarch64_insn, 16, 20), &reg_rm_val);
3977 if (bit (aarch64_insn_r->aarch64_insn, 12))
3978 offset = reg_rm_val << size_bits;
3979 else
3980 offset = reg_rm_val;
3981 datasize = 8 << size_bits;
3982 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3983 &address);
3984 address = address + offset;
3985 record_buf_mem[0] = datasize >> 3;
3986 record_buf_mem[1] = address;
3987 aarch64_insn_r->mem_rec_count = 1;
3988 }
3989 else
3990 {
3991 if (vector_flag)
3992 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3993 else
3994 record_buf[0] = reg_rt;
3995 aarch64_insn_r->reg_rec_count = 1;
3996 }
3997 }
3998 /* Load/store register (immediate and unprivileged) instructions. */
3999 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
4000 && !insn_bit21)
4001 {
4002 if (record_debug)
4003 {
4004 debug_printf ("Process record: load/store "
4005 "(immediate and unprivileged)\n");
4006 }
4007 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
4008 if (!(opc >> 1))
4009 if (opc & 0x01)
4010 ld_flag = 0x01;
4011 else
4012 ld_flag = 0x0;
4013 else
4014 if (size_bits != 0x03)
4015 ld_flag = 0x01;
4016 else
4017 return AARCH64_RECORD_UNKNOWN;
4018
4019 if (!ld_flag)
4020 {
4021 uint16_t imm9_off;
4022 imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
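	  /* imm9 is signed: bit 8 is its sign bit, so compute the magnitude
	     here and subtract it below for negative offsets.  The
	     post-indexed form (bits 10-11 == 0x01) writes back only after
	     the access, so no offset is applied to the recorded address.  */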
4023 offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
4024 datasize = 8 << size_bits;
4025 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4026 &address);
4027 if (insn_bits10_11 != 0x01)
4028 {
4029 if (imm9_off & 0x0100)
4030 address = address - offset;
4031 else
4032 address = address + offset;
4033 }
4034 record_buf_mem[0] = datasize >> 3;
4035 record_buf_mem[1] = address;
4036 aarch64_insn_r->mem_rec_count = 1;
4037 }
4038 else
4039 {
4040 if (vector_flag)
4041 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4042 else
4043 record_buf[0] = reg_rt;
4044 aarch64_insn_r->reg_rec_count = 1;
4045 }
4046 if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
4047 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
4048 }
4049 /* Advanced SIMD load/store instructions. */
4050 else
4051 return aarch64_record_asimd_load_store (aarch64_insn_r);
4052
4053 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
4054 record_buf_mem);
4055 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4056 record_buf);
4057 return AARCH64_RECORD_SUCCESS;
4058}
4059
4060/* Record handler for data processing SIMD and floating point instructions. */
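/* For example, "fcmp s0, s1" records only the CPSR, "fadd d0, d1, d2"
   records V0, and "fmov x0, d1" (move float to integer) records X0.  */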
4061
4062static unsigned int
4063aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
4064{
4065 uint8_t insn_bit21, opcode, rmode, reg_rd;
4066 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
4067 uint8_t insn_bits11_14;
4068 uint32_t record_buf[2];
4069
4070 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
4071 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
4072 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
4073 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
4074 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
4075 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
4076 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
4077 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
4078 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
4079
4080 if (record_debug)
4081    debug_printf ("Process record: data processing SIMD/FP: ");
4082
4083 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
4084 {
4085 /* Floating point - fixed point conversion instructions. */
4086 if (!insn_bit21)
4087 {
4088 if (record_debug)
4089	    debug_printf ("FP - fixed point conversion");
4090
4091 if ((opcode >> 1) == 0x0 && rmode == 0x03)
4092 record_buf[0] = reg_rd;
4093 else
4094 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4095 }
4096 /* Floating point - conditional compare instructions. */
4097 else if (insn_bits10_11 == 0x01)
4098 {
4099 if (record_debug)
4100	    debug_printf ("FP - conditional compare");
4101
4102 record_buf[0] = AARCH64_CPSR_REGNUM;
4103 }
4104 /* Floating point - data processing (2-source) and
4105 conditional select instructions. */
4106 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
4107 {
4108 if (record_debug)
4109	    debug_printf ("FP - DP (2-source)");
4110
4111 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4112 }
4113 else if (insn_bits10_11 == 0x00)
4114 {
4115 /* Floating point - immediate instructions. */
4116 if ((insn_bits12_15 & 0x01) == 0x01
4117 || (insn_bits12_15 & 0x07) == 0x04)
4118 {
4119 if (record_debug)
4120		debug_printf ("FP - immediate");
4121 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4122 }
4123 /* Floating point - compare instructions. */
4124 else if ((insn_bits12_15 & 0x03) == 0x02)
4125 {
4126 if (record_debug)
4127		debug_printf ("FP - compare");
4128 record_buf[0] = AARCH64_CPSR_REGNUM;
4129 }
4130 /* Floating point - integer conversions instructions. */
4131	  else if (insn_bits12_15 == 0x00)
4132 {
4133 /* Convert float to integer instruction. */
4134 if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
4135 {
4136 if (record_debug)
4137		    debug_printf ("float to int conversion");
4138
4139 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4140 }
4141 /* Convert integer to float instruction. */
4142 else if ((opcode >> 1) == 0x01 && !rmode)
4143 {
4144 if (record_debug)
4145		    debug_printf ("int to float conversion");
4146
4147 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4148 }
4149 /* Move float to integer instruction. */
4150 else if ((opcode >> 1) == 0x03)
4151 {
4152 if (record_debug)
4153		    debug_printf ("move float to int");
4154
4155 if (!(opcode & 0x01))
4156 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4157 else
4158 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4159 }
4160 else
4161 return AARCH64_RECORD_UNKNOWN;
4162	    }
4163 else
4164 return AARCH64_RECORD_UNKNOWN;
4165	}
4166 else
4167 return AARCH64_RECORD_UNKNOWN;
4168 }
4169 else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
4170 {
4171 if (record_debug)
4172	debug_printf ("SIMD copy");
4173
4174 /* Advanced SIMD copy instructions. */
4175 if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
4176 && !bit (aarch64_insn_r->aarch64_insn, 15)
4177 && bit (aarch64_insn_r->aarch64_insn, 10))
4178 {
4179 if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
4180 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4181 else
4182 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4183 }
4184 else
4185 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4186 }
4187 /* All remaining floating point or advanced SIMD instructions. */
4188 else
4189 {
4190 if (record_debug)
4191	debug_printf ("all remaining FP/SIMD");
4192
4193 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4194 }
4195
4196 if (record_debug)
4197    debug_printf ("\n");
4198
4199 aarch64_insn_r->reg_rec_count++;
4200 gdb_assert (aarch64_insn_r->reg_rec_count == 1);
4201 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4202 record_buf);
4203 return AARCH64_RECORD_SUCCESS;
4204}
4205
4206/* Decode the instruction type and invoke the matching record handler.  */
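/* The top-level A64 encoding groups, keyed on bits 28..25 of the
   instruction (x = don't care):

     100x  data processing - immediate
     101x  branch, exception generation and system
     x1x0  loads and stores
     x101  data processing - register
     x111  data processing - SIMD and floating point

   Anything else is reported as AARCH64_RECORD_UNSUPPORTED.  */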
4207
4208static unsigned int
4209aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
4210{
4211 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
4212
4213 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
4214 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
4215 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
4216 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
4217
4218 /* Data processing - immediate instructions. */
4219 if (!ins_bit26 && !ins_bit27 && ins_bit28)
4220 return aarch64_record_data_proc_imm (aarch64_insn_r);
4221
4222 /* Branch, exception generation and system instructions. */
4223 if (ins_bit26 && !ins_bit27 && ins_bit28)
4224 return aarch64_record_branch_except_sys (aarch64_insn_r);
4225
4226 /* Load and store instructions. */
4227 if (!ins_bit25 && ins_bit27)
4228 return aarch64_record_load_store (aarch64_insn_r);
4229
4230 /* Data processing - register instructions. */
4231 if (ins_bit25 && !ins_bit26 && ins_bit27)
4232 return aarch64_record_data_proc_reg (aarch64_insn_r);
4233
4234 /* Data processing - SIMD and floating point instructions. */
4235 if (ins_bit25 && ins_bit26 && ins_bit27)
4236 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
4237
4238 return AARCH64_RECORD_UNSUPPORTED;
4239}
4240
4241/* Free the register and memory lists allocated for RECORD.  */
4242
4243static void
4244deallocate_reg_mem (insn_decode_record *record)
4245{
4246 xfree (record->aarch64_regs);
4247 xfree (record->aarch64_mems);
4248}
4249
4250#if GDB_SELF_TEST
4251namespace selftests {
4252
4253static void
4254aarch64_process_record_test (void)
4255{
4256 struct gdbarch_info info;
4257 uint32_t ret;
4258
4259 gdbarch_info_init (&info);
4260 info.bfd_arch_info = bfd_scan_arch ("aarch64");
4261
4262 struct gdbarch *gdbarch = gdbarch_find_by_info (info);
4263 SELF_CHECK (gdbarch != NULL);
4264
4265 insn_decode_record aarch64_record;
4266
4267 memset (&aarch64_record, 0, sizeof (insn_decode_record));
4268 aarch64_record.regcache = NULL;
4269 aarch64_record.this_addr = 0;
4270 aarch64_record.gdbarch = gdbarch;
4271
4272 /* 20 00 80 f9 prfm pldl1keep, [x1] */
4273 aarch64_record.aarch64_insn = 0xf9800020;
4274 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4275 SELF_CHECK (ret == AARCH64_RECORD_SUCCESS);
4276 SELF_CHECK (aarch64_record.reg_rec_count == 0);
4277 SELF_CHECK (aarch64_record.mem_rec_count == 0);
4278
4279 deallocate_reg_mem (&aarch64_record);
4280}
4281
4282} // namespace selftests
4283#endif /* GDB_SELF_TEST */
4284
4285/* Parse the current instruction and record the values of the registers
4286   and memory locations it will change to record_arch_list.  Return -1
4287   if something goes wrong.  */
4288
4289int
4290aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
4291 CORE_ADDR insn_addr)
4292{
4293 uint32_t rec_no = 0;
4294 uint8_t insn_size = 4;
4295 uint32_t ret = 0;
4296 gdb_byte buf[insn_size];
4297 insn_decode_record aarch64_record;
4298
4299 memset (&buf[0], 0, insn_size);
4300 memset (&aarch64_record, 0, sizeof (insn_decode_record));
4301 target_read_memory (insn_addr, &buf[0], insn_size);
4302 aarch64_record.aarch64_insn
4303 = (uint32_t) extract_unsigned_integer (&buf[0],
4304 insn_size,
4305 gdbarch_byte_order (gdbarch));
4306 aarch64_record.regcache = regcache;
4307 aarch64_record.this_addr = insn_addr;
4308 aarch64_record.gdbarch = gdbarch;
4309
4310 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4311 if (ret == AARCH64_RECORD_UNSUPPORTED)
4312 {
4313 printf_unfiltered (_("Process record does not support instruction "
4314			 "0x%x at address %s.\n"),
4315 aarch64_record.aarch64_insn,
4316 paddress (gdbarch, insn_addr));
4317 ret = -1;
4318 }
4319
4320 if (0 == ret)
4321 {
4322 /* Record registers. */
4323 record_full_arch_list_add_reg (aarch64_record.regcache,
4324 AARCH64_PC_REGNUM);
4325 /* Always record register CPSR. */
4326 record_full_arch_list_add_reg (aarch64_record.regcache,
4327 AARCH64_CPSR_REGNUM);
4328 if (aarch64_record.aarch64_regs)
4329 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
4330 if (record_full_arch_list_add_reg (aarch64_record.regcache,
4331 aarch64_record.aarch64_regs[rec_no]))
4332 ret = -1;
4333
4334 /* Record memories. */
4335 if (aarch64_record.aarch64_mems)
4336 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
4337 if (record_full_arch_list_add_mem
4338 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
4339 aarch64_record.aarch64_mems[rec_no].len))
4340 ret = -1;
4341
4342 if (record_full_arch_list_add_end ())
4343 ret = -1;
4344 }
4345
4346 deallocate_reg_mem (&aarch64_record);
4347 return ret;
4348}