Use "pulongest" on aarch64-tdep.c:aarch64_gdbarch_init
[deliverable/binutils-gdb.git] / gdb / aarch64-tdep.c
CommitLineData
07b287a0
MS
/* Common target dependent code for GDB on AArch64 systems.

   Copyright (C) 2009-2019 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"

#include "frame.h"
#include "inferior.h"
#include "gdbcmd.h"
#include "gdbcore.h"
#include "dis-asm.h"
#include "regcache.h"
#include "reggroups.h"
#include "value.h"
#include "arch-utils.h"
#include "osabi.h"
#include "frame-unwind.h"
#include "frame-base.h"
#include "trad-frame.h"
#include "objfiles.h"
#include "dwarf2.h"
#include "dwarf2-frame.h"
#include "gdbtypes.h"
#include "prologue-value.h"
#include "target-descriptions.h"
#include "user-regs.h"
#include "language.h"
#include "infcall.h"
#include "ax.h"
#include "ax-gdb.h"
#include "common/selftest.h"

#include "aarch64-tdep.h"
#include "aarch64-ravenscar-thread.h"

#include "elf-bfd.h"
#include "elf/aarch64.h"

#include "common/vec.h"

#include "record.h"
#include "record-full.h"
#include "arch/aarch64-insn.h"

#include "opcode/aarch64.h"
#include <algorithm>

#define submask(x) ((1L << ((x) + 1)) - 1)
#define bit(obj,st) (((obj) >> (st)) & 1)
#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
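
/* For example, bits (insn, 5, 9) extracts the five-bit field insn[9:5]:
   submask (9 - 5) is 0x1f, so the result is (insn >> 5) & 0x1f.  */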

/* A Homogeneous Floating-Point or Short-Vector Aggregate may have at most
   four members.  */
#define HA_MAX_NUM_FLDS 4
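
/* For example, "struct { float x, y, z; }" is such an aggregate (an HFA of
   three floats), while a five-float struct exceeds HA_MAX_NUM_FLDS and is
   passed by the ordinary rules instead.  */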

/* All possible aarch64 target descriptors.  */
struct target_desc *tdesc_aarch64_list[AARCH64_MAX_SVE_VQ + 1][2/*pauth*/];

/* The standard register names, and all the valid aliases for them.  */
static const struct
{
  const char *const name;
  int regnum;
} aarch64_register_aliases[] =
{
  /* 64-bit register names.  */
  {"fp", AARCH64_FP_REGNUM},
  {"lr", AARCH64_LR_REGNUM},
  {"sp", AARCH64_SP_REGNUM},

  /* 32-bit register names.  */
  {"w0", AARCH64_X0_REGNUM + 0},
  {"w1", AARCH64_X0_REGNUM + 1},
  {"w2", AARCH64_X0_REGNUM + 2},
  {"w3", AARCH64_X0_REGNUM + 3},
  {"w4", AARCH64_X0_REGNUM + 4},
  {"w5", AARCH64_X0_REGNUM + 5},
  {"w6", AARCH64_X0_REGNUM + 6},
  {"w7", AARCH64_X0_REGNUM + 7},
  {"w8", AARCH64_X0_REGNUM + 8},
  {"w9", AARCH64_X0_REGNUM + 9},
  {"w10", AARCH64_X0_REGNUM + 10},
  {"w11", AARCH64_X0_REGNUM + 11},
  {"w12", AARCH64_X0_REGNUM + 12},
  {"w13", AARCH64_X0_REGNUM + 13},
  {"w14", AARCH64_X0_REGNUM + 14},
  {"w15", AARCH64_X0_REGNUM + 15},
  {"w16", AARCH64_X0_REGNUM + 16},
  {"w17", AARCH64_X0_REGNUM + 17},
  {"w18", AARCH64_X0_REGNUM + 18},
  {"w19", AARCH64_X0_REGNUM + 19},
  {"w20", AARCH64_X0_REGNUM + 20},
  {"w21", AARCH64_X0_REGNUM + 21},
  {"w22", AARCH64_X0_REGNUM + 22},
  {"w23", AARCH64_X0_REGNUM + 23},
  {"w24", AARCH64_X0_REGNUM + 24},
  {"w25", AARCH64_X0_REGNUM + 25},
  {"w26", AARCH64_X0_REGNUM + 26},
  {"w27", AARCH64_X0_REGNUM + 27},
  {"w28", AARCH64_X0_REGNUM + 28},
  {"w29", AARCH64_X0_REGNUM + 29},
  {"w30", AARCH64_X0_REGNUM + 30},

  /* specials */
  {"ip0", AARCH64_X0_REGNUM + 16},
  {"ip1", AARCH64_X0_REGNUM + 17}
};

/* The required core 'R' registers.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM!  */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  "pc", "cpsr"
};

/* The FP/SIMD 'V' registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM!  */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  "fpsr",
  "fpcr"
};

/* The SVE 'Z' and 'P' registers.  */
static const char *const aarch64_sve_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_SVE_Z0_REGNUM!  */
  "z0", "z1", "z2", "z3",
  "z4", "z5", "z6", "z7",
  "z8", "z9", "z10", "z11",
  "z12", "z13", "z14", "z15",
  "z16", "z17", "z18", "z19",
  "z20", "z21", "z22", "z23",
  "z24", "z25", "z26", "z27",
  "z28", "z29", "z30", "z31",
  "fpsr", "fpcr",
  "p0", "p1", "p2", "p3",
  "p4", "p5", "p6", "p7",
  "p8", "p9", "p10", "p11",
  "p12", "p13", "p14", "p15",
  "ffr", "vg"
};

static const char *const aarch64_pauth_register_names[] =
{
  /* Authentication mask for data pointer.  */
  "pauth_dmask",
  /* Authentication mask for code pointer.  */
  "pauth_cmask"
};

/* AArch64 prologue cache structure.  */
struct aarch64_prologue_cache
{
  /* The program counter at the start of the function.  It is used to
     identify this frame as a prologue frame.  */
  CORE_ADDR func;

  /* The program counter at the time this frame was created; i.e. where
     this function was called from.  It is used to identify this frame as a
     stub frame.  */
  CORE_ADDR prev_pc;

  /* The stack pointer at the time this frame was created; i.e. the
     caller's stack pointer when this function was called.  It is used
     to identify this frame.  */
  CORE_ADDR prev_sp;

  /* Is the target available to read from?  */
  int available_p;

  /* The frame base for this frame is just prev_sp - frame size.
     FRAMESIZE is the distance from the frame pointer to the
     initial stack pointer.  */
  int framesize;

  /* The register used to hold the frame pointer for this frame.  */
  int framereg;

  /* Saved register offsets.  */
  struct trad_frame_saved_reg *saved_regs;
};

static void
show_aarch64_debug (struct ui_file *file, int from_tty,
                    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
}

namespace {

/* Abstract instruction reader.  */

class abstract_instruction_reader
{
public:
  /* Read in one instruction.  */
  virtual ULONGEST read (CORE_ADDR memaddr, int len,
                         enum bfd_endian byte_order) = 0;
};

/* Instruction reader from real target.  */

class instruction_reader : public abstract_instruction_reader
{
public:
  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    override
  {
    return read_code_unsigned_integer (memaddr, len, byte_order);
  }
};

} // namespace

/* If address signing is enabled, mask off the signature bits from ADDR, using
   the register values in THIS_FRAME.  */

static CORE_ADDR
aarch64_frame_unmask_address (struct gdbarch_tdep *tdep,
                              struct frame_info *this_frame,
                              CORE_ADDR addr)
{
  if (tdep->has_pauth ()
      && frame_unwind_register_unsigned (this_frame,
                                         tdep->pauth_ra_state_regnum))
    {
      int cmask_num = AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base);
      CORE_ADDR cmask = frame_unwind_register_unsigned (this_frame, cmask_num);
      addr = addr & ~cmask;
    }

  return addr;
}
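
/* For example, with a (hypothetical) unwound CMASK of 0xff00000000000000,
   a signed return address of 0xaa000000004005b4 unmasks to 0x4005b4.  */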

/* Analyze a prologue, looking for a recognizable stack frame
   and frame pointer.  Scan until we encounter a store that could
   clobber the stack frame unexpectedly, or an unknown instruction.  */

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
                          CORE_ADDR start, CORE_ADDR limit,
                          struct aarch64_prologue_cache *cache,
                          abstract_instruction_reader& reader)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  int i;
  /* Track X registers and D registers in prologue.  */
  pv_t regs[AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT];

  for (i = 0; i < AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT; i++)
    regs[i] = pv_register (i, 0);
  pv_area stack (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));

  for (; start < limit; start += 4)
    {
      uint32_t insn;
      aarch64_inst inst;

      insn = reader.read (start, 4, byte_order_for_code);

      if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
        break;

      if (inst.opcode->iclass == addsub_imm
          && (inst.opcode->op == OP_ADD
              || strcmp ("sub", inst.opcode->name) == 0))
        {
          unsigned rd = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].reg.regno;

          gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM);

          if (inst.opcode->op == OP_ADD)
            {
              regs[rd] = pv_add_constant (regs[rn],
                                          inst.operands[2].imm.value);
            }
          else
            {
              regs[rd] = pv_add_constant (regs[rn],
                                          -inst.operands[2].imm.value);
            }
        }
      else if (inst.opcode->iclass == pcreladdr
               && inst.operands[1].type == AARCH64_OPND_ADDR_ADRP)
        {
          gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);

          regs[inst.operands[0].reg.regno] = pv_unknown ();
        }
      else if (inst.opcode->iclass == branch_imm)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == condbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == branch_reg)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == compbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->op == OP_MOVZ)
        {
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
          regs[inst.operands[0].reg.regno] = pv_unknown ();
        }
      else if (inst.opcode->iclass == log_shift
               && strcmp (inst.opcode->name, "orr") == 0)
        {
          unsigned rd = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].reg.regno;
          unsigned rm = inst.operands[2].reg.regno;

          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT);

          if (inst.operands[2].shifter.amount == 0
              && rn == AARCH64_SP_REGNUM)
            regs[rd] = regs[rm];
          else
            {
              if (aarch64_debug)
                {
                  debug_printf ("aarch64: prologue analysis gave up "
                                "addr=%s opcode=0x%x (orr x register)\n",
                                core_addr_to_string_nz (start), insn);
                }
              break;
            }
        }
      else if (inst.opcode->op == OP_STUR)
        {
          unsigned rt = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].addr.base_regno;
          int is64
            = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);

          gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
          gdb_assert (!inst.operands[1].addr.offset.is_reg);

          stack.store (pv_add_constant (regs[rn],
                                        inst.operands[1].addr.offset.imm),
                       is64 ? 8 : 4, regs[rt]);
        }
      else if ((inst.opcode->iclass == ldstpair_off
                || (inst.opcode->iclass == ldstpair_indexed
                    && inst.operands[2].addr.preind))
               && strcmp ("stp", inst.opcode->name) == 0)
        {
          /* STP with addressing mode Pre-indexed and Base register.  */
          unsigned rt1;
          unsigned rt2;
          unsigned rn = inst.operands[2].addr.base_regno;
          int32_t imm = inst.operands[2].addr.offset.imm;

          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
                      || inst.operands[0].type == AARCH64_OPND_Ft);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2
                      || inst.operands[1].type == AARCH64_OPND_Ft2);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7);
          gdb_assert (!inst.operands[2].addr.offset.is_reg);

          /* If recording this store would invalidate the store area
             (perhaps because rn is not known) then we should abandon
             further prologue analysis.  */
          if (stack.store_would_trash (pv_add_constant (regs[rn], imm)))
            break;

          if (stack.store_would_trash (pv_add_constant (regs[rn], imm + 8)))
            break;

          rt1 = inst.operands[0].reg.regno;
          rt2 = inst.operands[1].reg.regno;
          if (inst.operands[0].type == AARCH64_OPND_Ft)
            {
              /* Only the bottom 64 bits of each V register (the D register)
                 need to be preserved.  */
              gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
              rt1 += AARCH64_X_REGISTER_COUNT;
              rt2 += AARCH64_X_REGISTER_COUNT;
            }

          stack.store (pv_add_constant (regs[rn], imm), 8,
                       regs[rt1]);
          stack.store (pv_add_constant (regs[rn], imm + 8), 8,
                       regs[rt2]);

          if (inst.operands[2].addr.writeback)
            regs[rn] = pv_add_constant (regs[rn], imm);
        }
      else if ((inst.opcode->iclass == ldst_imm9 /* Signed immediate.  */
                || (inst.opcode->iclass == ldst_pos /* Unsigned immediate.  */
                    && (inst.opcode->op == OP_STR_POS
                        || inst.opcode->op == OP_STRF_POS)))
               && inst.operands[1].addr.base_regno == AARCH64_SP_REGNUM
               && strcmp ("str", inst.opcode->name) == 0)
        {
          /* STR (immediate) */
          unsigned int rt = inst.operands[0].reg.regno;
          int32_t imm = inst.operands[1].addr.offset.imm;
          unsigned int rn = inst.operands[1].addr.base_regno;
          bool is64
            = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
                      || inst.operands[0].type == AARCH64_OPND_Ft);

          if (inst.operands[0].type == AARCH64_OPND_Ft)
            {
              /* Only the bottom 64 bits of each V register (the D register)
                 need to be preserved.  */
              gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
              rt += AARCH64_X_REGISTER_COUNT;
            }

          stack.store (pv_add_constant (regs[rn], imm),
                       is64 ? 8 : 4, regs[rt]);
          if (inst.operands[1].addr.writeback)
            regs[rn] = pv_add_constant (regs[rn], imm);
        }
      else if (inst.opcode->iclass == testbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == ic_system)
        {
          struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
          int ra_state_val = 0;

          if (insn == 0xd503233f /* paciasp.  */
              || insn == 0xd503237f /* pacibsp.  */)
            {
              /* Return addresses are mangled.  */
              ra_state_val = 1;
            }
          else if (insn == 0xd50323bf /* autiasp.  */
                   || insn == 0xd50323ff /* autibsp.  */)
            {
              /* Return addresses are not mangled.  */
              ra_state_val = 0;
            }
          else
            {
              if (aarch64_debug)
                debug_printf ("aarch64: prologue analysis gave up addr=%s"
                              " opcode=0x%x (iclass)\n",
                              core_addr_to_string_nz (start), insn);
              break;
            }

          if (tdep->has_pauth () && cache != nullptr)
            trad_frame_set_value (cache->saved_regs,
                                  tdep->pauth_ra_state_regnum,
                                  ra_state_val);
        }
      else
        {
          if (aarch64_debug)
            {
              debug_printf ("aarch64: prologue analysis gave up addr=%s"
                            " opcode=0x%x\n",
                            core_addr_to_string_nz (start), insn);
            }
          break;
        }
    }

  if (cache == NULL)
    return start;

  if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Frame pointer is fp.  Frame size is constant.  */
      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = -regs[AARCH64_FP_REGNUM].k;
    }
  else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Try the stack pointer.  */
      cache->framesize = -regs[AARCH64_SP_REGNUM].k;
      cache->framereg = AARCH64_SP_REGNUM;
    }
  else
    {
      /* We're just out of luck.  We don't know where the frame is.  */
      cache->framereg = -1;
      cache->framesize = 0;
    }

  for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
    {
      CORE_ADDR offset;

      if (stack.find_reg (gdbarch, i, &offset))
        cache->saved_regs[i].addr = offset;
    }

  for (i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
    {
      int regnum = gdbarch_num_regs (gdbarch);
      CORE_ADDR offset;

      if (stack.find_reg (gdbarch, i + AARCH64_X_REGISTER_COUNT,
                          &offset))
        cache->saved_regs[i + regnum + AARCH64_D0_REGNUM].addr = offset;
    }

  return start;
}

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
                          CORE_ADDR start, CORE_ADDR limit,
                          struct aarch64_prologue_cache *cache)
{
  instruction_reader reader;

  return aarch64_analyze_prologue (gdbarch, start, limit, cache,
                                   reader);
}

#if GDB_SELF_TEST

namespace selftests {

/* Instruction reader from manually cooked instruction sequences.  */

class instruction_reader_test : public abstract_instruction_reader
{
public:
  template<size_t SIZE>
  explicit instruction_reader_test (const uint32_t (&insns)[SIZE])
    : m_insns (insns), m_insns_size (SIZE)
  {}

  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    override
  {
    SELF_CHECK (len == 4);
    SELF_CHECK (memaddr % 4 == 0);
    SELF_CHECK (memaddr / 4 < m_insns_size);

    return m_insns[memaddr / 4];
  }

private:
  const uint32_t *m_insns;
  size_t m_insns_size;
};

static void
aarch64_analyze_prologue_test (void)
{
  struct gdbarch_info info;

  gdbarch_info_init (&info);
  info.bfd_arch_info = bfd_scan_arch ("aarch64");

  struct gdbarch *gdbarch = gdbarch_find_by_info (info);
  SELF_CHECK (gdbarch != NULL);

  struct aarch64_prologue_cache cache;
  cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);

  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  /* Test the simple prologue in which frame pointer is used.  */
  {
    static const uint32_t insns[] = {
      0xa9af7bfd, /* stp     x29, x30, [sp,#-272]! */
      0x910003fd, /* mov     x29, sp */
      0x97ffffe6, /* bl      0x400580 */
    };
    instruction_reader_test reader (insns);

    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
    SELF_CHECK (end == 4 * 2);

    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 272);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
        if (i == AARCH64_FP_REGNUM)
          SELF_CHECK (cache.saved_regs[i].addr == -272);
        else if (i == AARCH64_LR_REGNUM)
          SELF_CHECK (cache.saved_regs[i].addr == -264);
        else
          SELF_CHECK (cache.saved_regs[i].addr == -1);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
        int regnum = gdbarch_num_regs (gdbarch);

        SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
                    == -1);
      }
  }

  /* Test a prologue in which STR is used and frame pointer is not
     used.  */
  {
    static const uint32_t insns[] = {
      0xf81d0ff3, /* str     x19, [sp, #-48]! */
      0xb9002fe0, /* str     w0, [sp, #44] */
      0xf90013e1, /* str     x1, [sp, #32] */
      0xfd000fe0, /* str     d0, [sp, #24] */
      0xaa0203f3, /* mov     x19, x2 */
      0xf94013e0, /* ldr     x0, [sp, #32] */
    };
    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    SELF_CHECK (end == 4 * 5);

    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 48);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
        if (i == 1)
          SELF_CHECK (cache.saved_regs[i].addr == -16);
        else if (i == 19)
          SELF_CHECK (cache.saved_regs[i].addr == -48);
        else
          SELF_CHECK (cache.saved_regs[i].addr == -1);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
        int regnum = gdbarch_num_regs (gdbarch);

        if (i == 0)
          SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
                      == -24);
        else
          SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
                      == -1);
      }
  }

  /* Test a prologue in which there is a return address signing instruction.  */
  if (tdep->has_pauth ())
    {
      static const uint32_t insns[] = {
        0xd503233f, /* paciasp */
        0xa9bd7bfd, /* stp     x29, x30, [sp, #-48]! */
        0x910003fd, /* mov     x29, sp */
        0xf801c3f3, /* str     x19, [sp, #28] */
        0xb9401fa0, /* ldr     x19, [x29, #28] */
      };
      instruction_reader_test reader (insns);

      trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
      CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache,
                                                reader);

      SELF_CHECK (end == 4 * 4);
      SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
      SELF_CHECK (cache.framesize == 48);

      for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
        {
          if (i == 19)
            SELF_CHECK (cache.saved_regs[i].addr == -20);
          else if (i == AARCH64_FP_REGNUM)
            SELF_CHECK (cache.saved_regs[i].addr == -48);
          else if (i == AARCH64_LR_REGNUM)
            SELF_CHECK (cache.saved_regs[i].addr == -40);
          else
            SELF_CHECK (cache.saved_regs[i].addr == -1);
        }

      if (tdep->has_pauth ())
        {
          SELF_CHECK (trad_frame_value_p (cache.saved_regs,
                                          tdep->pauth_ra_state_regnum));
          SELF_CHECK (cache.saved_regs[tdep->pauth_ra_state_regnum].addr == 1);
        }
    }
}
} // namespace selftests
#endif /* GDB_SELF_TEST */

/* Implement the "skip_prologue" gdbarch method.  */

static CORE_ADDR
aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  CORE_ADDR func_addr, limit_pc;

  /* See if we can determine the end of the prologue via the symbol
     table.  If so, then return either PC, or the PC after the
     prologue, whichever is greater.  */
  if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
    {
      CORE_ADDR post_prologue_pc
        = skip_prologue_using_sal (gdbarch, func_addr);

      if (post_prologue_pc != 0)
        return std::max (pc, post_prologue_pc);
    }

  /* Can't determine prologue from the symbol table, need to examine
     instructions.  */

  /* Find an upper limit on the function prologue using the debug
     information.  If the debug information could not be used to
     provide that bound, then use an arbitrary large number as the
     upper bound.  */
  limit_pc = skip_prologue_using_sal (gdbarch, pc);
  if (limit_pc == 0)
    limit_pc = pc + 128;  /* Magic.  */

  /* Try disassembling prologue.  */
  return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
}

/* Scan the function prologue for THIS_FRAME and populate the prologue
   cache CACHE.  */

static void
aarch64_scan_prologue (struct frame_info *this_frame,
                       struct aarch64_prologue_cache *cache)
{
  CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
  CORE_ADDR prologue_start;
  CORE_ADDR prologue_end;
  CORE_ADDR prev_pc = get_frame_pc (this_frame);
  struct gdbarch *gdbarch = get_frame_arch (this_frame);

  cache->prev_pc = prev_pc;

  /* Assume we do not find a frame.  */
  cache->framereg = -1;
  cache->framesize = 0;

  if (find_pc_partial_function (block_addr, NULL, &prologue_start,
                                &prologue_end))
    {
      struct symtab_and_line sal = find_pc_line (prologue_start, 0);

      if (sal.line == 0)
        {
          /* No line info so use the current PC.  */
          prologue_end = prev_pc;
        }
      else if (sal.end < prologue_end)
        {
          /* The next line begins after the function end.  */
          prologue_end = sal.end;
        }

      prologue_end = std::min (prologue_end, prev_pc);
      aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
    }
  else
    {
      CORE_ADDR frame_loc;

      frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
      if (frame_loc == 0)
        return;

      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = 16;
      cache->saved_regs[29].addr = 0;
      cache->saved_regs[30].addr = 8;
    }
}

/* Fill in *CACHE with information about the prologue of *THIS_FRAME.  This
   function may throw an exception if the inferior's registers or memory is
   not available.  */

static void
aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
                               struct aarch64_prologue_cache *cache)
{
  CORE_ADDR unwound_fp;
  int reg;

  aarch64_scan_prologue (this_frame, cache);

  if (cache->framereg == -1)
    return;

  unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
  if (unwound_fp == 0)
    return;

  cache->prev_sp = unwound_fp + cache->framesize;

  /* Calculate actual addresses of saved registers using offsets
     determined by aarch64_analyze_prologue.  */
  for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
    if (trad_frame_addr_p (cache->saved_regs, reg))
      cache->saved_regs[reg].addr += cache->prev_sp;

  cache->func = get_frame_func (this_frame);

  cache->available_p = 1;
}

/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  try
    {
      aarch64_make_prologue_cache_1 (this_frame, cache);
    }
  catch (const gdb_exception_error &ex)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
        throw;
    }

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
                                           void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  /* Halt the backtrace at "_start".  */
  if (cache->prev_pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
    return UNWIND_OUTERMOST;

  /* We've hit a wall, stop.  */
  if (cache->prev_sp == 0)
    return UNWIND_OUTERMOST;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a normal frame is the current function's starting
   PC and the caller's SP when we were called.  */

static void
aarch64_prologue_this_id (struct frame_info *this_frame,
                          void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    *this_id = frame_id_build_unavailable_stack (cache->func);
  else
    *this_id = frame_id_build (cache->prev_sp, cache->func);
}

/* Implement the "prev_register" frame_unwind method.  */

static struct value *
aarch64_prologue_prev_register (struct frame_info *this_frame,
                                void **this_cache, int prev_regnum)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  /* If we are asked to unwind the PC, then we need to return the LR
     instead.  The prologue may save PC, but it will point into this
     frame's prologue, not the next frame's resume location.  */
  if (prev_regnum == AARCH64_PC_REGNUM)
    {
      CORE_ADDR lr;
      struct gdbarch *gdbarch = get_frame_arch (this_frame);
      struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);

      if (tdep->has_pauth ()
          && trad_frame_value_p (cache->saved_regs,
                                 tdep->pauth_ra_state_regnum))
        lr = aarch64_frame_unmask_address (tdep, this_frame, lr);

      return frame_unwind_got_constant (this_frame, prev_regnum, lr);
    }

  /* SP is generally not saved to the stack, but this frame is
     identified by the next frame's stack pointer at the time of the
     call.  The value was already reconstructed into PREV_SP.  */
  /*
         +----------+  ^
         | saved lr |  |
      +->| saved fp |--+
      |  |          |
      |  |          |     <- Previous SP
      |  +----------+
      |  | saved lr |
      +--| saved fp |<- FP
         |          |
         |          |<- SP
         +----------+  */
  if (prev_regnum == AARCH64_SP_REGNUM)
    return frame_unwind_got_constant (this_frame, prev_regnum,
                                      cache->prev_sp);

  return trad_frame_get_prev_register (this_frame, cache->saved_regs,
                                       prev_regnum);
}

/* AArch64 prologue unwinder.  */
struct frame_unwind aarch64_prologue_unwind =
{
  NORMAL_FRAME,
  aarch64_prologue_frame_unwind_stop_reason,
  aarch64_prologue_this_id,
  aarch64_prologue_prev_register,
  NULL,
  default_frame_sniffer
};

/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  try
    {
      cache->prev_sp = get_frame_register_unsigned (this_frame,
                                                    AARCH64_SP_REGNUM);
      cache->prev_pc = get_frame_pc (this_frame);
      cache->available_p = 1;
    }
  catch (const gdb_exception_error &ex)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
        throw;
    }

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame,
                                       void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a stub frame is the current SP and LR.  */

static void
aarch64_stub_this_id (struct frame_info *this_frame,
                      void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (cache->available_p)
    *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
  else
    *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
}

/* Implement the "sniffer" frame_unwind method.  */

static int
aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
                             struct frame_info *this_frame,
                             void **this_prologue_cache)
{
  CORE_ADDR addr_in_block;
  gdb_byte dummy[4];

  addr_in_block = get_frame_address_in_block (this_frame);
  if (in_plt_section (addr_in_block)
      /* We also use the stub unwinder if the target memory is unreadable
         to avoid having the prologue unwinder trying to read it.  */
      || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
    return 1;

  return 0;
}

/* AArch64 stub unwinder.  */
struct frame_unwind aarch64_stub_unwind =
{
  NORMAL_FRAME,
  aarch64_stub_frame_unwind_stop_reason,
  aarch64_stub_this_id,
  aarch64_prologue_prev_register,
  NULL,
  aarch64_stub_unwind_sniffer
};

/* Return the frame base address of *THIS_FRAME.  */

static CORE_ADDR
aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  return cache->prev_sp - cache->framesize;
}

/* AArch64 default frame base information.  */
struct frame_base aarch64_normal_base =
{
  &aarch64_prologue_unwind,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base
};

/* Return the value of the REGNUM register in the previous frame of
   *THIS_FRAME.  */

static struct value *
aarch64_dwarf2_prev_register (struct frame_info *this_frame,
                              void **this_cache, int regnum)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (get_frame_arch (this_frame));
  CORE_ADDR lr;

  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      lr = aarch64_frame_unmask_address (tdep, this_frame, lr);
      return frame_unwind_got_constant (this_frame, regnum, lr);

    default:
      internal_error (__FILE__, __LINE__,
                      _("Unexpected register %d"), regnum);
    }
}

static const unsigned char op_lit0 = DW_OP_lit0;
static const unsigned char op_lit1 = DW_OP_lit1;

/* Implement the "init_reg" dwarf2_frame_ops method.  */

static void
aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
                               struct dwarf2_frame_state_reg *reg,
                               struct frame_info *this_frame)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      reg->how = DWARF2_FRAME_REG_FN;
      reg->loc.fn = aarch64_dwarf2_prev_register;
      return;

    case AARCH64_SP_REGNUM:
      reg->how = DWARF2_FRAME_REG_CFA;
      return;
    }

  /* Init pauth registers.  */
  if (tdep->has_pauth ())
    {
      if (regnum == tdep->pauth_ra_state_regnum)
        {
          /* Initialize RA_STATE to zero.  */
          reg->how = DWARF2_FRAME_REG_SAVED_VAL_EXP;
          reg->loc.exp.start = &op_lit0;
          reg->loc.exp.len = 1;
          return;
        }
      else if (regnum == AARCH64_PAUTH_DMASK_REGNUM (tdep->pauth_reg_base)
               || regnum == AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base))
        {
          reg->how = DWARF2_FRAME_REG_SAME_VALUE;
          return;
        }
    }
}

/* Implement the execute_dwarf_cfa_vendor_op method.  */

static bool
aarch64_execute_dwarf_cfa_vendor_op (struct gdbarch *gdbarch, gdb_byte op,
                                     struct dwarf2_frame_state *fs)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  struct dwarf2_frame_state_reg *ra_state;

  if (tdep->has_pauth () && op == DW_CFA_AARCH64_negate_ra_state)
    {
      /* Allocate RA_STATE column if it's not allocated yet.  */
      fs->regs.alloc_regs (AARCH64_DWARF_PAUTH_RA_STATE + 1);

      /* Toggle the status of RA_STATE between 0 and 1.  */
      ra_state = &(fs->regs.reg[AARCH64_DWARF_PAUTH_RA_STATE]);
      ra_state->how = DWARF2_FRAME_REG_SAVED_VAL_EXP;

      if (ra_state->loc.exp.start == nullptr
          || ra_state->loc.exp.start == &op_lit0)
        ra_state->loc.exp.start = &op_lit1;
      else
        ra_state->loc.exp.start = &op_lit0;

      ra_state->loc.exp.len = 1;

      return true;
    }

  return false;
}
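
/* For example, a function that signs its return address typically emits
   DW_CFA_AARCH64_negate_ra_state right after paciasp (flipping RA_STATE
   from its initial 0 to 1) and again after autiasp (flipping it back to 0),
   so the unwinder knows whether the saved LR still carries a signature at
   any given PC.  */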

/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

typedef struct
{
  /* Value to pass on stack.  It can be NULL if this item is for stack
     padding.  */
  const gdb_byte *data;

  /* Size in bytes of value to pass on stack.  */
  int len;
} stack_item_t;

DEF_VEC_O (stack_item_t);

/* Implement the gdbarch type alignment method.  Overrides the generic
   alignment algorithm for anything that is aarch64 specific.  */

static ULONGEST
aarch64_type_align (gdbarch *gdbarch, struct type *t)
{
  t = check_typedef (t);
  if (TYPE_CODE (t) == TYPE_CODE_ARRAY && TYPE_VECTOR (t))
    {
      /* Use the natural alignment for vector types (the same for
         scalar type), but the maximum alignment is 128-bit.  */
      if (TYPE_LENGTH (t) > 16)
        return 16;
      else
        return TYPE_LENGTH (t);
    }

  /* Allow the common code to calculate the alignment.  */
  return 0;
}
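
/* For example, a 16-byte vector of four floats gets its natural 16-byte
   alignment, while a 32-byte vector is capped at 16 bytes; every other
   type returns 0 and is handled by the generic type_align code.  */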

/* Worker function for aapcs_is_vfp_call_or_return_candidate.

   Return the number of registers required, or -1 on failure.

   When encountering a base element, if FUNDAMENTAL_TYPE is not set then set it
   to the element, else fail if the type of this element does not match the
   existing value.  */

static int
aapcs_is_vfp_call_or_return_candidate_1 (struct type *type,
                                         struct type **fundamental_type)
{
  if (type == nullptr)
    return -1;

  switch (TYPE_CODE (type))
    {
    case TYPE_CODE_FLT:
      if (TYPE_LENGTH (type) > 16)
        return -1;

      if (*fundamental_type == nullptr)
        *fundamental_type = type;
      else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
               || TYPE_CODE (type) != TYPE_CODE (*fundamental_type))
        return -1;

      return 1;

    case TYPE_CODE_COMPLEX:
      {
        struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
        if (TYPE_LENGTH (target_type) > 16)
          return -1;

        if (*fundamental_type == nullptr)
          *fundamental_type = target_type;
        else if (TYPE_LENGTH (target_type) != TYPE_LENGTH (*fundamental_type)
                 || TYPE_CODE (target_type) != TYPE_CODE (*fundamental_type))
          return -1;

        return 2;
      }

    case TYPE_CODE_ARRAY:
      {
        if (TYPE_VECTOR (type))
          {
            if (TYPE_LENGTH (type) != 8 && TYPE_LENGTH (type) != 16)
              return -1;

            if (*fundamental_type == nullptr)
              *fundamental_type = type;
            else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
                     || TYPE_CODE (type) != TYPE_CODE (*fundamental_type))
              return -1;

            return 1;
          }
        else
          {
            struct type *target_type = TYPE_TARGET_TYPE (type);
            int count = aapcs_is_vfp_call_or_return_candidate_1
                          (target_type, fundamental_type);

            if (count == -1)
              return count;

            count *= (TYPE_LENGTH (type) / TYPE_LENGTH (target_type));
            return count;
          }
      }

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      {
        int count = 0;

        for (int i = 0; i < TYPE_NFIELDS (type); i++)
          {
            /* Ignore any static fields.  */
            if (field_is_static (&TYPE_FIELD (type, i)))
              continue;

            struct type *member = check_typedef (TYPE_FIELD_TYPE (type, i));

            int sub_count = aapcs_is_vfp_call_or_return_candidate_1
                              (member, fundamental_type);
            if (sub_count == -1)
              return -1;
            count += sub_count;
          }

        /* Ensure there is no padding between the fields (allowing for empty
           zero length structs).  */
        int ftype_length = (*fundamental_type == nullptr)
                           ? 0 : TYPE_LENGTH (*fundamental_type);
        if (count * ftype_length != TYPE_LENGTH (type))
          return -1;

        return count;
      }

    default:
      break;
    }

  return -1;
}

/* Return true if an argument, whose type is described by TYPE, can be passed or
   returned in simd/fp registers, providing enough parameter passing registers
   are available.  This is as described in the AAPCS64.

   Upon successful return, *COUNT returns the number of needed registers,
   *FUNDAMENTAL_TYPE contains the type of those registers.

   Candidate as per the AAPCS64 5.4.2.C is either a:
   - float.
   - short-vector.
   - HFA (Homogeneous Floating-point Aggregate, 4.3.5.1).  A composite type
     where all the members are floats, with at most 4 members.
   - HVA (Homogeneous Short-vector Aggregate, 4.3.5.2).  A composite type
     where all the members are short vectors, with at most 4 members.
   - Complex (7.1.1)

   Note that HFAs and HVAs can include nested structures and arrays.  */

static bool
aapcs_is_vfp_call_or_return_candidate (struct type *type, int *count,
                                       struct type **fundamental_type)
{
  if (type == nullptr)
    return false;

  *fundamental_type = nullptr;

  int ag_count = aapcs_is_vfp_call_or_return_candidate_1 (type,
                                                          fundamental_type);

  if (ag_count > 0 && ag_count <= HA_MAX_NUM_FLDS)
    {
      *count = ag_count;
      return true;
    }
  else
    return false;
}
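
/* For example, "struct { double re, im; }" yields count 2 with double as the
   fundamental type (an HFA), while "struct { double d; int i; }" fails the
   base-type match in the worker above and is rejected.  */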

/* AArch64 function call information structure.  */
struct aarch64_call_info
{
  /* The current argument number.  */
  unsigned argnum;

  /* The next general purpose register number, equivalent to NGRN as
     described in the AArch64 Procedure Call Standard.  */
  unsigned ngrn;

  /* The next SIMD and floating point register number, equivalent to
     NSRN as described in the AArch64 Procedure Call Standard.  */
  unsigned nsrn;

  /* The next stacked argument address, equivalent to NSAA as
     described in the AArch64 Procedure Call Standard.  */
  unsigned nsaa;

  /* Stack item vector.  */
  VEC(stack_item_t) *si;
};

/* Pass a value in a sequence of consecutive X registers.  The caller
   is responsible for ensuring sufficient registers are available.  */

static void
pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
           struct aarch64_call_info *info, struct type *type,
           struct value *arg)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int len = TYPE_LENGTH (type);
  enum type_code typecode = TYPE_CODE (type);
  int regnum = AARCH64_X0_REGNUM + info->ngrn;
  const bfd_byte *buf = value_contents (arg);

  info->argnum++;

  while (len > 0)
    {
      int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
      CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
                                                   byte_order);

      /* Adjust sub-word struct/union args when big-endian.  */
      if (byte_order == BFD_ENDIAN_BIG
          && partial_len < X_REGISTER_SIZE
          && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
        regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);

      if (aarch64_debug)
        {
          debug_printf ("arg %d in %s = 0x%s\n", info->argnum,
                        gdbarch_register_name (gdbarch, regnum),
                        phex (regval, X_REGISTER_SIZE));
        }
      regcache_cooked_write_unsigned (regcache, regnum, regval);
      len -= partial_len;
      buf += partial_len;
      regnum++;
    }
}
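
/* For example, with NGRN at 2 on entry, a 16-byte struct is written as two
   8-byte chunks into x2 and x3; the caller (pass_in_x_or_stack) then bumps
   NGRN by the number of registers consumed.  */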

/* Attempt to marshall a value in a V register.  Return 1 if
   successful, or 0 if insufficient registers are available.  This
   function, unlike the equivalent pass_in_x() function, does not
   handle arguments spread across multiple registers.  */

static int
pass_in_v (struct gdbarch *gdbarch,
           struct regcache *regcache,
           struct aarch64_call_info *info,
           int len, const bfd_byte *buf)
{
  if (info->nsrn < 8)
    {
      int regnum = AARCH64_V0_REGNUM + info->nsrn;
      /* Enough space for a full vector register.  */
      gdb_byte reg[register_size (gdbarch, regnum)];
      gdb_assert (len <= sizeof (reg));

      info->argnum++;
      info->nsrn++;

      memset (reg, 0, sizeof (reg));
      /* PCS C.1, the argument is allocated to the least significant
         bits of the V register.  */
      memcpy (reg, buf, len);
      regcache->cooked_write (regnum, reg);

      if (aarch64_debug)
        {
          debug_printf ("arg %d in %s\n", info->argnum,
                        gdbarch_register_name (gdbarch, regnum));
        }
      return 1;
    }
  info->nsrn = 8;
  return 0;
}

/* Marshall an argument onto the stack.  */

static void
pass_on_stack (struct aarch64_call_info *info, struct type *type,
               struct value *arg)
{
  const bfd_byte *buf = value_contents (arg);
  int len = TYPE_LENGTH (type);
  int align;
  stack_item_t item;

  info->argnum++;

  align = type_align (type);

  /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
     natural alignment of the argument's type.  */
  align = align_up (align, 8);

  /* The AArch64 PCS requires at most doubleword alignment.  */
  if (align > 16)
    align = 16;

  if (aarch64_debug)
    {
      debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
                    info->nsaa);
    }

  item.len = len;
  item.data = buf;
  VEC_safe_push (stack_item_t, info->si, &item);

  info->nsaa += len;
  if (info->nsaa & (align - 1))
    {
      /* Push stack alignment padding.  */
      int pad = align - (info->nsaa & (align - 1));

      item.len = pad;
      item.data = NULL;

      VEC_safe_push (stack_item_t, info->si, &item);
      info->nsaa += pad;
    }
}
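
/* For example, pushing a 12-byte argument whose type needs 8-byte alignment
   leaves NSAA at 12, so a 4-byte padding item (data == NULL) follows and
   NSAA becomes 16.  */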

/* Marshall an argument into a sequence of one or more consecutive X
   registers or, if insufficient X registers are available, then onto
   the stack.  */

static void
pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
                    struct aarch64_call_info *info, struct type *type,
                    struct value *arg)
{
  int len = TYPE_LENGTH (type);
  int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;

  /* PCS C.13 - Pass in registers if we have enough spare.  */
  if (info->ngrn + nregs <= 8)
    {
      pass_in_x (gdbarch, regcache, info, type, arg);
      info->ngrn += nregs;
    }
  else
    {
      info->ngrn = 8;
      pass_on_stack (info, type, arg);
    }
}

/* Pass a value, which is of type arg_type, in a V register.  Assumes the
   value is an aapcs_is_vfp_call_or_return_candidate and there are enough
   spare V registers.  A return value of false is an error state as the
   value will have been partially passed to the stack.  */
static bool
pass_in_v_vfp_candidate (struct gdbarch *gdbarch, struct regcache *regcache,
                         struct aarch64_call_info *info, struct type *arg_type,
                         struct value *arg)
{
  switch (TYPE_CODE (arg_type))
    {
    case TYPE_CODE_FLT:
      return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type),
                        value_contents (arg));
      break;

    case TYPE_CODE_COMPLEX:
      {
        const bfd_byte *buf = value_contents (arg);
        struct type *target_type = check_typedef (TYPE_TARGET_TYPE (arg_type));

        if (!pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type),
                        buf))
          return false;

        return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type),
                          buf + TYPE_LENGTH (target_type));
      }

    case TYPE_CODE_ARRAY:
      if (TYPE_VECTOR (arg_type))
        return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type),
                          value_contents (arg));
      /* fall through.  */

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      for (int i = 0; i < TYPE_NFIELDS (arg_type); i++)
        {
          /* Don't include static fields.  */
          if (field_is_static (&TYPE_FIELD (arg_type, i)))
            continue;

          struct value *field = value_primitive_field (arg, 0, i, arg_type);
          struct type *field_type = check_typedef (value_type (field));

          if (!pass_in_v_vfp_candidate (gdbarch, regcache, info, field_type,
                                        field))
            return false;
        }
      return true;

    default:
      return false;
    }
}

/* Implement the "push_dummy_call" gdbarch method.  */

static CORE_ADDR
aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
                         struct regcache *regcache, CORE_ADDR bp_addr,
                         int nargs,
                         struct value **args, CORE_ADDR sp,
                         function_call_return_method return_method,
                         CORE_ADDR struct_addr)
{
  int argnum;
  struct aarch64_call_info info;

  memset (&info, 0, sizeof (info));

  /* We need to know what the type of the called function is in order
     to determine the number of named/anonymous arguments for the
     actual argument placement, and the return type in order to handle
     return value correctly.

     The generic code above us views the decision of return in memory
     or return in registers as a two stage process.  The language
     handler is consulted first and may decide to return in memory (e.g.
     a class with copy constructor returned by value), this will cause
     the generic code to allocate space AND insert an initial leading
     argument.

     If the language code does not decide to pass in memory then the
     target code is consulted.

     If the language code decides to pass in memory we want to move
     the pointer inserted as the initial argument from the argument
     list and into X8, the conventional AArch64 struct return pointer
     register.  */

  /* Set the return address.  For the AArch64, the return breakpoint
     is always at BP_ADDR.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);

  /* If we were given an initial argument for the return slot, lose it.  */
  if (return_method == return_method_hidden_param)
    {
      args++;
      nargs--;
    }

  /* The struct_return pointer occupies X8.  */
  if (return_method != return_method_normal)
    {
      if (aarch64_debug)
        {
          debug_printf ("struct return in %s = 0x%s\n",
                        gdbarch_register_name (gdbarch,
                                               AARCH64_STRUCT_RETURN_REGNUM),
                        paddress (gdbarch, struct_addr));
        }
      regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
                                      struct_addr);
    }

  for (argnum = 0; argnum < nargs; argnum++)
    {
      struct value *arg = args[argnum];
      struct type *arg_type, *fundamental_type;
      int len, elements;

      arg_type = check_typedef (value_type (arg));
      len = TYPE_LENGTH (arg_type);

      /* If arg can be passed in v registers as per the AAPCS64, then do so
         if there are enough spare registers.  */
      if (aapcs_is_vfp_call_or_return_candidate (arg_type, &elements,
                                                 &fundamental_type))
        {
          if (info.nsrn + elements <= 8)
            {
              /* We know that we have sufficient registers available therefore
                 this will never need to fall back to the stack.  */
              if (!pass_in_v_vfp_candidate (gdbarch, regcache, &info, arg_type,
                                            arg))
                gdb_assert_not_reached ("Failed to push args");
            }
          else
            {
              info.nsrn = 8;
              pass_on_stack (&info, arg_type, arg);
            }
          continue;
        }

      switch (TYPE_CODE (arg_type))
        {
        case TYPE_CODE_INT:
        case TYPE_CODE_BOOL:
        case TYPE_CODE_CHAR:
        case TYPE_CODE_RANGE:
        case TYPE_CODE_ENUM:
          if (len < 4)
            {
              /* Promote to 32 bit integer.  */
              if (TYPE_UNSIGNED (arg_type))
                arg_type = builtin_type (gdbarch)->builtin_uint32;
              else
                arg_type = builtin_type (gdbarch)->builtin_int32;
              arg = value_cast (arg_type, arg);
            }
          pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;

        case TYPE_CODE_STRUCT:
        case TYPE_CODE_ARRAY:
        case TYPE_CODE_UNION:
          if (len > 16)
            {
              /* PCS B.7 Aggregates larger than 16 bytes are passed by
                 invisible reference.  */

              /* Allocate aligned storage.  */
              sp = align_down (sp - len, 16);

              /* Write the real data into the stack.  */
              write_memory (sp, value_contents (arg), len);

              /* Construct the indirection.  */
              arg_type = lookup_pointer_type (arg_type);
              arg = value_from_pointer (arg_type, sp);
              pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
            }
          else
            /* PCS C.15 / C.18 multiple values pass.  */
            pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;

        default:
          pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;
        }
    }

  /* Make sure stack retains 16 byte alignment.  */
  if (info.nsaa & 15)
    sp -= 16 - (info.nsaa & 15);

  while (!VEC_empty (stack_item_t, info.si))
    {
      stack_item_t *si = VEC_last (stack_item_t, info.si);

      sp -= si->len;
      if (si->data != NULL)
        write_memory (sp, si->data, si->len);
      VEC_pop (stack_item_t, info.si);
    }

  VEC_free (stack_item_t, info.si);

  /* Finally, update the SP register.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);

  return sp;
}

/* Implement the "frame_align" gdbarch method.  */

static CORE_ADDR
aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
{
  /* Align the stack to sixteen bytes.  */
  return sp & ~(CORE_ADDR) 15;
}
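
/* For example, an incoming SP of 0x7ffffffff8ca is aligned down to
   0x7ffffffff8c0.  */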

/* Return the type for an AdvSIMD Q register.  */

static struct type *
aarch64_vnq_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnq_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
			       TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint128;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int128;
      append_composite_type_field (t, "s", elem);
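
      /* The constructed type is roughly equivalent to this C sketch:
	 union __gdb_builtin_type_vnq { __uint128_t u; __int128_t s; };  */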

      tdep->vnq_type = t;
    }

  return tdep->vnq_type;
}

/* Return the type for an AdvSIMD D register.  */

static struct type *
aarch64_vnd_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnd_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
			       TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_double;
      append_composite_type_field (t, "f", elem);

      elem = builtin_type (gdbarch)->builtin_uint64;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int64;
      append_composite_type_field (t, "s", elem);

      tdep->vnd_type = t;
    }

  return tdep->vnd_type;
}

/* Return the type for an AdvSIMD S register.  */

static struct type *
aarch64_vns_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vns_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
			       TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_float;
      append_composite_type_field (t, "f", elem);

      elem = builtin_type (gdbarch)->builtin_uint32;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int32;
      append_composite_type_field (t, "s", elem);

      tdep->vns_type = t;
    }

  return tdep->vns_type;
}

/* Return the type for an AdvSIMD H register.  */

static struct type *
aarch64_vnh_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnh_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
			       TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint16;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int16;
      append_composite_type_field (t, "s", elem);

      tdep->vnh_type = t;
    }

  return tdep->vnh_type;
}

/* Return the type for an AdvSIMD B register.  */

static struct type *
aarch64_vnb_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnb_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
			       TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint8;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int8;
      append_composite_type_field (t, "s", elem);

      tdep->vnb_type = t;
    }

  return tdep->vnb_type;
}

/* Return the type for an AdvSIMD V register.  */

static struct type *
aarch64_vnv_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnv_type == NULL)
    {
      /* The other AArch64 pseudo registers (Q, D, H, S, B) refer to a
	 single value slice from the non-pseudo vector registers.
	 However NEON V registers are always vector registers, and need
	 constructing as such.  */
      const struct builtin_type *bt = builtin_type (gdbarch);

      struct type *t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnv",
					    TYPE_CODE_UNION);

      struct type *sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
					      TYPE_CODE_UNION);
      append_composite_type_field (sub, "f",
				   init_vector_type (bt->builtin_double, 2));
      append_composite_type_field (sub, "u",
				   init_vector_type (bt->builtin_uint64, 2));
      append_composite_type_field (sub, "s",
				   init_vector_type (bt->builtin_int64, 2));
      append_composite_type_field (t, "d", sub);

      sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
				 TYPE_CODE_UNION);
      append_composite_type_field (sub, "f",
				   init_vector_type (bt->builtin_float, 4));
      append_composite_type_field (sub, "u",
				   init_vector_type (bt->builtin_uint32, 4));
      append_composite_type_field (sub, "s",
				   init_vector_type (bt->builtin_int32, 4));
      append_composite_type_field (t, "s", sub);

      sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
				 TYPE_CODE_UNION);
      append_composite_type_field (sub, "u",
				   init_vector_type (bt->builtin_uint16, 8));
      append_composite_type_field (sub, "s",
				   init_vector_type (bt->builtin_int16, 8));
      append_composite_type_field (t, "h", sub);

      sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
				 TYPE_CODE_UNION);
      append_composite_type_field (sub, "u",
				   init_vector_type (bt->builtin_uint8, 16));
      append_composite_type_field (sub, "s",
				   init_vector_type (bt->builtin_int8, 16));
      append_composite_type_field (t, "b", sub);

      sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
				 TYPE_CODE_UNION);
      append_composite_type_field (sub, "u",
				   init_vector_type (bt->builtin_uint128, 1));
      append_composite_type_field (sub, "s",
				   init_vector_type (bt->builtin_int128, 1));
      append_composite_type_field (t, "q", sub);
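
      /* Each of the "d", "s", "h", "b" and "q" fields above views the
	 same 128-bit V register as a vector of a different element
	 width, e.g. "d" as 2 x 64-bit and "b" as 16 x 8-bit elements.  */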

      tdep->vnv_type = t;
    }

  return tdep->vnv_type;
}

/* Implement the "dwarf2_reg_to_regnum" gdbarch method.  */

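/* The AARCH64_DWARF_* numbers follow "DWARF for the ARM 64-bit
   Architecture": x0-x30 are 0-30, sp is 31 and v0-v31 are 64-95; for
   SVE, vg is 46, ffr is 47, p0-p15 are 48-63 and z0-z31 are 96-127.  */
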
static int
aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
    return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;

  if (reg == AARCH64_DWARF_SP)
    return AARCH64_SP_REGNUM;

  if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
    return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;

  if (reg == AARCH64_DWARF_SVE_VG)
    return AARCH64_SVE_VG_REGNUM;

  if (reg == AARCH64_DWARF_SVE_FFR)
    return AARCH64_SVE_FFR_REGNUM;

  if (reg >= AARCH64_DWARF_SVE_P0 && reg <= AARCH64_DWARF_SVE_P0 + 15)
    return AARCH64_SVE_P0_REGNUM + reg - AARCH64_DWARF_SVE_P0;

  /* There are 32 Z registers (z0-z31).  */
  if (reg >= AARCH64_DWARF_SVE_Z0 && reg <= AARCH64_DWARF_SVE_Z0 + 31)
    return AARCH64_SVE_Z0_REGNUM + reg - AARCH64_DWARF_SVE_Z0;

  if (tdep->has_pauth ())
    {
      if (reg >= AARCH64_DWARF_PAUTH_DMASK && reg <= AARCH64_DWARF_PAUTH_CMASK)
	return tdep->pauth_reg_base + reg - AARCH64_DWARF_PAUTH_DMASK;

      if (reg == AARCH64_DWARF_PAUTH_RA_STATE)
	return tdep->pauth_ra_state_regnum;
    }

  return -1;
}

/* Implement the "print_insn" gdbarch method.  */

static int
aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
{
  info->symbols = NULL;
  return default_print_insn (memaddr, info);
}

/* AArch64 BRK software debug mode instruction.
   Note that AArch64 code is always little-endian.
   1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000.  */
constexpr gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};

typedef BP_MANIPULATION (aarch64_default_breakpoint) aarch64_breakpoint;

/* Extract from an array REGS containing the (raw) register state a
   function return value of type TYPE, and copy that, in virtual
   format, into VALBUF.  */

static void
aarch64_extract_return_value (struct type *type, struct regcache *regs,
			      gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = regs->arch ();
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int elements;
  struct type *fundamental_type;

  if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
					     &fundamental_type))
    {
      int len = TYPE_LENGTH (fundamental_type);

      for (int i = 0; i < elements; i++)
	{
	  int regno = AARCH64_V0_REGNUM + i;
	  /* Enough space for a full vector register.  */
	  gdb_byte buf[register_size (gdbarch, regno)];
	  gdb_assert (len <= sizeof (buf));

	  if (aarch64_debug)
	    {
	      debug_printf ("read HFA or HVA return value element %d from %s\n",
			    i + 1,
			    gdbarch_register_name (gdbarch, regno));
	    }
	  regs->cooked_read (regno, buf);

	  memcpy (valbuf, buf, len);
	  valbuf += len;
	}
    }
  else if (TYPE_CODE (type) == TYPE_CODE_INT
	   || TYPE_CODE (type) == TYPE_CODE_CHAR
	   || TYPE_CODE (type) == TYPE_CODE_BOOL
	   || TYPE_CODE (type) == TYPE_CODE_PTR
	   || TYPE_IS_REFERENCE (type)
	   || TYPE_CODE (type) == TYPE_CODE_ENUM)
    {
      /* If the type is a plain integer, then the access is
	 straightforward.  Otherwise we have to play around a bit
	 more.  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      ULONGEST tmp;

      while (len > 0)
	{
	  /* By using store_unsigned_integer we avoid having to do
	     anything special for small big-endian values.  */
	  regcache_cooked_read_unsigned (regs, regno++, &tmp);
	  store_unsigned_integer (valbuf,
				  (len > X_REGISTER_SIZE
				   ? X_REGISTER_SIZE : len), byte_order, tmp);
	  len -= X_REGISTER_SIZE;
	  valbuf += X_REGISTER_SIZE;
	}
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
	 been stored to word-aligned memory and then loaded into
	 registers with 64-bit load instruction(s).  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      bfd_byte buf[X_REGISTER_SIZE];

      while (len > 0)
	{
	  regs->cooked_read (regno++, buf);
	  memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
	  len -= X_REGISTER_SIZE;
	  valbuf += X_REGISTER_SIZE;
	}
    }
}


/* Will a function return an aggregate type in memory or in a
   register?  Return 0 if an aggregate type can be returned in a
   register, 1 if it must be returned in memory.  */

static int
aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
{
  type = check_typedef (type);
  int elements;
  struct type *fundamental_type;

  if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
					     &fundamental_type))
    {
      /* v0-v7 are used to return values and one register is allocated
	 for one member.  However, HFA or HVA has at most four members.  */
      return 0;
    }

  if (TYPE_LENGTH (type) > 16)
    {
      /* PCS B.6 Aggregates larger than 16 bytes are passed by
	 invisible reference.  */
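
      /* For example, a 24-byte struct of chars takes this branch and is
	 returned in memory, whereas struct { float x, y, z; } is an HFA
	 and was already handled above.  */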

      return 1;
    }

  return 0;
}

/* Write into appropriate registers a function return value of type
   TYPE, given in virtual format.  */

static void
aarch64_store_return_value (struct type *type, struct regcache *regs,
			    const gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = regs->arch ();
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int elements;
  struct type *fundamental_type;

  if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
					     &fundamental_type))
    {
      int len = TYPE_LENGTH (fundamental_type);

      for (int i = 0; i < elements; i++)
	{
	  int regno = AARCH64_V0_REGNUM + i;
	  /* Enough space for a full vector register.  */
	  gdb_byte tmpbuf[register_size (gdbarch, regno)];
	  gdb_assert (len <= sizeof (tmpbuf));

	  if (aarch64_debug)
	    {
	      debug_printf ("write HFA or HVA return value element %d to %s\n",
			    i + 1,
			    gdbarch_register_name (gdbarch, regno));
	    }

	  memcpy (tmpbuf, valbuf,
		  len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
	  regs->cooked_write (regno, tmpbuf);
	  valbuf += len;
	}
    }
  else if (TYPE_CODE (type) == TYPE_CODE_INT
	   || TYPE_CODE (type) == TYPE_CODE_CHAR
	   || TYPE_CODE (type) == TYPE_CODE_BOOL
	   || TYPE_CODE (type) == TYPE_CODE_PTR
	   || TYPE_IS_REFERENCE (type)
	   || TYPE_CODE (type) == TYPE_CODE_ENUM)
    {
      if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
	{
	  /* Values of one word or less are zero/sign-extended and
	     returned in X0.  */
	  bfd_byte tmpbuf[X_REGISTER_SIZE];
	  LONGEST val = unpack_long (type, valbuf);

	  store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
	  regs->cooked_write (AARCH64_X0_REGNUM, tmpbuf);
	}
      else
	{
	  /* Integral values greater than one word are stored in
	     consecutive registers starting with X0.  This will always
	     be a multiple of the register size.  */
	  int len = TYPE_LENGTH (type);
	  int regno = AARCH64_X0_REGNUM;

	  while (len > 0)
	    {
	      regs->cooked_write (regno++, valbuf);
	      len -= X_REGISTER_SIZE;
	      valbuf += X_REGISTER_SIZE;
	    }
	}
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
	 been stored to word-aligned memory and then loaded into
	 registers with 64-bit load instruction(s).  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      bfd_byte tmpbuf[X_REGISTER_SIZE];

      while (len > 0)
	{
	  memcpy (tmpbuf, valbuf,
		  len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
	  regs->cooked_write (regno++, tmpbuf);
	  len -= X_REGISTER_SIZE;
	  valbuf += X_REGISTER_SIZE;
	}
    }
}

/* Implement the "return_value" gdbarch method.  */

static enum return_value_convention
aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
		      struct type *valtype, struct regcache *regcache,
		      gdb_byte *readbuf, const gdb_byte *writebuf)
{

  if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
      || TYPE_CODE (valtype) == TYPE_CODE_UNION
      || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
    {
      if (aarch64_return_in_memory (gdbarch, valtype))
	{
	  if (aarch64_debug)
	    debug_printf ("return value in memory\n");
	  return RETURN_VALUE_STRUCT_CONVENTION;
	}
    }

  if (writebuf)
    aarch64_store_return_value (valtype, regcache, writebuf);

  if (readbuf)
    aarch64_extract_return_value (valtype, regcache, readbuf);

  if (aarch64_debug)
    debug_printf ("return value in registers\n");

  return RETURN_VALUE_REGISTER_CONVENTION;
}

/* Implement the "get_longjmp_target" gdbarch method.  */

static int
aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
{
  CORE_ADDR jb_addr;
  gdb_byte buf[X_REGISTER_SIZE];
  struct gdbarch *gdbarch = get_frame_arch (frame);
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);

  if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
			  X_REGISTER_SIZE))
    return 0;

  *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
  return 1;
}

/* Implement the "gen_return_address" gdbarch method.  */

static void
aarch64_gen_return_address (struct gdbarch *gdbarch,
			    struct agent_expr *ax, struct axs_value *value,
			    CORE_ADDR scope)
{
  value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
  value->kind = axs_lvalue_register;
  value->u.reg = AARCH64_LR_REGNUM;
}
\f

/* Return the pseudo register name corresponding to register regnum.  */

static const char *
aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  static const char *const q_name[] =
    {
      "q0", "q1", "q2", "q3",
      "q4", "q5", "q6", "q7",
      "q8", "q9", "q10", "q11",
      "q12", "q13", "q14", "q15",
      "q16", "q17", "q18", "q19",
      "q20", "q21", "q22", "q23",
      "q24", "q25", "q26", "q27",
      "q28", "q29", "q30", "q31",
    };

  static const char *const d_name[] =
    {
      "d0", "d1", "d2", "d3",
      "d4", "d5", "d6", "d7",
      "d8", "d9", "d10", "d11",
      "d12", "d13", "d14", "d15",
      "d16", "d17", "d18", "d19",
      "d20", "d21", "d22", "d23",
      "d24", "d25", "d26", "d27",
      "d28", "d29", "d30", "d31",
    };

  static const char *const s_name[] =
    {
      "s0", "s1", "s2", "s3",
      "s4", "s5", "s6", "s7",
      "s8", "s9", "s10", "s11",
      "s12", "s13", "s14", "s15",
      "s16", "s17", "s18", "s19",
      "s20", "s21", "s22", "s23",
      "s24", "s25", "s26", "s27",
      "s28", "s29", "s30", "s31",
    };

  static const char *const h_name[] =
    {
      "h0", "h1", "h2", "h3",
      "h4", "h5", "h6", "h7",
      "h8", "h9", "h10", "h11",
      "h12", "h13", "h14", "h15",
      "h16", "h17", "h18", "h19",
      "h20", "h21", "h22", "h23",
      "h24", "h25", "h26", "h27",
      "h28", "h29", "h30", "h31",
    };

  static const char *const b_name[] =
    {
      "b0", "b1", "b2", "b3",
      "b4", "b5", "b6", "b7",
      "b8", "b9", "b10", "b11",
      "b12", "b13", "b14", "b15",
      "b16", "b17", "b18", "b19",
      "b20", "b21", "b22", "b23",
      "b24", "b25", "b26", "b27",
      "b28", "b29", "b30", "b31",
    };

  int p_regnum = regnum - gdbarch_num_regs (gdbarch);

  if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
    return q_name[p_regnum - AARCH64_Q0_REGNUM];

  if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
    return d_name[p_regnum - AARCH64_D0_REGNUM];

  if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
    return s_name[p_regnum - AARCH64_S0_REGNUM];

  if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
    return h_name[p_regnum - AARCH64_H0_REGNUM];

  if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
    return b_name[p_regnum - AARCH64_B0_REGNUM];

  if (tdep->has_sve ())
    {
      static const char *const sve_v_name[] =
	{
	  "v0", "v1", "v2", "v3",
	  "v4", "v5", "v6", "v7",
	  "v8", "v9", "v10", "v11",
	  "v12", "v13", "v14", "v15",
	  "v16", "v17", "v18", "v19",
	  "v20", "v21", "v22", "v23",
	  "v24", "v25", "v26", "v27",
	  "v28", "v29", "v30", "v31",
	};

      if (p_regnum >= AARCH64_SVE_V0_REGNUM
	  && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
	return sve_v_name[p_regnum - AARCH64_SVE_V0_REGNUM];
    }

  /* RA_STATE is used for unwinding only.  Do not assign it a name - this
     prevents it from being read by methods such as
     mi_cmd_trace_frame_collected.  */
  if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
    return "";

  internal_error (__FILE__, __LINE__,
		  _("aarch64_pseudo_register_name: bad register number %d"),
		  p_regnum);
}

/* Implement the "pseudo_register_type" tdesc_arch_data method.  */

static struct type *
aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  int p_regnum = regnum - gdbarch_num_regs (gdbarch);

  if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
    return aarch64_vnq_type (gdbarch);

  if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
    return aarch64_vnd_type (gdbarch);

  if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
    return aarch64_vns_type (gdbarch);

  if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
    return aarch64_vnh_type (gdbarch);

  if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
    return aarch64_vnb_type (gdbarch);

  if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
      && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
    return aarch64_vnv_type (gdbarch);

  if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
    return builtin_type (gdbarch)->builtin_uint64;

  internal_error (__FILE__, __LINE__,
		  _("aarch64_pseudo_register_type: bad register number %d"),
		  p_regnum);
}

/* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method.  */

static int
aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
				    struct reggroup *group)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  int p_regnum = regnum - gdbarch_num_regs (gdbarch);

  if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
    return group == all_reggroup || group == vector_reggroup;
  else if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
    return (group == all_reggroup || group == vector_reggroup
	    || group == float_reggroup);
  else if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
    return (group == all_reggroup || group == vector_reggroup
	    || group == float_reggroup);
  else if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
    return group == all_reggroup || group == vector_reggroup;
  else if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
    return group == all_reggroup || group == vector_reggroup;
  else if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
	   && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
    return group == all_reggroup || group == vector_reggroup;
  /* RA_STATE is used for unwinding only.  Do not assign it to any groups.  */
  if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
    return 0;

  return group == all_reggroup;
}

/* Helper for aarch64_pseudo_read_value.  */

static struct value *
aarch64_pseudo_read_value_1 (struct gdbarch *gdbarch,
			     readable_regcache *regcache, int regnum_offset,
			     int regsize, struct value *result_value)
{
  unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;

  /* Enough space for a full vector register.  */
  gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
  gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);

  if (regcache->raw_read (v_regnum, reg_buf) != REG_VALID)
    mark_value_bytes_unavailable (result_value, 0,
				  TYPE_LENGTH (value_type (result_value)));
  else
    memcpy (value_contents_raw (result_value), reg_buf, regsize);

  return result_value;
}

/* Implement the "pseudo_register_read_value" gdbarch method.  */

static struct value *
aarch64_pseudo_read_value (struct gdbarch *gdbarch, readable_regcache *regcache,
			   int regnum)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  struct value *result_value = allocate_value (register_type (gdbarch, regnum));

  VALUE_LVAL (result_value) = lval_register;
  VALUE_REGNUM (result_value) = regnum;

  regnum -= gdbarch_num_regs (gdbarch);

  if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
    return aarch64_pseudo_read_value_1 (gdbarch, regcache,
					regnum - AARCH64_Q0_REGNUM,
					Q_REGISTER_SIZE, result_value);

  if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
    return aarch64_pseudo_read_value_1 (gdbarch, regcache,
					regnum - AARCH64_D0_REGNUM,
					D_REGISTER_SIZE, result_value);

  if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
    return aarch64_pseudo_read_value_1 (gdbarch, regcache,
					regnum - AARCH64_S0_REGNUM,
					S_REGISTER_SIZE, result_value);

  if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
    return aarch64_pseudo_read_value_1 (gdbarch, regcache,
					regnum - AARCH64_H0_REGNUM,
					H_REGISTER_SIZE, result_value);

  if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
    return aarch64_pseudo_read_value_1 (gdbarch, regcache,
					regnum - AARCH64_B0_REGNUM,
					B_REGISTER_SIZE, result_value);

  if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
      && regnum < AARCH64_SVE_V0_REGNUM + 32)
    return aarch64_pseudo_read_value_1 (gdbarch, regcache,
					regnum - AARCH64_SVE_V0_REGNUM,
					V_REGISTER_SIZE, result_value);

  gdb_assert_not_reached ("regnum out of bounds");
}

/* Helper for aarch64_pseudo_write.  */

static void
aarch64_pseudo_write_1 (struct gdbarch *gdbarch, struct regcache *regcache,
			int regnum_offset, int regsize, const gdb_byte *buf)
{
  unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;

  /* Enough space for a full vector register.  */
  gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
  gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);

  /* Ensure the register buffer is zero, we want gdb writes of the
     various 'scalar' pseudo registers to behave like architectural
     writes: register width bytes are written, the remainder are set
     to zero.  */
  memset (reg_buf, 0, register_size (gdbarch, AARCH64_V0_REGNUM));

  memcpy (reg_buf, buf, regsize);
  regcache->raw_write (v_regnum, reg_buf);
}

/* Implement the "pseudo_register_write" gdbarch method.  */

static void
aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
		      int regnum, const gdb_byte *buf)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  regnum -= gdbarch_num_regs (gdbarch);

  if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
    return aarch64_pseudo_write_1 (gdbarch, regcache,
				   regnum - AARCH64_Q0_REGNUM, Q_REGISTER_SIZE,
				   buf);

  if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
    return aarch64_pseudo_write_1 (gdbarch, regcache,
				   regnum - AARCH64_D0_REGNUM, D_REGISTER_SIZE,
				   buf);

  if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
    return aarch64_pseudo_write_1 (gdbarch, regcache,
				   regnum - AARCH64_S0_REGNUM, S_REGISTER_SIZE,
				   buf);

  if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
    return aarch64_pseudo_write_1 (gdbarch, regcache,
				   regnum - AARCH64_H0_REGNUM, H_REGISTER_SIZE,
				   buf);

  if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
    return aarch64_pseudo_write_1 (gdbarch, regcache,
				   regnum - AARCH64_B0_REGNUM, B_REGISTER_SIZE,
				   buf);

  if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
      && regnum < AARCH64_SVE_V0_REGNUM + 32)
    return aarch64_pseudo_write_1 (gdbarch, regcache,
				   regnum - AARCH64_SVE_V0_REGNUM,
				   V_REGISTER_SIZE, buf);

  gdb_assert_not_reached ("regnum out of bounds");
}

/* Callback function for user_reg_add.  */

static struct value *
value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
{
  const int *reg_p = (const int *) baton;

  return value_of_register (*reg_p, frame);
}
\f

/* Implement the "software_single_step" gdbarch method, needed to
   single step through atomic sequences on AArch64.  */
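/* A typical sequence looks like (sketch):

     again:
       ldaxr  w0, [x1]        ; load-exclusive
       add    w0, w0, #1
       stlxr  w2, w0, [x1]    ; store-exclusive
       cbnz   w2, again

   Trapping inside the sequence would clear the exclusive monitor and
   make the store-exclusive fail every time, so the breakpoints are
   instead placed after the closing store-exclusive and at the target
   of any conditional branch inside the sequence.  */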

static std::vector<CORE_ADDR>
aarch64_software_single_step (struct regcache *regcache)
{
  struct gdbarch *gdbarch = regcache->arch ();
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  const int insn_size = 4;
  const int atomic_sequence_length = 16; /* Instruction sequence length.  */
  CORE_ADDR pc = regcache_read_pc (regcache);
  CORE_ADDR breaks[2] = { CORE_ADDR_MAX, CORE_ADDR_MAX };
  CORE_ADDR loc = pc;
  CORE_ADDR closing_insn = 0;
  uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
						byte_order_for_code);
  int index;
  int insn_count;
  int bc_insn_count = 0; /* Conditional branch instruction count.  */
  int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed).  */
  aarch64_inst inst;

  if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
    return {};

  /* Look for a Load Exclusive instruction which begins the sequence.  */
  if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
    return {};

  for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
    {
      loc += insn_size;
      insn = read_memory_unsigned_integer (loc, insn_size,
					   byte_order_for_code);

      if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
	return {};
      /* Check if the instruction is a conditional branch.  */
      if (inst.opcode->iclass == condbranch)
	{
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);

	  if (bc_insn_count >= 1)
	    return {};

	  /* It is, so we'll try to set a breakpoint at the destination.  */
	  breaks[1] = loc + inst.operands[0].imm.value;

	  bc_insn_count++;
	  last_breakpoint++;
	}

      /* Look for the Store Exclusive which closes the atomic sequence.  */
      if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
	{
	  closing_insn = loc;
	  break;
	}
    }

  /* We didn't find a closing Store Exclusive instruction, fall back.  */
  if (!closing_insn)
    return {};

  /* Insert breakpoint after the end of the atomic sequence.  */
  breaks[0] = loc + insn_size;

  /* Check for duplicated breakpoints, and also check that the second
     breakpoint is not within the atomic sequence.  */
  if (last_breakpoint
      && (breaks[1] == breaks[0]
	  || (breaks[1] >= pc && breaks[1] <= closing_insn)))
    last_breakpoint = 0;

  std::vector<CORE_ADDR> next_pcs;

  /* Insert the breakpoint at the end of the sequence, and one at the
     destination of the conditional branch, if it exists.  */
  for (index = 0; index <= last_breakpoint; index++)
    next_pcs.push_back (breaks[index]);

  return next_pcs;
}

struct aarch64_displaced_step_closure : public displaced_step_closure
{
  /* It is true when a conditional instruction, such as B.COND, TBZ,
     etc., is being displaced stepped.  */
  int cond = 0;

  /* PC adjustment offset after displaced stepping.  */
  int32_t pc_adjust = 0;
};

/* Data when visiting instructions for displaced stepping.  */

struct aarch64_displaced_step_data
{
  struct aarch64_insn_data base;

  /* The address where the instruction will be executed at.  */
  CORE_ADDR new_addr;
  /* Buffer of instructions to be copied to NEW_ADDR to execute.  */
  uint32_t insn_buf[DISPLACED_MODIFIED_INSNS];
  /* Number of instructions in INSN_BUF.  */
  unsigned insn_count;
  /* Registers when doing displaced stepping.  */
  struct regcache *regs;

  aarch64_displaced_step_closure *dsc;
};

/* Implementation of aarch64_insn_visitor method "b".  */

static void
aarch64_displaced_step_b (const int is_bl, const int32_t offset,
			  struct aarch64_insn_data *data)
{
  struct aarch64_displaced_step_data *dsd
    = (struct aarch64_displaced_step_data *) data;
  int64_t new_offset = data->insn_addr - dsd->new_addr + offset;
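
  /* B and BL encode a signed 26-bit word offset, i.e. a 28-bit byte
     offset of roughly +/-128 MiB; the check below tests whether the
     recomputed offset still fits in that range.  */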
  if (can_encode_int32 (new_offset, 28))
    {
      /* Emit B rather than BL, because executing BL on a new address
	 will get the wrong address into LR.  In order to avoid this,
	 we emit B, and update LR if the instruction is BL.  */
      emit_b (dsd->insn_buf, 0, new_offset);
      dsd->insn_count++;
    }
  else
    {
      /* Write NOP.  */
      emit_nop (dsd->insn_buf);
      dsd->insn_count++;
      dsd->dsc->pc_adjust = offset;
    }

  if (is_bl)
    {
      /* Update LR.  */
      regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
				      data->insn_addr + 4);
    }
}

/* Implementation of aarch64_insn_visitor method "b_cond".  */

static void
aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
			       struct aarch64_insn_data *data)
{
  struct aarch64_displaced_step_data *dsd
    = (struct aarch64_displaced_step_data *) data;

  /* GDB has to fix up PC after displaced stepping this instruction
     differently according to whether the condition is true or false.
     Instead of checking COND against the condition flags, we can use
     the following instructions, and GDB can tell how to fix up PC
     according to the PC value.

     B.COND TAKEN    ; If cond is true, then jump to TAKEN.
     INSN1           ;
     TAKEN:
     INSN2
  */

  emit_bcond (dsd->insn_buf, cond, 8);
  dsd->dsc->cond = 1;
  dsd->dsc->pc_adjust = offset;
  dsd->insn_count = 1;
}

/* Dynamically allocate a new register.  If we know the register
   statically, we should make it a global as above instead of using this
   helper function.  */

static struct aarch64_register
aarch64_register (unsigned num, int is64)
{
  return (struct aarch64_register) { num, is64 };
}

/* Implementation of aarch64_insn_visitor method "cb".  */

static void
aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
			   const unsigned rn, int is64,
			   struct aarch64_insn_data *data)
{
  struct aarch64_displaced_step_data *dsd
    = (struct aarch64_displaced_step_data *) data;

  /* The offset is out of range for a compare and branch
     instruction.  We can use the following instructions instead:

	 CBZ xn, TAKEN   ; xn == 0, then jump to TAKEN.
	 INSN1     ;
	 TAKEN:
	 INSN2
  */
  emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
  dsd->insn_count = 1;
  dsd->dsc->cond = 1;
  dsd->dsc->pc_adjust = offset;
}

/* Implementation of aarch64_insn_visitor method "tb".  */

static void
aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
			   const unsigned rt, unsigned bit,
			   struct aarch64_insn_data *data)
{
  struct aarch64_displaced_step_data *dsd
    = (struct aarch64_displaced_step_data *) data;

  /* The offset is out of range for a test bit and branch
     instruction.  We can use the following instructions instead:

	 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
	 INSN1         ;
	 TAKEN:
	 INSN2
  */
  emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
  dsd->insn_count = 1;
  dsd->dsc->cond = 1;
  dsd->dsc->pc_adjust = offset;
}

/* Implementation of aarch64_insn_visitor method "adr".  */

static void
aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
			    const int is_adrp, struct aarch64_insn_data *data)
{
  struct aarch64_displaced_step_data *dsd
    = (struct aarch64_displaced_step_data *) data;
  /* We know exactly the address the ADR{P,} instruction will compute.
     We can just write it to the destination register.  */
  CORE_ADDR address = data->insn_addr + offset;

  if (is_adrp)
    {
      /* Clear the lower 12 bits of the offset to get the 4K page.  */
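      /* E.g. a computed address of 0x400123 is written back as
	 0x400000, matching what ADRP itself would have produced.  */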
      regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
				      address & ~0xfff);
    }
  else
    regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
				    address);

  dsd->dsc->pc_adjust = 4;
  emit_nop (dsd->insn_buf);
  dsd->insn_count = 1;
}

/* Implementation of aarch64_insn_visitor method "ldr_literal".  */

static void
aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
				    const unsigned rt, const int is64,
				    struct aarch64_insn_data *data)
{
  struct aarch64_displaced_step_data *dsd
    = (struct aarch64_displaced_step_data *) data;
  CORE_ADDR address = data->insn_addr + offset;
  struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };

  regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
				  address);

  if (is_sw)
    dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
				  aarch64_register (rt, 1), zero);
  else
    dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
				aarch64_register (rt, 1), zero);

  dsd->dsc->pc_adjust = 4;
}

/* Implementation of aarch64_insn_visitor method "others".  */

static void
aarch64_displaced_step_others (const uint32_t insn,
			       struct aarch64_insn_data *data)
{
  struct aarch64_displaced_step_data *dsd
    = (struct aarch64_displaced_step_data *) data;

  aarch64_emit_insn (dsd->insn_buf, insn);
  dsd->insn_count = 1;

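  /* RET Xn encodes as 0xd65f0000 with the source register number in
     bits [9:5]; masking those bits out with 0xfffffc1f matches a RET
     through any register.  */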
  if ((insn & 0xfffffc1f) == 0xd65f0000)
    {
      /* RET */
      dsd->dsc->pc_adjust = 0;
    }
  else
    dsd->dsc->pc_adjust = 4;
}

static const struct aarch64_insn_visitor visitor =
{
  aarch64_displaced_step_b,
  aarch64_displaced_step_b_cond,
  aarch64_displaced_step_cb,
  aarch64_displaced_step_tb,
  aarch64_displaced_step_adr,
  aarch64_displaced_step_ldr_literal,
  aarch64_displaced_step_others,
};

/* Implement the "displaced_step_copy_insn" gdbarch method.  */

struct displaced_step_closure *
aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
				  CORE_ADDR from, CORE_ADDR to,
				  struct regcache *regs)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
  struct aarch64_displaced_step_data dsd;
  aarch64_inst inst;

  if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
    return NULL;

  /* Look for a Load Exclusive instruction which begins the sequence.  */
  if (inst.opcode->iclass == ldstexcl && bit (insn, 22))
    {
      /* We can't displaced step atomic sequences.  */
      return NULL;
    }

  std::unique_ptr<aarch64_displaced_step_closure> dsc
    (new aarch64_displaced_step_closure);
  dsd.base.insn_addr = from;
  dsd.new_addr = to;
  dsd.regs = regs;
  dsd.dsc = dsc.get ();
  dsd.insn_count = 0;
  aarch64_relocate_instruction (insn, &visitor,
				(struct aarch64_insn_data *) &dsd);
  gdb_assert (dsd.insn_count <= DISPLACED_MODIFIED_INSNS);

  if (dsd.insn_count != 0)
    {
      int i;

      /* Instruction can be relocated to scratch pad.  Copy
	 relocated instruction(s) there.  */
      for (i = 0; i < dsd.insn_count; i++)
	{
	  if (debug_displaced)
	    {
	      debug_printf ("displaced: writing insn ");
	      debug_printf ("%.8x", dsd.insn_buf[i]);
	      debug_printf (" at %s\n", paddress (gdbarch, to + i * 4));
	    }
	  write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
					 (ULONGEST) dsd.insn_buf[i]);
	}
    }
  else
    {
      dsc = NULL;
    }

  return dsc.release ();
}

/* Implement the "displaced_step_fixup" gdbarch method.  */

void
aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
			      struct displaced_step_closure *dsc_,
			      CORE_ADDR from, CORE_ADDR to,
			      struct regcache *regs)
{
  aarch64_displaced_step_closure *dsc = (aarch64_displaced_step_closure *) dsc_;

  if (dsc->cond)
    {
      ULONGEST pc;

      regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc);
      if (pc - to == 8)
	{
	  /* Condition is true.  */
	}
      else if (pc - to == 4)
	{
	  /* Condition is false.  */
	  dsc->pc_adjust = 4;
	}
      else
	gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
    }

  if (dsc->pc_adjust != 0)
    {
      if (debug_displaced)
	{
	  debug_printf ("displaced: fixup: set PC to %s:%d\n",
			paddress (gdbarch, from), dsc->pc_adjust);
	}
      regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
				      from + dsc->pc_adjust);
    }
}

/* Implement the "displaced_step_hw_singlestep" gdbarch method.  */

int
aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch,
				      struct displaced_step_closure *closure)
{
  return 1;
}

/* Get the correct target description for the given VQ value.
   If VQ is zero then it is assumed SVE is not supported.
   (It is not possible to set VQ to zero on an SVE system).  */

const target_desc *
aarch64_read_description (uint64_t vq, bool pauth_p)
{
  if (vq > AARCH64_MAX_SVE_VQ)
    error (_("VQ is %" PRIu64 ", maximum supported value is %d"), vq,
	   AARCH64_MAX_SVE_VQ);

  struct target_desc *tdesc = tdesc_aarch64_list[vq][pauth_p];

  if (tdesc == NULL)
    {
      tdesc = aarch64_create_target_description (vq, pauth_p);
      tdesc_aarch64_list[vq][pauth_p] = tdesc;
    }

  return tdesc;
}

/* Return the VQ used when creating the target description TDESC.  */

static uint64_t
aarch64_get_tdesc_vq (const struct target_desc *tdesc)
{
  const struct tdesc_feature *feature_sve;

  if (!tdesc_has_registers (tdesc))
    return 0;

  feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");

  if (feature_sve == nullptr)
    return 0;

  uint64_t vl = tdesc_register_bitsize (feature_sve,
					aarch64_sve_register_names[0]) / 8;
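
  /* VL is the SVE vector length in bytes and VQ counts 128-bit
     quadwords, so VQ = VL / 16; e.g. a 512-bit SVE implementation has
     VL == 64 and therefore VQ == 4.  */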
  return sve_vq_from_vl (vl);
}

/* Add all the expected register sets into GDBARCH.  */

static void
aarch64_add_reggroups (struct gdbarch *gdbarch)
{
  reggroup_add (gdbarch, general_reggroup);
  reggroup_add (gdbarch, float_reggroup);
  reggroup_add (gdbarch, system_reggroup);
  reggroup_add (gdbarch, vector_reggroup);
  reggroup_add (gdbarch, all_reggroup);
  reggroup_add (gdbarch, save_reggroup);
  reggroup_add (gdbarch, restore_reggroup);
}

/* Implement the "cannot_store_register" gdbarch method.  */

static int
aarch64_cannot_store_register (struct gdbarch *gdbarch, int regnum)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (!tdep->has_pauth ())
    return 0;

  /* Pointer authentication registers are read-only.  */
  return (regnum == AARCH64_PAUTH_DMASK_REGNUM (tdep->pauth_reg_base)
	  || regnum == AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base));
}

/* Initialize the current architecture based on INFO.  If possible,
   re-use an architecture from ARCHES, which is a list of
   architectures already created during this debugging session.

   Called e.g. at program startup, when reading a core file, and when
   reading a binary file.  */

static struct gdbarch *
aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
{
  const struct tdesc_feature *feature_core, *feature_fpu, *feature_sve;
  const struct tdesc_feature *feature_pauth;
  bool valid_p = true;
  int i, num_regs = 0, num_pseudo_regs = 0;
  int first_pauth_regnum = -1, pauth_ra_state_offset = -1;

  /* Use the vector length passed via the target info.  Here -1 is used
     for no SVE, and 0 is unset.  If unset then use the vector length
     from the existing tdesc.  */
  uint64_t vq = 0;
  if (info.id == (int *) -1)
    vq = 0;
  else if (info.id != 0)
    vq = (uint64_t) info.id;
  else
    vq = aarch64_get_tdesc_vq (info.target_desc);

  if (vq > AARCH64_MAX_SVE_VQ)
    internal_error (__FILE__, __LINE__, _("VQ out of bounds: %s (max %d)"),
		    pulongest (vq), AARCH64_MAX_SVE_VQ);

  /* If there is already a candidate, use it.  */
  for (gdbarch_list *best_arch = gdbarch_list_lookup_by_info (arches, &info);
       best_arch != nullptr;
       best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
    {
      struct gdbarch_tdep *tdep = gdbarch_tdep (best_arch->gdbarch);
      if (tdep && tdep->vq == vq)
	return best_arch->gdbarch;
    }

  /* Ensure we always have a target descriptor, and that it is for the
     given VQ value.  */
  const struct target_desc *tdesc = info.target_desc;
  if (!tdesc_has_registers (tdesc) || vq != aarch64_get_tdesc_vq (tdesc))
    tdesc = aarch64_read_description (vq, false);
  gdb_assert (tdesc);

  feature_core = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");
  feature_fpu = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
  feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
  feature_pauth = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.pauth");

  if (feature_core == nullptr)
    return nullptr;

  struct tdesc_arch_data *tdesc_data = tdesc_data_alloc ();

  /* Validate the description provides the mandatory core R registers
     and allocate their numbers.  */
  for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
    valid_p &= tdesc_numbered_register (feature_core, tdesc_data,
					AARCH64_X0_REGNUM + i,
					aarch64_r_register_names[i]);

  num_regs = AARCH64_X0_REGNUM + i;

  /* Add the V registers.  */
  if (feature_fpu != nullptr)
    {
      if (feature_sve != nullptr)
	error (_("Program contains both fpu and SVE features."));

      /* Validate the description provides the mandatory V registers
	 and allocate their numbers.  */
      for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
	valid_p &= tdesc_numbered_register (feature_fpu, tdesc_data,
					    AARCH64_V0_REGNUM + i,
					    aarch64_v_register_names[i]);

      num_regs = AARCH64_V0_REGNUM + i;
    }

  /* Add the SVE registers.  */
  if (feature_sve != nullptr)
    {
      /* Validate the description provides the mandatory SVE registers
	 and allocate their numbers.  */
      for (i = 0; i < ARRAY_SIZE (aarch64_sve_register_names); i++)
	valid_p &= tdesc_numbered_register (feature_sve, tdesc_data,
					    AARCH64_SVE_Z0_REGNUM + i,
					    aarch64_sve_register_names[i]);

      num_regs = AARCH64_SVE_Z0_REGNUM + i;
      num_pseudo_regs += 32;	/* add the Vn register pseudos.  */
    }

  if (feature_fpu != nullptr || feature_sve != nullptr)
    {
      num_pseudo_regs += 32;	/* add the Qn scalar register pseudos */
      num_pseudo_regs += 32;	/* add the Dn scalar register pseudos */
      num_pseudo_regs += 32;	/* add the Sn scalar register pseudos */
      num_pseudo_regs += 32;	/* add the Hn scalar register pseudos */
      num_pseudo_regs += 32;	/* add the Bn scalar register pseudos */
    }

  /* Add the pauth registers.  */
  if (feature_pauth != NULL)
    {
      first_pauth_regnum = num_regs;
      pauth_ra_state_offset = num_pseudo_regs;
      /* Validate the descriptor provides the mandatory PAUTH registers
	 and allocate their numbers.  */
      for (i = 0; i < ARRAY_SIZE (aarch64_pauth_register_names); i++)
	valid_p &= tdesc_numbered_register (feature_pauth, tdesc_data,
					    first_pauth_regnum + i,
					    aarch64_pauth_register_names[i]);

      num_regs += i;
      num_pseudo_regs += 1;	/* Count RA_STATE pseudo register.  */
    }

  if (!valid_p)
    {
      tdesc_data_cleanup (tdesc_data);
      return nullptr;
    }

  /* AArch64 code is always little-endian.  */
  info.byte_order_for_code = BFD_ENDIAN_LITTLE;

  struct gdbarch_tdep *tdep = XCNEW (struct gdbarch_tdep);
  struct gdbarch *gdbarch = gdbarch_alloc (&info, tdep);

  /* This should be low enough for everything.  */
  tdep->lowest_pc = 0x20;
  tdep->jb_pc = -1;		/* Longjump support not enabled by default.  */
  tdep->jb_elt_size = 8;
  tdep->vq = vq;
  tdep->pauth_reg_base = first_pauth_regnum;
  tdep->pauth_ra_state_regnum = (feature_pauth == NULL) ? -1
				: pauth_ra_state_offset + num_regs;

  set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
  set_gdbarch_frame_align (gdbarch, aarch64_frame_align);

  /* Advance PC across function entry code.  */
  set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);

  /* The stack grows downward.  */
  set_gdbarch_inner_than (gdbarch, core_addr_lessthan);

  /* Breakpoint manipulation.  */
  set_gdbarch_breakpoint_kind_from_pc (gdbarch,
				       aarch64_breakpoint::kind_from_pc);
  set_gdbarch_sw_breakpoint_from_kind (gdbarch,
				       aarch64_breakpoint::bp_from_kind);
  set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
  set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);

  /* Information about registers, etc.  */
  set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
  set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
  set_gdbarch_num_regs (gdbarch, num_regs);

  set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
  set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
  set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
  set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
  set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
  set_tdesc_pseudo_register_reggroup_p (gdbarch,
					aarch64_pseudo_register_reggroup_p);
  set_gdbarch_cannot_store_register (gdbarch, aarch64_cannot_store_register);

  /* ABI */
  set_gdbarch_short_bit (gdbarch, 16);
  set_gdbarch_int_bit (gdbarch, 32);
  set_gdbarch_float_bit (gdbarch, 32);
  set_gdbarch_double_bit (gdbarch, 64);
  set_gdbarch_long_double_bit (gdbarch, 128);
  set_gdbarch_long_bit (gdbarch, 64);
  set_gdbarch_long_long_bit (gdbarch, 64);
  set_gdbarch_ptr_bit (gdbarch, 64);
  set_gdbarch_char_signed (gdbarch, 0);
  set_gdbarch_wchar_signed (gdbarch, 0);
  set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
  set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
  set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);
  set_gdbarch_type_align (gdbarch, aarch64_type_align);

  /* Internal <-> external register number maps.  */
  set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);

  /* Returning results.  */
  set_gdbarch_return_value (gdbarch, aarch64_return_value);

  /* Disassembly.  */
  set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);

  /* Virtual tables.  */
  set_gdbarch_vbit_in_delta (gdbarch, 1);

  /* Register architecture.  */
  aarch64_add_reggroups (gdbarch);

  /* Hook in the ABI-specific overrides, if they have been registered.  */
  info.target_desc = tdesc;
  info.tdesc_data = tdesc_data;
  gdbarch_init_osabi (info, gdbarch);

  dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
  /* Register DWARF CFA vendor handler.  */
  set_gdbarch_execute_dwarf_cfa_vendor_op (gdbarch,
					   aarch64_execute_dwarf_cfa_vendor_op);

  /* Add some default predicates.  */
  frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
  dwarf2_append_unwinders (gdbarch);
  frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);

  frame_base_set_default (gdbarch, &aarch64_normal_base);

  /* Now we have tuned the configuration, set a few final things,
     based on what the OS ABI has told us.  */

  if (tdep->jb_pc >= 0)
    set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);

  set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);

  tdesc_use_registers (gdbarch, tdesc, tdesc_data);

  /* Add standard register aliases.  */
  for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
    user_reg_add (gdbarch, aarch64_register_aliases[i].name,
		  value_of_aarch64_user_reg,
		  &aarch64_register_aliases[i].regnum);

  register_aarch64_ravenscar_ops (gdbarch);

  return gdbarch;
}

static void
aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep == NULL)
    return;

  fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
		      paddress (gdbarch, tdep->lowest_pc));
}

#if GDB_SELF_TEST
namespace selftests
{
static void aarch64_process_record_test (void);
}
#endif

void
_initialize_aarch64_tdep (void)
{
  gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
                    aarch64_dump_tdep);

  /* Debug this file's internals.  */
  add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
Set AArch64 debugging."), _("\
Show AArch64 debugging."), _("\
When on, AArch64 specific debugging is enabled."),
                           NULL,
                           show_aarch64_debug,
                           &setdebuglist, &showdebuglist);

#if GDB_SELF_TEST
  selftests::register_test ("aarch64-analyze-prologue",
                            selftests::aarch64_analyze_prologue_test);
  selftests::register_test ("aarch64-process-record",
                            selftests::aarch64_process_record_test);
  selftests::record_xml_tdesc ("aarch64.xml",
                               aarch64_create_target_description (0, false));
#endif
}
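
/* Usage sketch (assuming GDB's stock "set debug" / "show debug" prefix
   commands): the boolean registered above is toggled from the CLI
   with, e.g.

     (gdb) set debug aarch64 on
     (gdb) show debug aarch64

   which flips the aarch64_debug flag guarding this file's tracing.  */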

/* AArch64 process record-replay related structures, defines etc.  */

#define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
        do \
          { \
            unsigned int reg_len = LENGTH; \
            if (reg_len) \
              { \
                REGS = XNEWVEC (uint32_t, reg_len); \
                memcpy (&REGS[0], &RECORD_BUF[0], \
                        sizeof (uint32_t) * LENGTH); \
              } \
          } \
        while (0)

#define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
        do \
          { \
            unsigned int mem_len = LENGTH; \
            if (mem_len) \
              { \
                MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
                memcpy (&MEMS->len, &RECORD_BUF[0], \
                        sizeof (struct aarch64_mem_r) * LENGTH); \
              } \
          } \
        while (0)
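
/* For illustration, a sketch of how the record handlers below hand
   their scratch buffers to these macros (mirroring the calls made
   throughout this file); "count" and "buf" are hypothetical locals:

     uint32_t buf[4];
     unsigned int count = 0;

     buf[count++] = AARCH64_CPSR_REGNUM;
     REG_ALLOC (r->aarch64_regs, count, buf);

   Both macros heap-allocate via XNEWVEC only when the count is
   non-zero; the copies are released later by deallocate_reg_mem.  */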

/* AArch64 record/replay structures and enumerations.  */

struct aarch64_mem_r
{
  uint64_t len;    /* Record length.  */
  uint64_t addr;   /* Memory address.  */
};

enum aarch64_record_result
{
  AARCH64_RECORD_SUCCESS,
  AARCH64_RECORD_UNSUPPORTED,
  AARCH64_RECORD_UNKNOWN
};

typedef struct insn_decode_record_t
{
  struct gdbarch *gdbarch;
  struct regcache *regcache;
  CORE_ADDR this_addr;                 /* Address of insn to be recorded.  */
  uint32_t aarch64_insn;               /* Insn to be recorded.  */
  uint32_t mem_rec_count;              /* Count of memory records.  */
  uint32_t reg_rec_count;              /* Count of register records.  */
  uint32_t *aarch64_regs;              /* Registers to be recorded.  */
  struct aarch64_mem_r *aarch64_mems;  /* Memory locations to be recorded.  */
} insn_decode_record;
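
/* A worked example of the field extraction the handlers below rely on
   ("insn" stands for the aarch64_insn member; the concrete encoding,
   taken from the A64 ISA, is only illustrative): for

     adds x0, x1, x2   ->  0xab020020

   bits (insn, 0, 4) yields Rd = 0, bits (insn, 5, 9) yields Rn = 1,
   bits (insn, 16, 20) yields Rm = 2, and bit (insn, 29) is the S
   (set-flags) bit, here 1.  */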

/* Record handler for data processing - register instructions.  */

static unsigned int
aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
{
  uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
  uint32_t record_buf[4];

  reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);

  if (!bit (aarch64_insn_r->aarch64_insn, 28))
    {
      uint8_t setflags;

      /* Logical (shifted register).  */
      if (insn_bits24_27 == 0x0a)
        setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
      /* Add/subtract.  */
      else if (insn_bits24_27 == 0x0b)
        setflags = bit (aarch64_insn_r->aarch64_insn, 29);
      else
        return AARCH64_RECORD_UNKNOWN;

      record_buf[0] = reg_rd;
      aarch64_insn_r->reg_rec_count = 1;
      if (setflags)
        record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
    }
  else
    {
      if (insn_bits24_27 == 0x0b)
        {
          /* Data-processing (3 source).  */
          record_buf[0] = reg_rd;
          aarch64_insn_r->reg_rec_count = 1;
        }
      else if (insn_bits24_27 == 0x0a)
        {
          if (insn_bits21_23 == 0x00)
            {
              /* Add/subtract (with carry).  */
              record_buf[0] = reg_rd;
              aarch64_insn_r->reg_rec_count = 1;
              if (bit (aarch64_insn_r->aarch64_insn, 29))
                {
                  record_buf[1] = AARCH64_CPSR_REGNUM;
                  aarch64_insn_r->reg_rec_count = 2;
                }
            }
          else if (insn_bits21_23 == 0x02)
            {
              /* Conditional compare (register) and conditional compare
                 (immediate) instructions.  */
              record_buf[0] = AARCH64_CPSR_REGNUM;
              aarch64_insn_r->reg_rec_count = 1;
            }
          else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
            {
              /* Conditional select.  */
              /* Data-processing (2 source).  */
              /* Data-processing (1 source).  */
              record_buf[0] = reg_rd;
              aarch64_insn_r->reg_rec_count = 1;
            }
          else
            return AARCH64_RECORD_UNKNOWN;
        }
    }

  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
             record_buf);
  return AARCH64_RECORD_SUCCESS;
}
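
/* Continuing the worked example above: adds x0, x1, x2 (0xab020020)
   has bit 28 clear and bits 24-27 equal to 0x0b, so it is treated as
   add/subtract (shifted register); since the S bit is set, the handler
   records both x0 (Rd) and the CPSR.  */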

/* Record handler for data processing - immediate instructions.  */

static unsigned int
aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
{
  uint8_t reg_rd, insn_bit23, insn_bits24_27, setflags;
  uint32_t record_buf[4];

  reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);

  if (insn_bits24_27 == 0x00                     /* PC rel addressing.  */
      || insn_bits24_27 == 0x03                  /* Bitfield and Extract.  */
      || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate).  */
    {
      record_buf[0] = reg_rd;
      aarch64_insn_r->reg_rec_count = 1;
    }
  else if (insn_bits24_27 == 0x01)
    {
      /* Add/Subtract (immediate).  */
      setflags = bit (aarch64_insn_r->aarch64_insn, 29);
      record_buf[0] = reg_rd;
      aarch64_insn_r->reg_rec_count = 1;
      if (setflags)
        record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
    }
  else if (insn_bits24_27 == 0x02 && !insn_bit23)
    {
      /* Logical (immediate).  */
      setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
      record_buf[0] = reg_rd;
      aarch64_insn_r->reg_rec_count = 1;
      if (setflags)
        record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
    }
  else
    return AARCH64_RECORD_UNKNOWN;

  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
             record_buf);
  return AARCH64_RECORD_SUCCESS;
}
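
/* For illustration (the encoding follows the A64 ISA and is given only
   as an example): movz x0, #1 is 0xd2800020; its bits 24-27 are 0x02
   and bit 23 is set, so it falls into the move wide (immediate) arm
   above and only x0 (Rd) is recorded.  */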

/* Record handler for branch, exception generation and system instructions.  */

static unsigned int
aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
  uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
  uint32_t record_buf[4];

  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
  insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);

  if (insn_bits28_31 == 0x0d)
    {
      /* Exception generation instructions.  */
      if (insn_bits24_27 == 0x04)
        {
          if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
              && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
              && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
            {
              ULONGEST svc_number;

              regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
                                          &svc_number);
              return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
                                                   svc_number);
            }
          else
            return AARCH64_RECORD_UNSUPPORTED;
        }
      /* System instructions.  */
      else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
        {
          uint32_t reg_rt, reg_crn;

          reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
          reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);

          /* Record rt in case of sysl and mrs instructions.  */
          if (bit (aarch64_insn_r->aarch64_insn, 21))
            {
              record_buf[0] = reg_rt;
              aarch64_insn_r->reg_rec_count = 1;
            }
          /* Record cpsr for hint and msr(immediate) instructions.  */
          else if (reg_crn == 0x02 || reg_crn == 0x04)
            {
              record_buf[0] = AARCH64_CPSR_REGNUM;
              aarch64_insn_r->reg_rec_count = 1;
            }
        }
      /* Unconditional branch (register).  */
      else if ((insn_bits24_27 & 0x0e) == 0x06)
        {
          record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
          if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
            record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
        }
      else
        return AARCH64_RECORD_UNKNOWN;
    }
  /* Unconditional branch (immediate).  */
  else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
    {
      record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
      if (bit (aarch64_insn_r->aarch64_insn, 31))
        record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
    }
  else
    /* Compare & branch (immediate), Test & branch (immediate) and
       Conditional branch (immediate).  */
    record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;

  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
             record_buf);
  return AARCH64_RECORD_SUCCESS;
}
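
/* For illustration: a bl <label> instruction (top byte 0x94-0x97,
   i.e. bit 31 set with top-level opcode 100101) takes the
   unconditional branch (immediate) arm above and records the PC plus,
   because bit 31 is set, the link register; a plain b <label> (bit 31
   clear) records only the PC.  */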

/* Record handler for advanced SIMD load and store instructions.  */

static unsigned int
aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
{
  CORE_ADDR address;
  uint64_t addr_offset = 0;
  uint32_t record_buf[24];
  uint64_t record_buf_mem[24];
  uint32_t reg_rn, reg_rt;
  uint32_t reg_index = 0, mem_index = 0;
  uint8_t opcode_bits, size_bits;

  reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
  size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
  opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);

  if (record_debug)
    debug_printf ("Process record: Advanced SIMD load/store\n");

  /* Load/store single structure.  */
  if (bit (aarch64_insn_r->aarch64_insn, 24))
    {
      uint8_t sindex, scale, selem, esize, replicate = 0;
      scale = opcode_bits >> 2;
      selem = ((opcode_bits & 0x02)
               | bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
      switch (scale)
        {
        case 1:
          if (size_bits & 0x01)
            return AARCH64_RECORD_UNKNOWN;
          break;
        case 2:
          if ((size_bits >> 1) & 0x01)
            return AARCH64_RECORD_UNKNOWN;
          if (size_bits & 0x01)
            {
              if (!((opcode_bits >> 1) & 0x01))
                scale = 3;
              else
                return AARCH64_RECORD_UNKNOWN;
            }
          break;
        case 3:
          if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
            {
              scale = size_bits;
              replicate = 1;
              break;
            }
          else
            return AARCH64_RECORD_UNKNOWN;
        default:
          break;
        }
      esize = 8 << scale;
      if (replicate)
        for (sindex = 0; sindex < selem; sindex++)
          {
            record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
            reg_rt = (reg_rt + 1) % 32;
          }
      else
        {
          for (sindex = 0; sindex < selem; sindex++)
            {
              if (bit (aarch64_insn_r->aarch64_insn, 22))
                record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
              else
                {
                  record_buf_mem[mem_index++] = esize / 8;
                  record_buf_mem[mem_index++] = address + addr_offset;
                }
              addr_offset = addr_offset + (esize / 8);
              reg_rt = (reg_rt + 1) % 32;
            }
        }
    }
  /* Load/store multiple structure.  */
  else
    {
      uint8_t selem, esize, rpt, elements;
      uint8_t eindex, rindex;

      esize = 8 << size_bits;
      if (bit (aarch64_insn_r->aarch64_insn, 30))
        elements = 128 / esize;
      else
        elements = 64 / esize;

      switch (opcode_bits)
        {
        /* LD/ST4 (4 Registers).  */
        case 0:
          rpt = 1;
          selem = 4;
          break;
        /* LD/ST1 (4 Registers).  */
        case 2:
          rpt = 4;
          selem = 1;
          break;
        /* LD/ST3 (3 Registers).  */
        case 4:
          rpt = 1;
          selem = 3;
          break;
        /* LD/ST1 (3 Registers).  */
        case 6:
          rpt = 3;
          selem = 1;
          break;
        /* LD/ST1 (1 Register).  */
        case 7:
          rpt = 1;
          selem = 1;
          break;
        /* LD/ST2 (2 Registers).  */
        case 8:
          rpt = 1;
          selem = 2;
          break;
        /* LD/ST1 (2 Registers).  */
        case 10:
          rpt = 2;
          selem = 1;
          break;
        default:
          return AARCH64_RECORD_UNSUPPORTED;
          break;
        }
      for (rindex = 0; rindex < rpt; rindex++)
        for (eindex = 0; eindex < elements; eindex++)
          {
            uint8_t reg_tt, sindex;
            reg_tt = (reg_rt + rindex) % 32;
            for (sindex = 0; sindex < selem; sindex++)
              {
                if (bit (aarch64_insn_r->aarch64_insn, 22))
                  record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
                else
                  {
                    record_buf_mem[mem_index++] = esize / 8;
                    record_buf_mem[mem_index++] = address + addr_offset;
                  }
                addr_offset = addr_offset + (esize / 8);
                reg_tt = (reg_tt + 1) % 32;
              }
          }
    }

  if (bit (aarch64_insn_r->aarch64_insn, 23))
    record_buf[reg_index++] = reg_rn;

  aarch64_insn_r->reg_rec_count = reg_index;
  aarch64_insn_r->mem_rec_count = mem_index / 2;
  MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
             record_buf_mem);
  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
             record_buf);
  return AARCH64_RECORD_SUCCESS;
}

/* Record handler for load and store instructions.  */

static unsigned int
aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
{
  uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
  uint8_t insn_bit23, insn_bit21;
  uint8_t opc, size_bits, ld_flag, vector_flag;
  uint32_t reg_rn, reg_rt, reg_rt2;
  uint64_t datasize, offset;
  uint32_t record_buf[8];
  uint64_t record_buf_mem[8];
  CORE_ADDR address;

  insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
  insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
  insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
  ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
  vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
  reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
  reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
  size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);

  /* Load/store exclusive.  */
  if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
    {
      if (record_debug)
        debug_printf ("Process record: load/store exclusive\n");

      if (ld_flag)
        {
          record_buf[0] = reg_rt;
          aarch64_insn_r->reg_rec_count = 1;
          if (insn_bit21)
            {
              record_buf[1] = reg_rt2;
              aarch64_insn_r->reg_rec_count = 2;
            }
        }
      else
        {
          if (insn_bit21)
            datasize = (8 << size_bits) * 2;
          else
            datasize = (8 << size_bits);
          regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
                                      &address);
          record_buf_mem[0] = datasize / 8;
          record_buf_mem[1] = address;
          aarch64_insn_r->mem_rec_count = 1;
          if (!insn_bit23)
            {
              /* Save register rs.  */
              record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
              aarch64_insn_r->reg_rec_count = 1;
            }
        }
    }
  /* Load register (literal) instructions decoding.  */
  else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
    {
      if (record_debug)
        debug_printf ("Process record: load register (literal)\n");
      if (vector_flag)
        record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
      else
        record_buf[0] = reg_rt;
      aarch64_insn_r->reg_rec_count = 1;
    }
  /* All types of load/store pair instructions decoding.  */
  else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
    {
      if (record_debug)
        debug_printf ("Process record: load/store pair\n");

      if (ld_flag)
        {
          if (vector_flag)
            {
              record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
              record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
            }
          else
            {
              record_buf[0] = reg_rt;
              record_buf[1] = reg_rt2;
            }
          aarch64_insn_r->reg_rec_count = 2;
        }
      else
        {
          uint16_t imm7_off;
          imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
          if (!vector_flag)
            size_bits = size_bits >> 1;
          datasize = 8 << (2 + size_bits);
          offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
          offset = offset << (2 + size_bits);
          regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
                                      &address);
          if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
            {
              if (imm7_off & 0x40)
                address = address - offset;
              else
                address = address + offset;
            }

          record_buf_mem[0] = datasize / 8;
          record_buf_mem[1] = address;
          record_buf_mem[2] = datasize / 8;
          record_buf_mem[3] = address + (datasize / 8);
          aarch64_insn_r->mem_rec_count = 2;
        }
      if (bit (aarch64_insn_r->aarch64_insn, 23))
        record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
    }
  /* Load/store register (unsigned immediate) instructions.  */
  else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
    {
      opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
      if (!(opc >> 1))
        {
          if (opc & 0x01)
            ld_flag = 0x01;
          else
            ld_flag = 0x0;
        }
      else
        {
          if (size_bits == 0x3 && vector_flag == 0x0 && opc == 0x2)
            {
              /* PRFM (immediate).  */
              return AARCH64_RECORD_SUCCESS;
            }
          else if (size_bits == 0x2 && vector_flag == 0x0 && opc == 0x2)
            {
              /* LDRSW (immediate).  */
              ld_flag = 0x1;
            }
          else
            {
              if (opc & 0x01)
                ld_flag = 0x01;
              else
                ld_flag = 0x0;
            }
        }

      if (record_debug)
        {
          debug_printf ("Process record: load/store (unsigned immediate):"
                        " size %x V %d opc %x\n", size_bits, vector_flag,
                        opc);
        }

      if (!ld_flag)
        {
          offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
          datasize = 8 << size_bits;
          regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
                                      &address);
          offset = offset << size_bits;
          address = address + offset;

          record_buf_mem[0] = datasize >> 3;
          record_buf_mem[1] = address;
          aarch64_insn_r->mem_rec_count = 1;
        }
      else
        {
          if (vector_flag)
            record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
          else
            record_buf[0] = reg_rt;
          aarch64_insn_r->reg_rec_count = 1;
        }
    }
  /* Load/store register (register offset) instructions.  */
  else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
           && insn_bits10_11 == 0x02 && insn_bit21)
    {
      if (record_debug)
        debug_printf ("Process record: load/store (register offset)\n");
      opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
      if (!(opc >> 1))
        {
          if (opc & 0x01)
            ld_flag = 0x01;
          else
            ld_flag = 0x0;
        }
      else if (size_bits != 0x03)
        ld_flag = 0x01;
      else
        return AARCH64_RECORD_UNKNOWN;

      if (!ld_flag)
        {
          ULONGEST reg_rm_val;

          regcache_raw_read_unsigned (aarch64_insn_r->regcache,
                                      bits (aarch64_insn_r->aarch64_insn,
                                            16, 20),
                                      &reg_rm_val);
          if (bit (aarch64_insn_r->aarch64_insn, 12))
            offset = reg_rm_val << size_bits;
          else
            offset = reg_rm_val;
          datasize = 8 << size_bits;
          regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
                                      &address);
          address = address + offset;
          record_buf_mem[0] = datasize >> 3;
          record_buf_mem[1] = address;
          aarch64_insn_r->mem_rec_count = 1;
        }
      else
        {
          if (vector_flag)
            record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
          else
            record_buf[0] = reg_rt;
          aarch64_insn_r->reg_rec_count = 1;
        }
    }
  /* Load/store register (immediate and unprivileged) instructions.  */
  else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
           && !insn_bit21)
    {
      if (record_debug)
        {
          debug_printf ("Process record: load/store "
                        "(immediate and unprivileged)\n");
        }
      opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
      if (!(opc >> 1))
        {
          if (opc & 0x01)
            ld_flag = 0x01;
          else
            ld_flag = 0x0;
        }
      else if (size_bits != 0x03)
        ld_flag = 0x01;
      else
        return AARCH64_RECORD_UNKNOWN;

      if (!ld_flag)
        {
          uint16_t imm9_off;
          imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
          offset = (imm9_off & 0x0100)
                   ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
          datasize = 8 << size_bits;
          regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
                                      &address);
          if (insn_bits10_11 != 0x01)
            {
              if (imm9_off & 0x0100)
                address = address - offset;
              else
                address = address + offset;
            }
          record_buf_mem[0] = datasize >> 3;
          record_buf_mem[1] = address;
          aarch64_insn_r->mem_rec_count = 1;
        }
      else
        {
          if (vector_flag)
            record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
          else
            record_buf[0] = reg_rt;
          aarch64_insn_r->reg_rec_count = 1;
        }
      if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
        record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
    }
  /* Advanced SIMD load/store instructions.  */
  else
    return aarch64_record_asimd_load_store (aarch64_insn_r);

  MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
             record_buf_mem);
  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
             record_buf);
  return AARCH64_RECORD_SUCCESS;
}
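
/* For illustration (encodings follow the A64 ISA and are given only as
   examples): ldr x0, [x1] is 0xf9400020; its bits 28-29 are 0x03 and
   bits 24-27 are 0x09, so it is decoded as load/store register
   (unsigned immediate) with opc = 0x01, giving ld_flag = 1 and a
   single register record for x0.  The prfm encoding 0xf9800020
   exercised by the self-test below takes the same arm but returns
   early with no records at all.  */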

/* Record handler for data processing SIMD and floating point instructions.  */

static unsigned int
aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
{
  uint8_t insn_bit21, opcode, rmode, reg_rd;
  uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
  uint8_t insn_bits11_14;
  uint32_t record_buf[2];

  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
  insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
  insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
  insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
  opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
  rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
  reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);

  if (record_debug)
    debug_printf ("Process record: data processing SIMD/FP: ");

  if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
    {
      /* Floating point - fixed point conversion instructions.  */
      if (!insn_bit21)
        {
          if (record_debug)
            debug_printf ("FP - fixed point conversion");

          if ((opcode >> 1) == 0x0 && rmode == 0x03)
            record_buf[0] = reg_rd;
          else
            record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
        }
      /* Floating point - conditional compare instructions.  */
      else if (insn_bits10_11 == 0x01)
        {
          if (record_debug)
            debug_printf ("FP - conditional compare");

          record_buf[0] = AARCH64_CPSR_REGNUM;
        }
      /* Floating point - data processing (2-source) and
         conditional select instructions.  */
      else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
        {
          if (record_debug)
            debug_printf ("FP - DP (2-source)");

          record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
        }
      else if (insn_bits10_11 == 0x00)
        {
          /* Floating point - immediate instructions.  */
          if ((insn_bits12_15 & 0x01) == 0x01
              || (insn_bits12_15 & 0x07) == 0x04)
            {
              if (record_debug)
                debug_printf ("FP - immediate");
              record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
            }
          /* Floating point - compare instructions.  */
          else if ((insn_bits12_15 & 0x03) == 0x02)
            {
              if (record_debug)
                debug_printf ("FP - compare");
              record_buf[0] = AARCH64_CPSR_REGNUM;
            }
          /* Floating point - integer conversions instructions.  */
          else if (insn_bits12_15 == 0x00)
            {
              /* Convert float to integer instruction.  */
              if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
                {
                  if (record_debug)
                    debug_printf ("float to int conversion");

                  record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
                }
              /* Convert integer to float instruction.  */
              else if ((opcode >> 1) == 0x01 && !rmode)
                {
                  if (record_debug)
                    debug_printf ("int to float conversion");

                  record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
                }
              /* Move float to integer instruction.  */
              else if ((opcode >> 1) == 0x03)
                {
                  if (record_debug)
                    debug_printf ("move float to int");

                  if (!(opcode & 0x01))
                    record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
                  else
                    record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
                }
              else
                return AARCH64_RECORD_UNKNOWN;
            }
          else
            return AARCH64_RECORD_UNKNOWN;
        }
      else
        return AARCH64_RECORD_UNKNOWN;
    }
  else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
    {
      if (record_debug)
        debug_printf ("SIMD copy");

      /* Advanced SIMD copy instructions.  */
      if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
          && !bit (aarch64_insn_r->aarch64_insn, 15)
          && bit (aarch64_insn_r->aarch64_insn, 10))
        {
          if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
            record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
          else
            record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
        }
      else
        record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
    }
  /* All remaining floating point or advanced SIMD instructions.  */
  else
    {
      if (record_debug)
        debug_printf ("all remain");

      record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
    }

  if (record_debug)
    debug_printf ("\n");

  aarch64_insn_r->reg_rec_count++;
  gdb_assert (aarch64_insn_r->reg_rec_count == 1);
  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
             record_buf);
  return AARCH64_RECORD_SUCCESS;
}

/* Decode the instruction's type and invoke the matching record
   handler.  */

static unsigned int
aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
{
  uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;

  ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
  ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
  ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
  ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);

  /* Data processing - immediate instructions.  */
  if (!ins_bit26 && !ins_bit27 && ins_bit28)
    return aarch64_record_data_proc_imm (aarch64_insn_r);

  /* Branch, exception generation and system instructions.  */
  if (ins_bit26 && !ins_bit27 && ins_bit28)
    return aarch64_record_branch_except_sys (aarch64_insn_r);

  /* Load and store instructions.  */
  if (!ins_bit25 && ins_bit27)
    return aarch64_record_load_store (aarch64_insn_r);

  /* Data processing - register instructions.  */
  if (ins_bit25 && !ins_bit26 && ins_bit27)
    return aarch64_record_data_proc_reg (aarch64_insn_r);

  /* Data processing - SIMD and floating point instructions.  */
  if (ins_bit25 && ins_bit26 && ins_bit27)
    return aarch64_record_data_proc_simd_fp (aarch64_insn_r);

  return AARCH64_RECORD_UNSUPPORTED;
}
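
/* Summarizing the dispatch above (a reading of the bit tests, matching
   the A64 top-level encoding groups): with bits 28..25 written
   high-to-low, 100x goes to the immediate handler, 101x to
   branch/exception/system, x1x0 to load/store, x101 to data
   processing (register), and x111 to SIMD/floating point; everything
   else is reported as unsupported.  */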

/* Clean up the record's register and memory allocations.  */

static void
deallocate_reg_mem (insn_decode_record *record)
{
  xfree (record->aarch64_regs);
  xfree (record->aarch64_mems);
}

#if GDB_SELF_TEST
namespace selftests {

static void
aarch64_process_record_test (void)
{
  struct gdbarch_info info;
  uint32_t ret;

  gdbarch_info_init (&info);
  info.bfd_arch_info = bfd_scan_arch ("aarch64");

  struct gdbarch *gdbarch = gdbarch_find_by_info (info);
  SELF_CHECK (gdbarch != NULL);

  insn_decode_record aarch64_record;

  memset (&aarch64_record, 0, sizeof (insn_decode_record));
  aarch64_record.regcache = NULL;
  aarch64_record.this_addr = 0;
  aarch64_record.gdbarch = gdbarch;

  /* 20 00 80 f9	prfm	pldl1keep, [x1]  */
  aarch64_record.aarch64_insn = 0xf9800020;
  ret = aarch64_record_decode_insn_handler (&aarch64_record);
  SELF_CHECK (ret == AARCH64_RECORD_SUCCESS);
  SELF_CHECK (aarch64_record.reg_rec_count == 0);
  SELF_CHECK (aarch64_record.mem_rec_count == 0);

  deallocate_reg_mem (&aarch64_record);
}

} // namespace selftests
#endif /* GDB_SELF_TEST */

/* Parse the current instruction and record the values of the registers
   and memory that will be changed by it to "record_arch_list".
   Return -1 if something goes wrong.  */

int
aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
                        CORE_ADDR insn_addr)
{
  uint32_t rec_no = 0;
  uint8_t insn_size = 4;
  uint32_t ret = 0;
  gdb_byte buf[insn_size];
  insn_decode_record aarch64_record;

  memset (&buf[0], 0, insn_size);
  memset (&aarch64_record, 0, sizeof (insn_decode_record));
  target_read_memory (insn_addr, &buf[0], insn_size);
  aarch64_record.aarch64_insn
    = (uint32_t) extract_unsigned_integer (&buf[0],
                                           insn_size,
                                           gdbarch_byte_order (gdbarch));
  aarch64_record.regcache = regcache;
  aarch64_record.this_addr = insn_addr;
  aarch64_record.gdbarch = gdbarch;

  ret = aarch64_record_decode_insn_handler (&aarch64_record);
  if (ret == AARCH64_RECORD_UNSUPPORTED)
    {
      printf_unfiltered (_("Process record does not support instruction "
                           "0x%0x at address %s.\n"),
                         aarch64_record.aarch64_insn,
                         paddress (gdbarch, insn_addr));
      ret = -1;
    }

  if (0 == ret)
    {
      /* Record registers.  */
      record_full_arch_list_add_reg (aarch64_record.regcache,
                                     AARCH64_PC_REGNUM);
      /* Always record register CPSR.  */
      record_full_arch_list_add_reg (aarch64_record.regcache,
                                     AARCH64_CPSR_REGNUM);
      if (aarch64_record.aarch64_regs)
        for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
          if (record_full_arch_list_add_reg (aarch64_record.regcache,
                                             aarch64_record.aarch64_regs[rec_no]))
            ret = -1;

      /* Record memories.  */
      if (aarch64_record.aarch64_mems)
        for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
          if (record_full_arch_list_add_mem
              ((CORE_ADDR) aarch64_record.aarch64_mems[rec_no].addr,
               aarch64_record.aarch64_mems[rec_no].len))
            ret = -1;

      if (record_full_arch_list_add_end ())
        ret = -1;
    }

  deallocate_reg_mem (&aarch64_record);
  return ret;
}