/* Common target dependent code for GDB on AArch64 systems.

   Copyright (C) 2009-2018 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"

#include "frame.h"
#include "inferior.h"
#include "gdbcmd.h"
#include "gdbcore.h"
#include "dis-asm.h"
#include "regcache.h"
#include "reggroups.h"
#include "value.h"
#include "arch-utils.h"
#include "osabi.h"
#include "frame-unwind.h"
#include "frame-base.h"
#include "trad-frame.h"
#include "objfiles.h"
#include "dwarf2-frame.h"
#include "gdbtypes.h"
#include "prologue-value.h"
#include "target-descriptions.h"
#include "user-regs.h"
#include "language.h"
#include "infcall.h"
#include "ax.h"
#include "ax-gdb.h"
#include "selftest.h"

#include "aarch64-tdep.h"
#include "aarch64-ravenscar-thread.h"

#include "elf-bfd.h"
#include "elf/aarch64.h"

#include "vec.h"

#include "record.h"
#include "record-full.h"
#include "arch/aarch64-insn.h"

#include "opcode/aarch64.h"
#include <algorithm>

#define submask(x) ((1L << ((x) + 1)) - 1)
#define bit(obj,st) (((obj) >> (st)) & 1)
#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))

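/* As an illustration of the extractors above (the instruction word is a
   hypothetical example, not used elsewhere in this file): "add x0, x1, #16"
   encodes as 0x91004020, so bits (0x91004020, 5, 9) yields the Rn field
   (bits 9..5), i.e. 1, and bit (0x91004020, 31) yields the 64-bit "sf"
   flag, i.e. 1.  */
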
/* A Homogeneous Floating-Point or Short-Vector Aggregate may have at most
   four members.  */
#define HA_MAX_NUM_FLDS 4

/* All possible aarch64 target descriptors.  */
struct target_desc *tdesc_aarch64_list[AARCH64_MAX_SVE_VQ + 1];

/* The standard register names, and all the valid aliases for them.  */
static const struct
{
  const char *const name;
  int regnum;
} aarch64_register_aliases[] =
{
  /* 64-bit register names.  */
  {"fp", AARCH64_FP_REGNUM},
  {"lr", AARCH64_LR_REGNUM},
  {"sp", AARCH64_SP_REGNUM},

  /* 32-bit register names.  */
  {"w0", AARCH64_X0_REGNUM + 0},
  {"w1", AARCH64_X0_REGNUM + 1},
  {"w2", AARCH64_X0_REGNUM + 2},
  {"w3", AARCH64_X0_REGNUM + 3},
  {"w4", AARCH64_X0_REGNUM + 4},
  {"w5", AARCH64_X0_REGNUM + 5},
  {"w6", AARCH64_X0_REGNUM + 6},
  {"w7", AARCH64_X0_REGNUM + 7},
  {"w8", AARCH64_X0_REGNUM + 8},
  {"w9", AARCH64_X0_REGNUM + 9},
  {"w10", AARCH64_X0_REGNUM + 10},
  {"w11", AARCH64_X0_REGNUM + 11},
  {"w12", AARCH64_X0_REGNUM + 12},
  {"w13", AARCH64_X0_REGNUM + 13},
  {"w14", AARCH64_X0_REGNUM + 14},
  {"w15", AARCH64_X0_REGNUM + 15},
  {"w16", AARCH64_X0_REGNUM + 16},
  {"w17", AARCH64_X0_REGNUM + 17},
  {"w18", AARCH64_X0_REGNUM + 18},
  {"w19", AARCH64_X0_REGNUM + 19},
  {"w20", AARCH64_X0_REGNUM + 20},
  {"w21", AARCH64_X0_REGNUM + 21},
  {"w22", AARCH64_X0_REGNUM + 22},
  {"w23", AARCH64_X0_REGNUM + 23},
  {"w24", AARCH64_X0_REGNUM + 24},
  {"w25", AARCH64_X0_REGNUM + 25},
  {"w26", AARCH64_X0_REGNUM + 26},
  {"w27", AARCH64_X0_REGNUM + 27},
  {"w28", AARCH64_X0_REGNUM + 28},
  {"w29", AARCH64_X0_REGNUM + 29},
  {"w30", AARCH64_X0_REGNUM + 30},

  /* specials */
  {"ip0", AARCH64_X0_REGNUM + 16},
  {"ip1", AARCH64_X0_REGNUM + 17}
};

/* The required core 'R' registers.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM! */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  "pc", "cpsr"
};

/* The FP/SIMD 'V' registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM! */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  "fpsr",
  "fpcr"
};

/* The SVE 'Z' and 'P' registers.  */
static const char *const aarch64_sve_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_SVE_Z0_REGNUM! */
  "z0", "z1", "z2", "z3",
  "z4", "z5", "z6", "z7",
  "z8", "z9", "z10", "z11",
  "z12", "z13", "z14", "z15",
  "z16", "z17", "z18", "z19",
  "z20", "z21", "z22", "z23",
  "z24", "z25", "z26", "z27",
  "z28", "z29", "z30", "z31",
  "fpsr", "fpcr",
  "p0", "p1", "p2", "p3",
  "p4", "p5", "p6", "p7",
  "p8", "p9", "p10", "p11",
  "p12", "p13", "p14", "p15",
  "ffr", "vg"
};

/* AArch64 prologue cache structure.  */
struct aarch64_prologue_cache
{
  /* The program counter at the start of the function.  It is used to
     identify this frame as a prologue frame.  */
  CORE_ADDR func;

  /* The program counter at the time this frame was created; i.e. where
     this function was called from.  It is used to identify this frame as a
     stub frame.  */
  CORE_ADDR prev_pc;

  /* The stack pointer at the time this frame was created; i.e. the
     caller's stack pointer when this function was called.  It is used
     to identify this frame.  */
  CORE_ADDR prev_sp;

  /* Is the target available to read from?  */
  int available_p;

  /* The frame base for this frame is just prev_sp - frame size.
     FRAMESIZE is the distance from the frame pointer to the
     initial stack pointer.  */
  int framesize;

  /* The register used to hold the frame pointer for this frame.  */
  int framereg;

  /* Saved register offsets.  */
  struct trad_frame_saved_reg *saved_regs;
};

static void
show_aarch64_debug (struct ui_file *file, int from_tty,
		    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
}

namespace {

/* Abstract instruction reader.  */

class abstract_instruction_reader
{
public:
  /* Read in one instruction.  */
  virtual ULONGEST read (CORE_ADDR memaddr, int len,
			 enum bfd_endian byte_order) = 0;
};

/* Instruction reader from real target.  */

class instruction_reader : public abstract_instruction_reader
{
 public:
  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    override
  {
    return read_code_unsigned_integer (memaddr, len, byte_order);
  }
};

} // namespace

/* Analyze a prologue, looking for a recognizable stack frame
   and frame pointer.  Scan until we encounter a store that could
   clobber the stack frame unexpectedly, or an unknown instruction.  */

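/* For instance, a frame-pointer prologue of the shape exercised by the
   self tests further below,

     stp x29, x30, [sp, #-272]!   ; save FP and LR, allocate 272 bytes
     mov x29, sp                  ; establish the frame pointer

   is fully recognized; analysis stops at the first branch or at any
   instruction this scanner does not model.  */
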
static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
			  CORE_ADDR start, CORE_ADDR limit,
			  struct aarch64_prologue_cache *cache,
			  abstract_instruction_reader& reader)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  int i;
  /* Track X registers and D registers in prologue.  */
  pv_t regs[AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT];

  for (i = 0; i < AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT; i++)
    regs[i] = pv_register (i, 0);
  pv_area stack (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));

  for (; start < limit; start += 4)
    {
      uint32_t insn;
      aarch64_inst inst;

      insn = reader.read (start, 4, byte_order_for_code);

      if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
	break;

      if (inst.opcode->iclass == addsub_imm
	  && (inst.opcode->op == OP_ADD
	      || strcmp ("sub", inst.opcode->name) == 0))
	{
	  unsigned rd = inst.operands[0].reg.regno;
	  unsigned rn = inst.operands[1].reg.regno;

	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
	  gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM);

	  if (inst.opcode->op == OP_ADD)
	    {
	      regs[rd] = pv_add_constant (regs[rn],
					  inst.operands[2].imm.value);
	    }
	  else
	    {
	      regs[rd] = pv_add_constant (regs[rn],
					  -inst.operands[2].imm.value);
	    }
	}
      else if (inst.opcode->iclass == pcreladdr
	       && inst.operands[1].type == AARCH64_OPND_ADDR_ADRP)
	{
	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);

	  regs[inst.operands[0].reg.regno] = pv_unknown ();
	}
      else if (inst.opcode->iclass == branch_imm)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->iclass == condbranch)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->iclass == branch_reg)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->iclass == compbranch)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->op == OP_MOVZ)
	{
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
	  regs[inst.operands[0].reg.regno] = pv_unknown ();
	}
      else if (inst.opcode->iclass == log_shift
	       && strcmp (inst.opcode->name, "orr") == 0)
	{
	  unsigned rd = inst.operands[0].reg.regno;
	  unsigned rn = inst.operands[1].reg.regno;
	  unsigned rm = inst.operands[2].reg.regno;

	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn);
	  gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT);

	  if (inst.operands[2].shifter.amount == 0
	      && rn == AARCH64_SP_REGNUM)
	    regs[rd] = regs[rm];
	  else
	    {
	      if (aarch64_debug)
		{
		  debug_printf ("aarch64: prologue analysis gave up "
				"addr=%s opcode=0x%x (orr x register)\n",
				core_addr_to_string_nz (start), insn);
		}
	      break;
	    }
	}
      else if (inst.opcode->op == OP_STUR)
	{
	  unsigned rt = inst.operands[0].reg.regno;
	  unsigned rn = inst.operands[1].addr.base_regno;
	  int is64
	    = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);

	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
	  gdb_assert (!inst.operands[1].addr.offset.is_reg);

	  stack.store (pv_add_constant (regs[rn],
					inst.operands[1].addr.offset.imm),
		       is64 ? 8 : 4, regs[rt]);
	}
      else if ((inst.opcode->iclass == ldstpair_off
		|| (inst.opcode->iclass == ldstpair_indexed
		    && inst.operands[2].addr.preind))
	       && strcmp ("stp", inst.opcode->name) == 0)
	{
	  /* STP with addressing mode Pre-indexed and Base register.  */
	  unsigned rt1;
	  unsigned rt2;
	  unsigned rn = inst.operands[2].addr.base_regno;
	  int32_t imm = inst.operands[2].addr.offset.imm;

	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
		      || inst.operands[0].type == AARCH64_OPND_Ft);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2
		      || inst.operands[1].type == AARCH64_OPND_Ft2);
	  gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7);
	  gdb_assert (!inst.operands[2].addr.offset.is_reg);

	  /* If recording this store would invalidate the store area
	     (perhaps because rn is not known) then we should abandon
	     further prologue analysis.  */
	  if (stack.store_would_trash (pv_add_constant (regs[rn], imm)))
	    break;

	  if (stack.store_would_trash (pv_add_constant (regs[rn], imm + 8)))
	    break;

	  rt1 = inst.operands[0].reg.regno;
	  rt2 = inst.operands[1].reg.regno;
	  if (inst.operands[0].type == AARCH64_OPND_Ft)
	    {
	      /* Only bottom 64-bit of each V register (D register) need
		 to be preserved.  */
	      gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
	      rt1 += AARCH64_X_REGISTER_COUNT;
	      rt2 += AARCH64_X_REGISTER_COUNT;
	    }

	  stack.store (pv_add_constant (regs[rn], imm), 8,
		       regs[rt1]);
	  stack.store (pv_add_constant (regs[rn], imm + 8), 8,
		       regs[rt2]);

	  if (inst.operands[2].addr.writeback)
	    regs[rn] = pv_add_constant (regs[rn], imm);
	}
      else if ((inst.opcode->iclass == ldst_imm9 /* Signed immediate.  */
		|| (inst.opcode->iclass == ldst_pos /* Unsigned immediate.  */
		    && (inst.opcode->op == OP_STR_POS
			|| inst.opcode->op == OP_STRF_POS)))
	       && inst.operands[1].addr.base_regno == AARCH64_SP_REGNUM
	       && strcmp ("str", inst.opcode->name) == 0)
	{
	  /* STR (immediate) */
	  unsigned int rt = inst.operands[0].reg.regno;
	  int32_t imm = inst.operands[1].addr.offset.imm;
	  unsigned int rn = inst.operands[1].addr.base_regno;
	  bool is64
	    = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
		      || inst.operands[0].type == AARCH64_OPND_Ft);

	  if (inst.operands[0].type == AARCH64_OPND_Ft)
	    {
	      /* Only bottom 64-bit of each V register (D register) need
		 to be preserved.  */
	      gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
	      rt += AARCH64_X_REGISTER_COUNT;
	    }

	  stack.store (pv_add_constant (regs[rn], imm),
		       is64 ? 8 : 4, regs[rt]);
	  if (inst.operands[1].addr.writeback)
	    regs[rn] = pv_add_constant (regs[rn], imm);
	}
      else if (inst.opcode->iclass == testbranch)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else
	{
	  if (aarch64_debug)
	    {
	      debug_printf ("aarch64: prologue analysis gave up addr=%s"
			    " opcode=0x%x\n",
			    core_addr_to_string_nz (start), insn);
	    }
	  break;
	}
    }

  if (cache == NULL)
    return start;

  if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Frame pointer is fp.  Frame size is constant.  */
      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = -regs[AARCH64_FP_REGNUM].k;
    }
  else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Try the stack pointer.  */
      cache->framesize = -regs[AARCH64_SP_REGNUM].k;
      cache->framereg = AARCH64_SP_REGNUM;
    }
  else
    {
      /* We're just out of luck.  We don't know where the frame is.  */
      cache->framereg = -1;
      cache->framesize = 0;
    }

  for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
    {
      CORE_ADDR offset;

      if (stack.find_reg (gdbarch, i, &offset))
	cache->saved_regs[i].addr = offset;
    }

  for (i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
    {
      int regnum = gdbarch_num_regs (gdbarch);
      CORE_ADDR offset;

      if (stack.find_reg (gdbarch, i + AARCH64_X_REGISTER_COUNT,
			  &offset))
	cache->saved_regs[i + regnum + AARCH64_D0_REGNUM].addr = offset;
    }

  return start;
}

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
			  CORE_ADDR start, CORE_ADDR limit,
			  struct aarch64_prologue_cache *cache)
{
  instruction_reader reader;

  return aarch64_analyze_prologue (gdbarch, start, limit, cache,
				   reader);
}

#if GDB_SELF_TEST

namespace selftests {

/* Instruction reader from manually cooked instruction sequences.  */

class instruction_reader_test : public abstract_instruction_reader
{
public:
  template<size_t SIZE>
  explicit instruction_reader_test (const uint32_t (&insns)[SIZE])
  : m_insns (insns), m_insns_size (SIZE)
  {}

  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    override
  {
    SELF_CHECK (len == 4);
    SELF_CHECK (memaddr % 4 == 0);
    SELF_CHECK (memaddr / 4 < m_insns_size);

    return m_insns[memaddr / 4];
  }

private:
  const uint32_t *m_insns;
  size_t m_insns_size;
};

static void
aarch64_analyze_prologue_test (void)
{
  struct gdbarch_info info;

  gdbarch_info_init (&info);
  info.bfd_arch_info = bfd_scan_arch ("aarch64");

  struct gdbarch *gdbarch = gdbarch_find_by_info (info);
  SELF_CHECK (gdbarch != NULL);

  /* Test the simple prologue in which frame pointer is used.  */
  {
    struct aarch64_prologue_cache cache;
    cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);

    static const uint32_t insns[] = {
      0xa9af7bfd, /* stp     x29, x30, [sp,#-272]! */
      0x910003fd, /* mov     x29, sp */
      0x97ffffe6, /* bl      0x400580 */
    };
    instruction_reader_test reader (insns);

    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
    SELF_CHECK (end == 4 * 2);

    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 272);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
	if (i == AARCH64_FP_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr == -272);
	else if (i == AARCH64_LR_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr == -264);
	else
	  SELF_CHECK (cache.saved_regs[i].addr == -1);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
	int regnum = gdbarch_num_regs (gdbarch);

	SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
		    == -1);
      }
  }

  /* Test a prologue in which STR is used and frame pointer is not
     used.  */
  {
    struct aarch64_prologue_cache cache;
    cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);

    static const uint32_t insns[] = {
      0xf81d0ff3, /* str     x19, [sp, #-48]! */
      0xb9002fe0, /* str     w0, [sp, #44] */
      0xf90013e1, /* str     x1, [sp, #32] */
      0xfd000fe0, /* str     d0, [sp, #24] */
      0xaa0203f3, /* mov     x19, x2 */
      0xf94013e0, /* ldr     x0, [sp, #32] */
    };
    instruction_reader_test reader (insns);

    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    SELF_CHECK (end == 4 * 5);

    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 48);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
	if (i == 1)
	  SELF_CHECK (cache.saved_regs[i].addr == -16);
	else if (i == 19)
	  SELF_CHECK (cache.saved_regs[i].addr == -48);
	else
	  SELF_CHECK (cache.saved_regs[i].addr == -1);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
	int regnum = gdbarch_num_regs (gdbarch);

	if (i == 0)
	  SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
		      == -24);
	else
	  SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
		      == -1);
      }
  }
}
} // namespace selftests
#endif /* GDB_SELF_TEST */

/* Implement the "skip_prologue" gdbarch method.  */

static CORE_ADDR
aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  CORE_ADDR func_addr, limit_pc;

  /* See if we can determine the end of the prologue via the symbol
     table.  If so, then return either PC, or the PC after the
     prologue, whichever is greater.  */
  if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
    {
      CORE_ADDR post_prologue_pc
	= skip_prologue_using_sal (gdbarch, func_addr);

      if (post_prologue_pc != 0)
	return std::max (pc, post_prologue_pc);
    }

  /* Can't determine prologue from the symbol table, need to examine
     instructions.  */

  /* Find an upper limit on the function prologue using the debug
     information.  If the debug information could not be used to
     provide that bound, then use an arbitrary large number as the
     upper bound.  */
  limit_pc = skip_prologue_using_sal (gdbarch, pc);
  if (limit_pc == 0)
    limit_pc = pc + 128;	/* Magic.  */

  /* Try disassembling prologue.  */
  return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
}

/* Scan the function prologue for THIS_FRAME and populate the prologue
   cache CACHE.  */

static void
aarch64_scan_prologue (struct frame_info *this_frame,
		       struct aarch64_prologue_cache *cache)
{
  CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
  CORE_ADDR prologue_start;
  CORE_ADDR prologue_end;
  CORE_ADDR prev_pc = get_frame_pc (this_frame);
  struct gdbarch *gdbarch = get_frame_arch (this_frame);

  cache->prev_pc = prev_pc;

  /* Assume we do not find a frame.  */
  cache->framereg = -1;
  cache->framesize = 0;

  if (find_pc_partial_function (block_addr, NULL, &prologue_start,
				&prologue_end))
    {
      struct symtab_and_line sal = find_pc_line (prologue_start, 0);

      if (sal.line == 0)
	{
	  /* No line info so use the current PC.  */
	  prologue_end = prev_pc;
	}
      else if (sal.end < prologue_end)
	{
	  /* The next line begins after the function end.  */
	  prologue_end = sal.end;
	}

      prologue_end = std::min (prologue_end, prev_pc);
      aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
    }
  else
    {
      CORE_ADDR frame_loc;

      frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
      if (frame_loc == 0)
	return;

      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = 16;
      cache->saved_regs[29].addr = 0;
      cache->saved_regs[30].addr = 8;
    }
}

/* Fill in *CACHE with information about the prologue of *THIS_FRAME.  This
   function may throw an exception if the inferior's registers or memory are
   not available.  */

static void
aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
			       struct aarch64_prologue_cache *cache)
{
  CORE_ADDR unwound_fp;
  int reg;

  aarch64_scan_prologue (this_frame, cache);

  if (cache->framereg == -1)
    return;

  unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
  if (unwound_fp == 0)
    return;

  cache->prev_sp = unwound_fp + cache->framesize;

  /* Calculate actual addresses of saved registers using offsets
     determined by aarch64_analyze_prologue.  */
  for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
    if (trad_frame_addr_p (cache->saved_regs, reg))
      cache->saved_regs[reg].addr += cache->prev_sp;

  cache->func = get_frame_func (this_frame);

  cache->available_p = 1;
}

/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  TRY
    {
      aarch64_make_prologue_cache_1 (this_frame, cache);
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
	throw_exception (ex);
    }
  END_CATCH

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
					   void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  /* Halt the backtrace at "_start".  */
  if (cache->prev_pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
    return UNWIND_OUTERMOST;

  /* We've hit a wall, stop.  */
  if (cache->prev_sp == 0)
    return UNWIND_OUTERMOST;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a normal frame is the current function's starting
   PC and the caller's SP when we were called.  */

static void
aarch64_prologue_this_id (struct frame_info *this_frame,
			  void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    *this_id = frame_id_build_unavailable_stack (cache->func);
  else
    *this_id = frame_id_build (cache->prev_sp, cache->func);
}

/* Implement the "prev_register" frame_unwind method.  */

static struct value *
aarch64_prologue_prev_register (struct frame_info *this_frame,
				void **this_cache, int prev_regnum)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  /* If we are asked to unwind the PC, then we need to return the LR
     instead.  The prologue may save PC, but it will point into this
     frame's prologue, not the next frame's resume location.  */
  if (prev_regnum == AARCH64_PC_REGNUM)
    {
      CORE_ADDR lr;

      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      return frame_unwind_got_constant (this_frame, prev_regnum, lr);
    }

  /* SP is generally not saved to the stack, but this frame is
     identified by the next frame's stack pointer at the time of the
     call.  The value was already reconstructed into PREV_SP.  */
  /*
	 +----------+  ^
	 | saved lr |  |
      +->| saved fp |--+
      |  |          |
      |  |          |     <- Previous SP
      |  +----------+
      |  | saved lr |
      +--| saved fp |<- FP
	 |          |
	 |          |<- SP
	 +----------+  */
  if (prev_regnum == AARCH64_SP_REGNUM)
    return frame_unwind_got_constant (this_frame, prev_regnum,
				      cache->prev_sp);

  return trad_frame_get_prev_register (this_frame, cache->saved_regs,
				       prev_regnum);
}

/* AArch64 prologue unwinder.  */
struct frame_unwind aarch64_prologue_unwind =
{
  NORMAL_FRAME,
  aarch64_prologue_frame_unwind_stop_reason,
  aarch64_prologue_this_id,
  aarch64_prologue_prev_register,
  NULL,
  default_frame_sniffer
};

/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  TRY
    {
      cache->prev_sp = get_frame_register_unsigned (this_frame,
						    AARCH64_SP_REGNUM);
      cache->prev_pc = get_frame_pc (this_frame);
      cache->available_p = 1;
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
	throw_exception (ex);
    }
  END_CATCH

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame,
				       void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a stub frame is the current SP and LR.  */

static void
aarch64_stub_this_id (struct frame_info *this_frame,
		      void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (cache->available_p)
    *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
  else
    *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
}

/* Implement the "sniffer" frame_unwind method.  */

static int
aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
			     struct frame_info *this_frame,
			     void **this_prologue_cache)
{
  CORE_ADDR addr_in_block;
  gdb_byte dummy[4];

  addr_in_block = get_frame_address_in_block (this_frame);
  if (in_plt_section (addr_in_block)
      /* We also use the stub unwinder if the target memory is unreadable
	 to avoid having the prologue unwinder trying to read it.  */
      || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
    return 1;

  return 0;
}

/* AArch64 stub unwinder.  */
struct frame_unwind aarch64_stub_unwind =
{
  NORMAL_FRAME,
  aarch64_stub_frame_unwind_stop_reason,
  aarch64_stub_this_id,
  aarch64_prologue_prev_register,
  NULL,
  aarch64_stub_unwind_sniffer
};

/* Return the frame base address of *THIS_FRAME.  */

static CORE_ADDR
aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  return cache->prev_sp - cache->framesize;
}

/* AArch64 default frame base information.  */
struct frame_base aarch64_normal_base =
{
  &aarch64_prologue_unwind,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base
};

/* Assuming THIS_FRAME is a dummy, return the frame ID of that
   dummy frame.  The frame ID's base needs to match the TOS value
   saved by save_dummy_frame_tos () and returned from
   aarch64_push_dummy_call, and the PC needs to match the dummy
   frame's breakpoint.  */

static struct frame_id
aarch64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  return frame_id_build (get_frame_register_unsigned (this_frame,
						      AARCH64_SP_REGNUM),
			 get_frame_pc (this_frame));
}

/* Implement the "unwind_pc" gdbarch method.  */

static CORE_ADDR
aarch64_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  CORE_ADDR pc
    = frame_unwind_register_unsigned (this_frame, AARCH64_PC_REGNUM);

  return pc;
}

/* Implement the "unwind_sp" gdbarch method.  */

static CORE_ADDR
aarch64_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  return frame_unwind_register_unsigned (this_frame, AARCH64_SP_REGNUM);
}

/* Return the value of the REGNUM register in the previous frame of
   *THIS_FRAME.  */

static struct value *
aarch64_dwarf2_prev_register (struct frame_info *this_frame,
			      void **this_cache, int regnum)
{
  CORE_ADDR lr;

  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      return frame_unwind_got_constant (this_frame, regnum, lr);

    default:
      internal_error (__FILE__, __LINE__,
		      _("Unexpected register %d"), regnum);
    }
}

/* Implement the "init_reg" dwarf2_frame_ops method.  */

static void
aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
			       struct dwarf2_frame_state_reg *reg,
			       struct frame_info *this_frame)
{
  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      reg->how = DWARF2_FRAME_REG_FN;
      reg->loc.fn = aarch64_dwarf2_prev_register;
      break;
    case AARCH64_SP_REGNUM:
      reg->how = DWARF2_FRAME_REG_CFA;
      break;
    }
}

/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

typedef struct
{
  /* Value to pass on stack.  It can be NULL if this item is for stack
     padding.  */
  const gdb_byte *data;

  /* Size in bytes of value to pass on stack.  */
  int len;
} stack_item_t;

DEF_VEC_O (stack_item_t);

/* Return the alignment (in bytes) of the given type.  */

static int
aarch64_type_align (struct type *t)
{
  int n;
  int align;
  int falign;

  t = check_typedef (t);
  switch (TYPE_CODE (t))
    {
    default:
      /* Should never happen.  */
      internal_error (__FILE__, __LINE__, _("unknown type alignment"));
      return 4;

    case TYPE_CODE_PTR:
    case TYPE_CODE_ENUM:
    case TYPE_CODE_INT:
    case TYPE_CODE_FLT:
    case TYPE_CODE_SET:
    case TYPE_CODE_RANGE:
    case TYPE_CODE_BITSTRING:
    case TYPE_CODE_REF:
    case TYPE_CODE_RVALUE_REF:
    case TYPE_CODE_CHAR:
    case TYPE_CODE_BOOL:
      return TYPE_LENGTH (t);

    case TYPE_CODE_ARRAY:
      if (TYPE_VECTOR (t))
	{
	  /* Use the natural alignment for vector types (the same for
	     scalar type), but the maximum alignment is 128-bit.  */
	  if (TYPE_LENGTH (t) > 16)
	    return 16;
	  else
	    return TYPE_LENGTH (t);
	}
      else
	return aarch64_type_align (TYPE_TARGET_TYPE (t));
    case TYPE_CODE_COMPLEX:
      return aarch64_type_align (TYPE_TARGET_TYPE (t));

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      align = 1;
      for (n = 0; n < TYPE_NFIELDS (t); n++)
	{
	  falign = aarch64_type_align (TYPE_FIELD_TYPE (t, n));
	  if (falign > align)
	    align = falign;
	}
      return align;
    }
}

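/* For instance, a hypothetical "struct { char c; double d; }" reports
   8-byte alignment here (the largest member alignment), while a 32-byte
   vector type is capped at the 16-byte maximum above.  */
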
/* Worker function for aapcs_is_vfp_call_or_return_candidate.

   Return the number of registers required, or -1 on failure.

   When encountering a base element, if FUNDAMENTAL_TYPE is not set then set it
   to the element, else fail if the type of this element does not match the
   existing value.  */

static int
aapcs_is_vfp_call_or_return_candidate_1 (struct type *type,
					 struct type **fundamental_type)
{
  if (type == nullptr)
    return -1;

  switch (TYPE_CODE (type))
    {
    case TYPE_CODE_FLT:
      if (TYPE_LENGTH (type) > 16)
	return -1;

      if (*fundamental_type == nullptr)
	*fundamental_type = type;
      else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
	       || TYPE_CODE (type) != TYPE_CODE (*fundamental_type))
	return -1;

      return 1;

    case TYPE_CODE_COMPLEX:
      {
	struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
	if (TYPE_LENGTH (target_type) > 16)
	  return -1;

	if (*fundamental_type == nullptr)
	  *fundamental_type = target_type;
	else if (TYPE_LENGTH (target_type) != TYPE_LENGTH (*fundamental_type)
		 || TYPE_CODE (target_type) != TYPE_CODE (*fundamental_type))
	  return -1;

	return 2;
      }

    case TYPE_CODE_ARRAY:
      {
	if (TYPE_VECTOR (type))
	  {
	    if (TYPE_LENGTH (type) != 8 && TYPE_LENGTH (type) != 16)
	      return -1;

	    if (*fundamental_type == nullptr)
	      *fundamental_type = type;
	    else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
		     || TYPE_CODE (type) != TYPE_CODE (*fundamental_type))
	      return -1;

	    return 1;
	  }
	else
	  {
	    struct type *target_type = TYPE_TARGET_TYPE (type);
	    int count = aapcs_is_vfp_call_or_return_candidate_1
			  (target_type, fundamental_type);

	    if (count == -1)
	      return count;

	    /* An array needs one register per element, so scale the
	       element count by the number of elements, not the byte
	       length of the array.  */
	    count *= (TYPE_LENGTH (type) / TYPE_LENGTH (target_type));
	    return count;
	  }
      }

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      {
	int count = 0;

	for (int i = 0; i < TYPE_NFIELDS (type); i++)
	  {
	    struct type *member = check_typedef (TYPE_FIELD_TYPE (type, i));

	    int sub_count = aapcs_is_vfp_call_or_return_candidate_1
			      (member, fundamental_type);
	    if (sub_count == -1)
	      return -1;
	    count += sub_count;
	  }
	return count;
      }

    default:
      break;
    }

  return -1;
}

/* Return true if an argument, whose type is described by TYPE, can be passed or
   returned in simd/fp registers, providing enough parameter passing registers
   are available.  This is as described in the AAPCS64.

   Upon successful return, *COUNT returns the number of needed registers,
   *FUNDAMENTAL_TYPE contains the type of those registers.

   Candidate as per the AAPCS64 5.4.2.C is either a:
   - float.
   - short-vector.
   - HFA (Homogeneous Floating-point Aggregate, 4.3.5.1).  A Composite type
     where all the members are floats and has at most 4 members.
   - HVA (Homogeneous Short-vector Aggregate, 4.3.5.2).  A Composite type
     where all the members are short vectors and has at most 4 members.
   - Complex (7.1.1)

   Note that HFAs and HVAs can include nested structures and arrays.  */

static bool
aapcs_is_vfp_call_or_return_candidate (struct type *type, int *count,
				       struct type **fundamental_type)
{
  if (type == nullptr)
    return false;

  *fundamental_type = nullptr;

  int ag_count = aapcs_is_vfp_call_or_return_candidate_1 (type,
							  fundamental_type);

  if (ag_count > 0 && ag_count <= HA_MAX_NUM_FLDS)
    {
      *count = ag_count;
      return true;
    }
  else
    return false;
}

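/* For example, a hypothetical "struct { float x, y, z; }" is an HFA:
   every member is a float, so this returns true with *COUNT == 3 and
   *FUNDAMENTAL_TYPE set to float.  A struct mixing float and double
   members fails the fundamental-type check in the worker above and is
   rejected.  */
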
/* AArch64 function call information structure.  */
struct aarch64_call_info
{
  /* the current argument number.  */
  unsigned argnum;

  /* The next general purpose register number, equivalent to NGRN as
     described in the AArch64 Procedure Call Standard.  */
  unsigned ngrn;

  /* The next SIMD and floating point register number, equivalent to
     NSRN as described in the AArch64 Procedure Call Standard.  */
  unsigned nsrn;

  /* The next stacked argument address, equivalent to NSAA as
     described in the AArch64 Procedure Call Standard.  */
  unsigned nsaa;

  /* Stack item vector.  */
  VEC(stack_item_t) *si;
};

/* Pass a value in a sequence of consecutive X registers.  The caller
   is responsible for ensuring sufficient registers are available.  */

static void
pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
	   struct aarch64_call_info *info, struct type *type,
	   struct value *arg)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int len = TYPE_LENGTH (type);
  enum type_code typecode = TYPE_CODE (type);
  int regnum = AARCH64_X0_REGNUM + info->ngrn;
  const bfd_byte *buf = value_contents (arg);

  info->argnum++;

  while (len > 0)
    {
      int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
      CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
						   byte_order);

      /* Adjust sub-word struct/union args when big-endian.  */
      if (byte_order == BFD_ENDIAN_BIG
	  && partial_len < X_REGISTER_SIZE
	  && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
	regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);

      if (aarch64_debug)
	{
	  debug_printf ("arg %d in %s = 0x%s\n", info->argnum,
			gdbarch_register_name (gdbarch, regnum),
			phex (regval, X_REGISTER_SIZE));
	}
      regcache_cooked_write_unsigned (regcache, regnum, regval);
      len -= partial_len;
      buf += partial_len;
      regnum++;
    }
}

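/* Thus a 16-byte struct arriving here with INFO->ngrn == 0 is split into
   two 8-byte chunks written to x0 and x1; note that NGRN itself is
   advanced by the caller (see pass_in_x_or_stack below).  */
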
/* Attempt to marshall a value in a V register.  Return 1 if
   successful, or 0 if insufficient registers are available.  This
   function, unlike the equivalent pass_in_x() function does not
   handle arguments spread across multiple registers.  */

static int
pass_in_v (struct gdbarch *gdbarch,
	   struct regcache *regcache,
	   struct aarch64_call_info *info,
	   int len, const bfd_byte *buf)
{
  if (info->nsrn < 8)
    {
      int regnum = AARCH64_V0_REGNUM + info->nsrn;
      /* Enough space for a full vector register.  */
      gdb_byte reg[register_size (gdbarch, regnum)];
      gdb_assert (len <= sizeof (reg));

      info->argnum++;
      info->nsrn++;

      memset (reg, 0, sizeof (reg));
      /* PCS C.1, the argument is allocated to the least significant
	 bits of V register.  */
      memcpy (reg, buf, len);
      regcache->cooked_write (regnum, reg);

      if (aarch64_debug)
	{
	  debug_printf ("arg %d in %s\n", info->argnum,
			gdbarch_register_name (gdbarch, regnum));
	}
      return 1;
    }
  info->nsrn = 8;
  return 0;
}

/* Marshall an argument onto the stack.  */

static void
pass_on_stack (struct aarch64_call_info *info, struct type *type,
	       struct value *arg)
{
  const bfd_byte *buf = value_contents (arg);
  int len = TYPE_LENGTH (type);
  int align;
  stack_item_t item;

  info->argnum++;

  align = aarch64_type_align (type);

  /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
     Natural alignment of the argument's type.  */
  align = align_up (align, 8);

  /* The AArch64 PCS requires at most doubleword alignment.  */
  if (align > 16)
    align = 16;

  if (aarch64_debug)
    {
      debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
		    info->nsaa);
    }

  item.len = len;
  item.data = buf;
  VEC_safe_push (stack_item_t, info->si, &item);

  info->nsaa += len;
  if (info->nsaa & (align - 1))
    {
      /* Push stack alignment padding.  */
      int pad = align - (info->nsaa & (align - 1));

      item.len = pad;
      item.data = NULL;

      VEC_safe_push (stack_item_t, info->si, &item);
      info->nsaa += pad;
    }
}

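/* For example, a 12-byte aggregate of 4-byte members has its natural
   4-byte alignment rounded up to the PCS minimum of 8, so 12 bytes of
   data are pushed followed by a 4-byte padding item, leaving NSAA at
   16.  */
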
/* Marshall an argument into a sequence of one or more consecutive X
   registers or, if insufficient X registers are available then onto
   the stack.  */

static void
pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
		    struct aarch64_call_info *info, struct type *type,
		    struct value *arg)
{
  int len = TYPE_LENGTH (type);
  int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;

  /* PCS C.13 - Pass in registers if we have enough spare */
  if (info->ngrn + nregs <= 8)
    {
      pass_in_x (gdbarch, regcache, info, type, arg);
      info->ngrn += nregs;
    }
  else
    {
      info->ngrn = 8;
      pass_on_stack (info, type, arg);
    }
}

/* Pass a value, which is of type ARG_TYPE, in a V register.  Assumes the
   value is an aapcs_is_vfp_call_or_return_candidate and there are enough
   spare V registers.  A return value of false is an error state as the
   value will have been partially passed to the stack.  */
static bool
pass_in_v_vfp_candidate (struct gdbarch *gdbarch, struct regcache *regcache,
			 struct aarch64_call_info *info, struct type *arg_type,
			 struct value *arg)
{
  switch (TYPE_CODE (arg_type))
    {
    case TYPE_CODE_FLT:
      return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type),
			value_contents (arg));
      break;

    case TYPE_CODE_COMPLEX:
      {
	const bfd_byte *buf = value_contents (arg);
	struct type *target_type = check_typedef (TYPE_TARGET_TYPE (arg_type));

	if (!pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type),
			buf))
	  return false;

	return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type),
			  buf + TYPE_LENGTH (target_type));
      }

    case TYPE_CODE_ARRAY:
      if (TYPE_VECTOR (arg_type))
	return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type),
			  value_contents (arg));
      /* fall through.  */

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      for (int i = 0; i < TYPE_NFIELDS (arg_type); i++)
	{
	  struct value *field = value_primitive_field (arg, 0, i, arg_type);
	  struct type *field_type = check_typedef (value_type (field));

	  if (!pass_in_v_vfp_candidate (gdbarch, regcache, info, field_type,
					field))
	    return false;
	}
      return true;

    default:
      return false;
    }
}

/* Implement the "push_dummy_call" gdbarch method.  */

static CORE_ADDR
aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
			 struct regcache *regcache, CORE_ADDR bp_addr,
			 int nargs,
			 struct value **args, CORE_ADDR sp, int struct_return,
			 CORE_ADDR struct_addr)
{
  int argnum;
  struct aarch64_call_info info;
  struct type *func_type;
  struct type *return_type;
  int lang_struct_return;

  memset (&info, 0, sizeof (info));

  /* We need to know what the type of the called function is in order
     to determine the number of named/anonymous arguments for the
     actual argument placement, and the return type in order to handle
     return value correctly.

     The generic code above us views the decision of return in memory
     or return in registers as a two stage process.  The language
     handler is consulted first and may decide to return in memory (eg
     class with copy constructor returned by value), this will cause
     the generic code to allocate space AND insert an initial leading
     argument.

     If the language code does not decide to pass in memory then the
     target code is consulted.

     If the language code decides to pass in memory we want to move
     the pointer inserted as the initial argument from the argument
     list and into X8, the conventional AArch64 struct return pointer
     register.

     This is slightly awkward, ideally the flag "lang_struct_return"
     would be passed to the targets implementation of push_dummy_call.
     Rather than change the target interface we call the language code
     directly ourselves.  */

  func_type = check_typedef (value_type (function));

  /* Dereference function pointer types.  */
  if (TYPE_CODE (func_type) == TYPE_CODE_PTR)
    func_type = TYPE_TARGET_TYPE (func_type);

  gdb_assert (TYPE_CODE (func_type) == TYPE_CODE_FUNC
	      || TYPE_CODE (func_type) == TYPE_CODE_METHOD);

  /* If language_pass_by_reference () returned true we will have been
     given an additional initial argument, a hidden pointer to the
     return slot in memory.  */
  return_type = TYPE_TARGET_TYPE (func_type);
  lang_struct_return = language_pass_by_reference (return_type);

  /* Set the return address.  For the AArch64, the return breakpoint
     is always at BP_ADDR.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);

  /* If we were given an initial argument for the return slot because
     lang_struct_return was true, lose it.  */
  if (lang_struct_return)
    {
      args++;
      nargs--;
    }

  /* The struct_return pointer occupies X8.  */
  if (struct_return || lang_struct_return)
    {
      if (aarch64_debug)
	{
	  debug_printf ("struct return in %s = 0x%s\n",
			gdbarch_register_name (gdbarch,
					       AARCH64_STRUCT_RETURN_REGNUM),
			paddress (gdbarch, struct_addr));
	}
      regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
				      struct_addr);
    }

  for (argnum = 0; argnum < nargs; argnum++)
    {
      struct value *arg = args[argnum];
      struct type *arg_type, *fundamental_type;
      int len, elements;

      arg_type = check_typedef (value_type (arg));
      len = TYPE_LENGTH (arg_type);

      /* If arg can be passed in v registers as per the AAPCS64, then do so
	 if there are enough spare registers.  */
      if (aapcs_is_vfp_call_or_return_candidate (arg_type, &elements,
						 &fundamental_type))
	{
	  if (info.nsrn + elements <= 8)
	    {
	      /* We know that we have sufficient registers available therefore
		 this will never need to fallback to the stack.  */
	      if (!pass_in_v_vfp_candidate (gdbarch, regcache, &info, arg_type,
					    arg))
		gdb_assert_not_reached ("Failed to push args");
	    }
	  else
	    {
	      info.nsrn = 8;
	      pass_on_stack (&info, arg_type, arg);
	    }
	  continue;
	}

      switch (TYPE_CODE (arg_type))
	{
	case TYPE_CODE_INT:
	case TYPE_CODE_BOOL:
	case TYPE_CODE_CHAR:
	case TYPE_CODE_RANGE:
	case TYPE_CODE_ENUM:
	  if (len < 4)
	    {
	      /* Promote to 32 bit integer.  */
	      if (TYPE_UNSIGNED (arg_type))
		arg_type = builtin_type (gdbarch)->builtin_uint32;
	      else
		arg_type = builtin_type (gdbarch)->builtin_int32;
	      arg = value_cast (arg_type, arg);
	    }
	  pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
	  break;

	case TYPE_CODE_STRUCT:
	case TYPE_CODE_ARRAY:
	case TYPE_CODE_UNION:
	  if (len > 16)
	    {
	      /* PCS B.7 Aggregates larger than 16 bytes are passed by
		 invisible reference.  */

	      /* Allocate aligned storage.  */
	      sp = align_down (sp - len, 16);

	      /* Write the real data into the stack.  */
	      write_memory (sp, value_contents (arg), len);

	      /* Construct the indirection.  */
	      arg_type = lookup_pointer_type (arg_type);
	      arg = value_from_pointer (arg_type, sp);
	      pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
	    }
	  else
	    /* PCS C.15 / C.18 multiple values pass.  */
	    pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
	  break;

	default:
	  pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
	  break;
	}
    }

  /* Make sure stack retains 16 byte alignment.  */
  if (info.nsaa & 15)
    sp -= 16 - (info.nsaa & 15);

  while (!VEC_empty (stack_item_t, info.si))
    {
      stack_item_t *si = VEC_last (stack_item_t, info.si);

      sp -= si->len;
      if (si->data != NULL)
	write_memory (sp, si->data, si->len);
      VEC_pop (stack_item_t, info.si);
    }

  VEC_free (stack_item_t, info.si);

  /* Finally, update the SP register.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);

  return sp;
}

/* Implement the "frame_align" gdbarch method.  */

static CORE_ADDR
aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
{
  /* Align the stack to sixteen bytes.  */
  return sp & ~(CORE_ADDR) 15;
}

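/* E.g. an incoming SP of 0x7ffffffff8f3 is aligned down to
   0x7ffffffff8f0 (the address here is purely illustrative).  */
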
/* Return the type for an AdvSIMD Q register.  */

static struct type *
aarch64_vnq_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnq_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
			       TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint128;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int128;
      append_composite_type_field (t, "s", elem);

      tdep->vnq_type = t;
    }

  return tdep->vnq_type;
}

/* Return the type for an AdvSIMD D register.  */

static struct type *
aarch64_vnd_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnd_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
			       TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_double;
      append_composite_type_field (t, "f", elem);

      elem = builtin_type (gdbarch)->builtin_uint64;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int64;
      append_composite_type_field (t, "s", elem);

      tdep->vnd_type = t;
    }

  return tdep->vnd_type;
}

/* Return the type for an AdvSIMD S register.  */

static struct type *
aarch64_vns_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vns_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
			       TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_float;
      append_composite_type_field (t, "f", elem);

      elem = builtin_type (gdbarch)->builtin_uint32;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int32;
      append_composite_type_field (t, "s", elem);

      tdep->vns_type = t;
    }

  return tdep->vns_type;
}

/* Return the type for an AdvSIMD H register.  */

static struct type *
aarch64_vnh_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnh_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
			       TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint16;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int16;
      append_composite_type_field (t, "s", elem);

      tdep->vnh_type = t;
    }

  return tdep->vnh_type;
}

/* Return the type for an AdvSIMD B register.  */

static struct type *
aarch64_vnb_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnb_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
			       TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint8;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int8;
      append_composite_type_field (t, "s", elem);

      tdep->vnb_type = t;
    }

  return tdep->vnb_type;
}

63bad7b6
AH
1845/* Return the type for an AdvSISD V register. */
1846
1847static struct type *
1848aarch64_vnv_type (struct gdbarch *gdbarch)
1849{
1850 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1851
1852 if (tdep->vnv_type == NULL)
1853 {
1854 struct type *t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnv",
1855 TYPE_CODE_UNION);
1856
1857 append_composite_type_field (t, "d", aarch64_vnd_type (gdbarch));
1858 append_composite_type_field (t, "s", aarch64_vns_type (gdbarch));
1859 append_composite_type_field (t, "h", aarch64_vnh_type (gdbarch));
1860 append_composite_type_field (t, "b", aarch64_vnb_type (gdbarch));
1861 append_composite_type_field (t, "q", aarch64_vnq_type (gdbarch));
1862
1863 tdep->vnv_type = t;
1864 }
1865
1866 return tdep->vnv_type;
1867}
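/* Editorial note (illustrative, not part of the upstream file): with
   the union types built above, a vector register can be inspected from
   the CLI under any of its natural views, e.g.:

     (gdb) print $d0.f      -- 64-bit lane of d0 as a double
     (gdb) print $q0.u      -- 128-bit lane of q0 as an unsigned int
     (gdb) print $v0.s.f    -- SVE V pseudo, 32-bit lane as a float

   The field names mirror the "f"/"u"/"s" members appended above; the
   exact commands are an assumed usage sketch.  */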
1868
1869/* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
1870
1871static int
1872aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
1873{
1874 if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
1875 return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;
1876
1877 if (reg == AARCH64_DWARF_SP)
1878 return AARCH64_SP_REGNUM;
1879
1880 if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
1881 return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;
1882
1883 if (reg == AARCH64_DWARF_SVE_VG)
1884 return AARCH64_SVE_VG_REGNUM;
1885
1886 if (reg == AARCH64_DWARF_SVE_FFR)
1887 return AARCH64_SVE_FFR_REGNUM;
1888
1889 if (reg >= AARCH64_DWARF_SVE_P0 && reg <= AARCH64_DWARF_SVE_P0 + 15)
1890 return AARCH64_SVE_P0_REGNUM + reg - AARCH64_DWARF_SVE_P0;
1891
1892 if (reg >= AARCH64_DWARF_SVE_Z0 && reg <= AARCH64_DWARF_SVE_Z0 + 15)
1893 return AARCH64_SVE_Z0_REGNUM + reg - AARCH64_DWARF_SVE_Z0;
1894
1895 return -1;
1896}
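/* Editorial example (illustrative, not from the upstream file): the
   mapping above is a set of linear remaps, so

     aarch64_dwarf_reg_to_regnum (gdbarch, AARCH64_DWARF_X0 + 3)
       => AARCH64_X0_REGNUM + 3    (DWARF x3 -> GDB x3)
     aarch64_dwarf_reg_to_regnum (gdbarch, AARCH64_DWARF_V0 + 31)
       => AARCH64_V0_REGNUM + 31   (DWARF v31 -> GDB v31)

   while any DWARF number outside the listed ranges falls through to
   the -1 "unknown" result.  */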
1897
1898/* Implement the "print_insn" gdbarch method. */
1899
1900static int
1901aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
1902{
1903 info->symbols = NULL;
6394c606 1904 return default_print_insn (memaddr, info);
1905}
1906
1907/* AArch64 BRK software debug mode instruction.
1908 Note that AArch64 code is always little-endian.
1909 1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000. */
04180708 1910constexpr gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
07b287a0 1911
04180708 1912typedef BP_MANIPULATION (aarch64_default_breakpoint) aarch64_breakpoint;
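/* Editorial check (added note): BRK #imm16 is encoded as
   1101 0100 001i iiii iiii iiii iii0 0000.  With imm16 == 0 this is
   0xd4200000, and since AArch64 code is always little-endian it is
   stored as the byte sequence {0x00, 0x00, 0x20, 0xd4} used above.  */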
1913
1914/* Extract from an array REGS containing the (raw) register state a
1915 function return value of type TYPE, and copy that, in virtual
1916 format, into VALBUF. */
1917
1918static void
1919aarch64_extract_return_value (struct type *type, struct regcache *regs,
1920 gdb_byte *valbuf)
1921{
ac7936df 1922 struct gdbarch *gdbarch = regs->arch ();
07b287a0 1923 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1924 int elements;
1925 struct type *fundamental_type;
07b287a0 1926
1927 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
1928 &fundamental_type))
07b287a0 1929 {
1930 int len = TYPE_LENGTH (fundamental_type);
1931
1932 for (int i = 0; i < elements; i++)
1933 {
1934 int regno = AARCH64_V0_REGNUM + i;
1935 /* Enough space for a full vector register. */
1936 gdb_byte buf[register_size (gdbarch, regno)];
1937 gdb_assert (len <= sizeof (buf));
1938
1939 if (aarch64_debug)
1940 {
1941 debug_printf ("read HFA or HVA return value element %d from %s\n",
1942 i + 1,
1943 gdbarch_register_name (gdbarch, regno));
1944 }
1945 regs->cooked_read (regno, buf);
07b287a0 1946
1947 memcpy (valbuf, buf, len);
1948 valbuf += len;
1949 }
1950 }
1951 else if (TYPE_CODE (type) == TYPE_CODE_INT
1952 || TYPE_CODE (type) == TYPE_CODE_CHAR
1953 || TYPE_CODE (type) == TYPE_CODE_BOOL
1954 || TYPE_CODE (type) == TYPE_CODE_PTR
aa006118 1955 || TYPE_IS_REFERENCE (type)
1956 || TYPE_CODE (type) == TYPE_CODE_ENUM)
1957 {
1958 /* If the type is a plain integer, then the access is
1959 straightforward.  Otherwise we have to play around a bit
1960 more. */
1961 int len = TYPE_LENGTH (type);
1962 int regno = AARCH64_X0_REGNUM;
1963 ULONGEST tmp;
1964
1965 while (len > 0)
1966 {
1967 /* By using store_unsigned_integer we avoid having to do
1968 anything special for small big-endian values. */
1969 regcache_cooked_read_unsigned (regs, regno++, &tmp);
1970 store_unsigned_integer (valbuf,
1971 (len > X_REGISTER_SIZE
1972 ? X_REGISTER_SIZE : len), byte_order, tmp);
1973 len -= X_REGISTER_SIZE;
1974 valbuf += X_REGISTER_SIZE;
1975 }
1976 }
1977 else
1978 {
1979 /* For a structure or union the behaviour is as if the value had
1980 been stored to word-aligned memory and then loaded into
1981 registers with 64-bit load instruction(s). */
1982 int len = TYPE_LENGTH (type);
1983 int regno = AARCH64_X0_REGNUM;
1984 bfd_byte buf[X_REGISTER_SIZE];
1985
1986 while (len > 0)
1987 {
dca08e1f 1988 regs->cooked_read (regno++, buf);
1989 memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
1990 len -= X_REGISTER_SIZE;
1991 valbuf += X_REGISTER_SIZE;
1992 }
1993 }
1994}
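/* Editorial example (illustrative, not from the upstream file): for a
   return type such as

     struct hfa { float a, b, c; };

   aapcs_is_vfp_call_or_return_candidate reports ELEMENTS == 3 with a
   fundamental type of float, so the HFA/HVA branch above copies four
   bytes from each of v0, v1 and v2 into consecutive positions of
   VALBUF.  */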
1995
1996
1997/* Will a function return an aggregate type in memory or in a
1998 register? Return 0 if an aggregate type can be returned in a
1999 register, 1 if it must be returned in memory. */
2000
2001static int
2002aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
2003{
f168693b 2004 type = check_typedef (type);
2005 int elements;
2006 struct type *fundamental_type;
07b287a0 2007
2008 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2009 &fundamental_type))
07b287a0 2010 {
2011 /* v0-v7 are used to return values; one register is allocated
2012 per member, and an HFA or HVA has at most four members. */
2013 return 0;
2014 }
2015
2016 if (TYPE_LENGTH (type) > 16)
2017 {
2018 /* PCS B.6 Aggregates larger than 16 bytes are passed by
2019 invisible reference. */
2020
2021 return 1;
2022 }
2023
2024 return 0;
2025}
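/* Editorial example (illustrative): a 24-byte aggregate such as

     struct big { uint64_t a, b, c; };

   is neither an HFA/HVA nor at most 16 bytes, so the function above
   returns 1 and the value travels via the invisible reference of PCS
   B.6, whereas a 16-byte struct of two uint64_t members returns 0 and
   comes back in x0/x1.  */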
2026
2027/* Write into appropriate registers a function return value of type
2028 TYPE, given in virtual format. */
2029
2030static void
2031aarch64_store_return_value (struct type *type, struct regcache *regs,
2032 const gdb_byte *valbuf)
2033{
ac7936df 2034 struct gdbarch *gdbarch = regs->arch ();
07b287a0 2035 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2036 int elements;
2037 struct type *fundamental_type;
07b287a0 2038
2039 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2040 &fundamental_type))
07b287a0 2041 {
2042 int len = TYPE_LENGTH (fundamental_type);
2043
2044 for (int i = 0; i < elements; i++)
2045 {
2046 int regno = AARCH64_V0_REGNUM + i;
2047 /* Enough space for a full vector register. */
2048 gdb_byte tmpbuf[register_size (gdbarch, regno)];
2049 gdb_assert (len <= sizeof (tmpbuf));
2050
2051 if (aarch64_debug)
2052 {
2053 debug_printf ("write HFA or HVA return value element %d to %s\n",
2054 i + 1,
2055 gdbarch_register_name (gdbarch, regno));
2056 }
07b287a0 2057
2058 memcpy (tmpbuf, valbuf,
2059 len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
2060 regs->cooked_write (regno, tmpbuf);
2061 valbuf += len;
2062 }
2063 }
2064 else if (TYPE_CODE (type) == TYPE_CODE_INT
2065 || TYPE_CODE (type) == TYPE_CODE_CHAR
2066 || TYPE_CODE (type) == TYPE_CODE_BOOL
2067 || TYPE_CODE (type) == TYPE_CODE_PTR
aa006118 2068 || TYPE_IS_REFERENCE (type)
2069 || TYPE_CODE (type) == TYPE_CODE_ENUM)
2070 {
2071 if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
2072 {
2073 /* Values of one word or less are zero/sign-extended and
2074 returned in x0. */
2075 bfd_byte tmpbuf[X_REGISTER_SIZE];
2076 LONGEST val = unpack_long (type, valbuf);
2077
2078 store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
b66f5587 2079 regs->cooked_write (AARCH64_X0_REGNUM, tmpbuf);
2080 }
2081 else
2082 {
2083 /* Integral values greater than one word are stored in
2084 consecutive registers starting with x0.  This will always
2085 be a multiple of the register size. */
2086 int len = TYPE_LENGTH (type);
2087 int regno = AARCH64_X0_REGNUM;
2088
2089 while (len > 0)
2090 {
b66f5587 2091 regs->cooked_write (regno++, valbuf);
2092 len -= X_REGISTER_SIZE;
2093 valbuf += X_REGISTER_SIZE;
2094 }
2095 }
2096 }
2097 else
2098 {
2099 /* For a structure or union the behaviour is as if the value had
2100 been stored to word-aligned memory and then loaded into
2101 registers with 64-bit load instruction(s). */
2102 int len = TYPE_LENGTH (type);
2103 int regno = AARCH64_X0_REGNUM;
2104 bfd_byte tmpbuf[X_REGISTER_SIZE];
2105
2106 while (len > 0)
2107 {
2108 memcpy (tmpbuf, valbuf,
2109 len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
b66f5587 2110 regs->cooked_write (regno++, tmpbuf);
2111 len -= X_REGISTER_SIZE;
2112 valbuf += X_REGISTER_SIZE;
2113 }
2114 }
2115}
2116
2117/* Implement the "return_value" gdbarch method. */
2118
2119static enum return_value_convention
2120aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
2121 struct type *valtype, struct regcache *regcache,
2122 gdb_byte *readbuf, const gdb_byte *writebuf)
2123{
2124
2125 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
2126 || TYPE_CODE (valtype) == TYPE_CODE_UNION
2127 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
2128 {
2129 if (aarch64_return_in_memory (gdbarch, valtype))
2130 {
2131 if (aarch64_debug)
b277c936 2132 debug_printf ("return value in memory\n");
2133 return RETURN_VALUE_STRUCT_CONVENTION;
2134 }
2135 }
2136
2137 if (writebuf)
2138 aarch64_store_return_value (valtype, regcache, writebuf);
2139
2140 if (readbuf)
2141 aarch64_extract_return_value (valtype, regcache, readbuf);
2142
2143 if (aarch64_debug)
b277c936 2144 debug_printf ("return value in registers\n");
2145
2146 return RETURN_VALUE_REGISTER_CONVENTION;
2147}
2148
2149/* Implement the "get_longjmp_target" gdbarch method. */
2150
2151static int
2152aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2153{
2154 CORE_ADDR jb_addr;
2155 gdb_byte buf[X_REGISTER_SIZE];
2156 struct gdbarch *gdbarch = get_frame_arch (frame);
2157 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2158 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2159
2160 jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
2161
2162 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
2163 X_REGISTER_SIZE))
2164 return 0;
2165
2166 *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
2167 return 1;
2168}
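/* Editorial note (an assumption, not from the upstream file): an OS
   ABI that enables longjmp support fills in tdep->jb_pc and
   tdep->jb_elt_size to describe its jmp_buf layout.  With hypothetical
   values jb_elt_size == 8 and jb_pc == 11, the read above fetches the
   saved PC from jb_addr + 11 * 8.  */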
2169
2170/* Implement the "gen_return_address" gdbarch method. */
2171
2172static void
2173aarch64_gen_return_address (struct gdbarch *gdbarch,
2174 struct agent_expr *ax, struct axs_value *value,
2175 CORE_ADDR scope)
2176{
2177 value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
2178 value->kind = axs_lvalue_register;
2179 value->u.reg = AARCH64_LR_REGNUM;
2180}
2181\f
2182
2183/* Return the pseudo register name corresponding to register regnum. */
2184
2185static const char *
2186aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
2187{
2188 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2189
2190 static const char *const q_name[] =
2191 {
2192 "q0", "q1", "q2", "q3",
2193 "q4", "q5", "q6", "q7",
2194 "q8", "q9", "q10", "q11",
2195 "q12", "q13", "q14", "q15",
2196 "q16", "q17", "q18", "q19",
2197 "q20", "q21", "q22", "q23",
2198 "q24", "q25", "q26", "q27",
2199 "q28", "q29", "q30", "q31",
2200 };
2201
2202 static const char *const d_name[] =
2203 {
2204 "d0", "d1", "d2", "d3",
2205 "d4", "d5", "d6", "d7",
2206 "d8", "d9", "d10", "d11",
2207 "d12", "d13", "d14", "d15",
2208 "d16", "d17", "d18", "d19",
2209 "d20", "d21", "d22", "d23",
2210 "d24", "d25", "d26", "d27",
2211 "d28", "d29", "d30", "d31",
2212 };
2213
2214 static const char *const s_name[] =
2215 {
2216 "s0", "s1", "s2", "s3",
2217 "s4", "s5", "s6", "s7",
2218 "s8", "s9", "s10", "s11",
2219 "s12", "s13", "s14", "s15",
2220 "s16", "s17", "s18", "s19",
2221 "s20", "s21", "s22", "s23",
2222 "s24", "s25", "s26", "s27",
2223 "s28", "s29", "s30", "s31",
2224 };
2225
2226 static const char *const h_name[] =
2227 {
2228 "h0", "h1", "h2", "h3",
2229 "h4", "h5", "h6", "h7",
2230 "h8", "h9", "h10", "h11",
2231 "h12", "h13", "h14", "h15",
2232 "h16", "h17", "h18", "h19",
2233 "h20", "h21", "h22", "h23",
2234 "h24", "h25", "h26", "h27",
2235 "h28", "h29", "h30", "h31",
2236 };
2237
2238 static const char *const b_name[] =
2239 {
2240 "b0", "b1", "b2", "b3",
2241 "b4", "b5", "b6", "b7",
2242 "b8", "b9", "b10", "b11",
2243 "b12", "b13", "b14", "b15",
2244 "b16", "b17", "b18", "b19",
2245 "b20", "b21", "b22", "b23",
2246 "b24", "b25", "b26", "b27",
2247 "b28", "b29", "b30", "b31",
2248 };
2249
2250 regnum -= gdbarch_num_regs (gdbarch);
2251
2252 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2253 return q_name[regnum - AARCH64_Q0_REGNUM];
2254
2255 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2256 return d_name[regnum - AARCH64_D0_REGNUM];
2257
2258 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2259 return s_name[regnum - AARCH64_S0_REGNUM];
2260
2261 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2262 return h_name[regnum - AARCH64_H0_REGNUM];
2263
2264 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2265 return b_name[regnum - AARCH64_B0_REGNUM];
2266
2267 if (tdep->has_sve ())
2268 {
2269 static const char *const sve_v_name[] =
2270 {
2271 "v0", "v1", "v2", "v3",
2272 "v4", "v5", "v6", "v7",
2273 "v8", "v9", "v10", "v11",
2274 "v12", "v13", "v14", "v15",
2275 "v16", "v17", "v18", "v19",
2276 "v20", "v21", "v22", "v23",
2277 "v24", "v25", "v26", "v27",
2278 "v28", "v29", "v30", "v31",
2279 };
2280
2281 if (regnum >= AARCH64_SVE_V0_REGNUM
2282 && regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2283 return sve_v_name[regnum - AARCH64_SVE_V0_REGNUM];
2284 }
2285
2286 internal_error (__FILE__, __LINE__,
2287 _("aarch64_pseudo_register_name: bad register number %d"),
2288 regnum);
2289}
2290
2291/* Implement the "pseudo_register_type" tdesc_arch_data method. */
2292
2293static struct type *
2294aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
2295{
2296 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2297
2298 regnum -= gdbarch_num_regs (gdbarch);
2299
2300 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2301 return aarch64_vnq_type (gdbarch);
2302
2303 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2304 return aarch64_vnd_type (gdbarch);
2305
2306 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2307 return aarch64_vns_type (gdbarch);
2308
2309 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2310 return aarch64_vnh_type (gdbarch);
2311
2312 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2313 return aarch64_vnb_type (gdbarch);
2314
2315 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2316 && regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2317 return aarch64_vnv_type (gdbarch);
2318
2319 internal_error (__FILE__, __LINE__,
2320 _("aarch64_pseudo_register_type: bad register number %d"),
2321 regnum);
2322}
2323
2324/* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2325
2326static int
2327aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
2328 struct reggroup *group)
2329{
2330 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2331
2332 regnum -= gdbarch_num_regs (gdbarch);
2333
2334 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2335 return group == all_reggroup || group == vector_reggroup;
2336 else if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2337 return (group == all_reggroup || group == vector_reggroup
2338 || group == float_reggroup);
2339 else if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2340 return (group == all_reggroup || group == vector_reggroup
2341 || group == float_reggroup);
2342 else if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2343 return group == all_reggroup || group == vector_reggroup;
2344 else if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2345 return group == all_reggroup || group == vector_reggroup;
2346 else if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2347 && regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2348 return group == all_reggroup || group == vector_reggroup;
2349
2350 return group == all_reggroup;
2351}
2352
2353/* Helper for aarch64_pseudo_read_value. */
2354
2355static struct value *
2356aarch64_pseudo_read_value_1 (struct gdbarch *gdbarch,
2357 readable_regcache *regcache, int regnum_offset,
2358 int regsize, struct value *result_value)
2359{
2360 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
2361
2362 /* Enough space for a full vector register. */
2363 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2364 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2365
2366 if (regcache->raw_read (v_regnum, reg_buf) != REG_VALID)
2367 mark_value_bytes_unavailable (result_value, 0,
2368 TYPE_LENGTH (value_type (result_value)));
2369 else
2370 memcpy (value_contents_raw (result_value), reg_buf, regsize);
63bad7b6 2371
2372 return result_value;
2373 }
2374
2375/* Implement the "pseudo_register_read_value" gdbarch method. */
2376
2377static struct value *
3c5cd5c3 2378aarch64_pseudo_read_value (struct gdbarch *gdbarch, readable_regcache *regcache,
2379 int regnum)
2380{
63bad7b6 2381 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3c5cd5c3 2382 struct value *result_value = allocate_value (register_type (gdbarch, regnum));
07b287a0 2383
2384 VALUE_LVAL (result_value) = lval_register;
2385 VALUE_REGNUM (result_value) = regnum;
2386
2387 regnum -= gdbarch_num_regs (gdbarch);
2388
2389 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2390 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2391 regnum - AARCH64_Q0_REGNUM,
3c5cd5c3 2392 Q_REGISTER_SIZE, result_value);
2393
2394 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2395 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2396 regnum - AARCH64_D0_REGNUM,
3c5cd5c3 2397 D_REGISTER_SIZE, result_value);
2398
2399 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2400 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2401 regnum - AARCH64_S0_REGNUM,
3c5cd5c3 2402 S_REGISTER_SIZE, result_value);
2403
2404 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2405 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2406 regnum - AARCH64_H0_REGNUM,
3c5cd5c3 2407 H_REGISTER_SIZE, result_value);
2408
2409 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2410 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2411 regnum - AARCH64_B0_REGNUM,
3c5cd5c3 2412 B_REGISTER_SIZE, result_value);
07b287a0 2413
2414 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2415 && regnum < AARCH64_SVE_V0_REGNUM + 32)
2416 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2417 regnum - AARCH64_SVE_V0_REGNUM,
2418 V_REGISTER_SIZE, result_value);
2419
2420 gdb_assert_not_reached ("regnum out of bounds");
2421}
2422
3c5cd5c3 2423/* Helper for aarch64_pseudo_write. */
2424
2425static void
2426aarch64_pseudo_write_1 (struct gdbarch *gdbarch, struct regcache *regcache,
2427 int regnum_offset, int regsize, const gdb_byte *buf)
07b287a0 2428{
3c5cd5c3 2429 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
07b287a0 2430
2431 /* Enough space for a full vector register. */
2432 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2433 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2434
2435 /* Ensure the register buffer is zero.  We want GDB writes of the
2436 various 'scalar' pseudo registers to behave like architectural
2437 writes: register-width bytes are written and the remainder is
2438 set to zero. */
63bad7b6 2439 memset (reg_buf, 0, register_size (gdbarch, AARCH64_V0_REGNUM));
07b287a0 2440
2441 memcpy (reg_buf, buf, regsize);
2442 regcache->raw_write (v_regnum, reg_buf);
2443}
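/* Editorial example (illustrative): writing the four bytes 0x3f800000
   (1.0f) to the pseudo register s0 lands in the zeroed staging buffer
   above, and the full underlying vector register is then written back,
   so its remaining bytes read as zero afterwards -- the architectural
   behaviour of a scalar register write.  */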
2444
2445/* Implement the "pseudo_register_write" gdbarch method. */
2446
2447static void
2448aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2449 int regnum, const gdb_byte *buf)
2450{
63bad7b6 2451 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2452 regnum -= gdbarch_num_regs (gdbarch);
2453
2454 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2455 return aarch64_pseudo_write_1 (gdbarch, regcache,
2456 regnum - AARCH64_Q0_REGNUM, Q_REGISTER_SIZE,
2457 buf);
2458
2459 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2460 return aarch64_pseudo_write_1 (gdbarch, regcache,
2461 regnum - AARCH64_D0_REGNUM, D_REGISTER_SIZE,
2462 buf);
2463
2464 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2465 return aarch64_pseudo_write_1 (gdbarch, regcache,
2466 regnum - AARCH64_S0_REGNUM, S_REGISTER_SIZE,
2467 buf);
2468
2469 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2470 return aarch64_pseudo_write_1 (gdbarch, regcache,
2471 regnum - AARCH64_H0_REGNUM, H_REGISTER_SIZE,
2472 buf);
2473
2474 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2475 return aarch64_pseudo_write_1 (gdbarch, regcache,
2476 regnum - AARCH64_B0_REGNUM, B_REGISTER_SIZE,
2477 buf);
2478
2479 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2480 && regnum < AARCH64_SVE_V0_REGNUM + 32)
2481 return aarch64_pseudo_write_1 (gdbarch, regcache,
2482 regnum - AARCH64_SVE_V0_REGNUM,
2483 V_REGISTER_SIZE, buf);
2484
2485 gdb_assert_not_reached ("regnum out of bounds");
2486}
2487
2488/* Callback function for user_reg_add. */
2489
2490static struct value *
2491value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
2492{
9a3c8263 2493 const int *reg_p = (const int *) baton;
2494
2495 return value_of_register (*reg_p, frame);
2496}
2497\f
2498
2499/* Implement the "software_single_step" gdbarch method, needed to
2500 single step through atomic sequences on AArch64. */
2501
a0ff9e1a 2502static std::vector<CORE_ADDR>
f5ea389a 2503aarch64_software_single_step (struct regcache *regcache)
9404b58f 2504{
ac7936df 2505 struct gdbarch *gdbarch = regcache->arch ();
2506 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2507 const int insn_size = 4;
2508 const int atomic_sequence_length = 16; /* Instruction sequence length. */
0187a92f 2509 CORE_ADDR pc = regcache_read_pc (regcache);
70ab8ccd 2510 CORE_ADDR breaks[2] = { CORE_ADDR_MAX, CORE_ADDR_MAX };
2511 CORE_ADDR loc = pc;
2512 CORE_ADDR closing_insn = 0;
2513 uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
2514 byte_order_for_code);
2515 int index;
2516 int insn_count;
2517 int bc_insn_count = 0; /* Conditional branch instruction count. */
2518 int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed). */
2519 aarch64_inst inst;
2520
561a72d4 2521 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
a0ff9e1a 2522 return {};
2523
2524 /* Look for a Load Exclusive instruction which begins the sequence. */
f77ee802 2525 if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
a0ff9e1a 2526 return {};
2527
2528 for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
2529 {
2530 loc += insn_size;
2531 insn = read_memory_unsigned_integer (loc, insn_size,
2532 byte_order_for_code);
2533
561a72d4 2534 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
a0ff9e1a 2535 return {};
9404b58f 2536 /* Check if the instruction is a conditional branch. */
f77ee802 2537 if (inst.opcode->iclass == condbranch)
9404b58f 2538 {
2539 gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);
2540
9404b58f 2541 if (bc_insn_count >= 1)
a0ff9e1a 2542 return {};
2543
2544 /* It is, so we'll try to set a breakpoint at the destination. */
f77ee802 2545 breaks[1] = loc + inst.operands[0].imm.value;
2546
2547 bc_insn_count++;
2548 last_breakpoint++;
2549 }
2550
2551 /* Look for the Store Exclusive which closes the atomic sequence. */
f77ee802 2552 if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
2553 {
2554 closing_insn = loc;
2555 break;
2556 }
2557 }
2558
2559 /* We didn't find a closing Store Exclusive instruction, fall back. */
2560 if (!closing_insn)
a0ff9e1a 2561 return {};
2562
2563 /* Insert breakpoint after the end of the atomic sequence. */
2564 breaks[0] = loc + insn_size;
2565
2566 /* Check for duplicated breakpoints, and also check that the second
2567 breakpoint is not within the atomic sequence. */
2568 if (last_breakpoint
2569 && (breaks[1] == breaks[0]
2570 || (breaks[1] >= pc && breaks[1] <= closing_insn)))
2571 last_breakpoint = 0;
2572
2573 std::vector<CORE_ADDR> next_pcs;
2574
2575 /* Insert the breakpoint at the end of the sequence, and one at the
2576 destination of the conditional branch, if it exists. */
2577 for (index = 0; index <= last_breakpoint; index++)
a0ff9e1a 2578 next_pcs.push_back (breaks[index]);
9404b58f 2579
93f9a11f 2580 return next_pcs;
2581}
2582
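/* Editorial sketch (illustrative, not from the upstream file): a
   typical sequence handled by aarch64_software_single_step is

     again:  ldaxr  w1, [x0]      ; load exclusive opens the sequence
             add    w1, w1, #1
             stlxr  w2, w1, [x0]  ; store exclusive closes it
             cbnz   w2, again

   breaks[0] is placed on the instruction after the stlxr, and only a
   conditional branch inside the sequence would claim breaks[1].  */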
cfba9872 2583struct aarch64_displaced_step_closure : public displaced_step_closure
2584{
2585 /* It is true when a conditional instruction, such as B.COND or TBZ,
2586 is being displaced stepped. */
cfba9872 2587 int cond = 0;
2588
2589 /* PC adjustment offset after displaced stepping. */
cfba9872 2590 int32_t pc_adjust = 0;
2591};
2592
2593/* Data when visiting instructions for displaced stepping. */
2594
2595struct aarch64_displaced_step_data
2596{
2597 struct aarch64_insn_data base;
2598
2599 /* The address at which the instruction will be executed. */
2600 CORE_ADDR new_addr;
2601 /* Buffer of instructions to be copied to NEW_ADDR to execute. */
2602 uint32_t insn_buf[DISPLACED_MODIFIED_INSNS];
2603 /* Number of instructions in INSN_BUF. */
2604 unsigned insn_count;
2605 /* Registers when doing displaced stepping. */
2606 struct regcache *regs;
2607
cfba9872 2608 aarch64_displaced_step_closure *dsc;
2609};
2610
2611/* Implementation of aarch64_insn_visitor method "b". */
2612
2613static void
2614aarch64_displaced_step_b (const int is_bl, const int32_t offset,
2615 struct aarch64_insn_data *data)
2616{
2617 struct aarch64_displaced_step_data *dsd
2618 = (struct aarch64_displaced_step_data *) data;
2ac09a5b 2619 int64_t new_offset = data->insn_addr - dsd->new_addr + offset;
2620
2621 if (can_encode_int32 (new_offset, 28))
2622 {
2623 /* Emit B rather than BL, because executing BL on a new address
2624 will get the wrong address into LR. In order to avoid this,
2625 we emit B, and update LR if the instruction is BL. */
2626 emit_b (dsd->insn_buf, 0, new_offset);
2627 dsd->insn_count++;
2628 }
2629 else
2630 {
2631 /* Write NOP. */
2632 emit_nop (dsd->insn_buf);
2633 dsd->insn_count++;
2634 dsd->dsc->pc_adjust = offset;
2635 }
2636
2637 if (is_bl)
2638 {
2639 /* Update LR. */
2640 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
2641 data->insn_addr + 4);
2642 }
2643}
2644
2645/* Implementation of aarch64_insn_visitor method "b_cond". */
2646
2647static void
2648aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
2649 struct aarch64_insn_data *data)
2650{
2651 struct aarch64_displaced_step_data *dsd
2652 = (struct aarch64_displaced_step_data *) data;
2653
2654 /* GDB has to fix up the PC after displaced stepping this instruction
2655 differently, depending on whether the condition is true or false.
2656 Instead of checking COND against the condition flags, we can emit
2657 the following instructions, and GDB can tell how to fix up the PC
2658 from the resulting PC value.
2659
2660 B.COND TAKEN ; If cond is true, then jump to TAKEN.
2661 INSN1 ;
2662 TAKEN:
2663 INSN2
2664 */
2665
2666 emit_bcond (dsd->insn_buf, cond, 8);
2667 dsd->dsc->cond = 1;
2668 dsd->dsc->pc_adjust = offset;
2669 dsd->insn_count = 1;
2670}
2671
2672/* Dynamically allocate a new register. If we know the register
2673 statically, we should make it a global as above instead of using this
2674 helper function. */
2675
2676static struct aarch64_register
2677aarch64_register (unsigned num, int is64)
2678{
2679 return (struct aarch64_register) { num, is64 };
2680}
2681
2682/* Implementation of aarch64_insn_visitor method "cb". */
2683
2684static void
2685aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
2686 const unsigned rn, int is64,
2687 struct aarch64_insn_data *data)
2688{
2689 struct aarch64_displaced_step_data *dsd
2690 = (struct aarch64_displaced_step_data *) data;
2691
2692 /* The offset is out of range for a compare and branch
2693 instruction. We can use the following instructions instead:
2694
2695 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
2696 INSN1 ;
2697 TAKEN:
2698 INSN2
2699 */
2700 emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
2701 dsd->insn_count = 1;
2702 dsd->dsc->cond = 1;
2703 dsd->dsc->pc_adjust = offset;
2704}
2705
2706/* Implementation of aarch64_insn_visitor method "tb". */
2707
2708static void
2709aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
2710 const unsigned rt, unsigned bit,
2711 struct aarch64_insn_data *data)
2712{
2713 struct aarch64_displaced_step_data *dsd
2714 = (struct aarch64_displaced_step_data *) data;
2715
2716 /* The offset is out of range for a test bit and branch
2717 instruction.  We can use the following instructions instead:
2718
2719 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
2720 INSN1 ;
2721 TAKEN:
2722 INSN2
2723
2724 */
2725 emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
2726 dsd->insn_count = 1;
2727 dsd->dsc->cond = 1;
2728 dsd->dsc->pc_adjust = offset;
2729}
2730
2731/* Implementation of aarch64_insn_visitor method "adr". */
2732
2733static void
2734aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
2735 const int is_adrp, struct aarch64_insn_data *data)
2736{
2737 struct aarch64_displaced_step_data *dsd
2738 = (struct aarch64_displaced_step_data *) data;
2739 /* We know exactly the address the ADR{P,} instruction will compute.
2740 We can just write it to the destination register. */
2741 CORE_ADDR address = data->insn_addr + offset;
2742
2743 if (is_adrp)
2744 {
2745 /* Clear the lower 12 bits of the offset to get the 4K page. */
2746 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2747 address & ~0xfff);
2748 }
2749 else
2750 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2751 address);
2752
2753 dsd->dsc->pc_adjust = 4;
2754 emit_nop (dsd->insn_buf);
2755 dsd->insn_count = 1;
2756}
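/* Editorial example (illustrative, hypothetical numbers): for an ADRP
   whose computed target address is 0x401123, the visitor above writes
   0x401000 (address & ~0xfff, the 4K page base) straight into the
   destination register, leaves a NOP in the scratch pad, and lets the
   fixup phase advance the PC by 4.  */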
2757
2758/* Implementation of aarch64_insn_visitor method "ldr_literal". */
2759
2760static void
2761aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
2762 const unsigned rt, const int is64,
2763 struct aarch64_insn_data *data)
2764{
2765 struct aarch64_displaced_step_data *dsd
2766 = (struct aarch64_displaced_step_data *) data;
2767 CORE_ADDR address = data->insn_addr + offset;
2768 struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };
2769
2770 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
2771 address);
2772
2773 if (is_sw)
2774 dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
2775 aarch64_register (rt, 1), zero);
2776 else
2777 dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
2778 aarch64_register (rt, 1), zero);
2779
2780 dsd->dsc->pc_adjust = 4;
2781}
2782
2783/* Implementation of aarch64_insn_visitor method "others". */
2784
2785static void
2786aarch64_displaced_step_others (const uint32_t insn,
2787 struct aarch64_insn_data *data)
2788{
2789 struct aarch64_displaced_step_data *dsd
2790 = (struct aarch64_displaced_step_data *) data;
2791
e1c587c3 2792 aarch64_emit_insn (dsd->insn_buf, insn);
2793 dsd->insn_count = 1;
2794
2795 if ((insn & 0xfffffc1f) == 0xd65f0000)
2796 {
2797 /* RET */
2798 dsd->dsc->pc_adjust = 0;
2799 }
2800 else
2801 dsd->dsc->pc_adjust = 4;
2802}
2803
2804static const struct aarch64_insn_visitor visitor =
2805{
2806 aarch64_displaced_step_b,
2807 aarch64_displaced_step_b_cond,
2808 aarch64_displaced_step_cb,
2809 aarch64_displaced_step_tb,
2810 aarch64_displaced_step_adr,
2811 aarch64_displaced_step_ldr_literal,
2812 aarch64_displaced_step_others,
2813};
2814
2815/* Implement the "displaced_step_copy_insn" gdbarch method. */
2816
2817struct displaced_step_closure *
2818aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
2819 CORE_ADDR from, CORE_ADDR to,
2820 struct regcache *regs)
2821{
2822 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2823 uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
2824 struct aarch64_displaced_step_data dsd;
2825 aarch64_inst inst;
2826
561a72d4 2827 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
c86a40c6 2828 return NULL;
2829
2830 /* Look for a Load Exclusive instruction which begins the sequence. */
c86a40c6 2831 if (inst.opcode->iclass == ldstexcl && bit (insn, 22))
2832 {
2833 /* We can't displaced step atomic sequences. */
2834 return NULL;
2835 }
2836
2837 std::unique_ptr<aarch64_displaced_step_closure> dsc
2838 (new aarch64_displaced_step_closure);
2839 dsd.base.insn_addr = from;
2840 dsd.new_addr = to;
2841 dsd.regs = regs;
cfba9872 2842 dsd.dsc = dsc.get ();
034f1a81 2843 dsd.insn_count = 0;
2844 aarch64_relocate_instruction (insn, &visitor,
2845 (struct aarch64_insn_data *) &dsd);
2846 gdb_assert (dsd.insn_count <= DISPLACED_MODIFIED_INSNS);
2847
2848 if (dsd.insn_count != 0)
2849 {
2850 int i;
2851
2852 /* Instruction can be relocated to scratch pad. Copy
2853 relocated instruction(s) there. */
2854 for (i = 0; i < dsd.insn_count; i++)
2855 {
2856 if (debug_displaced)
2857 {
2858 debug_printf ("displaced: writing insn ");
2859 debug_printf ("%.8x", dsd.insn_buf[i]);
2860 debug_printf (" at %s\n", paddress (gdbarch, to + i * 4));
2861 }
2862 write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
2863 (ULONGEST) dsd.insn_buf[i]);
2864 }
2865 }
2866 else
2867 {
2868 dsc = NULL;
2869 }
2870
cfba9872 2871 return dsc.release ();
2872}
2873
2874/* Implement the "displaced_step_fixup" gdbarch method. */
2875
2876void
2877aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
cfba9872 2878 struct displaced_step_closure *dsc_,
2879 CORE_ADDR from, CORE_ADDR to,
2880 struct regcache *regs)
2881{
2882 aarch64_displaced_step_closure *dsc = (aarch64_displaced_step_closure *) dsc_;
2883
2884 if (dsc->cond)
2885 {
2886 ULONGEST pc;
2887
2888 regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc);
2889 if (pc - to == 8)
2890 {
2891 /* Condition is true. */
2892 }
2893 else if (pc - to == 4)
2894 {
2895 /* Condition is false. */
2896 dsc->pc_adjust = 4;
2897 }
2898 else
2899 gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
2900 }
2901
2902 if (dsc->pc_adjust != 0)
2903 {
2904 if (debug_displaced)
2905 {
2906 debug_printf ("displaced: fixup: set PC to %s:%d\n",
2907 paddress (gdbarch, from), dsc->pc_adjust);
2908 }
2909 regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
2910 from + dsc->pc_adjust);
2911 }
2912}
2913
2914/* Implement the "displaced_step_hw_singlestep" gdbarch method. */
2915
2916int
2917aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch,
2918 struct displaced_step_closure *closure)
2919{
2920 return 1;
2921}
2922
2923/* Get the correct target description for the given VQ value.
2924 If VQ is zero then it is assumed SVE is not supported.
2925 (It is not possible to set VQ to zero on an SVE system). */
2926
2927const target_desc *
39bfb937 2928aarch64_read_description (uint64_t vq)
da434ccb 2929{
95228a0d 2930 if (vq > AARCH64_MAX_SVE_VQ)
39bfb937 2931 error (_("VQ is %" PRIu64 ", maximum supported value is %d"), vq,
2932 AARCH64_MAX_SVE_VQ);
2933
2934 struct target_desc *tdesc = tdesc_aarch64_list[vq];
da434ccb 2935
2936 if (tdesc == NULL)
2937 {
2938 tdesc = aarch64_create_target_description (vq);
2939 tdesc_aarch64_list[vq] = tdesc;
2940 }
da434ccb 2941
95228a0d 2942 return tdesc;
2943}
2944
2945/* Return the VQ used when creating the target description TDESC. */
2946
1332a140 2947static uint64_t
2948aarch64_get_tdesc_vq (const struct target_desc *tdesc)
2949{
2950 const struct tdesc_feature *feature_sve;
2951
2952 if (!tdesc_has_registers (tdesc))
2953 return 0;
2954
2955 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
2956
2957 if (feature_sve == nullptr)
2958 return 0;
2959
2960 uint64_t vl = tdesc_register_bitsize (feature_sve,
2961 aarch64_sve_register_names[0]) / 8;
2962 return sve_vq_from_vl (vl);
2963}
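/* Editorial example (illustrative): VQ counts 128-bit quadwords, so a
   512-bit SVE implementation describes z0 as 512 bits in the target
   description; the computation above yields VL == 64 bytes and
   sve_vq_from_vl recovers VQ == 64 / 16 == 4.  */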
2964
2965
2966/* Initialize the current architecture based on INFO. If possible,
2967 re-use an architecture from ARCHES, which is a list of
2968 architectures already created during this debugging session.
2969
2970 Called e.g. at program startup, when reading a core file, and when
2971 reading a binary file. */
2972
2973static struct gdbarch *
2974aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
2975{
2976 struct gdbarch_tdep *tdep;
2977 struct gdbarch *gdbarch;
2978 struct gdbarch_list *best_arch;
2979 struct tdesc_arch_data *tdesc_data = NULL;
2980 const struct target_desc *tdesc = info.target_desc;
2981 int i;
07b287a0 2982 int valid_p = 1;
2983 const struct tdesc_feature *feature_core;
2984 const struct tdesc_feature *feature_fpu;
2985 const struct tdesc_feature *feature_sve;
2986 int num_regs = 0;
2987 int num_pseudo_regs = 0;
2988
ba2d2bb2 2989 /* Ensure we always have a target description. */
07b287a0 2990 if (!tdesc_has_registers (tdesc))
ba2d2bb2 2991 tdesc = aarch64_read_description (0);
2992 gdb_assert (tdesc);
2993
2994 feature_core = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");
2995 feature_fpu = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
2996 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
07b287a0 2997
ba2d2bb2 2998 if (feature_core == NULL)
2999 return NULL;
3000
3001 tdesc_data = tdesc_data_alloc ();
3002
ba2d2bb2 3003 /* Validate the description provides the mandatory core R registers
3004 and allocate their numbers. */
3005 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
3006 valid_p &= tdesc_numbered_register (feature_core, tdesc_data,
3007 AARCH64_X0_REGNUM + i,
3008 aarch64_r_register_names[i]);
3009
3010 num_regs = AARCH64_X0_REGNUM + i;
3011
3012 /* Add the V registers. */
3013 if (feature_fpu != NULL)
07b287a0 3014 {
3015 if (feature_sve != NULL)
3016 error (_("Program contains both fpu and SVE features."));
3017
3018 /* Validate the description provides the mandatory V registers
3019 and allocate their numbers. */
07b287a0 3020 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
3021 valid_p &= tdesc_numbered_register (feature_fpu, tdesc_data,
3022 AARCH64_V0_REGNUM + i,
3023 aarch64_v_register_names[i]);
3024
3025 num_regs = AARCH64_V0_REGNUM + i;
ba2d2bb2 3026 }
07b287a0 3027
3028 /* Add the SVE registers. */
3029 if (feature_sve != NULL)
3030 {
3031 /* Validate the description provides the mandatory SVE registers
3032 and allocate their numbers. */
3033 for (i = 0; i < ARRAY_SIZE (aarch64_sve_register_names); i++)
3034 valid_p &= tdesc_numbered_register (feature_sve, tdesc_data,
3035 AARCH64_SVE_Z0_REGNUM + i,
3036 aarch64_sve_register_names[i]);
3037
3038 num_regs = AARCH64_SVE_Z0_REGNUM + i;
3039 num_pseudo_regs += 32; /* add the Vn register pseudos. */
3040 }
3041
3042 if (feature_fpu != NULL || feature_sve != NULL)
3043 {
3044 num_pseudo_regs += 32; /* add the Qn scalar register pseudos */
3045 num_pseudo_regs += 32; /* add the Dn scalar register pseudos */
3046 num_pseudo_regs += 32; /* add the Sn scalar register pseudos */
3047 num_pseudo_regs += 32; /* add the Hn scalar register pseudos */
3048 num_pseudo_regs += 32; /* add the Bn scalar register pseudos */
3049 }
3050
3051 if (!valid_p)
3052 {
3053 tdesc_data_cleanup (tdesc_data);
3054 return NULL;
3055 }
3056
3057 /* AArch64 code is always little-endian. */
3058 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
3059
3060 /* If there is already a candidate, use it. */
3061 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
3062 best_arch != NULL;
3063 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
3064 {
3065 /* Found a match. */
3066 break;
3067 }
3068
3069 if (best_arch != NULL)
3070 {
3071 if (tdesc_data != NULL)
3072 tdesc_data_cleanup (tdesc_data);
3073 return best_arch->gdbarch;
3074 }
3075
8d749320 3076 tdep = XCNEW (struct gdbarch_tdep);
3077 gdbarch = gdbarch_alloc (&info, tdep);
3078
3079 /* This should be low enough for everything. */
3080 tdep->lowest_pc = 0x20;
3081 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
3082 tdep->jb_elt_size = 8;
ba2d2bb2 3083 tdep->vq = aarch64_get_tdesc_vq (tdesc);
3084
3085 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
3086 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
3087
3088 /* Frame handling. */
3089 set_gdbarch_dummy_id (gdbarch, aarch64_dummy_id);
3090 set_gdbarch_unwind_pc (gdbarch, aarch64_unwind_pc);
3091 set_gdbarch_unwind_sp (gdbarch, aarch64_unwind_sp);
3092
3093 /* Advance PC across function entry code. */
3094 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
3095
3096 /* The stack grows downward. */
3097 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
3098
3099 /* Breakpoint manipulation. */
3100 set_gdbarch_breakpoint_kind_from_pc (gdbarch,
3101 aarch64_breakpoint::kind_from_pc);
3102 set_gdbarch_sw_breakpoint_from_kind (gdbarch,
3103 aarch64_breakpoint::bp_from_kind);
07b287a0 3104 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
9404b58f 3105 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
3106
3107 /* Information about registers, etc. */
3108 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
3109 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
3110 set_gdbarch_num_regs (gdbarch, num_regs);
3111
3112 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
3113 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
3114 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
3115 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
3116 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
3117 set_tdesc_pseudo_register_reggroup_p (gdbarch,
3118 aarch64_pseudo_register_reggroup_p);
3119
3120 /* ABI */
3121 set_gdbarch_short_bit (gdbarch, 16);
3122 set_gdbarch_int_bit (gdbarch, 32);
3123 set_gdbarch_float_bit (gdbarch, 32);
3124 set_gdbarch_double_bit (gdbarch, 64);
3125 set_gdbarch_long_double_bit (gdbarch, 128);
3126 set_gdbarch_long_bit (gdbarch, 64);
3127 set_gdbarch_long_long_bit (gdbarch, 64);
3128 set_gdbarch_ptr_bit (gdbarch, 64);
3129 set_gdbarch_char_signed (gdbarch, 0);
53375380 3130 set_gdbarch_wchar_signed (gdbarch, 0);
3131 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
3132 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
3133 set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);
3134
3135 /* Internal <-> external register number maps. */
3136 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
3137
3138 /* Returning results. */
3139 set_gdbarch_return_value (gdbarch, aarch64_return_value);
3140
3141 /* Disassembly. */
3142 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
3143
3144 /* Virtual tables. */
3145 set_gdbarch_vbit_in_delta (gdbarch, 1);
3146
3147 /* Hook in the ABI-specific overrides, if they have been registered. */
3148 info.target_desc = tdesc;
0dba2a6c 3149 info.tdesc_data = tdesc_data;
3150 gdbarch_init_osabi (info, gdbarch);
3151
3152 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
3153
3154 /* Add some default predicates. */
3155 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
3156 dwarf2_append_unwinders (gdbarch);
3157 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
3158
3159 frame_base_set_default (gdbarch, &aarch64_normal_base);
3160
3161 /* Now we have tuned the configuration, set a few final things,
3162 based on what the OS ABI has told us. */
3163
3164 if (tdep->jb_pc >= 0)
3165 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
3166
3167 set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);
3168
3169 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
3170
3171 /* Add standard register aliases. */
3172 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
3173 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
3174 value_of_aarch64_user_reg,
3175 &aarch64_register_aliases[i].regnum);
3176
3177 register_aarch64_ravenscar_ops (gdbarch);
3178
3179 return gdbarch;
3180}
3181
3182static void
3183aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
3184{
3185 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3186
3187 if (tdep == NULL)
3188 return;
3189
3190 fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
3191 paddress (gdbarch, tdep->lowest_pc));
3192}
3193
0d4c07af 3194#if GDB_SELF_TEST
3195namespace selftests
3196{
3197static void aarch64_process_record_test (void);
3198}
0d4c07af 3199#endif
1e2b521d 3200
3201void
3202_initialize_aarch64_tdep (void)
3203{
3204 gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
3205 aarch64_dump_tdep);
3206
3207 /* Debug this file's internals. */
3208 add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
3209Set AArch64 debugging."), _("\
3210Show AArch64 debugging."), _("\
3211When on, AArch64 specific debugging is enabled."),
3212 NULL,
3213 show_aarch64_debug,
3214 &setdebuglist, &showdebuglist);
3215
3216#if GDB_SELF_TEST
3217 selftests::register_test ("aarch64-analyze-prologue",
3218 selftests::aarch64_analyze_prologue_test);
3219 selftests::register_test ("aarch64-process-record",
3220 selftests::aarch64_process_record_test);
6654d750 3221 selftests::record_xml_tdesc ("aarch64.xml",
95228a0d 3222 aarch64_create_target_description (0));
4d9a9006 3223#endif
07b287a0 3224}
3225
3226/* AArch64 process record-replay related structures, defines etc. */
3227
3228#define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
3229 do \
3230 { \
3231 unsigned int reg_len = LENGTH; \
3232 if (reg_len) \
3233 { \
3234 REGS = XNEWVEC (uint32_t, reg_len); \
3235 memcpy(&REGS[0], &RECORD_BUF[0], sizeof(uint32_t)*LENGTH); \
3236 } \
3237 } \
3238 while (0)
3239
3240#define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
3241 do \
3242 { \
3243 unsigned int mem_len = LENGTH; \
3244 if (mem_len) \
3245 { \
3246 MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
3247 memcpy(&MEMS->len, &RECORD_BUF[0], \
3248 sizeof(struct aarch64_mem_r) * LENGTH); \
3249 } \
3250 } \
3251 while (0)
3252
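/* Editorial usage sketch (illustrative, mirroring the record handlers
   below):

     uint32_t record_buf[2];
     record_buf[0] = reg_rd;
     record_buf[1] = AARCH64_CPSR_REGNUM;
     aarch64_insn_r->reg_rec_count = 2;
     REG_ALLOC (aarch64_insn_r->aarch64_regs,
                aarch64_insn_r->reg_rec_count, record_buf);

   REG_ALLOC heap-allocates and fills the register list only when the
   count is non-zero; MEM_ALLOC does the same for the {len, addr}
   pairs that describe memory writes.  */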
3253/* AArch64 record/replay structures and enumerations. */
3254
3255struct aarch64_mem_r
3256{
3257 uint64_t len; /* Record length. */
3258 uint64_t addr; /* Memory address. */
3259};
3260
3261enum aarch64_record_result
3262{
3263 AARCH64_RECORD_SUCCESS,
3264 AARCH64_RECORD_UNSUPPORTED,
3265 AARCH64_RECORD_UNKNOWN
3266};
3267
3268typedef struct insn_decode_record_t
3269{
3270 struct gdbarch *gdbarch;
3271 struct regcache *regcache;
3272 CORE_ADDR this_addr; /* Address of insn to be recorded. */
3273 uint32_t aarch64_insn; /* Insn to be recorded. */
3274 uint32_t mem_rec_count; /* Count of memory records. */
3275 uint32_t reg_rec_count; /* Count of register records. */
3276 uint32_t *aarch64_regs; /* Registers to be recorded. */
3277 struct aarch64_mem_r *aarch64_mems; /* Memory locations to be recorded. */
3278} insn_decode_record;
3279
3280/* Record handler for data processing - register instructions. */
3281
3282static unsigned int
3283aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
3284{
3285 uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
3286 uint32_t record_buf[4];
3287
3288 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3289 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3290 insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);
3291
3292 if (!bit (aarch64_insn_r->aarch64_insn, 28))
3293 {
3294 uint8_t setflags;
3295
3296 /* Logical (shifted register). */
3297 if (insn_bits24_27 == 0x0a)
3298 setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
3299 /* Add/subtract. */
3300 else if (insn_bits24_27 == 0x0b)
3301 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3302 else
3303 return AARCH64_RECORD_UNKNOWN;
3304
3305 record_buf[0] = reg_rd;
3306 aarch64_insn_r->reg_rec_count = 1;
3307 if (setflags)
3308 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3309 }
3310 else
3311 {
3312 if (insn_bits24_27 == 0x0b)
3313 {
3314 /* Data-processing (3 source). */
3315 record_buf[0] = reg_rd;
3316 aarch64_insn_r->reg_rec_count = 1;
3317 }
3318 else if (insn_bits24_27 == 0x0a)
3319 {
3320 if (insn_bits21_23 == 0x00)
3321 {
3322 /* Add/subtract (with carry). */
3323 record_buf[0] = reg_rd;
3324 aarch64_insn_r->reg_rec_count = 1;
3325 if (bit (aarch64_insn_r->aarch64_insn, 29))
3326 {
3327 record_buf[1] = AARCH64_CPSR_REGNUM;
3328 aarch64_insn_r->reg_rec_count = 2;
3329 }
3330 }
3331 else if (insn_bits21_23 == 0x02)
3332 {
3333 /* Conditional compare (register) and conditional compare
3334 (immediate) instructions. */
3335 record_buf[0] = AARCH64_CPSR_REGNUM;
3336 aarch64_insn_r->reg_rec_count = 1;
3337 }
3338 else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
3339 {
3340 /* Conditional select. */
3341 /* Data-processing (2 source). */
3342 /* Data-processing (1 source). */
3343 record_buf[0] = reg_rd;
3344 aarch64_insn_r->reg_rec_count = 1;
3345 }
3346 else
3347 return AARCH64_RECORD_UNKNOWN;
3348 }
3349 }
3350
3351 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3352 record_buf);
3353 return AARCH64_RECORD_SUCCESS;
3354}
3355
3356/* Record handler for data processing - immediate instructions. */
3357
3358static unsigned int
3359aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
3360{
78cc6c2d 3361 uint8_t reg_rd, insn_bit23, insn_bits24_27, setflags;
3362 uint32_t record_buf[4];
3363
3364 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3365 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3366 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3367
3368 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
3369 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
3370 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
3371 {
3372 record_buf[0] = reg_rd;
3373 aarch64_insn_r->reg_rec_count = 1;
3374 }
3375 else if (insn_bits24_27 == 0x01)
3376 {
3377 /* Add/Subtract (immediate). */
3378 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3379 record_buf[0] = reg_rd;
3380 aarch64_insn_r->reg_rec_count = 1;
3381 if (setflags)
3382 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3383 }
3384 else if (insn_bits24_27 == 0x02 && !insn_bit23)
3385 {
3386 /* Logical (immediate). */
3387 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
3388 record_buf[0] = reg_rd;
3389 aarch64_insn_r->reg_rec_count = 1;
3390 if (setflags)
3391 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3392 }
3393 else
3394 return AARCH64_RECORD_UNKNOWN;
3395
3396 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3397 record_buf);
3398 return AARCH64_RECORD_SUCCESS;
3399}
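/* Editorial example (illustrative): "adds x1, x2, #4" has bits 24-27
   == 0x01 (add/subtract immediate) and bit 29 (the S bit) set, so the
   handler above records both x1 (reg_rd) and the CPSR, leaving
   reg_rec_count == 2.  */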
3400
3401/* Record handler for branch, exception generation and system instructions. */
3402
3403static unsigned int
3404aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
3405{
3406 struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
3407 uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
3408 uint32_t record_buf[4];
3409
3410 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3411 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3412 insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3413
3414 if (insn_bits28_31 == 0x0d)
3415 {
3416 /* Exception generation instructions. */
3417 if (insn_bits24_27 == 0x04)
3418 {
3419 if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
3420 && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
3421 && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
3422 {
3423 ULONGEST svc_number;
3424
3425 regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
3426 &svc_number);
3427 return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
3428 svc_number);
3429 }
3430 else
3431 return AARCH64_RECORD_UNSUPPORTED;
3432 }
3433 /* System instructions. */
3434 else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
3435 {
3436 uint32_t reg_rt, reg_crn;
3437
3438 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3439 reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3440
3441 /* Record rt in case of sysl and mrs instructions. */
3442 if (bit (aarch64_insn_r->aarch64_insn, 21))
3443 {
3444 record_buf[0] = reg_rt;
3445 aarch64_insn_r->reg_rec_count = 1;
3446 }
3447 /* Record cpsr for hint and msr(immediate) instructions. */
3448 else if (reg_crn == 0x02 || reg_crn == 0x04)
3449 {
3450 record_buf[0] = AARCH64_CPSR_REGNUM;
3451 aarch64_insn_r->reg_rec_count = 1;
3452 }
3453 }
3454 /* Unconditional branch (register). */
3455 else if((insn_bits24_27 & 0x0e) == 0x06)
3456 {
3457 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3458 if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
3459 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3460 }
3461 else
3462 return AARCH64_RECORD_UNKNOWN;
3463 }
3464 /* Unconditional branch (immediate). */
3465 else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
3466 {
3467 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3468 if (bit (aarch64_insn_r->aarch64_insn, 31))
3469 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3470 }
3471 else
3472 /* Compare & branch (immediate), Test & branch (immediate) and
3473 Conditional branch (immediate). */
3474 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3475
3476 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3477 record_buf);
3478 return AARCH64_RECORD_SUCCESS;
3479}
3480
3481/* Record handler for advanced SIMD load and store instructions. */
3482
3483static unsigned int
3484aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
3485{
3486 CORE_ADDR address;
3487 uint64_t addr_offset = 0;
3488 uint32_t record_buf[24];
3489 uint64_t record_buf_mem[24];
3490 uint32_t reg_rn, reg_rt;
3491 uint32_t reg_index = 0, mem_index = 0;
3492 uint8_t opcode_bits, size_bits;
3493
3494 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3495 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3496 size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3497 opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3498 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);
3499
3500 if (record_debug)
b277c936 3501 debug_printf ("Process record: Advanced SIMD load/store\n");
3502
3503 /* Load/store single structure. */
3504 if (bit (aarch64_insn_r->aarch64_insn, 24))
3505 {
3506 uint8_t sindex, scale, selem, esize, replicate = 0;
3507 scale = opcode_bits >> 2;
3508 selem = ((opcode_bits & 0x02) |
3509 bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
3510 switch (scale)
3511 {
3512 case 1:
3513 if (size_bits & 0x01)
3514 return AARCH64_RECORD_UNKNOWN;
3515 break;
3516 case 2:
3517 if ((size_bits >> 1) & 0x01)
3518 return AARCH64_RECORD_UNKNOWN;
3519 if (size_bits & 0x01)
3520 {
3521 if (!((opcode_bits >> 1) & 0x01))
3522 scale = 3;
3523 else
3524 return AARCH64_RECORD_UNKNOWN;
3525 }
3526 break;
3527 case 3:
3528 if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
3529 {
3530 scale = size_bits;
3531 replicate = 1;
3532 break;
3533 }
3534 else
3535 return AARCH64_RECORD_UNKNOWN;
3536 default:
3537 break;
3538 }
3539 esize = 8 << scale;
3540 if (replicate)
3541 for (sindex = 0; sindex < selem; sindex++)
3542 {
3543 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3544 reg_rt = (reg_rt + 1) % 32;
3545 }
3546 else
3547 {
3548 for (sindex = 0; sindex < selem; sindex++)
3549 {
3550 if (bit (aarch64_insn_r->aarch64_insn, 22))
3551 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3552 else
3553 {
3554 record_buf_mem[mem_index++] = esize / 8;
3555 record_buf_mem[mem_index++] = address + addr_offset;
3556 }
3557 addr_offset = addr_offset + (esize / 8);
3558 reg_rt = (reg_rt + 1) % 32;
3559 }
3560 }
3561 }
3562 /* Load/store multiple structure. */
3563 else
3564 {
3565 uint8_t selem, esize, rpt, elements;
3566 uint8_t eindex, rindex;
3567
3568 esize = 8 << size_bits;
3569 if (bit (aarch64_insn_r->aarch64_insn, 30))
3570 elements = 128 / esize;
3571 else
3572 elements = 64 / esize;
3573
3574 switch (opcode_bits)
3575 {
3576 /* LD/ST4 (4 Registers). */
3577 case 0:
3578 rpt = 1;
3579 selem = 4;
3580 break;
3581 /* LD/ST1 (4 Registers). */
3582 case 2:
3583 rpt = 4;
3584 selem = 1;
3585 break;
3586 /* LD/ST3 (3 Registers). */
3587 case 4:
3588 rpt = 1;
3589 selem = 3;
3590 break;
3591 /* LD/ST1 (3 Registers). */
3592 case 6:
3593 rpt = 3;
3594 selem = 1;
3595 break;
3596 /* LD/ST1 (1 Register). */
3597 case 7:
3598 rpt = 1;
3599 selem = 1;
3600 break;
3601 /* LD/ST2 (2 Registers). */
3602 case 8:
3603 rpt = 1;
3604 selem = 2;
3605 break;
3606 /* LD/ST1 (2 Registers). */
3607 case 10:
3608 rpt = 2;
3609 selem = 1;
3610 break;
3611 default:
3612 return AARCH64_RECORD_UNSUPPORTED;
3613 break;
3614 }
3615 for (rindex = 0; rindex < rpt; rindex++)
3616 for (eindex = 0; eindex < elements; eindex++)
3617 {
3618 uint8_t reg_tt, sindex;
3619 reg_tt = (reg_rt + rindex) % 32;
3620 for (sindex = 0; sindex < selem; sindex++)
3621 {
3622 if (bit (aarch64_insn_r->aarch64_insn, 22))
3623 record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
3624 else
3625 {
3626 record_buf_mem[mem_index++] = esize / 8;
3627 record_buf_mem[mem_index++] = address + addr_offset;
3628 }
3629 addr_offset = addr_offset + (esize / 8);
3630 reg_tt = (reg_tt + 1) % 32;
3631 }
3632 }
3633 }
3634
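/* Two worked examples for the decoding above: "ld1r {v0.4s}, [x0]"
   (0x4d40c800) takes the replicate path with scale == 2 and
   selem == 1, producing a single register record for v0, while
   "ld4 {v0.4s-v3.4s}, [x0]" is a load (bit 22 set) of four
   structures with four 32-bit elements each, so v0-v3 all land in
   record_buf.  The store variants instead log esize / 8 byte memory
   records at successive offsets from the base address in x0.  */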
3635 if (bit (aarch64_insn_r->aarch64_insn, 23))
3636 record_buf[reg_index++] = reg_rn;
3637
3638 aarch64_insn_r->reg_rec_count = reg_index;
3639 aarch64_insn_r->mem_rec_count = mem_index / 2;
3640 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3641 record_buf_mem);
3642 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3643 record_buf);
3644 return AARCH64_RECORD_SUCCESS;
3645}
3646
3647/* Record handler for load and store instructions. */
3648
3649static unsigned int
3650aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
3651{
3652 uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
3653 uint8_t insn_bit23, insn_bit21;
3654 uint8_t opc, size_bits, ld_flag, vector_flag;
3655 uint32_t reg_rn, reg_rt, reg_rt2;
3656 uint64_t datasize, offset;
3657 uint32_t record_buf[8];
3658 uint64_t record_buf_mem[8];
3659 CORE_ADDR address;
3660
3661 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3662 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3663 insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
3664 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3665 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3666 ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
3667 vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
3668 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3669 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3670 reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
3671 size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);
3672
3673 /* Load/store exclusive. */
3674 if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
3675 {
3676 if (record_debug)
3677 debug_printf ("Process record: load/store exclusive\n");
3678
3679 if (ld_flag)
3680 {
3681 record_buf[0] = reg_rt;
3682 aarch64_insn_r->reg_rec_count = 1;
3683 if (insn_bit21)
3684 {
3685 record_buf[1] = reg_rt2;
3686 aarch64_insn_r->reg_rec_count = 2;
3687 }
3688 }
3689 else
3690 {
3691 if (insn_bit21)
3692 datasize = (8 << size_bits) * 2;
3693 else
3694 datasize = (8 << size_bits);
3695 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3696 &address);
3697 record_buf_mem[0] = datasize / 8;
3698 record_buf_mem[1] = address;
3699 aarch64_insn_r->mem_rec_count = 1;
3700 if (!insn_bit23)
3701 {
3702 /* Save register rs. */
3703 record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
3704 aarch64_insn_r->reg_rec_count = 1;
3705 }
3706 }
3707 }
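   /* For example, "stxr w1, w2, [x0]" is a 32-bit store exclusive:
      four bytes at the address in x0 are recorded, and since bit 23
      is clear the status register (w1 here) is recorded too.  The
      load forms record rt, plus rt2 when bit 21 flags a pair.  */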
3708 /* Load register (literal) instructions decoding. */
3709 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
3710 {
3711 if (record_debug)
3712 debug_printf ("Process record: load register (literal)\n");
3713 if (vector_flag)
3714 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3715 else
3716 record_buf[0] = reg_rt;
3717 aarch64_insn_r->reg_rec_count = 1;
3718 }
3719 /* All types of load/store pair instructions decoding. */
3720 else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
3721 {
3722 if (record_debug)
3723 debug_printf ("Process record: load/store pair\n");
3724
3725 if (ld_flag)
3726 {
3727 if (vector_flag)
3728 {
3729 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3730 record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
3731 }
3732 else
3733 {
3734 record_buf[0] = reg_rt;
3735 record_buf[1] = reg_rt2;
3736 }
3737 aarch64_insn_r->reg_rec_count = 2;
3738 }
3739 else
3740 {
3741 uint16_t imm7_off;
3742 imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
3743 if (!vector_flag)
3744 size_bits = size_bits >> 1;
3745 datasize = 8 << (2 + size_bits);
3746 offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
3747 offset = offset << (2 + size_bits);
3748 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3749 &address);
3750 if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
3751 {
3752 if (imm7_off & 0x40)
3753 address = address - offset;
3754 else
3755 address = address + offset;
3756 }
3757
3758 record_buf_mem[0] = datasize / 8;
3759 record_buf_mem[1] = address;
3760 record_buf_mem[2] = datasize / 8;
3761 record_buf_mem[3] = address + (datasize / 8);
3762 aarch64_insn_r->mem_rec_count = 2;
3763 }
3764 if (bit (aarch64_insn_r->aarch64_insn, 23))
3765 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3766 }
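   /* Worked example: "stp x0, x1, [sp, #-16]!" (0xa9bf07e0) has
      imm7 == 0x7e, so the sign bit (0x40) is set and the offset
      decodes to -(2 << 3) == -16.  Being pre-indexed, the two 8-byte
      memory records are taken at sp-16 and sp-8, and bit 23
      additionally marks sp itself for the writeback record.  */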
3767 /* Load/store register (unsigned immediate) instructions. */
3768 else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
3769 {
3770 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3771 if (!(opc >> 1))
3772 {
3773 if (opc & 0x01)
3774 ld_flag = 0x01;
3775 else
3776 ld_flag = 0x0;
3777 }
3778 else
3779 {
3780 if (size_bits == 0x3 && vector_flag == 0x0 && opc == 0x2)
3781 {
3782 /* PRFM (immediate) */
3783 return AARCH64_RECORD_SUCCESS;
3784 }
3785 else if (size_bits == 0x2 && vector_flag == 0x0 && opc == 0x2)
3786 {
3787 /* LDRSW (immediate) */
3788 ld_flag = 0x1;
3789 }
3790 else
3791 {
3792 if (opc & 0x01)
3793 ld_flag = 0x01;
3794 else
3795 ld_flag = 0x0;
3796 }
3797 }
3798
3799 if (record_debug)
3800 {
3801 debug_printf ("Process record: load/store (unsigned immediate):"
3802 " size %x V %d opc %x\n", size_bits, vector_flag,
3803 opc);
3804 }
3805
3806 if (!ld_flag)
3807 {
3808 offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
3809 datasize = 8 << size_bits;
3810 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3811 &address);
3812 offset = offset << size_bits;
3813 address = address + offset;
3814
3815 record_buf_mem[0] = datasize >> 3;
3816 record_buf_mem[1] = address;
3817 aarch64_insn_r->mem_rec_count = 1;
3818 }
3819 else
3820 {
3821 if (vector_flag)
3822 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3823 else
3824 record_buf[0] = reg_rt;
3825 aarch64_insn_r->reg_rec_count = 1;
3826 }
3827 }
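   /* For example, "str x0, [x1, #16]" (0xf9000820) carries
      imm12 == 2; scaled left by size_bits == 3 this yields a byte
      offset of 16, so eight bytes at x1 + 16 are recorded.  The load
      forms (including LDRSW) record only the destination register,
      and PRFM touches no architectural state at all.  */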
3828 /* Load/store register (register offset) instructions. */
3829 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3830 && insn_bits10_11 == 0x02 && insn_bit21)
3831 {
3832 if (record_debug)
3833 debug_printf ("Process record: load/store (register offset)\n");
3834 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3835 if (!(opc >> 1))
3836 if (opc & 0x01)
3837 ld_flag = 0x01;
3838 else
3839 ld_flag = 0x0;
3840 else
3841 if (size_bits != 0x03)
3842 ld_flag = 0x01;
3843 else
3844 return AARCH64_RECORD_UNKNOWN;
3845
3846 if (!ld_flag)
3847 {
3848 ULONGEST reg_rm_val;
3849
3850 regcache_raw_read_unsigned (aarch64_insn_r->regcache,
3851 bits (aarch64_insn_r->aarch64_insn, 16, 20), &reg_rm_val);
3852 if (bit (aarch64_insn_r->aarch64_insn, 12))
3853 offset = reg_rm_val << size_bits;
3854 else
3855 offset = reg_rm_val;
3856 datasize = 8 << size_bits;
3857 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3858 &address);
3859 address = address + offset;
3860 record_buf_mem[0] = datasize >> 3;
3861 record_buf_mem[1] = address;
3862 aarch64_insn_r->mem_rec_count = 1;
3863 }
3864 else
3865 {
3866 if (vector_flag)
3867 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3868 else
3869 record_buf[0] = reg_rt;
3870 aarch64_insn_r->reg_rec_count = 1;
3871 }
3872 }
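   /* For example, "str x0, [x1, x2, lsl #3]" has the shift flag
      (bit 12) set, so the recorded address is x1 + (x2 << 3);
      without the shift flag the raw value of x2 is used as the byte
      offset.  */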
3873 /* Load/store register (immediate and unprivileged) instructions. */
3874 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3875 && !insn_bit21)
3876 {
3877 if (record_debug)
3878 {
3879 debug_printf ("Process record: load/store "
3880 "(immediate and unprivileged)\n");
3881 }
3882 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3883 if (!(opc >> 1))
3884 if (opc & 0x01)
3885 ld_flag = 0x01;
3886 else
3887 ld_flag = 0x0;
3888 else
3889 if (size_bits != 0x03)
3890 ld_flag = 0x01;
3891 else
3892 return AARCH64_RECORD_UNKNOWN;
3893
3894 if (!ld_flag)
3895 {
3896 uint16_t imm9_off;
3897 imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
3898 offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
3899 datasize = 8 << size_bits;
3900 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3901 &address);
3902 if (insn_bits10_11 != 0x01)
3903 {
3904 if (imm9_off & 0x0100)
3905 address = address - offset;
3906 else
3907 address = address + offset;
3908 }
3909 record_buf_mem[0] = datasize >> 3;
3910 record_buf_mem[1] = address;
3911 aarch64_insn_r->mem_rec_count = 1;
3912 }
3913 else
3914 {
3915 if (vector_flag)
3916 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3917 else
3918 record_buf[0] = reg_rt;
3919 aarch64_insn_r->reg_rec_count = 1;
3920 }
3921 if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
3922 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3923 }
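   /* For example, the post-indexed "str x0, [x1], #8" has
      insn_bits10_11 == 0x01, so eight bytes are recorded at the
      unmodified address in x1 and x1 itself is recorded for the
      writeback, while the pre-indexed "str x0, [x1, #8]!"
      (insn_bits10_11 == 0x03) adjusts the address before recording
      and logs the writeback as well.  */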
3924 /* Advanced SIMD load/store instructions. */
3925 else
3926 return aarch64_record_asimd_load_store (aarch64_insn_r);
3927
3928 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3929 record_buf_mem);
3930 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3931 record_buf);
3932 return AARCH64_RECORD_SUCCESS;
3933}
3934
3935/* Record handler for data processing SIMD and floating point instructions. */
3936
3937static unsigned int
3938aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
3939{
3940 uint8_t insn_bit21, opcode, rmode, reg_rd;
3941 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
3942 uint8_t insn_bits11_14;
3943 uint32_t record_buf[2];
3944
3945 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3946 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3947 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3948 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3949 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
3950 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
3951 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
3952 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3953 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3954
3955 if (record_debug)
3956 debug_printf ("Process record: data processing SIMD/FP: ");
3957
3958 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
3959 {
3960 /* Floating point - fixed point conversion instructions. */
3961 if (!insn_bit21)
3962 {
3963 if (record_debug)
3964 debug_printf ("FP - fixed point conversion");
3965
3966 if ((opcode >> 1) == 0x0 && rmode == 0x03)
3967 record_buf[0] = reg_rd;
3968 else
3969 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3970 }
3971 /* Floating point - conditional compare instructions. */
3972 else if (insn_bits10_11 == 0x01)
3973 {
3974 if (record_debug)
3975 debug_printf ("FP - conditional compare");
3976
3977 record_buf[0] = AARCH64_CPSR_REGNUM;
3978 }
3979 /* Floating point - data processing (2-source) and
3980 conditional select instructions. */
3981 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
3982 {
3983 if (record_debug)
3984 debug_printf ("FP - DP (2-source)");
3985
3986 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3987 }
3988 else if (insn_bits10_11 == 0x00)
3989 {
3990 /* Floating point - immediate instructions. */
3991 if ((insn_bits12_15 & 0x01) == 0x01
3992 || (insn_bits12_15 & 0x07) == 0x04)
3993 {
3994 if (record_debug)
3995 debug_printf ("FP - immediate");
3996 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3997 }
3998 /* Floating point - compare instructions. */
3999 else if ((insn_bits12_15 & 0x03) == 0x02)
4000 {
4001 if (record_debug)
4002 debug_printf ("FP - compare");
4003 record_buf[0] = AARCH64_CPSR_REGNUM;
4004 }
4005 /* Floating point - integer conversions instructions. */
4006 else if (insn_bits12_15 == 0x00)
4007 {
4008 /* Convert float to integer instruction. */
4009 if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
4010 {
4011 if (record_debug)
4012 debug_printf ("float to int conversion");
4013
4014 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4015 }
4016 /* Convert integer to float instruction. */
4017 else if ((opcode >> 1) == 0x01 && !rmode)
4018 {
4019 if (record_debug)
4020 debug_printf ("int to float conversion");
4021
4022 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4023 }
4024 /* Move float to integer instruction. */
4025 else if ((opcode >> 1) == 0x03)
4026 {
4027 if (record_debug)
4028 debug_printf ("move float to int");
4029
4030 if (!(opcode & 0x01))
4031 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4032 else
4033 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4034 }
4035 else
4036 return AARCH64_RECORD_UNKNOWN;
4037 }
4038 else
4039 return AARCH64_RECORD_UNKNOWN;
4040 }
4041 else
4042 return AARCH64_RECORD_UNKNOWN;
4043 }
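   /* Examples for the conversion group: "fcvtzs w0, s0" (opcode 000,
      rmode 11) writes a general register and records x0;
      "scvtf s0, w0" (opcode 010, rmode 00) records the SIMD/FP
      destination v0; and the two "fmov" directions (opcode 11x)
      select the X or V record from the low opcode bit.  */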
4044 else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
4045 {
4046 if (record_debug)
4047 debug_printf ("SIMD copy");
4048
4049 /* Advanced SIMD copy instructions. */
4050 if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
4051 && !bit (aarch64_insn_r->aarch64_insn, 15)
4052 && bit (aarch64_insn_r->aarch64_insn, 10))
4053 {
4054 if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
4055 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4056 else
4057 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4058 }
4059 else
4060 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4061 }
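   /* For instance, "umov w0, v0.s[1]" has insn_bits11_14 == 0x07 and
      is recorded against x0, whereas "dup v0.4s, w1"
      (insn_bits11_14 == 0x01) and the element "ins" forms record the
      vector destination v0.  */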
4062 /* All remaining floating point or advanced SIMD instructions. */
4063 else
4064 {
4065 if (record_debug)
4066 debug_printf ("all remaining");
4067
4068 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4069 }
4070
4071 if (record_debug)
4072 debug_printf ("\n");
4073
4074 aarch64_insn_r->reg_rec_count++;
4075 gdb_assert (aarch64_insn_r->reg_rec_count == 1);
4076 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4077 record_buf);
4078 return AARCH64_RECORD_SUCCESS;
4079}
4080
4081/* Decode the instruction type and invoke the matching record handler. */
4082
4083static unsigned int
4084aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
4085{
4086 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
4087
4088 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
4089 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
4090 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
4091 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
4092
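/* These bit tests mirror the top-level encoding groups of the A64
   ISA.  For instance, 0xd4000001 ("svc #0") has bit 26 and bit 28
   set with bits 25 and 27 clear, routing it to the branch/exception/
   system recorder, while 0xf9800020 ("prfm pldl1keep, [x1]", also
   used by the self test below) has bit 27 set and bit 25 clear,
   sending it to the load/store recorder.  */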
4093 /* Data processing - immediate instructions. */
4094 if (!ins_bit26 && !ins_bit27 && ins_bit28)
4095 return aarch64_record_data_proc_imm (aarch64_insn_r);
4096
4097 /* Branch, exception generation and system instructions. */
4098 if (ins_bit26 && !ins_bit27 && ins_bit28)
4099 return aarch64_record_branch_except_sys (aarch64_insn_r);
4100
4101 /* Load and store instructions. */
4102 if (!ins_bit25 && ins_bit27)
4103 return aarch64_record_load_store (aarch64_insn_r);
4104
4105 /* Data processing - register instructions. */
4106 if (ins_bit25 && !ins_bit26 && ins_bit27)
4107 return aarch64_record_data_proc_reg (aarch64_insn_r);
4108
4109 /* Data processing - SIMD and floating point instructions. */
4110 if (ins_bit25 && ins_bit26 && ins_bit27)
4111 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
4112
4113 return AARCH64_RECORD_UNSUPPORTED;
4114}
4115
4116/* Free the locally allocated register and memory record buffers. */
4117
4118static void
4119deallocate_reg_mem (insn_decode_record *record)
4120{
4121 xfree (record->aarch64_regs);
4122 xfree (record->aarch64_mems);
4123}
4124
4125#if GDB_SELF_TEST
4126namespace selftests {
4127
4128static void
4129aarch64_process_record_test (void)
4130{
4131 struct gdbarch_info info;
4132 uint32_t ret;
4133
4134 gdbarch_info_init (&info);
4135 info.bfd_arch_info = bfd_scan_arch ("aarch64");
4136
4137 struct gdbarch *gdbarch = gdbarch_find_by_info (info);
4138 SELF_CHECK (gdbarch != NULL);
4139
4140 insn_decode_record aarch64_record;
4141
4142 memset (&aarch64_record, 0, sizeof (insn_decode_record));
4143 aarch64_record.regcache = NULL;
4144 aarch64_record.this_addr = 0;
4145 aarch64_record.gdbarch = gdbarch;
4146
4147 /* 20 00 80 f9 prfm pldl1keep, [x1] */
4148 aarch64_record.aarch64_insn = 0xf9800020;
4149 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4150 SELF_CHECK (ret == AARCH64_RECORD_SUCCESS);
4151 SELF_CHECK (aarch64_record.reg_rec_count == 0);
4152 SELF_CHECK (aarch64_record.mem_rec_count == 0);
4153
4154 deallocate_reg_mem (&aarch64_record);
4155}
4156
4157} // namespace selftests
4158#endif /* GDB_SELF_TEST */
4159
4160/* Parse the current instruction and record the values of the registers and
4161 memory that will be changed by the current instruction to record_arch_list.
4162 Return -1 if something is wrong. */
4163
4164int
4165aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
4166 CORE_ADDR insn_addr)
4167{
4168 uint32_t rec_no = 0;
4169 uint8_t insn_size = 4;
4170 uint32_t ret = 0;
4171 gdb_byte buf[insn_size];
4172 insn_decode_record aarch64_record;
4173
4174 memset (&buf[0], 0, insn_size);
4175 memset (&aarch64_record, 0, sizeof (insn_decode_record));
4176 target_read_memory (insn_addr, &buf[0], insn_size);
4177 aarch64_record.aarch64_insn
4178 = (uint32_t) extract_unsigned_integer (&buf[0],
4179 insn_size,
4180 gdbarch_byte_order (gdbarch));
4181 aarch64_record.regcache = regcache;
4182 aarch64_record.this_addr = insn_addr;
4183 aarch64_record.gdbarch = gdbarch;
4184
4185 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4186 if (ret == AARCH64_RECORD_UNSUPPORTED)
4187 {
4188 printf_unfiltered (_("Process record does not support instruction "
4189 "0x%0x at address %s.\n"),
4190 aarch64_record.aarch64_insn,
4191 paddress (gdbarch, insn_addr));
4192 ret = -1;
4193 }
4194
4195 if (0 == ret)
4196 {
4197 /* Record registers. */
4198 record_full_arch_list_add_reg (aarch64_record.regcache,
4199 AARCH64_PC_REGNUM);
4200 /* Always record register CPSR. */
4201 record_full_arch_list_add_reg (aarch64_record.regcache,
4202 AARCH64_CPSR_REGNUM);
4203 if (aarch64_record.aarch64_regs)
4204 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
4205 if (record_full_arch_list_add_reg (aarch64_record.regcache,
4206 aarch64_record.aarch64_regs[rec_no]))
4207 ret = -1;
4208
4209 /* Record memories. */
4210 if (aarch64_record.aarch64_mems)
4211 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
4212 if (record_full_arch_list_add_mem
4213 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
4214 aarch64_record.aarch64_mems[rec_no].len))
4215 ret = -1;
4216
4217 if (record_full_arch_list_add_end ())
4218 ret = -1;
4219 }
4220
4221 deallocate_reg_mem (&aarch64_record);
4222 return ret;
4223}