Aarch64: Func to detect args passed in float regs
[deliverable/binutils-gdb.git] / gdb / aarch64-tdep.c
CommitLineData
07b287a0
MS
1/* Common target dependent code for GDB on AArch64 systems.
2
e2882c85 3 Copyright (C) 2009-2018 Free Software Foundation, Inc.
07b287a0
MS
4 Contributed by ARM Ltd.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21#include "defs.h"
22
23#include "frame.h"
24#include "inferior.h"
25#include "gdbcmd.h"
26#include "gdbcore.h"
07b287a0
MS
27#include "dis-asm.h"
28#include "regcache.h"
29#include "reggroups.h"
07b287a0
MS
30#include "value.h"
31#include "arch-utils.h"
32#include "osabi.h"
33#include "frame-unwind.h"
34#include "frame-base.h"
35#include "trad-frame.h"
36#include "objfiles.h"
37#include "dwarf2-frame.h"
38#include "gdbtypes.h"
39#include "prologue-value.h"
40#include "target-descriptions.h"
41#include "user-regs.h"
42#include "language.h"
43#include "infcall.h"
ea873d8e
PL
44#include "ax.h"
45#include "ax-gdb.h"
4d9a9006 46#include "selftest.h"
07b287a0
MS
47
48#include "aarch64-tdep.h"
49
50#include "elf-bfd.h"
51#include "elf/aarch64.h"
52
07b287a0
MS
53#include "vec.h"
54
99afc88b
OJ
55#include "record.h"
56#include "record-full.h"
787749ea
PL
57#include "arch/aarch64-insn.h"
58
f77ee802 59#include "opcode/aarch64.h"
325fac50 60#include <algorithm>
f77ee802
YQ
61
62#define submask(x) ((1L << ((x) + 1)) - 1)
63#define bit(obj,st) (((obj) >> (st)) & 1)
64#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
65
07b287a0
MS
66/* Pseudo register base numbers. */
67#define AARCH64_Q0_REGNUM 0
187f5d00 68#define AARCH64_D0_REGNUM (AARCH64_Q0_REGNUM + AARCH64_D_REGISTER_COUNT)
07b287a0
MS
69#define AARCH64_S0_REGNUM (AARCH64_D0_REGNUM + 32)
70#define AARCH64_H0_REGNUM (AARCH64_S0_REGNUM + 32)
71#define AARCH64_B0_REGNUM (AARCH64_H0_REGNUM + 32)
63bad7b6 72#define AARCH64_SVE_V0_REGNUM (AARCH64_B0_REGNUM + 32)
07b287a0 73
ea92689a
AH
74/* A Homogeneous Floating-Point or Short-Vector Aggregate may have at most
75 four members. */
76#define HA_MAX_NUM_FLDS 4
77
95228a0d
AH
78/* All possible aarch64 target descriptors. */
79struct target_desc *tdesc_aarch64_list[AARCH64_MAX_SVE_VQ + 1];
80
07b287a0
MS
81/* The standard register names, and all the valid aliases for them. */
82static const struct
83{
84 const char *const name;
85 int regnum;
86} aarch64_register_aliases[] =
87{
88 /* 64-bit register names. */
89 {"fp", AARCH64_FP_REGNUM},
90 {"lr", AARCH64_LR_REGNUM},
91 {"sp", AARCH64_SP_REGNUM},
92
93 /* 32-bit register names. */
94 {"w0", AARCH64_X0_REGNUM + 0},
95 {"w1", AARCH64_X0_REGNUM + 1},
96 {"w2", AARCH64_X0_REGNUM + 2},
97 {"w3", AARCH64_X0_REGNUM + 3},
98 {"w4", AARCH64_X0_REGNUM + 4},
99 {"w5", AARCH64_X0_REGNUM + 5},
100 {"w6", AARCH64_X0_REGNUM + 6},
101 {"w7", AARCH64_X0_REGNUM + 7},
102 {"w8", AARCH64_X0_REGNUM + 8},
103 {"w9", AARCH64_X0_REGNUM + 9},
104 {"w10", AARCH64_X0_REGNUM + 10},
105 {"w11", AARCH64_X0_REGNUM + 11},
106 {"w12", AARCH64_X0_REGNUM + 12},
107 {"w13", AARCH64_X0_REGNUM + 13},
108 {"w14", AARCH64_X0_REGNUM + 14},
109 {"w15", AARCH64_X0_REGNUM + 15},
110 {"w16", AARCH64_X0_REGNUM + 16},
111 {"w17", AARCH64_X0_REGNUM + 17},
112 {"w18", AARCH64_X0_REGNUM + 18},
113 {"w19", AARCH64_X0_REGNUM + 19},
114 {"w20", AARCH64_X0_REGNUM + 20},
115 {"w21", AARCH64_X0_REGNUM + 21},
116 {"w22", AARCH64_X0_REGNUM + 22},
117 {"w23", AARCH64_X0_REGNUM + 23},
118 {"w24", AARCH64_X0_REGNUM + 24},
119 {"w25", AARCH64_X0_REGNUM + 25},
120 {"w26", AARCH64_X0_REGNUM + 26},
121 {"w27", AARCH64_X0_REGNUM + 27},
122 {"w28", AARCH64_X0_REGNUM + 28},
123 {"w29", AARCH64_X0_REGNUM + 29},
124 {"w30", AARCH64_X0_REGNUM + 30},
125
126 /* specials */
127 {"ip0", AARCH64_X0_REGNUM + 16},
128 {"ip1", AARCH64_X0_REGNUM + 17}
129};
130
131/* The required core 'R' registers. */
132static const char *const aarch64_r_register_names[] =
133{
134 /* These registers must appear in consecutive RAW register number
135 order and they must begin with AARCH64_X0_REGNUM! */
136 "x0", "x1", "x2", "x3",
137 "x4", "x5", "x6", "x7",
138 "x8", "x9", "x10", "x11",
139 "x12", "x13", "x14", "x15",
140 "x16", "x17", "x18", "x19",
141 "x20", "x21", "x22", "x23",
142 "x24", "x25", "x26", "x27",
143 "x28", "x29", "x30", "sp",
144 "pc", "cpsr"
145};
146
147/* The FP/SIMD 'V' registers. */
148static const char *const aarch64_v_register_names[] =
149{
150 /* These registers must appear in consecutive RAW register number
151 order and they must begin with AARCH64_V0_REGNUM! */
152 "v0", "v1", "v2", "v3",
153 "v4", "v5", "v6", "v7",
154 "v8", "v9", "v10", "v11",
155 "v12", "v13", "v14", "v15",
156 "v16", "v17", "v18", "v19",
157 "v20", "v21", "v22", "v23",
158 "v24", "v25", "v26", "v27",
159 "v28", "v29", "v30", "v31",
160 "fpsr",
161 "fpcr"
162};
163
739e8682
AH
164/* The SVE 'Z' and 'P' registers. */
165static const char *const aarch64_sve_register_names[] =
166{
167 /* These registers must appear in consecutive RAW register number
168 order and they must begin with AARCH64_SVE_Z0_REGNUM! */
169 "z0", "z1", "z2", "z3",
170 "z4", "z5", "z6", "z7",
171 "z8", "z9", "z10", "z11",
172 "z12", "z13", "z14", "z15",
173 "z16", "z17", "z18", "z19",
174 "z20", "z21", "z22", "z23",
175 "z24", "z25", "z26", "z27",
176 "z28", "z29", "z30", "z31",
177 "fpsr", "fpcr",
178 "p0", "p1", "p2", "p3",
179 "p4", "p5", "p6", "p7",
180 "p8", "p9", "p10", "p11",
181 "p12", "p13", "p14", "p15",
182 "ffr", "vg"
183};
184
07b287a0
MS
185/* AArch64 prologue cache structure. */
186struct aarch64_prologue_cache
187{
db634143
PL
188 /* The program counter at the start of the function. It is used to
189 identify this frame as a prologue frame. */
190 CORE_ADDR func;
191
192 /* The program counter at the time this frame was created; i.e. where
193 this function was called from. It is used to identify this frame as a
194 stub frame. */
195 CORE_ADDR prev_pc;
196
07b287a0
MS
197 /* The stack pointer at the time this frame was created; i.e. the
198 caller's stack pointer when this function was called. It is used
199 to identify this frame. */
200 CORE_ADDR prev_sp;
201
7dfa3edc
PL
202 /* Is the target available to read from? */
203 int available_p;
204
07b287a0
MS
205 /* The frame base for this frame is just prev_sp - frame size.
206 FRAMESIZE is the distance from the frame pointer to the
207 initial stack pointer. */
208 int framesize;
209
210 /* The register used to hold the frame pointer for this frame. */
211 int framereg;
212
213 /* Saved register offsets. */
214 struct trad_frame_saved_reg *saved_regs;
215};
216
07b287a0
MS
217static void
218show_aarch64_debug (struct ui_file *file, int from_tty,
219 struct cmd_list_element *c, const char *value)
220{
221 fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
222}
223
ffdbe864
YQ
224namespace {
225
4d9a9006
YQ
226/* Abstract instruction reader. */
227
228class abstract_instruction_reader
229{
230public:
231 /* Read in one instruction. */
232 virtual ULONGEST read (CORE_ADDR memaddr, int len,
233 enum bfd_endian byte_order) = 0;
234};
235
236/* Instruction reader from real target. */
237
238class instruction_reader : public abstract_instruction_reader
239{
240 public:
241 ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
632e107b 242 override
4d9a9006 243 {
fc2f703e 244 return read_code_unsigned_integer (memaddr, len, byte_order);
4d9a9006
YQ
245 }
246};
247
ffdbe864
YQ
248} // namespace
249
07b287a0
MS
250/* Analyze a prologue, looking for a recognizable stack frame
251 and frame pointer. Scan until we encounter a store that could
252 clobber the stack frame unexpectedly, or an unknown instruction. */
253
254static CORE_ADDR
255aarch64_analyze_prologue (struct gdbarch *gdbarch,
256 CORE_ADDR start, CORE_ADDR limit,
4d9a9006
YQ
257 struct aarch64_prologue_cache *cache,
258 abstract_instruction_reader& reader)
07b287a0
MS
259{
260 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
261 int i;
187f5d00
YQ
262 /* Track X registers and D registers in prologue. */
263 pv_t regs[AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT];
07b287a0 264
187f5d00 265 for (i = 0; i < AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT; i++)
07b287a0 266 regs[i] = pv_register (i, 0);
f7b7ed97 267 pv_area stack (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));
07b287a0
MS
268
269 for (; start < limit; start += 4)
270 {
271 uint32_t insn;
d9ebcbce 272 aarch64_inst inst;
07b287a0 273
4d9a9006 274 insn = reader.read (start, 4, byte_order_for_code);
07b287a0 275
561a72d4 276 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
d9ebcbce
YQ
277 break;
278
279 if (inst.opcode->iclass == addsub_imm
280 && (inst.opcode->op == OP_ADD
281 || strcmp ("sub", inst.opcode->name) == 0))
07b287a0 282 {
d9ebcbce
YQ
283 unsigned rd = inst.operands[0].reg.regno;
284 unsigned rn = inst.operands[1].reg.regno;
285
286 gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
287 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
288 gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
289 gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM);
290
291 if (inst.opcode->op == OP_ADD)
292 {
293 regs[rd] = pv_add_constant (regs[rn],
294 inst.operands[2].imm.value);
295 }
296 else
297 {
298 regs[rd] = pv_add_constant (regs[rn],
299 -inst.operands[2].imm.value);
300 }
301 }
302 else if (inst.opcode->iclass == pcreladdr
303 && inst.operands[1].type == AARCH64_OPND_ADDR_ADRP)
304 {
305 gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
306 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
307
308 regs[inst.operands[0].reg.regno] = pv_unknown ();
07b287a0 309 }
d9ebcbce 310 else if (inst.opcode->iclass == branch_imm)
07b287a0
MS
311 {
312 /* Stop analysis on branch. */
313 break;
314 }
d9ebcbce 315 else if (inst.opcode->iclass == condbranch)
07b287a0
MS
316 {
317 /* Stop analysis on branch. */
318 break;
319 }
d9ebcbce 320 else if (inst.opcode->iclass == branch_reg)
07b287a0
MS
321 {
322 /* Stop analysis on branch. */
323 break;
324 }
d9ebcbce 325 else if (inst.opcode->iclass == compbranch)
07b287a0
MS
326 {
327 /* Stop analysis on branch. */
328 break;
329 }
d9ebcbce
YQ
330 else if (inst.opcode->op == OP_MOVZ)
331 {
332 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
333 regs[inst.operands[0].reg.regno] = pv_unknown ();
334 }
335 else if (inst.opcode->iclass == log_shift
336 && strcmp (inst.opcode->name, "orr") == 0)
07b287a0 337 {
d9ebcbce
YQ
338 unsigned rd = inst.operands[0].reg.regno;
339 unsigned rn = inst.operands[1].reg.regno;
340 unsigned rm = inst.operands[2].reg.regno;
341
342 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
343 gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn);
344 gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT);
345
346 if (inst.operands[2].shifter.amount == 0
347 && rn == AARCH64_SP_REGNUM)
07b287a0
MS
348 regs[rd] = regs[rm];
349 else
350 {
351 if (aarch64_debug)
b277c936
PL
352 {
353 debug_printf ("aarch64: prologue analysis gave up "
0a0da556 354 "addr=%s opcode=0x%x (orr x register)\n",
b277c936
PL
355 core_addr_to_string_nz (start), insn);
356 }
07b287a0
MS
357 break;
358 }
359 }
d9ebcbce 360 else if (inst.opcode->op == OP_STUR)
07b287a0 361 {
d9ebcbce
YQ
362 unsigned rt = inst.operands[0].reg.regno;
363 unsigned rn = inst.operands[1].addr.base_regno;
364 int is64
365 = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);
366
367 gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
368 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
369 gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
370 gdb_assert (!inst.operands[1].addr.offset.is_reg);
371
f7b7ed97
TT
372 stack.store (pv_add_constant (regs[rn],
373 inst.operands[1].addr.offset.imm),
374 is64 ? 8 : 4, regs[rt]);
07b287a0 375 }
d9ebcbce 376 else if ((inst.opcode->iclass == ldstpair_off
03bcd739
YQ
377 || (inst.opcode->iclass == ldstpair_indexed
378 && inst.operands[2].addr.preind))
d9ebcbce 379 && strcmp ("stp", inst.opcode->name) == 0)
07b287a0 380 {
03bcd739 381 /* STP with addressing mode Pre-indexed and Base register. */
187f5d00
YQ
382 unsigned rt1;
383 unsigned rt2;
d9ebcbce
YQ
384 unsigned rn = inst.operands[2].addr.base_regno;
385 int32_t imm = inst.operands[2].addr.offset.imm;
386
187f5d00
YQ
387 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
388 || inst.operands[0].type == AARCH64_OPND_Ft);
389 gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2
390 || inst.operands[1].type == AARCH64_OPND_Ft2);
d9ebcbce
YQ
391 gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7);
392 gdb_assert (!inst.operands[2].addr.offset.is_reg);
393
07b287a0
MS
394 /* If recording this store would invalidate the store area
395 (perhaps because rn is not known) then we should abandon
396 further prologue analysis. */
f7b7ed97 397 if (stack.store_would_trash (pv_add_constant (regs[rn], imm)))
07b287a0
MS
398 break;
399
f7b7ed97 400 if (stack.store_would_trash (pv_add_constant (regs[rn], imm + 8)))
07b287a0
MS
401 break;
402
187f5d00
YQ
403 rt1 = inst.operands[0].reg.regno;
404 rt2 = inst.operands[1].reg.regno;
405 if (inst.operands[0].type == AARCH64_OPND_Ft)
406 {
407 /* Only bottom 64-bit of each V register (D register) need
408 to be preserved. */
409 gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
410 rt1 += AARCH64_X_REGISTER_COUNT;
411 rt2 += AARCH64_X_REGISTER_COUNT;
412 }
413
f7b7ed97
TT
414 stack.store (pv_add_constant (regs[rn], imm), 8,
415 regs[rt1]);
416 stack.store (pv_add_constant (regs[rn], imm + 8), 8,
417 regs[rt2]);
14ac654f 418
d9ebcbce 419 if (inst.operands[2].addr.writeback)
93d96012 420 regs[rn] = pv_add_constant (regs[rn], imm);
07b287a0 421
07b287a0 422 }
432ec081
YQ
423 else if ((inst.opcode->iclass == ldst_imm9 /* Signed immediate. */
424 || (inst.opcode->iclass == ldst_pos /* Unsigned immediate. */
425 && (inst.opcode->op == OP_STR_POS
426 || inst.opcode->op == OP_STRF_POS)))
427 && inst.operands[1].addr.base_regno == AARCH64_SP_REGNUM
428 && strcmp ("str", inst.opcode->name) == 0)
429 {
430 /* STR (immediate) */
431 unsigned int rt = inst.operands[0].reg.regno;
432 int32_t imm = inst.operands[1].addr.offset.imm;
433 unsigned int rn = inst.operands[1].addr.base_regno;
434 bool is64
435 = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);
436 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
437 || inst.operands[0].type == AARCH64_OPND_Ft);
438
439 if (inst.operands[0].type == AARCH64_OPND_Ft)
440 {
441 /* Only bottom 64-bit of each V register (D register) need
442 to be preserved. */
443 gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
444 rt += AARCH64_X_REGISTER_COUNT;
445 }
446
f7b7ed97
TT
447 stack.store (pv_add_constant (regs[rn], imm),
448 is64 ? 8 : 4, regs[rt]);
432ec081
YQ
449 if (inst.operands[1].addr.writeback)
450 regs[rn] = pv_add_constant (regs[rn], imm);
451 }
d9ebcbce 452 else if (inst.opcode->iclass == testbranch)
07b287a0
MS
453 {
454 /* Stop analysis on branch. */
455 break;
456 }
457 else
458 {
459 if (aarch64_debug)
b277c936 460 {
0a0da556 461 debug_printf ("aarch64: prologue analysis gave up addr=%s"
b277c936
PL
462 " opcode=0x%x\n",
463 core_addr_to_string_nz (start), insn);
464 }
07b287a0
MS
465 break;
466 }
467 }
468
469 if (cache == NULL)
f7b7ed97 470 return start;
07b287a0
MS
471
472 if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
473 {
474 /* Frame pointer is fp. Frame size is constant. */
475 cache->framereg = AARCH64_FP_REGNUM;
476 cache->framesize = -regs[AARCH64_FP_REGNUM].k;
477 }
478 else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
479 {
480 /* Try the stack pointer. */
481 cache->framesize = -regs[AARCH64_SP_REGNUM].k;
482 cache->framereg = AARCH64_SP_REGNUM;
483 }
484 else
485 {
486 /* We're just out of luck. We don't know where the frame is. */
487 cache->framereg = -1;
488 cache->framesize = 0;
489 }
490
491 for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
492 {
493 CORE_ADDR offset;
494
f7b7ed97 495 if (stack.find_reg (gdbarch, i, &offset))
07b287a0
MS
496 cache->saved_regs[i].addr = offset;
497 }
498
187f5d00
YQ
499 for (i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
500 {
501 int regnum = gdbarch_num_regs (gdbarch);
502 CORE_ADDR offset;
503
f7b7ed97
TT
504 if (stack.find_reg (gdbarch, i + AARCH64_X_REGISTER_COUNT,
505 &offset))
187f5d00
YQ
506 cache->saved_regs[i + regnum + AARCH64_D0_REGNUM].addr = offset;
507 }
508
07b287a0
MS
509 return start;
510}
511
4d9a9006
YQ
512static CORE_ADDR
513aarch64_analyze_prologue (struct gdbarch *gdbarch,
514 CORE_ADDR start, CORE_ADDR limit,
515 struct aarch64_prologue_cache *cache)
516{
517 instruction_reader reader;
518
519 return aarch64_analyze_prologue (gdbarch, start, limit, cache,
520 reader);
521}
522
523#if GDB_SELF_TEST
524
525namespace selftests {
526
527/* Instruction reader from manually cooked instruction sequences. */
528
529class instruction_reader_test : public abstract_instruction_reader
530{
531public:
532 template<size_t SIZE>
533 explicit instruction_reader_test (const uint32_t (&insns)[SIZE])
534 : m_insns (insns), m_insns_size (SIZE)
535 {}
536
537 ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
632e107b 538 override
4d9a9006
YQ
539 {
540 SELF_CHECK (len == 4);
541 SELF_CHECK (memaddr % 4 == 0);
542 SELF_CHECK (memaddr / 4 < m_insns_size);
543
544 return m_insns[memaddr / 4];
545 }
546
547private:
548 const uint32_t *m_insns;
549 size_t m_insns_size;
550};
551
552static void
553aarch64_analyze_prologue_test (void)
554{
555 struct gdbarch_info info;
556
557 gdbarch_info_init (&info);
558 info.bfd_arch_info = bfd_scan_arch ("aarch64");
559
560 struct gdbarch *gdbarch = gdbarch_find_by_info (info);
561 SELF_CHECK (gdbarch != NULL);
562
563 /* Test the simple prologue in which frame pointer is used. */
564 {
565 struct aarch64_prologue_cache cache;
566 cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);
567
568 static const uint32_t insns[] = {
569 0xa9af7bfd, /* stp x29, x30, [sp,#-272]! */
570 0x910003fd, /* mov x29, sp */
571 0x97ffffe6, /* bl 0x400580 */
572 };
573 instruction_reader_test reader (insns);
574
575 CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
576 SELF_CHECK (end == 4 * 2);
577
578 SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
579 SELF_CHECK (cache.framesize == 272);
580
581 for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
582 {
583 if (i == AARCH64_FP_REGNUM)
584 SELF_CHECK (cache.saved_regs[i].addr == -272);
585 else if (i == AARCH64_LR_REGNUM)
586 SELF_CHECK (cache.saved_regs[i].addr == -264);
587 else
588 SELF_CHECK (cache.saved_regs[i].addr == -1);
589 }
590
591 for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
592 {
593 int regnum = gdbarch_num_regs (gdbarch);
594
595 SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
596 == -1);
597 }
598 }
432ec081
YQ
599
600 /* Test a prologue in which STR is used and frame pointer is not
601 used. */
602 {
603 struct aarch64_prologue_cache cache;
604 cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);
605
606 static const uint32_t insns[] = {
607 0xf81d0ff3, /* str x19, [sp, #-48]! */
608 0xb9002fe0, /* str w0, [sp, #44] */
609 0xf90013e1, /* str x1, [sp, #32]*/
610 0xfd000fe0, /* str d0, [sp, #24] */
611 0xaa0203f3, /* mov x19, x2 */
612 0xf94013e0, /* ldr x0, [sp, #32] */
613 };
614 instruction_reader_test reader (insns);
615
616 CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
617
618 SELF_CHECK (end == 4 * 5);
619
620 SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
621 SELF_CHECK (cache.framesize == 48);
622
623 for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
624 {
625 if (i == 1)
626 SELF_CHECK (cache.saved_regs[i].addr == -16);
627 else if (i == 19)
628 SELF_CHECK (cache.saved_regs[i].addr == -48);
629 else
630 SELF_CHECK (cache.saved_regs[i].addr == -1);
631 }
632
633 for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
634 {
635 int regnum = gdbarch_num_regs (gdbarch);
636
637 if (i == 0)
638 SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
639 == -24);
640 else
641 SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
642 == -1);
643 }
644 }
4d9a9006
YQ
645}
646} // namespace selftests
647#endif /* GDB_SELF_TEST */
648
07b287a0
MS
649/* Implement the "skip_prologue" gdbarch method. */
650
651static CORE_ADDR
652aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
653{
07b287a0 654 CORE_ADDR func_addr, limit_pc;
07b287a0
MS
655
656 /* See if we can determine the end of the prologue via the symbol
657 table. If so, then return either PC, or the PC after the
658 prologue, whichever is greater. */
659 if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
660 {
661 CORE_ADDR post_prologue_pc
662 = skip_prologue_using_sal (gdbarch, func_addr);
663
664 if (post_prologue_pc != 0)
325fac50 665 return std::max (pc, post_prologue_pc);
07b287a0
MS
666 }
667
668 /* Can't determine prologue from the symbol table, need to examine
669 instructions. */
670
671 /* Find an upper limit on the function prologue using the debug
672 information. If the debug information could not be used to
673 provide that bound, then use an arbitrary large number as the
674 upper bound. */
675 limit_pc = skip_prologue_using_sal (gdbarch, pc);
676 if (limit_pc == 0)
677 limit_pc = pc + 128; /* Magic. */
678
679 /* Try disassembling prologue. */
680 return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
681}
682
683/* Scan the function prologue for THIS_FRAME and populate the prologue
684 cache CACHE. */
685
686static void
687aarch64_scan_prologue (struct frame_info *this_frame,
688 struct aarch64_prologue_cache *cache)
689{
690 CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
691 CORE_ADDR prologue_start;
692 CORE_ADDR prologue_end;
693 CORE_ADDR prev_pc = get_frame_pc (this_frame);
694 struct gdbarch *gdbarch = get_frame_arch (this_frame);
695
db634143
PL
696 cache->prev_pc = prev_pc;
697
07b287a0
MS
698 /* Assume we do not find a frame. */
699 cache->framereg = -1;
700 cache->framesize = 0;
701
702 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
703 &prologue_end))
704 {
705 struct symtab_and_line sal = find_pc_line (prologue_start, 0);
706
707 if (sal.line == 0)
708 {
709 /* No line info so use the current PC. */
710 prologue_end = prev_pc;
711 }
712 else if (sal.end < prologue_end)
713 {
714 /* The next line begins after the function end. */
715 prologue_end = sal.end;
716 }
717
325fac50 718 prologue_end = std::min (prologue_end, prev_pc);
07b287a0
MS
719 aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
720 }
721 else
722 {
723 CORE_ADDR frame_loc;
07b287a0
MS
724
725 frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
726 if (frame_loc == 0)
727 return;
728
729 cache->framereg = AARCH64_FP_REGNUM;
730 cache->framesize = 16;
731 cache->saved_regs[29].addr = 0;
732 cache->saved_regs[30].addr = 8;
733 }
734}
735
7dfa3edc
PL
736/* Fill in *CACHE with information about the prologue of *THIS_FRAME. This
737 function may throw an exception if the inferior's registers or memory is
738 not available. */
07b287a0 739
7dfa3edc
PL
740static void
741aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
742 struct aarch64_prologue_cache *cache)
07b287a0 743{
07b287a0
MS
744 CORE_ADDR unwound_fp;
745 int reg;
746
07b287a0
MS
747 aarch64_scan_prologue (this_frame, cache);
748
749 if (cache->framereg == -1)
7dfa3edc 750 return;
07b287a0
MS
751
752 unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
753 if (unwound_fp == 0)
7dfa3edc 754 return;
07b287a0
MS
755
756 cache->prev_sp = unwound_fp + cache->framesize;
757
758 /* Calculate actual addresses of saved registers using offsets
759 determined by aarch64_analyze_prologue. */
760 for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
761 if (trad_frame_addr_p (cache->saved_regs, reg))
762 cache->saved_regs[reg].addr += cache->prev_sp;
763
db634143
PL
764 cache->func = get_frame_func (this_frame);
765
7dfa3edc
PL
766 cache->available_p = 1;
767}
768
769/* Allocate and fill in *THIS_CACHE with information about the prologue of
770 *THIS_FRAME. Do not do this is if *THIS_CACHE was already allocated.
771 Return a pointer to the current aarch64_prologue_cache in
772 *THIS_CACHE. */
773
774static struct aarch64_prologue_cache *
775aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
776{
777 struct aarch64_prologue_cache *cache;
778
779 if (*this_cache != NULL)
9a3c8263 780 return (struct aarch64_prologue_cache *) *this_cache;
7dfa3edc
PL
781
782 cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
783 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
784 *this_cache = cache;
785
786 TRY
787 {
788 aarch64_make_prologue_cache_1 (this_frame, cache);
789 }
790 CATCH (ex, RETURN_MASK_ERROR)
791 {
792 if (ex.error != NOT_AVAILABLE_ERROR)
793 throw_exception (ex);
794 }
795 END_CATCH
796
07b287a0
MS
797 return cache;
798}
799
7dfa3edc
PL
800/* Implement the "stop_reason" frame_unwind method. */
801
802static enum unwind_stop_reason
803aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
804 void **this_cache)
805{
806 struct aarch64_prologue_cache *cache
807 = aarch64_make_prologue_cache (this_frame, this_cache);
808
809 if (!cache->available_p)
810 return UNWIND_UNAVAILABLE;
811
812 /* Halt the backtrace at "_start". */
813 if (cache->prev_pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
814 return UNWIND_OUTERMOST;
815
816 /* We've hit a wall, stop. */
817 if (cache->prev_sp == 0)
818 return UNWIND_OUTERMOST;
819
820 return UNWIND_NO_REASON;
821}
822
07b287a0
MS
823/* Our frame ID for a normal frame is the current function's starting
824 PC and the caller's SP when we were called. */
825
826static void
827aarch64_prologue_this_id (struct frame_info *this_frame,
828 void **this_cache, struct frame_id *this_id)
829{
7c8edfae
PL
830 struct aarch64_prologue_cache *cache
831 = aarch64_make_prologue_cache (this_frame, this_cache);
07b287a0 832
7dfa3edc
PL
833 if (!cache->available_p)
834 *this_id = frame_id_build_unavailable_stack (cache->func);
835 else
836 *this_id = frame_id_build (cache->prev_sp, cache->func);
07b287a0
MS
837}
838
839/* Implement the "prev_register" frame_unwind method. */
840
841static struct value *
842aarch64_prologue_prev_register (struct frame_info *this_frame,
843 void **this_cache, int prev_regnum)
844{
7c8edfae
PL
845 struct aarch64_prologue_cache *cache
846 = aarch64_make_prologue_cache (this_frame, this_cache);
07b287a0
MS
847
848 /* If we are asked to unwind the PC, then we need to return the LR
849 instead. The prologue may save PC, but it will point into this
850 frame's prologue, not the next frame's resume location. */
851 if (prev_regnum == AARCH64_PC_REGNUM)
852 {
853 CORE_ADDR lr;
854
855 lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
856 return frame_unwind_got_constant (this_frame, prev_regnum, lr);
857 }
858
859 /* SP is generally not saved to the stack, but this frame is
860 identified by the next frame's stack pointer at the time of the
861 call. The value was already reconstructed into PREV_SP. */
862 /*
863 +----------+ ^
864 | saved lr | |
865 +->| saved fp |--+
866 | | |
867 | | | <- Previous SP
868 | +----------+
869 | | saved lr |
870 +--| saved fp |<- FP
871 | |
872 | |<- SP
873 +----------+ */
874 if (prev_regnum == AARCH64_SP_REGNUM)
875 return frame_unwind_got_constant (this_frame, prev_regnum,
876 cache->prev_sp);
877
878 return trad_frame_get_prev_register (this_frame, cache->saved_regs,
879 prev_regnum);
880}
881
882/* AArch64 prologue unwinder. */
883struct frame_unwind aarch64_prologue_unwind =
884{
885 NORMAL_FRAME,
7dfa3edc 886 aarch64_prologue_frame_unwind_stop_reason,
07b287a0
MS
887 aarch64_prologue_this_id,
888 aarch64_prologue_prev_register,
889 NULL,
890 default_frame_sniffer
891};
892
8b61f75d
PL
893/* Allocate and fill in *THIS_CACHE with information about the prologue of
894 *THIS_FRAME. Do not do this is if *THIS_CACHE was already allocated.
895 Return a pointer to the current aarch64_prologue_cache in
896 *THIS_CACHE. */
07b287a0
MS
897
898static struct aarch64_prologue_cache *
8b61f75d 899aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
07b287a0 900{
07b287a0 901 struct aarch64_prologue_cache *cache;
8b61f75d
PL
902
903 if (*this_cache != NULL)
9a3c8263 904 return (struct aarch64_prologue_cache *) *this_cache;
07b287a0
MS
905
906 cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
907 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
8b61f75d 908 *this_cache = cache;
07b287a0 909
02a2a705
PL
910 TRY
911 {
912 cache->prev_sp = get_frame_register_unsigned (this_frame,
913 AARCH64_SP_REGNUM);
914 cache->prev_pc = get_frame_pc (this_frame);
915 cache->available_p = 1;
916 }
917 CATCH (ex, RETURN_MASK_ERROR)
918 {
919 if (ex.error != NOT_AVAILABLE_ERROR)
920 throw_exception (ex);
921 }
922 END_CATCH
07b287a0
MS
923
924 return cache;
925}
926
02a2a705
PL
927/* Implement the "stop_reason" frame_unwind method. */
928
929static enum unwind_stop_reason
930aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame,
931 void **this_cache)
932{
933 struct aarch64_prologue_cache *cache
934 = aarch64_make_stub_cache (this_frame, this_cache);
935
936 if (!cache->available_p)
937 return UNWIND_UNAVAILABLE;
938
939 return UNWIND_NO_REASON;
940}
941
07b287a0
MS
942/* Our frame ID for a stub frame is the current SP and LR. */
943
944static void
945aarch64_stub_this_id (struct frame_info *this_frame,
946 void **this_cache, struct frame_id *this_id)
947{
8b61f75d
PL
948 struct aarch64_prologue_cache *cache
949 = aarch64_make_stub_cache (this_frame, this_cache);
07b287a0 950
02a2a705
PL
951 if (cache->available_p)
952 *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
953 else
954 *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
07b287a0
MS
955}
956
957/* Implement the "sniffer" frame_unwind method. */
958
959static int
960aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
961 struct frame_info *this_frame,
962 void **this_prologue_cache)
963{
964 CORE_ADDR addr_in_block;
965 gdb_byte dummy[4];
966
967 addr_in_block = get_frame_address_in_block (this_frame);
3e5d3a5a 968 if (in_plt_section (addr_in_block)
07b287a0
MS
969 /* We also use the stub winder if the target memory is unreadable
970 to avoid having the prologue unwinder trying to read it. */
971 || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
972 return 1;
973
974 return 0;
975}
976
977/* AArch64 stub unwinder. */
978struct frame_unwind aarch64_stub_unwind =
979{
980 NORMAL_FRAME,
02a2a705 981 aarch64_stub_frame_unwind_stop_reason,
07b287a0
MS
982 aarch64_stub_this_id,
983 aarch64_prologue_prev_register,
984 NULL,
985 aarch64_stub_unwind_sniffer
986};
987
988/* Return the frame base address of *THIS_FRAME. */
989
990static CORE_ADDR
991aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
992{
7c8edfae
PL
993 struct aarch64_prologue_cache *cache
994 = aarch64_make_prologue_cache (this_frame, this_cache);
07b287a0
MS
995
996 return cache->prev_sp - cache->framesize;
997}
998
999/* AArch64 default frame base information. */
1000struct frame_base aarch64_normal_base =
1001{
1002 &aarch64_prologue_unwind,
1003 aarch64_normal_frame_base,
1004 aarch64_normal_frame_base,
1005 aarch64_normal_frame_base
1006};
1007
1008/* Assuming THIS_FRAME is a dummy, return the frame ID of that
1009 dummy frame. The frame ID's base needs to match the TOS value
1010 saved by save_dummy_frame_tos () and returned from
1011 aarch64_push_dummy_call, and the PC needs to match the dummy
1012 frame's breakpoint. */
1013
1014static struct frame_id
1015aarch64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
1016{
1017 return frame_id_build (get_frame_register_unsigned (this_frame,
1018 AARCH64_SP_REGNUM),
1019 get_frame_pc (this_frame));
1020}
1021
1022/* Implement the "unwind_pc" gdbarch method. */
1023
1024static CORE_ADDR
1025aarch64_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
1026{
1027 CORE_ADDR pc
1028 = frame_unwind_register_unsigned (this_frame, AARCH64_PC_REGNUM);
1029
1030 return pc;
1031}
1032
1033/* Implement the "unwind_sp" gdbarch method. */
1034
1035static CORE_ADDR
1036aarch64_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
1037{
1038 return frame_unwind_register_unsigned (this_frame, AARCH64_SP_REGNUM);
1039}
1040
1041/* Return the value of the REGNUM register in the previous frame of
1042 *THIS_FRAME. */
1043
1044static struct value *
1045aarch64_dwarf2_prev_register (struct frame_info *this_frame,
1046 void **this_cache, int regnum)
1047{
07b287a0
MS
1048 CORE_ADDR lr;
1049
1050 switch (regnum)
1051 {
1052 case AARCH64_PC_REGNUM:
1053 lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
1054 return frame_unwind_got_constant (this_frame, regnum, lr);
1055
1056 default:
1057 internal_error (__FILE__, __LINE__,
1058 _("Unexpected register %d"), regnum);
1059 }
1060}
1061
1062/* Implement the "init_reg" dwarf2_frame_ops method. */
1063
1064static void
1065aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
1066 struct dwarf2_frame_state_reg *reg,
1067 struct frame_info *this_frame)
1068{
1069 switch (regnum)
1070 {
1071 case AARCH64_PC_REGNUM:
1072 reg->how = DWARF2_FRAME_REG_FN;
1073 reg->loc.fn = aarch64_dwarf2_prev_register;
1074 break;
1075 case AARCH64_SP_REGNUM:
1076 reg->how = DWARF2_FRAME_REG_CFA;
1077 break;
1078 }
1079}
1080
1081/* When arguments must be pushed onto the stack, they go on in reverse
1082 order. The code below implements a FILO (stack) to do this. */
1083
1084typedef struct
1085{
c3c87445
YQ
1086 /* Value to pass on stack. It can be NULL if this item is for stack
1087 padding. */
7c543f7b 1088 const gdb_byte *data;
07b287a0
MS
1089
1090 /* Size in bytes of value to pass on stack. */
1091 int len;
1092} stack_item_t;
1093
1094DEF_VEC_O (stack_item_t);
1095
1096/* Return the alignment (in bytes) of the given type. */
1097
1098static int
1099aarch64_type_align (struct type *t)
1100{
1101 int n;
1102 int align;
1103 int falign;
1104
1105 t = check_typedef (t);
1106 switch (TYPE_CODE (t))
1107 {
1108 default:
1109 /* Should never happen. */
1110 internal_error (__FILE__, __LINE__, _("unknown type alignment"));
1111 return 4;
1112
1113 case TYPE_CODE_PTR:
1114 case TYPE_CODE_ENUM:
1115 case TYPE_CODE_INT:
1116 case TYPE_CODE_FLT:
1117 case TYPE_CODE_SET:
1118 case TYPE_CODE_RANGE:
1119 case TYPE_CODE_BITSTRING:
1120 case TYPE_CODE_REF:
aa006118 1121 case TYPE_CODE_RVALUE_REF:
07b287a0
MS
1122 case TYPE_CODE_CHAR:
1123 case TYPE_CODE_BOOL:
1124 return TYPE_LENGTH (t);
1125
1126 case TYPE_CODE_ARRAY:
238f2452
YQ
1127 if (TYPE_VECTOR (t))
1128 {
1129 /* Use the natural alignment for vector types (the same for
1130 scalar type), but the maximum alignment is 128-bit. */
1131 if (TYPE_LENGTH (t) > 16)
1132 return 16;
1133 else
1134 return TYPE_LENGTH (t);
1135 }
1136 else
1137 return aarch64_type_align (TYPE_TARGET_TYPE (t));
07b287a0
MS
1138 case TYPE_CODE_COMPLEX:
1139 return aarch64_type_align (TYPE_TARGET_TYPE (t));
1140
1141 case TYPE_CODE_STRUCT:
1142 case TYPE_CODE_UNION:
1143 align = 1;
1144 for (n = 0; n < TYPE_NFIELDS (t); n++)
1145 {
1146 falign = aarch64_type_align (TYPE_FIELD_TYPE (t, n));
1147 if (falign > align)
1148 align = falign;
1149 }
1150 return align;
1151 }
1152}
1153
cd635f74
YQ
1154/* Return 1 if *TY is a homogeneous floating-point aggregate or
1155 homogeneous short-vector aggregate as defined in the AAPCS64 ABI
1156 document; otherwise return 0. */
07b287a0
MS
1157
1158static int
cd635f74 1159is_hfa_or_hva (struct type *ty)
07b287a0
MS
1160{
1161 switch (TYPE_CODE (ty))
1162 {
1163 case TYPE_CODE_ARRAY:
1164 {
1165 struct type *target_ty = TYPE_TARGET_TYPE (ty);
238f2452
YQ
1166
1167 if (TYPE_VECTOR (ty))
1168 return 0;
1169
cd635f74
YQ
1170 if (TYPE_LENGTH (ty) <= 4 /* HFA or HVA has at most 4 members. */
1171 && (TYPE_CODE (target_ty) == TYPE_CODE_FLT /* HFA */
1172 || (TYPE_CODE (target_ty) == TYPE_CODE_ARRAY /* HVA */
1173 && TYPE_VECTOR (target_ty))))
07b287a0
MS
1174 return 1;
1175 break;
1176 }
1177
1178 case TYPE_CODE_UNION:
1179 case TYPE_CODE_STRUCT:
1180 {
cd635f74 1181 /* HFA or HVA has at most four members. */
07b287a0
MS
1182 if (TYPE_NFIELDS (ty) > 0 && TYPE_NFIELDS (ty) <= 4)
1183 {
1184 struct type *member0_type;
1185
1186 member0_type = check_typedef (TYPE_FIELD_TYPE (ty, 0));
cd635f74
YQ
1187 if (TYPE_CODE (member0_type) == TYPE_CODE_FLT
1188 || (TYPE_CODE (member0_type) == TYPE_CODE_ARRAY
1189 && TYPE_VECTOR (member0_type)))
07b287a0
MS
1190 {
1191 int i;
1192
1193 for (i = 0; i < TYPE_NFIELDS (ty); i++)
1194 {
1195 struct type *member1_type;
1196
1197 member1_type = check_typedef (TYPE_FIELD_TYPE (ty, i));
1198 if (TYPE_CODE (member0_type) != TYPE_CODE (member1_type)
1199 || (TYPE_LENGTH (member0_type)
1200 != TYPE_LENGTH (member1_type)))
1201 return 0;
1202 }
1203 return 1;
1204 }
1205 }
1206 return 0;
1207 }
1208
1209 default:
1210 break;
1211 }
1212
1213 return 0;
1214}
1215
ea92689a
AH
1216/* Worker function for aapcs_is_vfp_call_or_return_candidate.
1217
1218 Return the number of register required, or -1 on failure.
1219
1220 When encountering a base element, if FUNDAMENTAL_TYPE is not set then set it
1221 to the element, else fail if the type of this element does not match the
1222 existing value. */
1223
1224static int
1225aapcs_is_vfp_call_or_return_candidate_1 (struct type *type,
1226 struct type **fundamental_type)
1227{
1228 if (type == nullptr)
1229 return -1;
1230
1231 switch (TYPE_CODE (type))
1232 {
1233 case TYPE_CODE_FLT:
1234 if (TYPE_LENGTH (type) > 16)
1235 return -1;
1236
1237 if (*fundamental_type == nullptr)
1238 *fundamental_type = type;
1239 else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
1240 || TYPE_CODE (type) != TYPE_CODE (*fundamental_type))
1241 return -1;
1242
1243 return 1;
1244
1245 case TYPE_CODE_COMPLEX:
1246 {
1247 struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
1248 if (TYPE_LENGTH (target_type) > 16)
1249 return -1;
1250
1251 if (*fundamental_type == nullptr)
1252 *fundamental_type = target_type;
1253 else if (TYPE_LENGTH (target_type) != TYPE_LENGTH (*fundamental_type)
1254 || TYPE_CODE (target_type) != TYPE_CODE (*fundamental_type))
1255 return -1;
1256
1257 return 2;
1258 }
1259
1260 case TYPE_CODE_ARRAY:
1261 {
1262 if (TYPE_VECTOR (type))
1263 {
1264 if (TYPE_LENGTH (type) != 8 && TYPE_LENGTH (type) != 16)
1265 return -1;
1266
1267 if (*fundamental_type == nullptr)
1268 *fundamental_type = type;
1269 else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
1270 || TYPE_CODE (type) != TYPE_CODE (*fundamental_type))
1271 return -1;
1272
1273 return 1;
1274 }
1275 else
1276 {
1277 struct type *target_type = TYPE_TARGET_TYPE (type);
1278 int count = aapcs_is_vfp_call_or_return_candidate_1
1279 (target_type, fundamental_type);
1280
1281 if (count == -1)
1282 return count;
1283
1284 count *= TYPE_LENGTH (type);
1285 return count;
1286 }
1287 }
1288
1289 case TYPE_CODE_STRUCT:
1290 case TYPE_CODE_UNION:
1291 {
1292 int count = 0;
1293
1294 for (int i = 0; i < TYPE_NFIELDS (type); i++)
1295 {
1296 struct type *member = check_typedef (TYPE_FIELD_TYPE (type, i));
1297
1298 int sub_count = aapcs_is_vfp_call_or_return_candidate_1
1299 (member, fundamental_type);
1300 if (sub_count == -1)
1301 return -1;
1302 count += sub_count;
1303 }
1304 return count;
1305 }
1306
1307 default:
1308 break;
1309 }
1310
1311 return -1;
1312}
1313
1314/* Return true if an argument, whose type is described by TYPE, can be passed or
1315 returned in simd/fp registers, providing enough parameter passing registers
1316 are available. This is as described in the AAPCS64.
1317
1318 Upon successful return, *COUNT returns the number of needed registers,
1319 *FUNDAMENTAL_TYPE contains the type of those registers.
1320
1321 Candidate as per the AAPCS64 5.4.2.C is either a:
1322 - float.
1323 - short-vector.
1324 - HFA (Homogeneous Floating-point Aggregate, 4.3.5.1). A Composite type where
1325 all the members are floats and has at most 4 members.
1326 - HVA (Homogeneous Short-vector Aggregate, 4.3.5.2). A Composite type where
1327 all the members are short vectors and has at most 4 members.
1328 - Complex (7.1.1)
1329
1330 Note that HFAs and HVAs can include nested structures and arrays. */
1331
1332bool
1333aapcs_is_vfp_call_or_return_candidate (struct type *type, int *count,
1334 struct type **fundamental_type)
1335{
1336 if (type == nullptr)
1337 return false;
1338
1339 *fundamental_type = nullptr;
1340
1341 int ag_count = aapcs_is_vfp_call_or_return_candidate_1 (type,
1342 fundamental_type);
1343
1344 if (ag_count > 0 && ag_count <= HA_MAX_NUM_FLDS)
1345 {
1346 *count = ag_count;
1347 return true;
1348 }
1349 else
1350 return false;
1351}
1352
07b287a0
MS
1353/* AArch64 function call information structure. */
1354struct aarch64_call_info
1355{
1356 /* the current argument number. */
1357 unsigned argnum;
1358
1359 /* The next general purpose register number, equivalent to NGRN as
1360 described in the AArch64 Procedure Call Standard. */
1361 unsigned ngrn;
1362
1363 /* The next SIMD and floating point register number, equivalent to
1364 NSRN as described in the AArch64 Procedure Call Standard. */
1365 unsigned nsrn;
1366
1367 /* The next stacked argument address, equivalent to NSAA as
1368 described in the AArch64 Procedure Call Standard. */
1369 unsigned nsaa;
1370
1371 /* Stack item vector. */
1372 VEC(stack_item_t) *si;
1373};
1374
1375/* Pass a value in a sequence of consecutive X registers. The caller
1376 is responsbile for ensuring sufficient registers are available. */
1377
1378static void
1379pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
1380 struct aarch64_call_info *info, struct type *type,
8e80f9d1 1381 struct value *arg)
07b287a0
MS
1382{
1383 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1384 int len = TYPE_LENGTH (type);
1385 enum type_code typecode = TYPE_CODE (type);
1386 int regnum = AARCH64_X0_REGNUM + info->ngrn;
8e80f9d1 1387 const bfd_byte *buf = value_contents (arg);
07b287a0
MS
1388
1389 info->argnum++;
1390
1391 while (len > 0)
1392 {
1393 int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
1394 CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
1395 byte_order);
1396
1397
1398 /* Adjust sub-word struct/union args when big-endian. */
1399 if (byte_order == BFD_ENDIAN_BIG
1400 && partial_len < X_REGISTER_SIZE
1401 && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
1402 regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);
1403
1404 if (aarch64_debug)
b277c936
PL
1405 {
1406 debug_printf ("arg %d in %s = 0x%s\n", info->argnum,
1407 gdbarch_register_name (gdbarch, regnum),
1408 phex (regval, X_REGISTER_SIZE));
1409 }
07b287a0
MS
1410 regcache_cooked_write_unsigned (regcache, regnum, regval);
1411 len -= partial_len;
1412 buf += partial_len;
1413 regnum++;
1414 }
1415}
1416
1417/* Attempt to marshall a value in a V register. Return 1 if
1418 successful, or 0 if insufficient registers are available. This
1419 function, unlike the equivalent pass_in_x() function does not
1420 handle arguments spread across multiple registers. */
1421
1422static int
1423pass_in_v (struct gdbarch *gdbarch,
1424 struct regcache *regcache,
1425 struct aarch64_call_info *info,
0735fddd 1426 int len, const bfd_byte *buf)
07b287a0
MS
1427{
1428 if (info->nsrn < 8)
1429 {
07b287a0 1430 int regnum = AARCH64_V0_REGNUM + info->nsrn;
0735fddd 1431 gdb_byte reg[V_REGISTER_SIZE];
07b287a0
MS
1432
1433 info->argnum++;
1434 info->nsrn++;
1435
0735fddd
YQ
1436 memset (reg, 0, sizeof (reg));
1437 /* PCS C.1, the argument is allocated to the least significant
1438 bits of V register. */
1439 memcpy (reg, buf, len);
b66f5587 1440 regcache->cooked_write (regnum, reg);
0735fddd 1441
07b287a0 1442 if (aarch64_debug)
b277c936
PL
1443 {
1444 debug_printf ("arg %d in %s\n", info->argnum,
1445 gdbarch_register_name (gdbarch, regnum));
1446 }
07b287a0
MS
1447 return 1;
1448 }
1449 info->nsrn = 8;
1450 return 0;
1451}
1452
1453/* Marshall an argument onto the stack. */
1454
1455static void
1456pass_on_stack (struct aarch64_call_info *info, struct type *type,
8e80f9d1 1457 struct value *arg)
07b287a0 1458{
8e80f9d1 1459 const bfd_byte *buf = value_contents (arg);
07b287a0
MS
1460 int len = TYPE_LENGTH (type);
1461 int align;
1462 stack_item_t item;
1463
1464 info->argnum++;
1465
1466 align = aarch64_type_align (type);
1467
1468 /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
1469 Natural alignment of the argument's type. */
1470 align = align_up (align, 8);
1471
1472 /* The AArch64 PCS requires at most doubleword alignment. */
1473 if (align > 16)
1474 align = 16;
1475
1476 if (aarch64_debug)
b277c936
PL
1477 {
1478 debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
1479 info->nsaa);
1480 }
07b287a0
MS
1481
1482 item.len = len;
1483 item.data = buf;
1484 VEC_safe_push (stack_item_t, info->si, &item);
1485
1486 info->nsaa += len;
1487 if (info->nsaa & (align - 1))
1488 {
1489 /* Push stack alignment padding. */
1490 int pad = align - (info->nsaa & (align - 1));
1491
1492 item.len = pad;
c3c87445 1493 item.data = NULL;
07b287a0
MS
1494
1495 VEC_safe_push (stack_item_t, info->si, &item);
1496 info->nsaa += pad;
1497 }
1498}
1499
1500/* Marshall an argument into a sequence of one or more consecutive X
1501 registers or, if insufficient X registers are available then onto
1502 the stack. */
1503
1504static void
1505pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
1506 struct aarch64_call_info *info, struct type *type,
8e80f9d1 1507 struct value *arg)
07b287a0
MS
1508{
1509 int len = TYPE_LENGTH (type);
1510 int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;
1511
1512 /* PCS C.13 - Pass in registers if we have enough spare */
1513 if (info->ngrn + nregs <= 8)
1514 {
8e80f9d1 1515 pass_in_x (gdbarch, regcache, info, type, arg);
07b287a0
MS
1516 info->ngrn += nregs;
1517 }
1518 else
1519 {
1520 info->ngrn = 8;
8e80f9d1 1521 pass_on_stack (info, type, arg);
07b287a0
MS
1522 }
1523}
1524
1525/* Pass a value in a V register, or on the stack if insufficient are
1526 available. */
1527
1528static void
1529pass_in_v_or_stack (struct gdbarch *gdbarch,
1530 struct regcache *regcache,
1531 struct aarch64_call_info *info,
1532 struct type *type,
8e80f9d1 1533 struct value *arg)
07b287a0 1534{
0735fddd
YQ
1535 if (!pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (type),
1536 value_contents (arg)))
8e80f9d1 1537 pass_on_stack (info, type, arg);
07b287a0
MS
1538}
1539
1540/* Implement the "push_dummy_call" gdbarch method. */
1541
1542static CORE_ADDR
1543aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
1544 struct regcache *regcache, CORE_ADDR bp_addr,
1545 int nargs,
1546 struct value **args, CORE_ADDR sp, int struct_return,
1547 CORE_ADDR struct_addr)
1548{
07b287a0 1549 int argnum;
07b287a0
MS
1550 struct aarch64_call_info info;
1551 struct type *func_type;
1552 struct type *return_type;
1553 int lang_struct_return;
1554
1555 memset (&info, 0, sizeof (info));
1556
1557 /* We need to know what the type of the called function is in order
1558 to determine the number of named/anonymous arguments for the
1559 actual argument placement, and the return type in order to handle
1560 return value correctly.
1561
1562 The generic code above us views the decision of return in memory
1563 or return in registers as a two stage processes. The language
1564 handler is consulted first and may decide to return in memory (eg
1565 class with copy constructor returned by value), this will cause
1566 the generic code to allocate space AND insert an initial leading
1567 argument.
1568
1569 If the language code does not decide to pass in memory then the
1570 target code is consulted.
1571
1572 If the language code decides to pass in memory we want to move
1573 the pointer inserted as the initial argument from the argument
1574 list and into X8, the conventional AArch64 struct return pointer
1575 register.
1576
1577 This is slightly awkward, ideally the flag "lang_struct_return"
1578 would be passed to the targets implementation of push_dummy_call.
1579 Rather that change the target interface we call the language code
1580 directly ourselves. */
1581
1582 func_type = check_typedef (value_type (function));
1583
1584 /* Dereference function pointer types. */
1585 if (TYPE_CODE (func_type) == TYPE_CODE_PTR)
1586 func_type = TYPE_TARGET_TYPE (func_type);
1587
1588 gdb_assert (TYPE_CODE (func_type) == TYPE_CODE_FUNC
1589 || TYPE_CODE (func_type) == TYPE_CODE_METHOD);
1590
1591 /* If language_pass_by_reference () returned true we will have been
1592 given an additional initial argument, a hidden pointer to the
1593 return slot in memory. */
1594 return_type = TYPE_TARGET_TYPE (func_type);
1595 lang_struct_return = language_pass_by_reference (return_type);
1596
1597 /* Set the return address. For the AArch64, the return breakpoint
1598 is always at BP_ADDR. */
1599 regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);
1600
1601 /* If we were given an initial argument for the return slot because
1602 lang_struct_return was true, lose it. */
1603 if (lang_struct_return)
1604 {
1605 args++;
1606 nargs--;
1607 }
1608
1609 /* The struct_return pointer occupies X8. */
1610 if (struct_return || lang_struct_return)
1611 {
1612 if (aarch64_debug)
b277c936
PL
1613 {
1614 debug_printf ("struct return in %s = 0x%s\n",
1615 gdbarch_register_name (gdbarch,
1616 AARCH64_STRUCT_RETURN_REGNUM),
1617 paddress (gdbarch, struct_addr));
1618 }
07b287a0
MS
1619 regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
1620 struct_addr);
1621 }
1622
1623 for (argnum = 0; argnum < nargs; argnum++)
1624 {
1625 struct value *arg = args[argnum];
1626 struct type *arg_type;
1627 int len;
1628
1629 arg_type = check_typedef (value_type (arg));
1630 len = TYPE_LENGTH (arg_type);
1631
1632 switch (TYPE_CODE (arg_type))
1633 {
1634 case TYPE_CODE_INT:
1635 case TYPE_CODE_BOOL:
1636 case TYPE_CODE_CHAR:
1637 case TYPE_CODE_RANGE:
1638 case TYPE_CODE_ENUM:
1639 if (len < 4)
1640 {
1641 /* Promote to 32 bit integer. */
1642 if (TYPE_UNSIGNED (arg_type))
1643 arg_type = builtin_type (gdbarch)->builtin_uint32;
1644 else
1645 arg_type = builtin_type (gdbarch)->builtin_int32;
1646 arg = value_cast (arg_type, arg);
1647 }
8e80f9d1 1648 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
07b287a0
MS
1649 break;
1650
1651 case TYPE_CODE_COMPLEX:
1652 if (info.nsrn <= 6)
1653 {
1654 const bfd_byte *buf = value_contents (arg);
1655 struct type *target_type =
1656 check_typedef (TYPE_TARGET_TYPE (arg_type));
1657
07b287a0 1658 pass_in_v (gdbarch, regcache, &info,
0735fddd
YQ
1659 TYPE_LENGTH (target_type), buf);
1660 pass_in_v (gdbarch, regcache, &info,
1661 TYPE_LENGTH (target_type),
07b287a0
MS
1662 buf + TYPE_LENGTH (target_type));
1663 }
1664 else
1665 {
1666 info.nsrn = 8;
8e80f9d1 1667 pass_on_stack (&info, arg_type, arg);
07b287a0
MS
1668 }
1669 break;
1670 case TYPE_CODE_FLT:
8e80f9d1 1671 pass_in_v_or_stack (gdbarch, regcache, &info, arg_type, arg);
07b287a0
MS
1672 break;
1673
1674 case TYPE_CODE_STRUCT:
1675 case TYPE_CODE_ARRAY:
1676 case TYPE_CODE_UNION:
cd635f74 1677 if (is_hfa_or_hva (arg_type))
07b287a0
MS
1678 {
1679 int elements = TYPE_NFIELDS (arg_type);
1680
1681 /* Homogeneous Aggregates */
1682 if (info.nsrn + elements < 8)
1683 {
1684 int i;
1685
1686 for (i = 0; i < elements; i++)
1687 {
1688 /* We know that we have sufficient registers
1689 available therefore this will never fallback
1690 to the stack. */
1691 struct value *field =
1692 value_primitive_field (arg, 0, i, arg_type);
1693 struct type *field_type =
1694 check_typedef (value_type (field));
1695
8e80f9d1
YQ
1696 pass_in_v_or_stack (gdbarch, regcache, &info,
1697 field_type, field);
07b287a0
MS
1698 }
1699 }
1700 else
1701 {
1702 info.nsrn = 8;
8e80f9d1 1703 pass_on_stack (&info, arg_type, arg);
07b287a0
MS
1704 }
1705 }
238f2452
YQ
1706 else if (TYPE_CODE (arg_type) == TYPE_CODE_ARRAY
1707 && TYPE_VECTOR (arg_type) && (len == 16 || len == 8))
1708 {
1709 /* Short vector types are passed in V registers. */
1710 pass_in_v_or_stack (gdbarch, regcache, &info, arg_type, arg);
1711 }
07b287a0
MS
1712 else if (len > 16)
1713 {
1714 /* PCS B.7 Aggregates larger than 16 bytes are passed by
1715 invisible reference. */
1716
1717 /* Allocate aligned storage. */
1718 sp = align_down (sp - len, 16);
1719
1720 /* Write the real data into the stack. */
1721 write_memory (sp, value_contents (arg), len);
1722
1723 /* Construct the indirection. */
1724 arg_type = lookup_pointer_type (arg_type);
1725 arg = value_from_pointer (arg_type, sp);
8e80f9d1 1726 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
07b287a0
MS
1727 }
1728 else
1729 /* PCS C.15 / C.18 multiple values pass. */
8e80f9d1 1730 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
07b287a0
MS
1731 break;
1732
1733 default:
8e80f9d1 1734 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
07b287a0
MS
1735 break;
1736 }
1737 }
1738
1739 /* Make sure stack retains 16 byte alignment. */
1740 if (info.nsaa & 15)
1741 sp -= 16 - (info.nsaa & 15);
1742
1743 while (!VEC_empty (stack_item_t, info.si))
1744 {
1745 stack_item_t *si = VEC_last (stack_item_t, info.si);
1746
1747 sp -= si->len;
c3c87445
YQ
1748 if (si->data != NULL)
1749 write_memory (sp, si->data, si->len);
07b287a0
MS
1750 VEC_pop (stack_item_t, info.si);
1751 }
1752
1753 VEC_free (stack_item_t, info.si);
1754
1755 /* Finally, update the SP register. */
1756 regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);
1757
1758 return sp;
1759}
1760
1761/* Implement the "frame_align" gdbarch method. */
1762
1763static CORE_ADDR
1764aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
1765{
1766 /* Align the stack to sixteen bytes. */
1767 return sp & ~(CORE_ADDR) 15;
1768}
1769
1770/* Return the type for an AdvSISD Q register. */
1771
1772static struct type *
1773aarch64_vnq_type (struct gdbarch *gdbarch)
1774{
1775 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1776
1777 if (tdep->vnq_type == NULL)
1778 {
1779 struct type *t;
1780 struct type *elem;
1781
1782 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
1783 TYPE_CODE_UNION);
1784
1785 elem = builtin_type (gdbarch)->builtin_uint128;
1786 append_composite_type_field (t, "u", elem);
1787
1788 elem = builtin_type (gdbarch)->builtin_int128;
1789 append_composite_type_field (t, "s", elem);
1790
1791 tdep->vnq_type = t;
1792 }
1793
1794 return tdep->vnq_type;
1795}
1796
1797/* Return the type for an AdvSISD D register. */
1798
1799static struct type *
1800aarch64_vnd_type (struct gdbarch *gdbarch)
1801{
1802 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1803
1804 if (tdep->vnd_type == NULL)
1805 {
1806 struct type *t;
1807 struct type *elem;
1808
1809 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
1810 TYPE_CODE_UNION);
1811
1812 elem = builtin_type (gdbarch)->builtin_double;
1813 append_composite_type_field (t, "f", elem);
1814
1815 elem = builtin_type (gdbarch)->builtin_uint64;
1816 append_composite_type_field (t, "u", elem);
1817
1818 elem = builtin_type (gdbarch)->builtin_int64;
1819 append_composite_type_field (t, "s", elem);
1820
1821 tdep->vnd_type = t;
1822 }
1823
1824 return tdep->vnd_type;
1825}
1826
1827/* Return the type for an AdvSISD S register. */
1828
1829static struct type *
1830aarch64_vns_type (struct gdbarch *gdbarch)
1831{
1832 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1833
1834 if (tdep->vns_type == NULL)
1835 {
1836 struct type *t;
1837 struct type *elem;
1838
1839 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
1840 TYPE_CODE_UNION);
1841
1842 elem = builtin_type (gdbarch)->builtin_float;
1843 append_composite_type_field (t, "f", elem);
1844
1845 elem = builtin_type (gdbarch)->builtin_uint32;
1846 append_composite_type_field (t, "u", elem);
1847
1848 elem = builtin_type (gdbarch)->builtin_int32;
1849 append_composite_type_field (t, "s", elem);
1850
1851 tdep->vns_type = t;
1852 }
1853
1854 return tdep->vns_type;
1855}
1856
1857/* Return the type for an AdvSIMD H register. */
1858
1859static struct type *
1860aarch64_vnh_type (struct gdbarch *gdbarch)
1861{
1862 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1863
1864 if (tdep->vnh_type == NULL)
1865 {
1866 struct type *t;
1867 struct type *elem;
1868
1869 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
1870 TYPE_CODE_UNION);
1871
1872 elem = builtin_type (gdbarch)->builtin_uint16;
1873 append_composite_type_field (t, "u", elem);
1874
1875 elem = builtin_type (gdbarch)->builtin_int16;
1876 append_composite_type_field (t, "s", elem);
1877
1878 tdep->vnh_type = t;
1879 }
1880
1881 return tdep->vnh_type;
1882}
1883
1884/* Return the type for an AdvSIMD B register. */
1885
1886static struct type *
1887aarch64_vnb_type (struct gdbarch *gdbarch)
1888{
1889 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1890
1891 if (tdep->vnb_type == NULL)
1892 {
1893 struct type *t;
1894 struct type *elem;
1895
1896 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
1897 TYPE_CODE_UNION);
1898
1899 elem = builtin_type (gdbarch)->builtin_uint8;
1900 append_composite_type_field (t, "u", elem);
1901
1902 elem = builtin_type (gdbarch)->builtin_int8;
1903 append_composite_type_field (t, "s", elem);
1904
1905 tdep->vnb_type = t;
1906 }
1907
1908 return tdep->vnb_type;
1909}
1910
1911/* Return the type for an AdvSIMD V register. */
1912
1913static struct type *
1914aarch64_vnv_type (struct gdbarch *gdbarch)
1915{
1916 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1917
1918 if (tdep->vnv_type == NULL)
1919 {
1920 struct type *t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnv",
1921 TYPE_CODE_UNION);
1922
1923 append_composite_type_field (t, "d", aarch64_vnd_type (gdbarch));
1924 append_composite_type_field (t, "s", aarch64_vns_type (gdbarch));
1925 append_composite_type_field (t, "h", aarch64_vnh_type (gdbarch));
1926 append_composite_type_field (t, "b", aarch64_vnb_type (gdbarch));
1927 append_composite_type_field (t, "q", aarch64_vnq_type (gdbarch));
1928
1929 tdep->vnv_type = t;
1930 }
1931
1932 return tdep->vnv_type;
1933}
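/* Editorial note: the nested unions built above are what make CLI
   expressions such as "print $v0.d.f" work -- each field is simply a
   different view of the same register bytes.  A rough C analogue of the
   layout (a sketch only; the 128-bit "q" views are omitted):  */
#if 0
union aarch64_vnv_sketch
{
  union { double f; uint64_t u; int64_t s; } d;
  union { float f; uint32_t u; int32_t s; } s;
  union { uint16_t u; int16_t s; } h;
  union { uint8_t u; int8_t s; } b;
};
#endif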
1934
1935/* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
1936
1937static int
1938aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
1939{
1940 if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
1941 return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;
1942
1943 if (reg == AARCH64_DWARF_SP)
1944 return AARCH64_SP_REGNUM;
1945
1946 if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
1947 return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;
1948
1949 if (reg == AARCH64_DWARF_SVE_VG)
1950 return AARCH64_SVE_VG_REGNUM;
1951
1952 if (reg == AARCH64_DWARF_SVE_FFR)
1953 return AARCH64_SVE_FFR_REGNUM;
1954
1955 if (reg >= AARCH64_DWARF_SVE_P0 && reg <= AARCH64_DWARF_SVE_P0 + 15)
1956 return AARCH64_SVE_P0_REGNUM + reg - AARCH64_DWARF_SVE_P0;
1957
1958 if (reg >= AARCH64_DWARF_SVE_Z0 && reg <= AARCH64_DWARF_SVE_Z0 + 15)
1959 return AARCH64_SVE_Z0_REGNUM + reg - AARCH64_DWARF_SVE_Z0;
1960
1961 return -1;
1962}
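/* Editorial usage sketch (assumes a valid GDBARCH): sample mappings
   implied by the ranges above.  */
#if 0
  gdb_assert (aarch64_dwarf_reg_to_regnum (gdbarch, AARCH64_DWARF_X0 + 3)
              == AARCH64_X0_REGNUM + 3);
  gdb_assert (aarch64_dwarf_reg_to_regnum (gdbarch, AARCH64_DWARF_SP)
              == AARCH64_SP_REGNUM);
  gdb_assert (aarch64_dwarf_reg_to_regnum (gdbarch, 9999) == -1);
#endif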
1963
1964/* Implement the "print_insn" gdbarch method. */
1965
1966static int
1967aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
1968{
1969 info->symbols = NULL;
6394c606 1970 return default_print_insn (memaddr, info);
1971}
1972
1973/* AArch64 BRK software debug mode instruction.
1974 Note that AArch64 code is always little-endian.
1975 1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000. */
04180708 1976constexpr gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
07b287a0 1977
04180708 1978typedef BP_MANIPULATION (aarch64_default_breakpoint) aarch64_breakpoint;
1979
1980/* Extract from an array REGS containing the (raw) register state a
1981 function return value of type TYPE, and copy that, in virtual
1982 format, into VALBUF. */
1983
1984static void
1985aarch64_extract_return_value (struct type *type, struct regcache *regs,
1986 gdb_byte *valbuf)
1987{
ac7936df 1988 struct gdbarch *gdbarch = regs->arch ();
1989 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1990
1991 if (TYPE_CODE (type) == TYPE_CODE_FLT)
1992 {
1993 bfd_byte buf[V_REGISTER_SIZE];
1994 int len = TYPE_LENGTH (type);
1995
dca08e1f 1996 regs->cooked_read (AARCH64_V0_REGNUM, buf);
1997 memcpy (valbuf, buf, len);
1998 }
1999 else if (TYPE_CODE (type) == TYPE_CODE_INT
2000 || TYPE_CODE (type) == TYPE_CODE_CHAR
2001 || TYPE_CODE (type) == TYPE_CODE_BOOL
2002 || TYPE_CODE (type) == TYPE_CODE_PTR
aa006118 2003 || TYPE_IS_REFERENCE (type)
2004 || TYPE_CODE (type) == TYPE_CODE_ENUM)
2005 {
2006 /* If the type is a plain integer, then the access is
2007 straightforward. Otherwise we have to play around a bit
2008 more. */
2009 int len = TYPE_LENGTH (type);
2010 int regno = AARCH64_X0_REGNUM;
2011 ULONGEST tmp;
2012
2013 while (len > 0)
2014 {
2015 /* By using store_unsigned_integer we avoid having to do
2016 anything special for small big-endian values. */
2017 regcache_cooked_read_unsigned (regs, regno++, &tmp);
2018 store_unsigned_integer (valbuf,
2019 (len > X_REGISTER_SIZE
2020 ? X_REGISTER_SIZE : len), byte_order, tmp);
2021 len -= X_REGISTER_SIZE;
2022 valbuf += X_REGISTER_SIZE;
2023 }
2024 }
2025 else if (TYPE_CODE (type) == TYPE_CODE_COMPLEX)
2026 {
2027 int regno = AARCH64_V0_REGNUM;
2028 bfd_byte buf[V_REGISTER_SIZE];
2029 struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
2030 int len = TYPE_LENGTH (target_type);
2031
dca08e1f 2032 regs->cooked_read (regno, buf);
2033 memcpy (valbuf, buf, len);
2034 valbuf += len;
dca08e1f 2035 regs->cooked_read (regno + 1, buf);
2036 memcpy (valbuf, buf, len);
2037 valbuf += len;
2038 }
cd635f74 2039 else if (is_hfa_or_hva (type))
2040 {
2041 int elements = TYPE_NFIELDS (type);
2042 struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
2043 int len = TYPE_LENGTH (member_type);
2044 int i;
2045
2046 for (i = 0; i < elements; i++)
2047 {
2048 int regno = AARCH64_V0_REGNUM + i;
db3516bb 2049 bfd_byte buf[V_REGISTER_SIZE];
2050
2051 if (aarch64_debug)
b277c936 2052 {
cd635f74 2053 debug_printf ("read HFA or HVA return value element %d from %s\n",
2054 i + 1,
2055 gdbarch_register_name (gdbarch, regno));
2056 }
dca08e1f 2057 regs->cooked_read (regno, buf);
2058
2059 memcpy (valbuf, buf, len);
2060 valbuf += len;
2061 }
2062 }
2063 else if (TYPE_CODE (type) == TYPE_CODE_ARRAY && TYPE_VECTOR (type)
2064 && (TYPE_LENGTH (type) == 16 || TYPE_LENGTH (type) == 8))
2065 {
2066 /* A short vector is returned in a V register. */
2067 gdb_byte buf[V_REGISTER_SIZE];
2068
dca08e1f 2069 regs->cooked_read (AARCH64_V0_REGNUM, buf);
2070 memcpy (valbuf, buf, TYPE_LENGTH (type));
2071 }
2072 else
2073 {
2074 /* For a structure or union the behaviour is as if the value had
2075 been stored to word-aligned memory and then loaded into
2076 registers with 64-bit load instruction(s). */
2077 int len = TYPE_LENGTH (type);
2078 int regno = AARCH64_X0_REGNUM;
2079 bfd_byte buf[X_REGISTER_SIZE];
2080
2081 while (len > 0)
2082 {
dca08e1f 2083 regs->cooked_read (regno++, buf);
2084 memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2085 len -= X_REGISTER_SIZE;
2086 valbuf += X_REGISTER_SIZE;
2087 }
2088 }
2089}
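/* Editorial note on the integer path above: a 16-byte integral value
   such as unsigned __int128 takes two loop iterations, reading X0 and
   then X1; store_unsigned_integer narrows any trailing partial word
   correctly on big-endian targets.  */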
2090
2091
2092/* Will a function return an aggregate type in memory or in a
2093 register? Return 0 if an aggregate type can be returned in a
2094 register, 1 if it must be returned in memory. */
2095
2096static int
2097aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
2098{
f168693b 2099 type = check_typedef (type);
07b287a0 2100
cd635f74 2101 if (is_hfa_or_hva (type))
07b287a0 2102 {
2103 /* v0-v7 are used to return values and one register is allocated
2104 for each member. However, an HFA or HVA has at most four members. */
2105 return 0;
2106 }
2107
2108 if (TYPE_LENGTH (type) > 16)
2109 {
2110 /* PCS B.6 Aggregates larger than 16 bytes are passed by
2111 invisible reference. */
2112
2113 return 1;
2114 }
2115
2116 return 0;
2117}
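/* Editorial examples of the rule above (illustrative only; the real
   classification is done by is_hfa_or_hva and TYPE_LENGTH):

     struct { double a, b; }       -> registers (HFA with two members)
     struct { float a, b, c, d; }  -> registers (HFA with four members)
     struct { char buf[24]; }      -> memory (larger than 16 bytes)  */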
2118
2119/* Write into appropriate registers a function return value of type
2120 TYPE, given in virtual format. */
2121
2122static void
2123aarch64_store_return_value (struct type *type, struct regcache *regs,
2124 const gdb_byte *valbuf)
2125{
ac7936df 2126 struct gdbarch *gdbarch = regs->arch ();
2127 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2128
2129 if (TYPE_CODE (type) == TYPE_CODE_FLT)
2130 {
2131 bfd_byte buf[V_REGISTER_SIZE];
2132 int len = TYPE_LENGTH (type);
2133
2134 memcpy (buf, valbuf, len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
b66f5587 2135 regs->cooked_write (AARCH64_V0_REGNUM, buf);
2136 }
2137 else if (TYPE_CODE (type) == TYPE_CODE_INT
2138 || TYPE_CODE (type) == TYPE_CODE_CHAR
2139 || TYPE_CODE (type) == TYPE_CODE_BOOL
2140 || TYPE_CODE (type) == TYPE_CODE_PTR
aa006118 2141 || TYPE_IS_REFERENCE (type)
2142 || TYPE_CODE (type) == TYPE_CODE_ENUM)
2143 {
2144 if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
2145 {
2146 /* Values of one word or less are zero/sign-extended and
2147 returned in X0. */
2148 bfd_byte tmpbuf[X_REGISTER_SIZE];
2149 LONGEST val = unpack_long (type, valbuf);
2150
2151 store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
b66f5587 2152 regs->cooked_write (AARCH64_X0_REGNUM, tmpbuf);
2153 }
2154 else
2155 {
2156 /* Integral values greater than one word are stored in
2157 consecutive registers starting with X0. This will always
2158 be a multiple of the register size. */
2159 int len = TYPE_LENGTH (type);
2160 int regno = AARCH64_X0_REGNUM;
2161
2162 while (len > 0)
2163 {
b66f5587 2164 regs->cooked_write (regno++, valbuf);
2165 len -= X_REGISTER_SIZE;
2166 valbuf += X_REGISTER_SIZE;
2167 }
2168 }
2169 }
cd635f74 2170 else if (is_hfa_or_hva (type))
2171 {
2172 int elements = TYPE_NFIELDS (type);
2173 struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
2174 int len = TYPE_LENGTH (member_type);
2175 int i;
2176
2177 for (i = 0; i < elements; i++)
2178 {
2179 int regno = AARCH64_V0_REGNUM + i;
d1be909e 2180 bfd_byte tmpbuf[V_REGISTER_SIZE];
2181
2182 if (aarch64_debug)
b277c936 2183 {
cd635f74 2184 debug_printf ("write HFA or HVA return value element %d to %s\n",
2185 i + 1,
2186 gdbarch_register_name (gdbarch, regno));
2187 }
2188
2189 memcpy (tmpbuf, valbuf, len);
b66f5587 2190 regs->cooked_write (regno, tmpbuf);
2191 valbuf += len;
2192 }
2193 }
2194 else if (TYPE_CODE (type) == TYPE_CODE_ARRAY && TYPE_VECTOR (type)
2195 && (TYPE_LENGTH (type) == 8 || TYPE_LENGTH (type) == 16))
2196 {
2197 /* Short vector. */
2198 gdb_byte buf[V_REGISTER_SIZE];
2199
2200 memcpy (buf, valbuf, TYPE_LENGTH (type));
b66f5587 2201 regs->cooked_write (AARCH64_V0_REGNUM, buf);
238f2452 2202 }
2203 else
2204 {
2205 /* For a structure or union the behaviour is as if the value had
2206 been stored to word-aligned memory and then loaded into
2207 registers with 64-bit load instruction(s). */
2208 int len = TYPE_LENGTH (type);
2209 int regno = AARCH64_X0_REGNUM;
2210 bfd_byte tmpbuf[X_REGISTER_SIZE];
2211
2212 while (len > 0)
2213 {
2214 memcpy (tmpbuf, valbuf,
2215 len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
b66f5587 2216 regs->cooked_write (regno++, tmpbuf);
2217 len -= X_REGISTER_SIZE;
2218 valbuf += X_REGISTER_SIZE;
2219 }
2220 }
2221}
2222
2223/* Implement the "return_value" gdbarch method. */
2224
2225static enum return_value_convention
2226aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
2227 struct type *valtype, struct regcache *regcache,
2228 gdb_byte *readbuf, const gdb_byte *writebuf)
2229{
2230
2231 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
2232 || TYPE_CODE (valtype) == TYPE_CODE_UNION
2233 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
2234 {
2235 if (aarch64_return_in_memory (gdbarch, valtype))
2236 {
2237 if (aarch64_debug)
b277c936 2238 debug_printf ("return value in memory\n");
2239 return RETURN_VALUE_STRUCT_CONVENTION;
2240 }
2241 }
2242
2243 if (writebuf)
2244 aarch64_store_return_value (valtype, regcache, writebuf);
2245
2246 if (readbuf)
2247 aarch64_extract_return_value (valtype, regcache, readbuf);
2248
2249 if (aarch64_debug)
b277c936 2250 debug_printf ("return value in registers\n");
2251
2252 return RETURN_VALUE_REGISTER_CONVENTION;
2253}
2254
2255/* Implement the "get_longjmp_target" gdbarch method. */
2256
2257static int
2258aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2259{
2260 CORE_ADDR jb_addr;
2261 gdb_byte buf[X_REGISTER_SIZE];
2262 struct gdbarch *gdbarch = get_frame_arch (frame);
2263 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2264 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2265
2266 jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
2267
2268 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
2269 X_REGISTER_SIZE))
2270 return 0;
2271
2272 *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
2273 return 1;
2274}
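/* Editorial note: the saved PC is read from JB_ADDR + JB_PC * JB_ELT_SIZE.
   With hypothetical OS-ABI values jb_pc == 11 and jb_elt_size == 8, the
   target_read_memory call above fetches eight bytes at jb_addr + 88.  */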
2275
2276/* Implement the "gen_return_address" gdbarch method. */
2277
2278static void
2279aarch64_gen_return_address (struct gdbarch *gdbarch,
2280 struct agent_expr *ax, struct axs_value *value,
2281 CORE_ADDR scope)
2282{
2283 value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
2284 value->kind = axs_lvalue_register;
2285 value->u.reg = AARCH64_LR_REGNUM;
2286}
2287\f
2288
2289/* Return the pseudo register name corresponding to register regnum. */
2290
2291static const char *
2292aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
2293{
2294 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2295
2296 static const char *const q_name[] =
2297 {
2298 "q0", "q1", "q2", "q3",
2299 "q4", "q5", "q6", "q7",
2300 "q8", "q9", "q10", "q11",
2301 "q12", "q13", "q14", "q15",
2302 "q16", "q17", "q18", "q19",
2303 "q20", "q21", "q22", "q23",
2304 "q24", "q25", "q26", "q27",
2305 "q28", "q29", "q30", "q31",
2306 };
2307
2308 static const char *const d_name[] =
2309 {
2310 "d0", "d1", "d2", "d3",
2311 "d4", "d5", "d6", "d7",
2312 "d8", "d9", "d10", "d11",
2313 "d12", "d13", "d14", "d15",
2314 "d16", "d17", "d18", "d19",
2315 "d20", "d21", "d22", "d23",
2316 "d24", "d25", "d26", "d27",
2317 "d28", "d29", "d30", "d31",
2318 };
2319
2320 static const char *const s_name[] =
2321 {
2322 "s0", "s1", "s2", "s3",
2323 "s4", "s5", "s6", "s7",
2324 "s8", "s9", "s10", "s11",
2325 "s12", "s13", "s14", "s15",
2326 "s16", "s17", "s18", "s19",
2327 "s20", "s21", "s22", "s23",
2328 "s24", "s25", "s26", "s27",
2329 "s28", "s29", "s30", "s31",
2330 };
2331
2332 static const char *const h_name[] =
2333 {
2334 "h0", "h1", "h2", "h3",
2335 "h4", "h5", "h6", "h7",
2336 "h8", "h9", "h10", "h11",
2337 "h12", "h13", "h14", "h15",
2338 "h16", "h17", "h18", "h19",
2339 "h20", "h21", "h22", "h23",
2340 "h24", "h25", "h26", "h27",
2341 "h28", "h29", "h30", "h31",
2342 };
2343
2344 static const char *const b_name[] =
2345 {
2346 "b0", "b1", "b2", "b3",
2347 "b4", "b5", "b6", "b7",
2348 "b8", "b9", "b10", "b11",
2349 "b12", "b13", "b14", "b15",
2350 "b16", "b17", "b18", "b19",
2351 "b20", "b21", "b22", "b23",
2352 "b24", "b25", "b26", "b27",
2353 "b28", "b29", "b30", "b31",
2354 };
2355
2356 regnum -= gdbarch_num_regs (gdbarch);
2357
2358 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2359 return q_name[regnum - AARCH64_Q0_REGNUM];
2360
2361 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2362 return d_name[regnum - AARCH64_D0_REGNUM];
2363
2364 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2365 return s_name[regnum - AARCH64_S0_REGNUM];
2366
2367 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2368 return h_name[regnum - AARCH64_H0_REGNUM];
2369
2370 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2371 return b_name[regnum - AARCH64_B0_REGNUM];
2372
2373 if (tdep->has_sve ())
2374 {
2375 static const char *const sve_v_name[] =
2376 {
2377 "v0", "v1", "v2", "v3",
2378 "v4", "v5", "v6", "v7",
2379 "v8", "v9", "v10", "v11",
2380 "v12", "v13", "v14", "v15",
2381 "v16", "v17", "v18", "v19",
2382 "v20", "v21", "v22", "v23",
2383 "v24", "v25", "v26", "v27",
2384 "v28", "v29", "v30", "v31",
2385 };
2386
2387 if (regnum >= AARCH64_SVE_V0_REGNUM
2388 && regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2389 return sve_v_name[regnum - AARCH64_SVE_V0_REGNUM];
2390 }
2391
2392 internal_error (__FILE__, __LINE__,
2393 _("aarch64_pseudo_register_name: bad register number %d"),
2394 regnum);
2395}
2396
2397/* Implement the "pseudo_register_type" tdesc_arch_data method. */
2398
2399static struct type *
2400aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
2401{
2402 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2403
2404 regnum -= gdbarch_num_regs (gdbarch);
2405
2406 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2407 return aarch64_vnq_type (gdbarch);
2408
2409 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2410 return aarch64_vnd_type (gdbarch);
2411
2412 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2413 return aarch64_vns_type (gdbarch);
2414
2415 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2416 return aarch64_vnh_type (gdbarch);
2417
2418 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2419 return aarch64_vnb_type (gdbarch);
2420
2421 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2422 && regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2423 return aarch64_vnv_type (gdbarch);
2424
2425 internal_error (__FILE__, __LINE__,
2426 _("aarch64_pseudo_register_type: bad register number %d"),
2427 regnum);
2428}
2429
2430/* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2431
2432static int
2433aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
2434 struct reggroup *group)
2435{
2436 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2437
2438 regnum -= gdbarch_num_regs (gdbarch);
2439
2440 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2441 return group == all_reggroup || group == vector_reggroup;
2442 else if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2443 return (group == all_reggroup || group == vector_reggroup
2444 || group == float_reggroup);
2445 else if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2446 return (group == all_reggroup || group == vector_reggroup
2447 || group == float_reggroup);
2448 else if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2449 return group == all_reggroup || group == vector_reggroup;
2450 else if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2451 return group == all_reggroup || group == vector_reggroup;
2452 else if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2453 && regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2454 return group == all_reggroup || group == vector_reggroup;
2455
2456 return group == all_reggroup;
2457}
2458
2459/* Helper for aarch64_pseudo_read_value. */
2460
2461static struct value *
2462aarch64_pseudo_read_value_1 (struct gdbarch *gdbarch,
2463 readable_regcache *regcache, int regnum_offset,
2464 int regsize, struct value *result_value)
2465{
2466 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
2467
2468 /* Enough space for a full vector register. */
2469 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2470 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2471
2472 if (regcache->raw_read (v_regnum, reg_buf) != REG_VALID)
2473 mark_value_bytes_unavailable (result_value, 0,
2474 TYPE_LENGTH (value_type (result_value)));
2475 else
2476 memcpy (value_contents_raw (result_value), reg_buf, regsize);
63bad7b6 2477
2478 return result_value;
2479 }
2480
2481/* Implement the "pseudo_register_read_value" gdbarch method. */
2482
2483static struct value *
3c5cd5c3 2484aarch64_pseudo_read_value (struct gdbarch *gdbarch, readable_regcache *regcache,
2485 int regnum)
2486{
63bad7b6 2487 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3c5cd5c3 2488 struct value *result_value = allocate_value (register_type (gdbarch, regnum));
07b287a0 2489
2490 VALUE_LVAL (result_value) = lval_register;
2491 VALUE_REGNUM (result_value) = regnum;
2492
2493 regnum -= gdbarch_num_regs (gdbarch);
2494
2495 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2496 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2497 regnum - AARCH64_Q0_REGNUM,
3c5cd5c3 2498 Q_REGISTER_SIZE, result_value);
2499
2500 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2501 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2502 regnum - AARCH64_D0_REGNUM,
3c5cd5c3 2503 D_REGISTER_SIZE, result_value);
2504
2505 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2506 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2507 regnum - AARCH64_S0_REGNUM,
3c5cd5c3 2508 S_REGISTER_SIZE, result_value);
2509
2510 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2511 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2512 regnum - AARCH64_H0_REGNUM,
3c5cd5c3 2513 H_REGISTER_SIZE, result_value);
2514
2515 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2516 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2517 regnum - AARCH64_B0_REGNUM,
3c5cd5c3 2518 B_REGISTER_SIZE, result_value);
07b287a0 2519
2520 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2521 && regnum < AARCH64_SVE_V0_REGNUM + 32)
2522 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2523 regnum - AARCH64_SVE_V0_REGNUM,
2524 V_REGISTER_SIZE, result_value);
2525
2526 gdb_assert_not_reached ("regnum out of bounds");
2527}
2528
3c5cd5c3 2529/* Helper for aarch64_pseudo_write. */
2530
2531static void
2532aarch64_pseudo_write_1 (struct gdbarch *gdbarch, struct regcache *regcache,
2533 int regnum_offset, int regsize, const gdb_byte *buf)
07b287a0 2534{
3c5cd5c3 2535 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
07b287a0 2536
2537 /* Enough space for a full vector register. */
2538 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2539 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2540
2541 /* Ensure the register buffer is zero; we want gdb writes of the
2542 various 'scalar' pseudo registers to behave like architectural
2543 writes: register width bytes are written and the remainder is
2544 set to zero. */
63bad7b6 2545 memset (reg_buf, 0, register_size (gdbarch, AARCH64_V0_REGNUM));
07b287a0 2546
2547 memcpy (reg_buf, buf, regsize);
2548 regcache->raw_write (v_regnum, reg_buf);
2549}
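/* Editorial example of the zero-extension above (hypothetical session):
   after "set $b0.u = 0xff" the underlying v0 holds 0xff in byte zero and
   zeroes elsewhere, just as an architectural write to B0 would.  */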
2550
2551/* Implement the "pseudo_register_write" gdbarch method. */
2552
2553static void
2554aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2555 int regnum, const gdb_byte *buf)
2556{
63bad7b6 2557 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2558 regnum -= gdbarch_num_regs (gdbarch);
2559
2560 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2561 return aarch64_pseudo_write_1 (gdbarch, regcache,
2562 regnum - AARCH64_Q0_REGNUM, Q_REGISTER_SIZE,
2563 buf);
2564
2565 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2566 return aarch64_pseudo_write_1 (gdbarch, regcache,
2567 regnum - AARCH64_D0_REGNUM, D_REGISTER_SIZE,
2568 buf);
2569
2570 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2571 return aarch64_pseudo_write_1 (gdbarch, regcache,
2572 regnum - AARCH64_S0_REGNUM, S_REGISTER_SIZE,
2573 buf);
2574
2575 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2576 return aarch64_pseudo_write_1 (gdbarch, regcache,
2577 regnum - AARCH64_H0_REGNUM, H_REGISTER_SIZE,
2578 buf);
2579
2580 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2581 return aarch64_pseudo_write_1 (gdbarch, regcache,
2582 regnum - AARCH64_B0_REGNUM, B_REGISTER_SIZE,
2583 buf);
2584
2585 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2586 && regnum < AARCH64_SVE_V0_REGNUM + 32)
2587 return aarch64_pseudo_write_1 (gdbarch, regcache,
2588 regnum - AARCH64_SVE_V0_REGNUM,
2589 V_REGISTER_SIZE, buf);
2590
2591 gdb_assert_not_reached ("regnum out of bounds");
2592}
2593
2594/* Callback function for user_reg_add. */
2595
2596static struct value *
2597value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
2598{
9a3c8263 2599 const int *reg_p = (const int *) baton;
2600
2601 return value_of_register (*reg_p, frame);
2602}
2603\f
2604
2605/* Implement the "software_single_step" gdbarch method, needed to
2606 single step through atomic sequences on AArch64. */
2607
a0ff9e1a 2608static std::vector<CORE_ADDR>
f5ea389a 2609aarch64_software_single_step (struct regcache *regcache)
9404b58f 2610{
ac7936df 2611 struct gdbarch *gdbarch = regcache->arch ();
2612 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2613 const int insn_size = 4;
2614 const int atomic_sequence_length = 16; /* Instruction sequence length. */
0187a92f 2615 CORE_ADDR pc = regcache_read_pc (regcache);
70ab8ccd 2616 CORE_ADDR breaks[2] = { CORE_ADDR_MAX, CORE_ADDR_MAX };
2617 CORE_ADDR loc = pc;
2618 CORE_ADDR closing_insn = 0;
2619 uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
2620 byte_order_for_code);
2621 int index;
2622 int insn_count;
2623 int bc_insn_count = 0; /* Conditional branch instruction count. */
2624 int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed). */
2625 aarch64_inst inst;
2626
561a72d4 2627 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
a0ff9e1a 2628 return {};
2629
2630 /* Look for a Load Exclusive instruction which begins the sequence. */
f77ee802 2631 if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
a0ff9e1a 2632 return {};
2633
2634 for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
2635 {
2636 loc += insn_size;
2637 insn = read_memory_unsigned_integer (loc, insn_size,
2638 byte_order_for_code);
2639
561a72d4 2640 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
a0ff9e1a 2641 return {};
9404b58f 2642 /* Check if the instruction is a conditional branch. */
f77ee802 2643 if (inst.opcode->iclass == condbranch)
9404b58f 2644 {
2645 gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);
2646
9404b58f 2647 if (bc_insn_count >= 1)
a0ff9e1a 2648 return {};
2649
2650 /* It is, so we'll try to set a breakpoint at the destination. */
f77ee802 2651 breaks[1] = loc + inst.operands[0].imm.value;
2652
2653 bc_insn_count++;
2654 last_breakpoint++;
2655 }
2656
2657 /* Look for the Store Exclusive which closes the atomic sequence. */
f77ee802 2658 if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
2659 {
2660 closing_insn = loc;
2661 break;
2662 }
2663 }
2664
2665 /* We didn't find a closing Store Exclusive instruction, fall back. */
2666 if (!closing_insn)
a0ff9e1a 2667 return {};
2668
2669 /* Insert breakpoint after the end of the atomic sequence. */
2670 breaks[0] = loc + insn_size;
2671
2672 /* Check for duplicated breakpoints, and also check that the second
2673 breakpoint is not within the atomic sequence. */
2674 if (last_breakpoint
2675 && (breaks[1] == breaks[0]
2676 || (breaks[1] >= pc && breaks[1] <= closing_insn)))
2677 last_breakpoint = 0;
2678
2679 std::vector<CORE_ADDR> next_pcs;
2680
2681 /* Insert the breakpoint at the end of the sequence, and one at the
2682 destination of the conditional branch, if it exists. */
2683 for (index = 0; index <= last_breakpoint; index++)
a0ff9e1a 2684 next_pcs.push_back (breaks[index]);
9404b58f 2685
93f9a11f 2686 return next_pcs;
2687}
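/* Editorial worked example (illustrative assembly):

     retry:  ldaxr  w0, [x1]        ; load exclusive opens the sequence
             add    w0, w0, #1
             stlxr  w2, w0, [x1]    ; store exclusive closes it
             cbnz   w2, retry

   The scan above stops at the STLXR and places breaks[0] on the
   instruction after it (here the CBNZ).  A conditional branch found
   inside the sequence would add breaks[1] at its destination, unless
   that destination points back into the sequence.  */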
2688
cfba9872 2689struct aarch64_displaced_step_closure : public displaced_step_closure
2690{
2691 /* True when a conditional instruction, such as B.COND, TBZ, etc.,
2692 is being displaced stepped. */
cfba9872 2693 int cond = 0;
2694
2695 /* PC adjustment offset after displaced stepping. */
cfba9872 2696 int32_t pc_adjust = 0;
2697};
2698
2699/* Data when visiting instructions for displaced stepping. */
2700
2701struct aarch64_displaced_step_data
2702{
2703 struct aarch64_insn_data base;
2704
2705 /* The address at which the instruction will be executed. */
2706 CORE_ADDR new_addr;
2707 /* Buffer of instructions to be copied to NEW_ADDR to execute. */
2708 uint32_t insn_buf[DISPLACED_MODIFIED_INSNS];
2709 /* Number of instructions in INSN_BUF. */
2710 unsigned insn_count;
2711 /* Registers when doing displaced stepping. */
2712 struct regcache *regs;
2713
cfba9872 2714 aarch64_displaced_step_closure *dsc;
2715};
2716
2717/* Implementation of aarch64_insn_visitor method "b". */
2718
2719static void
2720aarch64_displaced_step_b (const int is_bl, const int32_t offset,
2721 struct aarch64_insn_data *data)
2722{
2723 struct aarch64_displaced_step_data *dsd
2724 = (struct aarch64_displaced_step_data *) data;
2ac09a5b 2725 int64_t new_offset = data->insn_addr - dsd->new_addr + offset;
2726
2727 if (can_encode_int32 (new_offset, 28))
2728 {
2729 /* Emit B rather than BL, because executing BL on a new address
2730 will get the wrong address into LR. In order to avoid this,
2731 we emit B, and update LR if the instruction is BL. */
2732 emit_b (dsd->insn_buf, 0, new_offset);
2733 dsd->insn_count++;
2734 }
2735 else
2736 {
2737 /* Write NOP. */
2738 emit_nop (dsd->insn_buf);
2739 dsd->insn_count++;
2740 dsd->dsc->pc_adjust = offset;
2741 }
2742
2743 if (is_bl)
2744 {
2745 /* Update LR. */
2746 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
2747 data->insn_addr + 4);
2748 }
2749}
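/* Editorial note: NEW_OFFSET above re-targets the branch from the
   scratch pad back to the original destination.  With hypothetical
   addresses, a B at 0x1000 jumping +0x40 that is copied to 0x3000 must
   be emitted with offset 0x1000 - 0x3000 + 0x40 = -0x1fc0 to reach the
   same target.  */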
2750
2751/* Implementation of aarch64_insn_visitor method "b_cond". */
2752
2753static void
2754aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
2755 struct aarch64_insn_data *data)
2756{
2757 struct aarch64_displaced_step_data *dsd
2758 = (struct aarch64_displaced_step_data *) data;
2759
2760 /* GDB has to fix up the PC after displaced stepping this instruction
2761 differently according to whether the condition is true or false.
2762 Instead of checking COND against the condition flags, we can use
2763 the following instructions, and GDB can tell how to fix up the PC
2764 according to the PC value.
2765
2766 B.COND TAKEN ; If cond is true, then jump to TAKEN.
2767 INSN1 ;
2768 TAKEN:
2769 INSN2
2770 */
2771
2772 emit_bcond (dsd->insn_buf, cond, 8);
2773 dsd->dsc->cond = 1;
2774 dsd->dsc->pc_adjust = offset;
2775 dsd->insn_count = 1;
2776}
2777
2778/* Dynamically allocate a new register. If we know the register
2779 statically, we should make it a global as above instead of using this
2780 helper function. */
2781
2782static struct aarch64_register
2783aarch64_register (unsigned num, int is64)
2784{
2785 return (struct aarch64_register) { num, is64 };
2786}
2787
2788/* Implementation of aarch64_insn_visitor method "cb". */
2789
2790static void
2791aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
2792 const unsigned rn, int is64,
2793 struct aarch64_insn_data *data)
2794{
2795 struct aarch64_displaced_step_data *dsd
2796 = (struct aarch64_displaced_step_data *) data;
2797
2798 /* The offset is out of range for a compare and branch
2799 instruction. We can use the following instructions instead:
2800
2801 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
2802 INSN1 ;
2803 TAKEN:
2804 INSN2
2805 */
2806 emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
2807 dsd->insn_count = 1;
2808 dsd->dsc->cond = 1;
2809 dsd->dsc->pc_adjust = offset;
2810}
2811
2812/* Implementation of aarch64_insn_visitor method "tb". */
2813
2814static void
2815aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
2816 const unsigned rt, unsigned bit,
2817 struct aarch64_insn_data *data)
2818{
2819 struct aarch64_displaced_step_data *dsd
2820 = (struct aarch64_displaced_step_data *) data;
2821
2822 /* The offset is out of range for a test bit and branch
2823 instruction. We can use the following instructions instead:
2824
2825 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
2826 INSN1 ;
2827 TAKEN:
2828 INSN2
2829
2830 */
2831 emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
2832 dsd->insn_count = 1;
2833 dsd->dsc->cond = 1;
2834 dsd->dsc->pc_adjust = offset;
2835}
2836
2837/* Implementation of aarch64_insn_visitor method "adr". */
2838
2839static void
2840aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
2841 const int is_adrp, struct aarch64_insn_data *data)
2842{
2843 struct aarch64_displaced_step_data *dsd
2844 = (struct aarch64_displaced_step_data *) data;
2845 /* We know exactly the address the ADR{P,} instruction will compute.
2846 We can just write it to the destination register. */
2847 CORE_ADDR address = data->insn_addr + offset;
2848
2849 if (is_adrp)
2850 {
2851 /* Clear the lower 12 bits of the offset to get the 4K page. */
2852 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2853 address & ~0xfff);
2854 }
2855 else
2856 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2857 address);
2858
2859 dsd->dsc->pc_adjust = 4;
2860 emit_nop (dsd->insn_buf);
2861 dsd->insn_count = 1;
2862}
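/* Editorial note: ADRP architecturally yields the 4KiB page of the
   computed address, hence the ~0xfff mask above; e.g. a computed address
   of 0x400abc (hypothetical) stores 0x400000 into the destination.  */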
2863
2864/* Implementation of aarch64_insn_visitor method "ldr_literal". */
2865
2866static void
2867aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
2868 const unsigned rt, const int is64,
2869 struct aarch64_insn_data *data)
2870{
2871 struct aarch64_displaced_step_data *dsd
2872 = (struct aarch64_displaced_step_data *) data;
2873 CORE_ADDR address = data->insn_addr + offset;
2874 struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };
2875
2876 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
2877 address);
2878
2879 if (is_sw)
2880 dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
2881 aarch64_register (rt, 1), zero);
2882 else
2883 dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
2884 aarch64_register (rt, 1), zero);
2885
2886 dsd->dsc->pc_adjust = 4;
2887}
2888
2889/* Implementation of aarch64_insn_visitor method "others". */
2890
2891static void
2892aarch64_displaced_step_others (const uint32_t insn,
2893 struct aarch64_insn_data *data)
2894{
2895 struct aarch64_displaced_step_data *dsd
2896 = (struct aarch64_displaced_step_data *) data;
2897
e1c587c3 2898 aarch64_emit_insn (dsd->insn_buf, insn);
2899 dsd->insn_count = 1;
2900
2901 if ((insn & 0xfffffc1f) == 0xd65f0000)
2902 {
2903 /* RET */
2904 dsd->dsc->pc_adjust = 0;
2905 }
2906 else
2907 dsd->dsc->pc_adjust = 4;
2908}
2909
2910static const struct aarch64_insn_visitor visitor =
2911{
2912 aarch64_displaced_step_b,
2913 aarch64_displaced_step_b_cond,
2914 aarch64_displaced_step_cb,
2915 aarch64_displaced_step_tb,
2916 aarch64_displaced_step_adr,
2917 aarch64_displaced_step_ldr_literal,
2918 aarch64_displaced_step_others,
2919};
2920
2921/* Implement the "displaced_step_copy_insn" gdbarch method. */
2922
2923struct displaced_step_closure *
2924aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
2925 CORE_ADDR from, CORE_ADDR to,
2926 struct regcache *regs)
2927{
2928 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2929 uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
2930 struct aarch64_displaced_step_data dsd;
2931 aarch64_inst inst;
2932
561a72d4 2933 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
c86a40c6 2934 return NULL;
2935
2936 /* Look for a Load Exclusive instruction which begins the sequence. */
c86a40c6 2937 if (inst.opcode->iclass == ldstexcl && bit (insn, 22))
2938 {
2939 /* We can't displaced step atomic sequences. */
2940 return NULL;
2941 }
2942
2943 std::unique_ptr<aarch64_displaced_step_closure> dsc
2944 (new aarch64_displaced_step_closure);
2945 dsd.base.insn_addr = from;
2946 dsd.new_addr = to;
2947 dsd.regs = regs;
cfba9872 2948 dsd.dsc = dsc.get ();
034f1a81 2949 dsd.insn_count = 0;
2950 aarch64_relocate_instruction (insn, &visitor,
2951 (struct aarch64_insn_data *) &dsd);
2952 gdb_assert (dsd.insn_count <= DISPLACED_MODIFIED_INSNS);
2953
2954 if (dsd.insn_count != 0)
2955 {
2956 int i;
2957
2958 /* The instruction can be relocated to the scratch pad. Copy the
2959 relocated instruction(s) there. */
2960 for (i = 0; i < dsd.insn_count; i++)
2961 {
2962 if (debug_displaced)
2963 {
2964 debug_printf ("displaced: writing insn ");
2965 debug_printf ("%.8x", dsd.insn_buf[i]);
2966 debug_printf (" at %s\n", paddress (gdbarch, to + i * 4));
2967 }
2968 write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
2969 (ULONGEST) dsd.insn_buf[i]);
2970 }
2971 }
2972 else
2973 {
2974 dsc = NULL;
2975 }
2976
cfba9872 2977 return dsc.release ();
2978}
2979
2980/* Implement the "displaced_step_fixup" gdbarch method. */
2981
2982void
2983aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
cfba9872 2984 struct displaced_step_closure *dsc_,
2985 CORE_ADDR from, CORE_ADDR to,
2986 struct regcache *regs)
2987{
2988 aarch64_displaced_step_closure *dsc = (aarch64_displaced_step_closure *) dsc_;
2989
2990 if (dsc->cond)
2991 {
2992 ULONGEST pc;
2993
2994 regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc);
2995 if (pc - to == 8)
2996 {
2997 /* Condition is true. */
2998 }
2999 else if (pc - to == 4)
3000 {
3001 /* Condition is false. */
3002 dsc->pc_adjust = 4;
3003 }
3004 else
3005 gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
3006 }
3007
3008 if (dsc->pc_adjust != 0)
3009 {
3010 if (debug_displaced)
3011 {
3012 debug_printf ("displaced: fixup: set PC to %s:%d\n",
3013 paddress (gdbarch, from), dsc->pc_adjust);
3014 }
3015 regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
3016 from + dsc->pc_adjust);
3017 }
3018}
3019
3020/* Implement the "displaced_step_hw_singlestep" gdbarch method. */
3021
3022int
3023aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch,
3024 struct displaced_step_closure *closure)
3025{
3026 return 1;
3027}
3028
3029/* Get the correct target description for the given VQ value.
3030 If VQ is zero then it is assumed SVE is not supported.
3031 (It is not possible to set VQ to zero on an SVE system). */
3032
3033const target_desc *
39bfb937 3034aarch64_read_description (uint64_t vq)
da434ccb 3035{
95228a0d 3036 if (vq > AARCH64_MAX_SVE_VQ)
39bfb937 3037 error (_("VQ is %" PRIu64 ", maximum supported value is %d"), vq,
3038 AARCH64_MAX_SVE_VQ);
3039
3040 struct target_desc *tdesc = tdesc_aarch64_list[vq];
da434ccb 3041
3042 if (tdesc == NULL)
3043 {
3044 tdesc = aarch64_create_target_description (vq);
3045 tdesc_aarch64_list[vq] = tdesc;
3046 }
da434ccb 3047
95228a0d 3048 return tdesc;
3049}
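/* Editorial usage sketch:  */
#if 0
  /* Non-SVE target: a VQ of zero selects the base AArch64 description. */
  const target_desc *plain = aarch64_read_description (0);
  /* SVE target with 256-bit vectors: VQ = 256 / 128 = 2.  */
  const target_desc *sve = aarch64_read_description (2);
#endif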
3050
3051/* Return the VQ used when creating the target description TDESC. */
3052
1332a140 3053static uint64_t
3054aarch64_get_tdesc_vq (const struct target_desc *tdesc)
3055{
3056 const struct tdesc_feature *feature_sve;
3057
3058 if (!tdesc_has_registers (tdesc))
3059 return 0;
3060
3061 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
3062
3063 if (feature_sve == nullptr)
3064 return 0;
3065
3066 uint64_t vl = tdesc_register_bitsize (feature_sve,
3067 aarch64_sve_register_names[0]) / 8;
3068 return sve_vq_from_vl (vl);
3069}
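/* Editorial note: VL above is the vector length in bytes, so a
   description whose Z registers are 256 bits wide gives VL = 32, and
   sve_vq_from_vl returns VQ = 2 (VQ counts 128-bit granules).  */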
3070
3071
3072/* Initialize the current architecture based on INFO. If possible,
3073 re-use an architecture from ARCHES, which is a list of
3074 architectures already created during this debugging session.
3075
3076 Called e.g. at program startup, when reading a core file, and when
3077 reading a binary file. */
3078
3079static struct gdbarch *
3080aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
3081{
3082 struct gdbarch_tdep *tdep;
3083 struct gdbarch *gdbarch;
3084 struct gdbarch_list *best_arch;
3085 struct tdesc_arch_data *tdesc_data = NULL;
3086 const struct target_desc *tdesc = info.target_desc;
3087 int i;
07b287a0 3088 int valid_p = 1;
3089 const struct tdesc_feature *feature_core;
3090 const struct tdesc_feature *feature_fpu;
3091 const struct tdesc_feature *feature_sve;
3092 int num_regs = 0;
3093 int num_pseudo_regs = 0;
3094
ba2d2bb2 3095 /* Ensure we always have a target description. */
07b287a0 3096 if (!tdesc_has_registers (tdesc))
ba2d2bb2 3097 tdesc = aarch64_read_description (0);
3098 gdb_assert (tdesc);
3099
3100 feature_core = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");
3101 feature_fpu = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
3102 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
07b287a0 3103
ba2d2bb2 3104 if (feature_core == NULL)
3105 return NULL;
3106
3107 tdesc_data = tdesc_data_alloc ();
3108
ba2d2bb2 3109 /* Validate the description provides the mandatory core R registers
3110 and allocate their numbers. */
3111 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
3112 valid_p &= tdesc_numbered_register (feature_core, tdesc_data,
3113 AARCH64_X0_REGNUM + i,
3114 aarch64_r_register_names[i]);
3115
3116 num_regs = AARCH64_X0_REGNUM + i;
3117
3118 /* Add the V registers. */
3119 if (feature_fpu != NULL)
07b287a0 3120 {
3121 if (feature_sve != NULL)
3122 error (_("Program contains both fpu and SVE features."));
3123
3124 /* Validate the description provides the mandatory V registers
3125 and allocate their numbers. */
07b287a0 3126 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
3127 valid_p &= tdesc_numbered_register (feature_fpu, tdesc_data,
3128 AARCH64_V0_REGNUM + i,
3129 aarch64_v_register_names[i]);
3130
3131 num_regs = AARCH64_V0_REGNUM + i;
ba2d2bb2 3132 }
07b287a0 3133
3134 /* Add the SVE registers. */
3135 if (feature_sve != NULL)
3136 {
3137 /* Validate the description provides the mandatory SVE registers
3138 and allocate their numbers. */
3139 for (i = 0; i < ARRAY_SIZE (aarch64_sve_register_names); i++)
3140 valid_p &= tdesc_numbered_register (feature_sve, tdesc_data,
3141 AARCH64_SVE_Z0_REGNUM + i,
3142 aarch64_sve_register_names[i]);
3143
3144 num_regs = AARCH64_SVE_Z0_REGNUM + i;
3145 num_pseudo_regs += 32; /* add the Vn register pseudos. */
3146 }
3147
3148 if (feature_fpu != NULL || feature_sve != NULL)
3149 {
3150 num_pseudo_regs += 32; /* add the Qn scalar register pseudos */
3151 num_pseudo_regs += 32; /* add the Dn scalar register pseudos */
3152 num_pseudo_regs += 32; /* add the Sn scalar register pseudos */
3153 num_pseudo_regs += 32; /* add the Hn scalar register pseudos */
3154 num_pseudo_regs += 32; /* add the Bn scalar register pseudos */
3155 }
3156
3157 if (!valid_p)
3158 {
3159 tdesc_data_cleanup (tdesc_data);
3160 return NULL;
3161 }
3162
3163 /* AArch64 code is always little-endian. */
3164 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
3165
3166 /* If there is already a candidate, use it. */
3167 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
3168 best_arch != NULL;
3169 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
3170 {
3171 /* Found a match. */
3172 break;
3173 }
3174
3175 if (best_arch != NULL)
3176 {
3177 if (tdesc_data != NULL)
3178 tdesc_data_cleanup (tdesc_data);
3179 return best_arch->gdbarch;
3180 }
3181
8d749320 3182 tdep = XCNEW (struct gdbarch_tdep);
3183 gdbarch = gdbarch_alloc (&info, tdep);
3184
3185 /* This should be low enough for everything. */
3186 tdep->lowest_pc = 0x20;
3187 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
3188 tdep->jb_elt_size = 8;
ba2d2bb2 3189 tdep->vq = aarch64_get_tdesc_vq (tdesc);
3190
3191 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
3192 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
3193
3194 /* Frame handling. */
3195 set_gdbarch_dummy_id (gdbarch, aarch64_dummy_id);
3196 set_gdbarch_unwind_pc (gdbarch, aarch64_unwind_pc);
3197 set_gdbarch_unwind_sp (gdbarch, aarch64_unwind_sp);
3198
3199 /* Advance PC across function entry code. */
3200 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
3201
3202 /* The stack grows downward. */
3203 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
3204
3205 /* Breakpoint manipulation. */
3206 set_gdbarch_breakpoint_kind_from_pc (gdbarch,
3207 aarch64_breakpoint::kind_from_pc);
3208 set_gdbarch_sw_breakpoint_from_kind (gdbarch,
3209 aarch64_breakpoint::bp_from_kind);
07b287a0 3210 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
9404b58f 3211 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
3212
3213 /* Information about registers, etc. */
3214 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
3215 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
3216 set_gdbarch_num_regs (gdbarch, num_regs);
3217
3218 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
3219 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
3220 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
3221 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
3222 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
3223 set_tdesc_pseudo_register_reggroup_p (gdbarch,
3224 aarch64_pseudo_register_reggroup_p);
3225
3226 /* ABI */
3227 set_gdbarch_short_bit (gdbarch, 16);
3228 set_gdbarch_int_bit (gdbarch, 32);
3229 set_gdbarch_float_bit (gdbarch, 32);
3230 set_gdbarch_double_bit (gdbarch, 64);
3231 set_gdbarch_long_double_bit (gdbarch, 128);
3232 set_gdbarch_long_bit (gdbarch, 64);
3233 set_gdbarch_long_long_bit (gdbarch, 64);
3234 set_gdbarch_ptr_bit (gdbarch, 64);
3235 set_gdbarch_char_signed (gdbarch, 0);
53375380 3236 set_gdbarch_wchar_signed (gdbarch, 0);
3237 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
3238 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
3239 set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);
3240
3241 /* Internal <-> external register number maps. */
3242 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
3243
3244 /* Returning results. */
3245 set_gdbarch_return_value (gdbarch, aarch64_return_value);
3246
3247 /* Disassembly. */
3248 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
3249
3250 /* Virtual tables. */
3251 set_gdbarch_vbit_in_delta (gdbarch, 1);
3252
3253 /* Hook in the ABI-specific overrides, if they have been registered. */
3254 info.target_desc = tdesc;
0dba2a6c 3255 info.tdesc_data = tdesc_data;
3256 gdbarch_init_osabi (info, gdbarch);
3257
3258 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
3259
3260 /* Add some default predicates. */
3261 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
3262 dwarf2_append_unwinders (gdbarch);
3263 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
3264
3265 frame_base_set_default (gdbarch, &aarch64_normal_base);
3266
3267 /* Now we have tuned the configuration, set a few final things,
3268 based on what the OS ABI has told us. */
3269
3270 if (tdep->jb_pc >= 0)
3271 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
3272
3273 set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);
3274
3275 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
3276
3277 /* Add standard register aliases. */
3278 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
3279 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
3280 value_of_aarch64_user_reg,
3281 &aarch64_register_aliases[i].regnum);
3282
3283 return gdbarch;
3284}
3285
3286static void
3287aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
3288{
3289 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3290
3291 if (tdep == NULL)
3292 return;
3293
3294 fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
3295 paddress (gdbarch, tdep->lowest_pc));
3296}
3297
0d4c07af 3298#if GDB_SELF_TEST
3299namespace selftests
3300{
3301static void aarch64_process_record_test (void);
3302}
0d4c07af 3303#endif
1e2b521d 3304
3305void
3306_initialize_aarch64_tdep (void)
3307{
3308 gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
3309 aarch64_dump_tdep);
3310
3311 /* Debug this file's internals. */
3312 add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
3313Set AArch64 debugging."), _("\
3314Show AArch64 debugging."), _("\
3315When on, AArch64 specific debugging is enabled."),
3316 NULL,
3317 show_aarch64_debug,
3318 &setdebuglist, &showdebuglist);
3319
3320#if GDB_SELF_TEST
3321 selftests::register_test ("aarch64-analyze-prologue",
3322 selftests::aarch64_analyze_prologue_test);
3323 selftests::register_test ("aarch64-process-record",
3324 selftests::aarch64_process_record_test);
6654d750 3325 selftests::record_xml_tdesc ("aarch64.xml",
95228a0d 3326 aarch64_create_target_description (0));
4d9a9006 3327#endif
07b287a0 3328}
3329
3330/* AArch64 process record-replay related structures, defines etc. */
3331
3332#define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
3333 do \
3334 { \
3335 unsigned int reg_len = LENGTH; \
3336 if (reg_len) \
3337 { \
3338 REGS = XNEWVEC (uint32_t, reg_len); \
3339 memcpy(&REGS[0], &RECORD_BUF[0], sizeof(uint32_t)*LENGTH); \
3340 } \
3341 } \
3342 while (0)
3343
3344#define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
3345 do \
3346 { \
3347 unsigned int mem_len = LENGTH; \
3348 if (mem_len) \
3349 { \
3350 MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
3351 memcpy(&MEMS->len, &RECORD_BUF[0], \
3352 sizeof(struct aarch64_mem_r) * LENGTH); \
3353 } \
3354 } \
3355 while (0)
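/* Editorial usage sketch for REG_ALLOC: a record handler collects the
   registers an instruction writes into a local buffer and then hands
   them over, e.g.

     uint32_t record_buf[2] = { reg_rd, AARCH64_CPSR_REGNUM };
     REG_ALLOC (aarch64_insn_r->aarch64_regs, 2, record_buf);

   which XNEWVECs a two-element uint32_t array and copies the buffer.  */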
3356
3357/* AArch64 record/replay structures and enumerations. */
3358
3359struct aarch64_mem_r
3360{
3361 uint64_t len; /* Record length. */
3362 uint64_t addr; /* Memory address. */
3363};
3364
3365enum aarch64_record_result
3366{
3367 AARCH64_RECORD_SUCCESS,
3368 AARCH64_RECORD_UNSUPPORTED,
3369 AARCH64_RECORD_UNKNOWN
3370};
3371
3372typedef struct insn_decode_record_t
3373{
3374 struct gdbarch *gdbarch;
3375 struct regcache *regcache;
3376 CORE_ADDR this_addr; /* Address of insn to be recorded. */
3377 uint32_t aarch64_insn; /* Insn to be recorded. */
3378 uint32_t mem_rec_count; /* Count of memory records. */
3379 uint32_t reg_rec_count; /* Count of register records. */
3380 uint32_t *aarch64_regs; /* Registers to be recorded. */
3381 struct aarch64_mem_r *aarch64_mems; /* Memory locations to be recorded. */
3382} insn_decode_record;
3383
3384/* Record handler for data processing - register instructions. */
3385
3386static unsigned int
3387aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
3388{
3389 uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
3390 uint32_t record_buf[4];
3391
3392 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3393 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3394 insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);
3395
3396 if (!bit (aarch64_insn_r->aarch64_insn, 28))
3397 {
3398 uint8_t setflags;
3399
3400 /* Logical (shifted register). */
3401 if (insn_bits24_27 == 0x0a)
3402 setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
3403 /* Add/subtract. */
3404 else if (insn_bits24_27 == 0x0b)
3405 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3406 else
3407 return AARCH64_RECORD_UNKNOWN;
3408
3409 record_buf[0] = reg_rd;
3410 aarch64_insn_r->reg_rec_count = 1;
3411 if (setflags)
3412 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3413 }
3414 else
3415 {
3416 if (insn_bits24_27 == 0x0b)
3417 {
3418 /* Data-processing (3 source). */
3419 record_buf[0] = reg_rd;
3420 aarch64_insn_r->reg_rec_count = 1;
3421 }
3422 else if (insn_bits24_27 == 0x0a)
3423 {
3424 if (insn_bits21_23 == 0x00)
3425 {
3426 /* Add/subtract (with carry). */
3427 record_buf[0] = reg_rd;
3428 aarch64_insn_r->reg_rec_count = 1;
3429 if (bit (aarch64_insn_r->aarch64_insn, 29))
3430 {
3431 record_buf[1] = AARCH64_CPSR_REGNUM;
3432 aarch64_insn_r->reg_rec_count = 2;
3433 }
3434 }
3435 else if (insn_bits21_23 == 0x02)
3436 {
3437 /* Conditional compare (register) and conditional compare
3438 (immediate) instructions. */
3439 record_buf[0] = AARCH64_CPSR_REGNUM;
3440 aarch64_insn_r->reg_rec_count = 1;
3441 }
3442 else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
3443 {
3444 /* Conditional select. */
3445 /* Data-processing (2 source). */
3446 /* Data-processing (1 source). */
3447 record_buf[0] = reg_rd;
3448 aarch64_insn_r->reg_rec_count = 1;
3449 }
3450 else
3451 return AARCH64_RECORD_UNKNOWN;
3452 }
3453 }
3454
3455 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3456 record_buf);
3457 return AARCH64_RECORD_SUCCESS;
3458}
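/* Editorial worked example: "adds x0, x1, x2" encodes as 0xab020020.
   Bit 28 is clear, bits 24-27 are 0x0b (add/subtract) and bit 29 (S) is
   set, so the handler above records X0 and then CPSR.  */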
3459
3460/* Record handler for data processing - immediate instructions. */
3461
3462static unsigned int
3463aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
3464{
78cc6c2d 3465 uint8_t reg_rd, insn_bit23, insn_bits24_27, setflags;
3466 uint32_t record_buf[4];
3467
3468 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3469 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3470 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3471
3472 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
3473 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
3474 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
3475 {
3476 record_buf[0] = reg_rd;
3477 aarch64_insn_r->reg_rec_count = 1;
3478 }
3479 else if (insn_bits24_27 == 0x01)
3480 {
3481 /* Add/Subtract (immediate). */
3482 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3483 record_buf[0] = reg_rd;
3484 aarch64_insn_r->reg_rec_count = 1;
3485 if (setflags)
3486 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3487 }
3488 else if (insn_bits24_27 == 0x02 && !insn_bit23)
3489 {
3490 /* Logical (immediate). */
3491 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
3492 record_buf[0] = reg_rd;
3493 aarch64_insn_r->reg_rec_count = 1;
3494 if (setflags)
3495 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3496 }
3497 else
3498 return AARCH64_RECORD_UNKNOWN;
3499
3500 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3501 record_buf);
3502 return AARCH64_RECORD_SUCCESS;
3503}
3504
3505/* Record handler for branch, exception generation and system instructions. */
3506
3507static unsigned int
3508aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
3509{
3510 struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
3511 uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
3512 uint32_t record_buf[4];
3513
3514 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3515 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3516 insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3517
3518 if (insn_bits28_31 == 0x0d)
3519 {
3520 /* Exception generation instructions. */
3521 if (insn_bits24_27 == 0x04)
3522 {
3523 if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
3524 && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
3525 && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
3526 {
3527 ULONGEST svc_number;
3528
3529 regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
3530 &svc_number);
3531 return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
3532 svc_number);
3533 }
3534 else
3535 return AARCH64_RECORD_UNSUPPORTED;
3536 }
3537 /* System instructions. */
3538 else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
3539 {
3540 uint32_t reg_rt, reg_crn;
3541
3542 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3543 reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3544
 3545 /* Record Rt for SYSL and MRS instructions. */
3546 if (bit (aarch64_insn_r->aarch64_insn, 21))
3547 {
3548 record_buf[0] = reg_rt;
3549 aarch64_insn_r->reg_rec_count = 1;
3550 }
 3551 /* Record the CPSR for HINT and MSR (immediate) instructions. */
3552 else if (reg_crn == 0x02 || reg_crn == 0x04)
3553 {
3554 record_buf[0] = AARCH64_CPSR_REGNUM;
3555 aarch64_insn_r->reg_rec_count = 1;
3556 }
3557 }
3558 /* Unconditional branch (register). */
 3559 else if ((insn_bits24_27 & 0x0e) == 0x06)
3560 {
3561 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3562 if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
3563 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3564 }
3565 else
3566 return AARCH64_RECORD_UNKNOWN;
3567 }
3568 /* Unconditional branch (immediate). */
3569 else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
3570 {
3571 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3572 if (bit (aarch64_insn_r->aarch64_insn, 31))
3573 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3574 }
3575 else
3576 /* Compare & branch (immediate), Test & branch (immediate) and
3577 Conditional branch (immediate). */
3578 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3579
3580 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3581 record_buf);
3582 return AARCH64_RECORD_SUCCESS;
3583}
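/* For illustration: bl (e.g. 0x94000000) follows the unconditional
   branch (immediate) path with bit 31 set, recording both the PC and the
   LR, whereas b (0x14000000) records only the PC.  */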
3584
3585/* Record handler for advanced SIMD load and store instructions. */
3586
3587static unsigned int
3588aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
3589{
3590 CORE_ADDR address;
3591 uint64_t addr_offset = 0;
3592 uint32_t record_buf[24];
3593 uint64_t record_buf_mem[24];
3594 uint32_t reg_rn, reg_rt;
3595 uint32_t reg_index = 0, mem_index = 0;
3596 uint8_t opcode_bits, size_bits;
3597
3598 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3599 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3600 size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3601 opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3602 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);
3603
3604 if (record_debug)
 3605 debug_printf ("Process record: Advanced SIMD load/store\n");
3606
3607 /* Load/store single structure. */
3608 if (bit (aarch64_insn_r->aarch64_insn, 24))
3609 {
3610 uint8_t sindex, scale, selem, esize, replicate = 0;
3611 scale = opcode_bits >> 2;
3612 selem = ((opcode_bits & 0x02) |
3613 bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
3614 switch (scale)
3615 {
3616 case 1:
3617 if (size_bits & 0x01)
3618 return AARCH64_RECORD_UNKNOWN;
3619 break;
3620 case 2:
3621 if ((size_bits >> 1) & 0x01)
3622 return AARCH64_RECORD_UNKNOWN;
3623 if (size_bits & 0x01)
3624 {
3625 if (!((opcode_bits >> 1) & 0x01))
3626 scale = 3;
3627 else
3628 return AARCH64_RECORD_UNKNOWN;
3629 }
3630 break;
3631 case 3:
3632 if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
3633 {
3634 scale = size_bits;
3635 replicate = 1;
3636 break;
3637 }
3638 else
3639 return AARCH64_RECORD_UNKNOWN;
3640 default:
3641 break;
3642 }
3643 esize = 8 << scale;
3644 if (replicate)
3645 for (sindex = 0; sindex < selem; sindex++)
3646 {
3647 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3648 reg_rt = (reg_rt + 1) % 32;
3649 }
3650 else
3651 {
3652 for (sindex = 0; sindex < selem; sindex++)
3653 {
3654 if (bit (aarch64_insn_r->aarch64_insn, 22))
3655 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3656 else
3657 {
3658 record_buf_mem[mem_index++] = esize / 8;
3659 record_buf_mem[mem_index++] = address + addr_offset;
3660 }
3661 addr_offset = addr_offset + (esize / 8);
3662 reg_rt = (reg_rt + 1) % 32;
3663 }
3664 }
3665 }
3666 /* Load/store multiple structure. */
3667 else
3668 {
3669 uint8_t selem, esize, rpt, elements;
3670 uint8_t eindex, rindex;
3671
3672 esize = 8 << size_bits;
3673 if (bit (aarch64_insn_r->aarch64_insn, 30))
3674 elements = 128 / esize;
3675 else
3676 elements = 64 / esize;
3677
3678 switch (opcode_bits)
3679 {
 3680 /* LD/ST4 (4 Registers). */
3681 case 0:
3682 rpt = 1;
3683 selem = 4;
3684 break;
 3685 /* LD/ST1 (4 Registers). */
3686 case 2:
3687 rpt = 4;
3688 selem = 1;
3689 break;
 3690 /* LD/ST3 (3 Registers). */
3691 case 4:
3692 rpt = 1;
3693 selem = 3;
3694 break;
 3695 /* LD/ST1 (3 Registers). */
3696 case 6:
3697 rpt = 3;
3698 selem = 1;
3699 break;
 3700 /* LD/ST1 (1 Register). */
3701 case 7:
3702 rpt = 1;
3703 selem = 1;
3704 break;
 3705 /* LD/ST2 (2 Registers). */
3706 case 8:
3707 rpt = 1;
3708 selem = 2;
3709 break;
 3710 /* LD/ST1 (2 Registers). */
3711 case 10:
3712 rpt = 2;
3713 selem = 1;
3714 break;
3715 default:
3716 return AARCH64_RECORD_UNSUPPORTED;
3717 break;
3718 }
3719 for (rindex = 0; rindex < rpt; rindex++)
3720 for (eindex = 0; eindex < elements; eindex++)
3721 {
3722 uint8_t reg_tt, sindex;
3723 reg_tt = (reg_rt + rindex) % 32;
3724 for (sindex = 0; sindex < selem; sindex++)
3725 {
3726 if (bit (aarch64_insn_r->aarch64_insn, 22))
3727 record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
3728 else
3729 {
3730 record_buf_mem[mem_index++] = esize / 8;
3731 record_buf_mem[mem_index++] = address + addr_offset;
3732 }
3733 addr_offset = addr_offset + (esize / 8);
3734 reg_tt = (reg_tt + 1) % 32;
3735 }
3736 }
3737 }
3738
3739 if (bit (aarch64_insn_r->aarch64_insn, 23))
3740 record_buf[reg_index++] = reg_rn;
3741
3742 aarch64_insn_r->reg_rec_count = reg_index;
3743 aarch64_insn_r->mem_rec_count = mem_index / 2;
3744 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3745 record_buf_mem);
3746 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3747 record_buf);
3748 return AARCH64_RECORD_SUCCESS;
3749}
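/* A worked example of the multiple-structure path above (illustrative
   encoding): st1 {v0.16b}, [x0] encodes as 0x4c007000 -- bit 24 is
   clear, opcode is 0x7 (one register, one element per structure) and Q
   (bit 30) is set, so the loops record sixteen one-byte memory locations
   starting at the address in x0.  */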
3750
3751/* Record handler for load and store instructions. */
3752
3753static unsigned int
3754aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
3755{
3756 uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
3757 uint8_t insn_bit23, insn_bit21;
3758 uint8_t opc, size_bits, ld_flag, vector_flag;
3759 uint32_t reg_rn, reg_rt, reg_rt2;
3760 uint64_t datasize, offset;
3761 uint32_t record_buf[8];
3762 uint64_t record_buf_mem[8];
3763 CORE_ADDR address;
3764
3765 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3766 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3767 insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
3768 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3769 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3770 ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
3771 vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
3772 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3773 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3774 reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
3775 size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);
3776
3777 /* Load/store exclusive. */
3778 if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
3779 {
3780 if (record_debug)
 3781 debug_printf ("Process record: load/store exclusive\n");
3782
3783 if (ld_flag)
3784 {
3785 record_buf[0] = reg_rt;
3786 aarch64_insn_r->reg_rec_count = 1;
3787 if (insn_bit21)
3788 {
3789 record_buf[1] = reg_rt2;
3790 aarch64_insn_r->reg_rec_count = 2;
3791 }
3792 }
3793 else
3794 {
3795 if (insn_bit21)
3796 datasize = (8 << size_bits) * 2;
3797 else
3798 datasize = (8 << size_bits);
3799 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3800 &address);
3801 record_buf_mem[0] = datasize / 8;
3802 record_buf_mem[1] = address;
3803 aarch64_insn_r->mem_rec_count = 1;
3804 if (!insn_bit23)
3805 {
3806 /* Save register rs. */
3807 record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
3808 aarch64_insn_r->reg_rec_count = 1;
3809 }
3810 }
3811 }
 3812 /* Load register (literal) instructions. */
3813 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
3814 {
3815 if (record_debug)
 3816 debug_printf ("Process record: load register (literal)\n");
3817 if (vector_flag)
3818 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3819 else
3820 record_buf[0] = reg_rt;
3821 aarch64_insn_r->reg_rec_count = 1;
3822 }
 3823 /* Load/store pair instructions. */
3824 else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
3825 {
3826 if (record_debug)
 3827 debug_printf ("Process record: load/store pair\n");
3828
3829 if (ld_flag)
3830 {
3831 if (vector_flag)
3832 {
3833 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3834 record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
3835 }
3836 else
3837 {
3838 record_buf[0] = reg_rt;
3839 record_buf[1] = reg_rt2;
3840 }
3841 aarch64_insn_r->reg_rec_count = 2;
3842 }
3843 else
3844 {
3845 uint16_t imm7_off;
3846 imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
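/* imm7 is a signed, scaled 7-bit offset: the lines below form its
   two's-complement magnitude, shift it by the access size, and use the
   sign bit (0x40) to pick the direction of the adjustment.  */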
3847 if (!vector_flag)
3848 size_bits = size_bits >> 1;
3849 datasize = 8 << (2 + size_bits);
3850 offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
3851 offset = offset << (2 + size_bits);
3852 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3853 &address);
3854 if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
3855 {
3856 if (imm7_off & 0x40)
3857 address = address - offset;
3858 else
3859 address = address + offset;
3860 }
3861
3862 record_buf_mem[0] = datasize / 8;
3863 record_buf_mem[1] = address;
3864 record_buf_mem[2] = datasize / 8;
3865 record_buf_mem[3] = address + (datasize / 8);
3866 aarch64_insn_r->mem_rec_count = 2;
3867 }
3868 if (bit (aarch64_insn_r->aarch64_insn, 23))
3869 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3870 }
3871 /* Load/store register (unsigned immediate) instructions. */
3872 else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
3873 {
3874 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3875 if (!(opc >> 1))
3876 {
3877 if (opc & 0x01)
3878 ld_flag = 0x01;
3879 else
3880 ld_flag = 0x0;
3881 }
 3882 else
 3883 {
3884 if (size_bits == 0x3 && vector_flag == 0x0 && opc == 0x2)
3885 {
3886 /* PRFM (immediate) */
3887 return AARCH64_RECORD_SUCCESS;
3888 }
3889 else if (size_bits == 0x2 && vector_flag == 0x0 && opc == 0x2)
3890 {
3891 /* LDRSW (immediate) */
3892 ld_flag = 0x1;
3893 }
 3894 else
3895 {
3896 if (opc & 0x01)
3897 ld_flag = 0x01;
3898 else
3899 ld_flag = 0x0;
3900 }
 3901 }
3902
3903 if (record_debug)
3904 {
3905 debug_printf ("Process record: load/store (unsigned immediate):"
3906 " size %x V %d opc %x\n", size_bits, vector_flag,
3907 opc);
3908 }
3909
3910 if (!ld_flag)
3911 {
3912 offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
3913 datasize = 8 << size_bits;
3914 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3915 &address);
3916 offset = offset << size_bits;
3917 address = address + offset;
3918
3919 record_buf_mem[0] = datasize >> 3;
3920 record_buf_mem[1] = address;
3921 aarch64_insn_r->mem_rec_count = 1;
3922 }
3923 else
3924 {
3925 if (vector_flag)
3926 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3927 else
3928 record_buf[0] = reg_rt;
3929 aarch64_insn_r->reg_rec_count = 1;
3930 }
3931 }
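/* For illustration: ldr x0, [x1] encodes as 0xf9400020 (size 0x3, opc
   0x1), so ld_flag is set and only x0 is recorded; the matching str x0,
   [x1] (0xf9000020) instead records one 8-byte memory entry at the
   address read from x1.  */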
3932 /* Load/store register (register offset) instructions. */
3933 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3934 && insn_bits10_11 == 0x02 && insn_bit21)
3935 {
3936 if (record_debug)
 3937 debug_printf ("Process record: load/store (register offset)\n");
3938 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3939 if (!(opc >> 1))
3940 if (opc & 0x01)
3941 ld_flag = 0x01;
3942 else
3943 ld_flag = 0x0;
3944 else
3945 if (size_bits != 0x03)
3946 ld_flag = 0x01;
3947 else
3948 return AARCH64_RECORD_UNKNOWN;
3949
3950 if (!ld_flag)
3951 {
3952 ULONGEST reg_rm_val;
3953
3954 regcache_raw_read_unsigned (aarch64_insn_r->regcache,
3955 bits (aarch64_insn_r->aarch64_insn, 16, 20), &reg_rm_val);
3956 if (bit (aarch64_insn_r->aarch64_insn, 12))
3957 offset = reg_rm_val << size_bits;
3958 else
3959 offset = reg_rm_val;
3960 datasize = 8 << size_bits;
3961 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3962 &address);
3963 address = address + offset;
3964 record_buf_mem[0] = datasize >> 3;
3965 record_buf_mem[1] = address;
3966 aarch64_insn_r->mem_rec_count = 1;
3967 }
3968 else
3969 {
3970 if (vector_flag)
3971 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3972 else
3973 record_buf[0] = reg_rt;
3974 aarch64_insn_r->reg_rec_count = 1;
3975 }
3976 }
3977 /* Load/store register (immediate and unprivileged) instructions. */
3978 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3979 && !insn_bit21)
3980 {
3981 if (record_debug)
3982 {
3983 debug_printf ("Process record: load/store "
3984 "(immediate and unprivileged)\n");
3985 }
3986 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3987 if (!(opc >> 1))
3988 if (opc & 0x01)
3989 ld_flag = 0x01;
3990 else
3991 ld_flag = 0x0;
3992 else
3993 if (size_bits != 0x03)
3994 ld_flag = 0x01;
3995 else
3996 return AARCH64_RECORD_UNKNOWN;
3997
3998 if (!ld_flag)
3999 {
4000 uint16_t imm9_off;
4001 imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
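/* imm9 is a signed 9-bit offset: bit 0x0100 is its sign, used below both
   to form the two's-complement magnitude and to choose whether the
   offset is subtracted from or added to the base address.  */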
4002 offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
4003 datasize = 8 << size_bits;
4004 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4005 &address);
4006 if (insn_bits10_11 != 0x01)
4007 {
4008 if (imm9_off & 0x0100)
4009 address = address - offset;
4010 else
4011 address = address + offset;
4012 }
4013 record_buf_mem[0] = datasize >> 3;
4014 record_buf_mem[1] = address;
4015 aarch64_insn_r->mem_rec_count = 1;
4016 }
4017 else
4018 {
4019 if (vector_flag)
4020 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4021 else
4022 record_buf[0] = reg_rt;
4023 aarch64_insn_r->reg_rec_count = 1;
4024 }
4025 if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
4026 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
4027 }
4028 /* Advanced SIMD load/store instructions. */
4029 else
4030 return aarch64_record_asimd_load_store (aarch64_insn_r);
4031
4032 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
4033 record_buf_mem);
4034 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4035 record_buf);
4036 return AARCH64_RECORD_SUCCESS;
4037}
4038
4039/* Record handler for data processing SIMD and floating point instructions. */
4040
4041static unsigned int
4042aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
4043{
4044 uint8_t insn_bit21, opcode, rmode, reg_rd;
4045 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
4046 uint8_t insn_bits11_14;
4047 uint32_t record_buf[2];
4048
4049 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
4050 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
4051 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
4052 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
4053 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
4054 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
4055 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
4056 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
4057 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
4058
4059 if (record_debug)
 4060 debug_printf ("Process record: data processing SIMD/FP: ");
4061
4062 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
4063 {
4064 /* Floating point - fixed point conversion instructions. */
4065 if (!insn_bit21)
4066 {
4067 if (record_debug)
 4068 debug_printf ("FP - fixed point conversion");
4069
4070 if ((opcode >> 1) == 0x0 && rmode == 0x03)
4071 record_buf[0] = reg_rd;
4072 else
4073 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4074 }
4075 /* Floating point - conditional compare instructions. */
4076 else if (insn_bits10_11 == 0x01)
4077 {
4078 if (record_debug)
 4079 debug_printf ("FP - conditional compare");
4080
4081 record_buf[0] = AARCH64_CPSR_REGNUM;
4082 }
4083 /* Floating point - data processing (2-source) and
4084 conditional select instructions. */
4085 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
4086 {
4087 if (record_debug)
 4088 debug_printf ("FP - DP (2-source)");
4089
4090 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4091 }
4092 else if (insn_bits10_11 == 0x00)
4093 {
4094 /* Floating point - immediate instructions. */
4095 if ((insn_bits12_15 & 0x01) == 0x01
4096 || (insn_bits12_15 & 0x07) == 0x04)
4097 {
4098 if (record_debug)
 4099 debug_printf ("FP - immediate");
4100 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4101 }
4102 /* Floating point - compare instructions. */
4103 else if ((insn_bits12_15 & 0x03) == 0x02)
4104 {
4105 if (record_debug)
 4106 debug_printf ("FP - compare");
4107 record_buf[0] = AARCH64_CPSR_REGNUM;
4108 }
4109 /* Floating point - integer conversions instructions. */
 4110 else if (insn_bits12_15 == 0x00)
4111 {
4112 /* Convert float to integer instruction. */
4113 if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
4114 {
4115 if (record_debug)
 4116 debug_printf ("float to int conversion");
4117
4118 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4119 }
4120 /* Convert integer to float instruction. */
4121 else if ((opcode >> 1) == 0x01 && !rmode)
4122 {
4123 if (record_debug)
 4124 debug_printf ("int to float conversion");
4125
4126 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4127 }
4128 /* Move float to integer instruction. */
4129 else if ((opcode >> 1) == 0x03)
4130 {
4131 if (record_debug)
 4132 debug_printf ("move float to int");
4133
4134 if (!(opcode & 0x01))
4135 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4136 else
4137 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4138 }
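/* For illustration (assuming standard A64 FMOV general encodings): fmov
   x0, d1 encodes as 0x9e660020 with opcode 0x6, so this branch records
   the general register x0; with opcode bit 0 set (fmov d1, x0,
   0x9e670001) the destination recorded is a vector register instead.  */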
4139 else
4140 return AARCH64_RECORD_UNKNOWN;
 4141 }
4142 else
4143 return AARCH64_RECORD_UNKNOWN;
 4144 }
4145 else
4146 return AARCH64_RECORD_UNKNOWN;
4147 }
4148 else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
4149 {
4150 if (record_debug)
 4151 debug_printf ("SIMD copy");
4152
4153 /* Advanced SIMD copy instructions. */
4154 if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
4155 && !bit (aarch64_insn_r->aarch64_insn, 15)
4156 && bit (aarch64_insn_r->aarch64_insn, 10))
4157 {
4158 if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
4159 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4160 else
4161 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4162 }
4163 else
4164 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4165 }
4166 /* All remaining floating point or advanced SIMD instructions. */
4167 else
4168 {
4169 if (record_debug)
 4170 debug_printf ("all remain");
4171
4172 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4173 }
4174
4175 if (record_debug)
 4176 debug_printf ("\n");
4177
4178 aarch64_insn_r->reg_rec_count++;
4179 gdb_assert (aarch64_insn_r->reg_rec_count == 1);
4180 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4181 record_buf);
4182 return AARCH64_RECORD_SUCCESS;
4183}
4184
4185/* Decode the type of the given instruction and invoke its record handler. */
4186
4187static unsigned int
4188aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
4189{
4190 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
4191
4192 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
4193 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
4194 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
4195 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
4196
4197 /* Data processing - immediate instructions. */
4198 if (!ins_bit26 && !ins_bit27 && ins_bit28)
4199 return aarch64_record_data_proc_imm (aarch64_insn_r);
4200
4201 /* Branch, exception generation and system instructions. */
4202 if (ins_bit26 && !ins_bit27 && ins_bit28)
4203 return aarch64_record_branch_except_sys (aarch64_insn_r);
4204
4205 /* Load and store instructions. */
4206 if (!ins_bit25 && ins_bit27)
4207 return aarch64_record_load_store (aarch64_insn_r);
4208
4209 /* Data processing - register instructions. */
4210 if (ins_bit25 && !ins_bit26 && ins_bit27)
4211 return aarch64_record_data_proc_reg (aarch64_insn_r);
4212
4213 /* Data processing - SIMD and floating point instructions. */
4214 if (ins_bit25 && ins_bit26 && ins_bit27)
4215 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
4216
4217 return AARCH64_RECORD_UNSUPPORTED;
4218}
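/* In summary, the checks above dispatch on bits 28-27-26-25 as follows
   (x = don't care):

     1 0 0 x  data processing - immediate
     1 0 1 x  branch, exception generation and system
     x 1 x 0  load and store
     x 1 0 1  data processing - register
     x 1 1 1  data processing - SIMD and floating point  */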
4219
4220/* Cleans up local record registers and memory allocations. */
4221
4222static void
4223deallocate_reg_mem (insn_decode_record *record)
4224{
4225 xfree (record->aarch64_regs);
4226 xfree (record->aarch64_mems);
4227}
4228
4229#if GDB_SELF_TEST
4230namespace selftests {
4231
4232static void
4233aarch64_process_record_test (void)
4234{
4235 struct gdbarch_info info;
4236 uint32_t ret;
4237
4238 gdbarch_info_init (&info);
4239 info.bfd_arch_info = bfd_scan_arch ("aarch64");
4240
4241 struct gdbarch *gdbarch = gdbarch_find_by_info (info);
4242 SELF_CHECK (gdbarch != NULL);
4243
4244 insn_decode_record aarch64_record;
4245
4246 memset (&aarch64_record, 0, sizeof (insn_decode_record));
4247 aarch64_record.regcache = NULL;
4248 aarch64_record.this_addr = 0;
4249 aarch64_record.gdbarch = gdbarch;
4250
4251 /* 20 00 80 f9 prfm pldl1keep, [x1] */
4252 aarch64_record.aarch64_insn = 0xf9800020;
4253 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4254 SELF_CHECK (ret == AARCH64_RECORD_SUCCESS);
4255 SELF_CHECK (aarch64_record.reg_rec_count == 0);
4256 SELF_CHECK (aarch64_record.mem_rec_count == 0);
4257
4258 deallocate_reg_mem (&aarch64_record);
4259}
4260
4261} // namespace selftests
4262#endif /* GDB_SELF_TEST */
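/* A further case one might add to the test above, assuming the same
   NULL-regcache setup (a sketch, not part of the original test):

     aarch64_record.aarch64_insn = 0xb1000420;  adds x0, x1, #1
     ret = aarch64_record_decode_insn_handler (&aarch64_record);
     SELF_CHECK (ret == AARCH64_RECORD_SUCCESS);
     SELF_CHECK (aarch64_record.reg_rec_count == 2);  x0 and the CPSR
     deallocate_reg_mem (&aarch64_record);  */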
4263
4264/* Parse the current instruction, and record the values of the registers
4265 and memory that it will change in record_arch_list.  Return -1 if
4266 something goes wrong. */
4267
4268int
4269aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
4270 CORE_ADDR insn_addr)
4271{
4272 uint32_t rec_no = 0;
4273 uint8_t insn_size = 4;
4274 uint32_t ret = 0;
4275 gdb_byte buf[insn_size];
4276 insn_decode_record aarch64_record;
4277
4278 memset (&buf[0], 0, insn_size);
4279 memset (&aarch64_record, 0, sizeof (insn_decode_record));
4280 target_read_memory (insn_addr, &buf[0], insn_size);
4281 aarch64_record.aarch64_insn
4282 = (uint32_t) extract_unsigned_integer (&buf[0],
4283 insn_size,
4284 gdbarch_byte_order (gdbarch));
4285 aarch64_record.regcache = regcache;
4286 aarch64_record.this_addr = insn_addr;
4287 aarch64_record.gdbarch = gdbarch;
4288
4289 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4290 if (ret == AARCH64_RECORD_UNSUPPORTED)
4291 {
4292 printf_unfiltered (_("Process record does not support instruction "
4293 "0x%0x at address %s.\n"),
4294 aarch64_record.aarch64_insn,
4295 paddress (gdbarch, insn_addr));
4296 ret = -1;
4297 }
4298
4299 if (0 == ret)
4300 {
4301 /* Record registers. */
4302 record_full_arch_list_add_reg (aarch64_record.regcache,
4303 AARCH64_PC_REGNUM);
4304 /* Always record register CPSR. */
4305 record_full_arch_list_add_reg (aarch64_record.regcache,
4306 AARCH64_CPSR_REGNUM);
4307 if (aarch64_record.aarch64_regs)
4308 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
4309 if (record_full_arch_list_add_reg (aarch64_record.regcache,
4310 aarch64_record.aarch64_regs[rec_no]))
4311 ret = -1;
4312
4313 /* Record memories. */
4314 if (aarch64_record.aarch64_mems)
4315 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
4316 if (record_full_arch_list_add_mem
4317 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
4318 aarch64_record.aarch64_mems[rec_no].len))
4319 ret = -1;
4320
4321 if (record_full_arch_list_add_end ())
4322 ret = -1;
4323 }
4324
4325 deallocate_reg_mem (&aarch64_record);
4326 return ret;
4327}