gdb/aarch64-tdep.c
/* Common target dependent code for GDB on AArch64 systems.

   Copyright (C) 2009-2018 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"

#include "frame.h"
#include "inferior.h"
#include "gdbcmd.h"
#include "gdbcore.h"
#include "dis-asm.h"
#include "regcache.h"
#include "reggroups.h"
#include "value.h"
#include "arch-utils.h"
#include "osabi.h"
#include "frame-unwind.h"
#include "frame-base.h"
#include "trad-frame.h"
#include "objfiles.h"
#include "dwarf2-frame.h"
#include "gdbtypes.h"
#include "prologue-value.h"
#include "target-descriptions.h"
#include "user-regs.h"
#include "language.h"
#include "infcall.h"
#include "ax.h"
#include "ax-gdb.h"
#include "selftest.h"

#include "aarch64-tdep.h"

#include "elf-bfd.h"
#include "elf/aarch64.h"

#include "vec.h"

#include "record.h"
#include "record-full.h"
#include "arch/aarch64-insn.h"

#include "opcode/aarch64.h"
#include <algorithm>

#define submask(x) ((1L << ((x) + 1)) - 1)
#define bit(obj,st) (((obj) >> (st)) & 1)
#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))

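/* Worked example for the helpers above (illustrative, not from the
   original source): bits () extracts an inclusive bit range, so

     bits (0x12345678, 8, 15) == (0x12345678 >> 8) & submask (7) == 0x56

   where submask (7) == 0xff, i.e. the low eight bits set, and
   bit (0x12345678, 4) == 1 picks out a single bit.  */
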
/* A Homogeneous Floating-Point or Short-Vector Aggregate may have at most
   four members.  */
#define HA_MAX_NUM_FLDS 4

/* All possible aarch64 target descriptors.  */
struct target_desc *tdesc_aarch64_list[AARCH64_MAX_SVE_VQ + 1];

/* The standard register names, and all the valid aliases for them.  */
static const struct
{
  const char *const name;
  int regnum;
} aarch64_register_aliases[] =
{
  /* 64-bit register names.  */
  {"fp", AARCH64_FP_REGNUM},
  {"lr", AARCH64_LR_REGNUM},
  {"sp", AARCH64_SP_REGNUM},

  /* 32-bit register names.  */
  {"w0", AARCH64_X0_REGNUM + 0},
  {"w1", AARCH64_X0_REGNUM + 1},
  {"w2", AARCH64_X0_REGNUM + 2},
  {"w3", AARCH64_X0_REGNUM + 3},
  {"w4", AARCH64_X0_REGNUM + 4},
  {"w5", AARCH64_X0_REGNUM + 5},
  {"w6", AARCH64_X0_REGNUM + 6},
  {"w7", AARCH64_X0_REGNUM + 7},
  {"w8", AARCH64_X0_REGNUM + 8},
  {"w9", AARCH64_X0_REGNUM + 9},
  {"w10", AARCH64_X0_REGNUM + 10},
  {"w11", AARCH64_X0_REGNUM + 11},
  {"w12", AARCH64_X0_REGNUM + 12},
  {"w13", AARCH64_X0_REGNUM + 13},
  {"w14", AARCH64_X0_REGNUM + 14},
  {"w15", AARCH64_X0_REGNUM + 15},
  {"w16", AARCH64_X0_REGNUM + 16},
  {"w17", AARCH64_X0_REGNUM + 17},
  {"w18", AARCH64_X0_REGNUM + 18},
  {"w19", AARCH64_X0_REGNUM + 19},
  {"w20", AARCH64_X0_REGNUM + 20},
  {"w21", AARCH64_X0_REGNUM + 21},
  {"w22", AARCH64_X0_REGNUM + 22},
  {"w23", AARCH64_X0_REGNUM + 23},
  {"w24", AARCH64_X0_REGNUM + 24},
  {"w25", AARCH64_X0_REGNUM + 25},
  {"w26", AARCH64_X0_REGNUM + 26},
  {"w27", AARCH64_X0_REGNUM + 27},
  {"w28", AARCH64_X0_REGNUM + 28},
  {"w29", AARCH64_X0_REGNUM + 29},
  {"w30", AARCH64_X0_REGNUM + 30},

  /* specials */
  {"ip0", AARCH64_X0_REGNUM + 16},
  {"ip1", AARCH64_X0_REGNUM + 17}
};

/* The required core 'R' registers.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM!  */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  "pc", "cpsr"
};

/* The FP/SIMD 'V' registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM!  */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  "fpsr",
  "fpcr"
};

/* The SVE 'Z' and 'P' registers.  */
static const char *const aarch64_sve_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_SVE_Z0_REGNUM!  */
  "z0", "z1", "z2", "z3",
  "z4", "z5", "z6", "z7",
  "z8", "z9", "z10", "z11",
  "z12", "z13", "z14", "z15",
  "z16", "z17", "z18", "z19",
  "z20", "z21", "z22", "z23",
  "z24", "z25", "z26", "z27",
  "z28", "z29", "z30", "z31",
  "fpsr", "fpcr",
  "p0", "p1", "p2", "p3",
  "p4", "p5", "p6", "p7",
  "p8", "p9", "p10", "p11",
  "p12", "p13", "p14", "p15",
  "ffr", "vg"
};

/* AArch64 prologue cache structure.  */
struct aarch64_prologue_cache
{
  /* The program counter at the start of the function.  It is used to
     identify this frame as a prologue frame.  */
  CORE_ADDR func;

  /* The program counter at the time this frame was created; i.e. where
     this function was called from.  It is used to identify this frame as a
     stub frame.  */
  CORE_ADDR prev_pc;

  /* The stack pointer at the time this frame was created; i.e. the
     caller's stack pointer when this function was called.  It is used
     to identify this frame.  */
  CORE_ADDR prev_sp;

  /* Is the target available to read from?  */
  int available_p;

  /* The frame base for this frame is just prev_sp - frame size.
     FRAMESIZE is the distance from the frame pointer to the
     initial stack pointer.  */
  int framesize;

  /* The register used to hold the frame pointer for this frame.  */
  int framereg;

  /* Saved register offsets.  */
  struct trad_frame_saved_reg *saved_regs;
};

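/* For example (illustrative): if the caller's SP was 0x7ffffff0 and the
   prologue dropped the stack by 48 bytes, then prev_sp == 0x7ffffff0,
   framesize == 48, and the frame base is 0x7ffffff0 - 48 == 0x7fffffc0.  */
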
static void
show_aarch64_debug (struct ui_file *file, int from_tty,
                    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
}

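/* This is the show hook for GDB's "set/show debug aarch64" setting.
   Illustrative session (assuming the setting is registered elsewhere in
   this file):

     (gdb) set debug aarch64 1
     (gdb) show debug aarch64
     AArch64 debugging is on.

   With the flag set, the prologue scanner and argument marshalling code
   below emit their debug_printf traces.  */
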
namespace {

/* Abstract instruction reader.  */

class abstract_instruction_reader
{
public:
  /* Read in one instruction.  */
  virtual ULONGEST read (CORE_ADDR memaddr, int len,
                         enum bfd_endian byte_order) = 0;
};

/* Instruction reader from real target.  */

class instruction_reader : public abstract_instruction_reader
{
 public:
  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    override
  {
    return read_code_unsigned_integer (memaddr, len, byte_order);
  }
};

} // namespace

/* Analyze a prologue, looking for a recognizable stack frame
   and frame pointer.  Scan until we encounter a store that could
   clobber the stack frame unexpectedly, or an unknown instruction.  */

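/* For illustration (a hypothetical prologue, not from the original source),
   the scanner below recognizes sequences such as:

     stp x29, x30, [sp, #-32]!   ; push FP/LR pair, SP -= 32
     mov x29, sp                 ; establish the frame pointer
     str x19, [sp, #16]          ; save a callee-saved register

   leaving CACHE with framereg == x29, framesize == 32, and saved-register
   offsets of -32 (x29), -24 (x30) and -16 (x19) relative to the caller's
   SP.  */
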
static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
                          CORE_ADDR start, CORE_ADDR limit,
                          struct aarch64_prologue_cache *cache,
                          abstract_instruction_reader& reader)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  int i;
  /* Track X registers and D registers in prologue.  */
  pv_t regs[AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT];

  for (i = 0; i < AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT; i++)
    regs[i] = pv_register (i, 0);
  pv_area stack (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));

  for (; start < limit; start += 4)
    {
      uint32_t insn;
      aarch64_inst inst;

      insn = reader.read (start, 4, byte_order_for_code);

      if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
        break;

      if (inst.opcode->iclass == addsub_imm
          && (inst.opcode->op == OP_ADD
              || strcmp ("sub", inst.opcode->name) == 0))
        {
          unsigned rd = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].reg.regno;

          gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM);

          if (inst.opcode->op == OP_ADD)
            {
              regs[rd] = pv_add_constant (regs[rn],
                                          inst.operands[2].imm.value);
            }
          else
            {
              regs[rd] = pv_add_constant (regs[rn],
                                          -inst.operands[2].imm.value);
            }
        }
      else if (inst.opcode->iclass == pcreladdr
               && inst.operands[1].type == AARCH64_OPND_ADDR_ADRP)
        {
          gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);

          regs[inst.operands[0].reg.regno] = pv_unknown ();
        }
      else if (inst.opcode->iclass == branch_imm)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == condbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == branch_reg)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == compbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->op == OP_MOVZ)
        {
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
          regs[inst.operands[0].reg.regno] = pv_unknown ();
        }
      else if (inst.opcode->iclass == log_shift
               && strcmp (inst.opcode->name, "orr") == 0)
        {
          unsigned rd = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].reg.regno;
          unsigned rm = inst.operands[2].reg.regno;

          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT);

          if (inst.operands[2].shifter.amount == 0
              && rn == AARCH64_SP_REGNUM)
            regs[rd] = regs[rm];
          else
            {
              if (aarch64_debug)
                {
                  debug_printf ("aarch64: prologue analysis gave up "
                                "addr=%s opcode=0x%x (orr x register)\n",
                                core_addr_to_string_nz (start), insn);
                }
              break;
            }
        }
      else if (inst.opcode->op == OP_STUR)
        {
          unsigned rt = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].addr.base_regno;
          int is64
            = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);

          gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
          gdb_assert (!inst.operands[1].addr.offset.is_reg);

          stack.store (pv_add_constant (regs[rn],
                                        inst.operands[1].addr.offset.imm),
                       is64 ? 8 : 4, regs[rt]);
        }
      else if ((inst.opcode->iclass == ldstpair_off
                || (inst.opcode->iclass == ldstpair_indexed
                    && inst.operands[2].addr.preind))
               && strcmp ("stp", inst.opcode->name) == 0)
        {
          /* STP with addressing mode Pre-indexed and Base register.  */
          unsigned rt1;
          unsigned rt2;
          unsigned rn = inst.operands[2].addr.base_regno;
          int32_t imm = inst.operands[2].addr.offset.imm;

          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
                      || inst.operands[0].type == AARCH64_OPND_Ft);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2
                      || inst.operands[1].type == AARCH64_OPND_Ft2);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7);
          gdb_assert (!inst.operands[2].addr.offset.is_reg);

          /* If recording this store would invalidate the store area
             (perhaps because rn is not known) then we should abandon
             further prologue analysis.  */
          if (stack.store_would_trash (pv_add_constant (regs[rn], imm)))
            break;

          if (stack.store_would_trash (pv_add_constant (regs[rn], imm + 8)))
            break;

          rt1 = inst.operands[0].reg.regno;
          rt2 = inst.operands[1].reg.regno;
          if (inst.operands[0].type == AARCH64_OPND_Ft)
            {
              /* Only the bottom 64 bits of each V register (the D
                 register) need to be preserved.  */
              gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
              rt1 += AARCH64_X_REGISTER_COUNT;
              rt2 += AARCH64_X_REGISTER_COUNT;
            }

          stack.store (pv_add_constant (regs[rn], imm), 8,
                       regs[rt1]);
          stack.store (pv_add_constant (regs[rn], imm + 8), 8,
                       regs[rt2]);

          if (inst.operands[2].addr.writeback)
            regs[rn] = pv_add_constant (regs[rn], imm);

        }
      else if ((inst.opcode->iclass == ldst_imm9 /* Signed immediate.  */
                || (inst.opcode->iclass == ldst_pos /* Unsigned immediate.  */
                    && (inst.opcode->op == OP_STR_POS
                        || inst.opcode->op == OP_STRF_POS)))
               && inst.operands[1].addr.base_regno == AARCH64_SP_REGNUM
               && strcmp ("str", inst.opcode->name) == 0)
        {
          /* STR (immediate).  */
          unsigned int rt = inst.operands[0].reg.regno;
          int32_t imm = inst.operands[1].addr.offset.imm;
          unsigned int rn = inst.operands[1].addr.base_regno;
          bool is64
            = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
                      || inst.operands[0].type == AARCH64_OPND_Ft);

          if (inst.operands[0].type == AARCH64_OPND_Ft)
            {
              /* Only the bottom 64 bits of each V register (the D
                 register) need to be preserved.  */
              gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
              rt += AARCH64_X_REGISTER_COUNT;
            }

          stack.store (pv_add_constant (regs[rn], imm),
                       is64 ? 8 : 4, regs[rt]);
          if (inst.operands[1].addr.writeback)
            regs[rn] = pv_add_constant (regs[rn], imm);
        }
      else if (inst.opcode->iclass == testbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else
        {
          if (aarch64_debug)
            {
              debug_printf ("aarch64: prologue analysis gave up addr=%s"
                            " opcode=0x%x\n",
                            core_addr_to_string_nz (start), insn);
            }
          break;
        }
    }

  if (cache == NULL)
    return start;

  if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Frame pointer is fp.  Frame size is constant.  */
      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = -regs[AARCH64_FP_REGNUM].k;
    }
  else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Try the stack pointer.  */
      cache->framesize = -regs[AARCH64_SP_REGNUM].k;
      cache->framereg = AARCH64_SP_REGNUM;
    }
  else
    {
      /* We're just out of luck.  We don't know where the frame is.  */
      cache->framereg = -1;
      cache->framesize = 0;
    }

  for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
    {
      CORE_ADDR offset;

      if (stack.find_reg (gdbarch, i, &offset))
        cache->saved_regs[i].addr = offset;
    }

  for (i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
    {
      int regnum = gdbarch_num_regs (gdbarch);
      CORE_ADDR offset;

      if (stack.find_reg (gdbarch, i + AARCH64_X_REGISTER_COUNT,
                          &offset))
        cache->saved_regs[i + regnum + AARCH64_D0_REGNUM].addr = offset;
    }

  return start;
}

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
                          CORE_ADDR start, CORE_ADDR limit,
                          struct aarch64_prologue_cache *cache)
{
  instruction_reader reader;

  return aarch64_analyze_prologue (gdbarch, start, limit, cache,
                                   reader);
}

#if GDB_SELF_TEST

namespace selftests {

/* Instruction reader from manually cooked instruction sequences.  */

class instruction_reader_test : public abstract_instruction_reader
{
public:
  template<size_t SIZE>
  explicit instruction_reader_test (const uint32_t (&insns)[SIZE])
    : m_insns (insns), m_insns_size (SIZE)
  {}

  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    override
  {
    SELF_CHECK (len == 4);
    SELF_CHECK (memaddr % 4 == 0);
    SELF_CHECK (memaddr / 4 < m_insns_size);

    return m_insns[memaddr / 4];
  }

private:
  const uint32_t *m_insns;
  size_t m_insns_size;
};

static void
aarch64_analyze_prologue_test (void)
{
  struct gdbarch_info info;

  gdbarch_info_init (&info);
  info.bfd_arch_info = bfd_scan_arch ("aarch64");

  struct gdbarch *gdbarch = gdbarch_find_by_info (info);
  SELF_CHECK (gdbarch != NULL);

  /* Test the simple prologue in which frame pointer is used.  */
  {
    struct aarch64_prologue_cache cache;
    cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);

    static const uint32_t insns[] = {
      0xa9af7bfd, /* stp     x29, x30, [sp,#-272]! */
      0x910003fd, /* mov     x29, sp */
      0x97ffffe6, /* bl      0x400580 */
    };
    instruction_reader_test reader (insns);

    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
    SELF_CHECK (end == 4 * 2);

    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 272);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
        if (i == AARCH64_FP_REGNUM)
          SELF_CHECK (cache.saved_regs[i].addr == -272);
        else if (i == AARCH64_LR_REGNUM)
          SELF_CHECK (cache.saved_regs[i].addr == -264);
        else
          SELF_CHECK (cache.saved_regs[i].addr == -1);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
        int regnum = gdbarch_num_regs (gdbarch);

        SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
                    == -1);
      }
  }

  /* Test a prologue in which STR is used and frame pointer is not
     used.  */
  {
    struct aarch64_prologue_cache cache;
    cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);

    static const uint32_t insns[] = {
      0xf81d0ff3, /* str     x19, [sp, #-48]! */
      0xb9002fe0, /* str     w0, [sp, #44] */
      0xf90013e1, /* str     x1, [sp, #32] */
      0xfd000fe0, /* str     d0, [sp, #24] */
      0xaa0203f3, /* mov     x19, x2 */
      0xf94013e0, /* ldr     x0, [sp, #32] */
    };
    instruction_reader_test reader (insns);

    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    SELF_CHECK (end == 4 * 5);

    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 48);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
        if (i == 1)
          SELF_CHECK (cache.saved_regs[i].addr == -16);
        else if (i == 19)
          SELF_CHECK (cache.saved_regs[i].addr == -48);
        else
          SELF_CHECK (cache.saved_regs[i].addr == -1);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
        int regnum = gdbarch_num_regs (gdbarch);

        if (i == 0)
          SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
                      == -24);
        else
          SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
                      == -1);
      }
  }
}
} // namespace selftests
#endif /* GDB_SELF_TEST */

/* Implement the "skip_prologue" gdbarch method.  */

static CORE_ADDR
aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  CORE_ADDR func_addr, limit_pc;

  /* See if we can determine the end of the prologue via the symbol
     table.  If so, then return either PC, or the PC after the
     prologue, whichever is greater.  */
  if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
    {
      CORE_ADDR post_prologue_pc
        = skip_prologue_using_sal (gdbarch, func_addr);

      if (post_prologue_pc != 0)
        return std::max (pc, post_prologue_pc);
    }

  /* Can't determine prologue from the symbol table, need to examine
     instructions.  */

  /* Find an upper limit on the function prologue using the debug
     information.  If the debug information could not be used to
     provide that bound, then use an arbitrary large number as the
     upper bound.  */
  limit_pc = skip_prologue_using_sal (gdbarch, pc);
  if (limit_pc == 0)
    limit_pc = pc + 128;  /* Magic.  */

  /* Try disassembling prologue.  */
  return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
}

/* Scan the function prologue for THIS_FRAME and populate the prologue
   cache CACHE.  */

static void
aarch64_scan_prologue (struct frame_info *this_frame,
                       struct aarch64_prologue_cache *cache)
{
  CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
  CORE_ADDR prologue_start;
  CORE_ADDR prologue_end;
  CORE_ADDR prev_pc = get_frame_pc (this_frame);
  struct gdbarch *gdbarch = get_frame_arch (this_frame);

  cache->prev_pc = prev_pc;

  /* Assume we do not find a frame.  */
  cache->framereg = -1;
  cache->framesize = 0;

  if (find_pc_partial_function (block_addr, NULL, &prologue_start,
                                &prologue_end))
    {
      struct symtab_and_line sal = find_pc_line (prologue_start, 0);

      if (sal.line == 0)
        {
          /* No line info so use the current PC.  */
          prologue_end = prev_pc;
        }
      else if (sal.end < prologue_end)
        {
          /* The next line begins after the function end.  */
          prologue_end = sal.end;
        }

      prologue_end = std::min (prologue_end, prev_pc);
      aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
    }
  else
    {
      CORE_ADDR frame_loc;

      frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
      if (frame_loc == 0)
        return;

      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = 16;
      cache->saved_regs[29].addr = 0;
      cache->saved_regs[30].addr = 8;
    }
}

/* Fill in *CACHE with information about the prologue of *THIS_FRAME.  This
   function may throw an exception if the inferior's registers or memory is
   not available.  */

static void
aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
                               struct aarch64_prologue_cache *cache)
{
  CORE_ADDR unwound_fp;
  int reg;

  aarch64_scan_prologue (this_frame, cache);

  if (cache->framereg == -1)
    return;

  unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
  if (unwound_fp == 0)
    return;

  cache->prev_sp = unwound_fp + cache->framesize;

  /* Calculate actual addresses of saved registers using offsets
     determined by aarch64_analyze_prologue.  */
  for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
    if (trad_frame_addr_p (cache->saved_regs, reg))
      cache->saved_regs[reg].addr += cache->prev_sp;

  cache->func = get_frame_func (this_frame);

  cache->available_p = 1;
}

/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  TRY
    {
      aarch64_make_prologue_cache_1 (this_frame, cache);
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
        throw_exception (ex);
    }
  END_CATCH

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
                                           void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  /* Halt the backtrace at "_start".  */
  if (cache->prev_pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
    return UNWIND_OUTERMOST;

  /* We've hit a wall, stop.  */
  if (cache->prev_sp == 0)
    return UNWIND_OUTERMOST;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a normal frame is the current function's starting
   PC and the caller's SP when we were called.  */

static void
aarch64_prologue_this_id (struct frame_info *this_frame,
                          void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    *this_id = frame_id_build_unavailable_stack (cache->func);
  else
    *this_id = frame_id_build (cache->prev_sp, cache->func);
}

/* Implement the "prev_register" frame_unwind method.  */

static struct value *
aarch64_prologue_prev_register (struct frame_info *this_frame,
                                void **this_cache, int prev_regnum)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  /* If we are asked to unwind the PC, then we need to return the LR
     instead.  The prologue may save PC, but it will point into this
     frame's prologue, not the next frame's resume location.  */
  if (prev_regnum == AARCH64_PC_REGNUM)
    {
      CORE_ADDR lr;

      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      return frame_unwind_got_constant (this_frame, prev_regnum, lr);
    }

  /* SP is generally not saved to the stack, but this frame is
     identified by the next frame's stack pointer at the time of the
     call.  The value was already reconstructed into PREV_SP.  */
  /*
         +----------+  ^
         | saved lr |  |
      +->| saved fp |--+
      |  |          |
      |  |          |     <- Previous SP
      |  +----------+
      |  | saved lr |
      +--| saved fp |<- FP
         |          |
         |          |<- SP
         +----------+  */
  if (prev_regnum == AARCH64_SP_REGNUM)
    return frame_unwind_got_constant (this_frame, prev_regnum,
                                      cache->prev_sp);

  return trad_frame_get_prev_register (this_frame, cache->saved_regs,
                                       prev_regnum);
}

/* AArch64 prologue unwinder.  */
struct frame_unwind aarch64_prologue_unwind =
{
  NORMAL_FRAME,
  aarch64_prologue_frame_unwind_stop_reason,
  aarch64_prologue_this_id,
  aarch64_prologue_prev_register,
  NULL,
  default_frame_sniffer
};

/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  TRY
    {
      cache->prev_sp = get_frame_register_unsigned (this_frame,
                                                    AARCH64_SP_REGNUM);
      cache->prev_pc = get_frame_pc (this_frame);
      cache->available_p = 1;
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
        throw_exception (ex);
    }
  END_CATCH

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame,
                                       void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a stub frame is the current SP and LR.  */

static void
aarch64_stub_this_id (struct frame_info *this_frame,
                      void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (cache->available_p)
    *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
  else
    *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
}

/* Implement the "sniffer" frame_unwind method.  */

static int
aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
                             struct frame_info *this_frame,
                             void **this_prologue_cache)
{
  CORE_ADDR addr_in_block;
  gdb_byte dummy[4];

  addr_in_block = get_frame_address_in_block (this_frame);
  if (in_plt_section (addr_in_block)
      /* We also use the stub unwinder if the target memory is unreadable
         to avoid having the prologue unwinder trying to read it.  */
      || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
    return 1;

  return 0;
}

/* AArch64 stub unwinder.  */
struct frame_unwind aarch64_stub_unwind =
{
  NORMAL_FRAME,
  aarch64_stub_frame_unwind_stop_reason,
  aarch64_stub_this_id,
  aarch64_prologue_prev_register,
  NULL,
  aarch64_stub_unwind_sniffer
};

/* Return the frame base address of *THIS_FRAME.  */

static CORE_ADDR
aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  return cache->prev_sp - cache->framesize;
}

/* AArch64 default frame base information.  */
struct frame_base aarch64_normal_base =
{
  &aarch64_prologue_unwind,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base
};

/* Assuming THIS_FRAME is a dummy, return the frame ID of that
   dummy frame.  The frame ID's base needs to match the TOS value
   saved by save_dummy_frame_tos () and returned from
   aarch64_push_dummy_call, and the PC needs to match the dummy
   frame's breakpoint.  */

static struct frame_id
aarch64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  return frame_id_build (get_frame_register_unsigned (this_frame,
                                                      AARCH64_SP_REGNUM),
                         get_frame_pc (this_frame));
}

/* Implement the "unwind_pc" gdbarch method.  */

static CORE_ADDR
aarch64_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  CORE_ADDR pc
    = frame_unwind_register_unsigned (this_frame, AARCH64_PC_REGNUM);

  return pc;
}

/* Implement the "unwind_sp" gdbarch method.  */

static CORE_ADDR
aarch64_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  return frame_unwind_register_unsigned (this_frame, AARCH64_SP_REGNUM);
}

/* Return the value of the REGNUM register in the previous frame of
   *THIS_FRAME.  */

static struct value *
aarch64_dwarf2_prev_register (struct frame_info *this_frame,
                              void **this_cache, int regnum)
{
  CORE_ADDR lr;

  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      return frame_unwind_got_constant (this_frame, regnum, lr);

    default:
      internal_error (__FILE__, __LINE__,
                      _("Unexpected register %d"), regnum);
    }
}

/* Implement the "init_reg" dwarf2_frame_ops method.  */

static void
aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
                               struct dwarf2_frame_state_reg *reg,
                               struct frame_info *this_frame)
{
  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      reg->how = DWARF2_FRAME_REG_FN;
      reg->loc.fn = aarch64_dwarf2_prev_register;
      break;
    case AARCH64_SP_REGNUM:
      reg->how = DWARF2_FRAME_REG_CFA;
      break;
    }
}

/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

typedef struct
{
  /* Value to pass on stack.  It can be NULL if this item is for stack
     padding.  */
  const gdb_byte *data;

  /* Size in bytes of value to pass on stack.  */
  int len;
} stack_item_t;

DEF_VEC_O (stack_item_t);

/* Return the alignment (in bytes) of the given type.  */

static int
aarch64_type_align (struct type *t)
{
  int n;
  int align;
  int falign;

  t = check_typedef (t);
  switch (TYPE_CODE (t))
    {
    default:
      /* Should never happen.  */
      internal_error (__FILE__, __LINE__, _("unknown type alignment"));
      return 4;

    case TYPE_CODE_PTR:
    case TYPE_CODE_ENUM:
    case TYPE_CODE_INT:
    case TYPE_CODE_FLT:
    case TYPE_CODE_SET:
    case TYPE_CODE_RANGE:
    case TYPE_CODE_BITSTRING:
    case TYPE_CODE_REF:
    case TYPE_CODE_RVALUE_REF:
    case TYPE_CODE_CHAR:
    case TYPE_CODE_BOOL:
      return TYPE_LENGTH (t);

    case TYPE_CODE_ARRAY:
      if (TYPE_VECTOR (t))
        {
          /* Use the natural alignment for vector types (the same as for
             scalar types), but cap the maximum alignment at 128 bits.  */
          if (TYPE_LENGTH (t) > 16)
            return 16;
          else
            return TYPE_LENGTH (t);
        }
      else
        return aarch64_type_align (TYPE_TARGET_TYPE (t));
    case TYPE_CODE_COMPLEX:
      return aarch64_type_align (TYPE_TARGET_TYPE (t));

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      align = 1;
      for (n = 0; n < TYPE_NFIELDS (t); n++)
        {
          falign = aarch64_type_align (TYPE_FIELD_TYPE (t, n));
          if (falign > align)
            align = falign;
        }
      return align;
    }
}

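/* For example (illustrative): aarch64_type_align gives 8 for
   struct { char c; double d; } (the largest member alignment wins),
   4 for a plain int, and 16 for a 32-byte vector type, since vector
   alignment is capped at 128 bits.  */
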
/* Worker function for aapcs_is_vfp_call_or_return_candidate.

   Return the number of registers required, or -1 on failure.

   When encountering a base element, if FUNDAMENTAL_TYPE is not set then set it
   to the element, else fail if the type of this element does not match the
   existing value.  */

static int
aapcs_is_vfp_call_or_return_candidate_1 (struct type *type,
                                         struct type **fundamental_type)
{
  if (type == nullptr)
    return -1;

  switch (TYPE_CODE (type))
    {
    case TYPE_CODE_FLT:
      if (TYPE_LENGTH (type) > 16)
        return -1;

      if (*fundamental_type == nullptr)
        *fundamental_type = type;
      else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
               || TYPE_CODE (type) != TYPE_CODE (*fundamental_type))
        return -1;

      return 1;

    case TYPE_CODE_COMPLEX:
      {
        struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
        if (TYPE_LENGTH (target_type) > 16)
          return -1;

        if (*fundamental_type == nullptr)
          *fundamental_type = target_type;
        else if (TYPE_LENGTH (target_type) != TYPE_LENGTH (*fundamental_type)
                 || TYPE_CODE (target_type) != TYPE_CODE (*fundamental_type))
          return -1;

        return 2;
      }

    case TYPE_CODE_ARRAY:
      {
        if (TYPE_VECTOR (type))
          {
            if (TYPE_LENGTH (type) != 8 && TYPE_LENGTH (type) != 16)
              return -1;

            if (*fundamental_type == nullptr)
              *fundamental_type = type;
            else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
                     || TYPE_CODE (type) != TYPE_CODE (*fundamental_type))
              return -1;

            return 1;
          }
        else
          {
            struct type *target_type = TYPE_TARGET_TYPE (type);
            int count = aapcs_is_vfp_call_or_return_candidate_1
                          (target_type, fundamental_type);

            if (count == -1)
              return count;

            /* One set of registers is needed per array element.  */
            count *= (TYPE_LENGTH (type) / TYPE_LENGTH (target_type));
            return count;
          }
      }

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      {
        int count = 0;

        for (int i = 0; i < TYPE_NFIELDS (type); i++)
          {
            struct type *member = check_typedef (TYPE_FIELD_TYPE (type, i));

            int sub_count = aapcs_is_vfp_call_or_return_candidate_1
                              (member, fundamental_type);
            if (sub_count == -1)
              return -1;
            count += sub_count;
          }
        return count;
      }

    default:
      break;
    }

  return -1;
}

/* Return true if an argument, whose type is described by TYPE, can be passed
   or returned in simd/fp registers, providing enough parameter passing
   registers are available.  This is as described in the AAPCS64.

   Upon successful return, *COUNT returns the number of needed registers,
   *FUNDAMENTAL_TYPE contains the type of those registers.

   Candidate as per the AAPCS64 5.4.2.C is either a:
   - float.
   - short-vector.
   - HFA (Homogeneous Floating-point Aggregate, 4.3.5.1).  A Composite type
     all of whose members are floats and which has at most 4 members.
   - HVA (Homogeneous Short-vector Aggregate, 4.3.5.2).  A Composite type
     all of whose members are short vectors and which has at most 4 members.
   - Complex (7.1.1).

   Note that HFAs and HVAs can include nested structures and arrays.  */

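/* For illustration (hypothetical C argument types, not from the original
   source):

     float                      -> true, *count == 1
     struct { double x, y; }    -> true, *count == 2 (an HFA of doubles)
     _Complex float             -> true, *count == 2
     struct { float f; int i; } -> false (members are not all floating-point)
     double d[5]                -> false (more than HA_MAX_NUM_FLDS members)  */
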
static bool
aapcs_is_vfp_call_or_return_candidate (struct type *type, int *count,
                                       struct type **fundamental_type)
{
  if (type == nullptr)
    return false;

  *fundamental_type = nullptr;

  int ag_count = aapcs_is_vfp_call_or_return_candidate_1 (type,
                                                          fundamental_type);

  if (ag_count > 0 && ag_count <= HA_MAX_NUM_FLDS)
    {
      *count = ag_count;
      return true;
    }
  else
    return false;
}

/* AArch64 function call information structure.  */
struct aarch64_call_info
{
  /* The current argument number.  */
  unsigned argnum;

  /* The next general purpose register number, equivalent to NGRN as
     described in the AArch64 Procedure Call Standard.  */
  unsigned ngrn;

  /* The next SIMD and floating point register number, equivalent to
     NSRN as described in the AArch64 Procedure Call Standard.  */
  unsigned nsrn;

  /* The next stacked argument address, equivalent to NSAA as
     described in the AArch64 Procedure Call Standard.  */
  unsigned nsaa;

  /* Stack item vector.  */
  VEC(stack_item_t) *si;
};

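/* For example (illustrative): after marshalling a call such as
   f (long a, long b, double d) with the helpers below, A lands in x0 and
   B in x1 (ngrn == 2), D lands in v0 (nsrn == 1), and nothing is stacked
   (nsaa == 0).  */
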
/* Pass a value in a sequence of consecutive X registers.  The caller
   is responsible for ensuring sufficient registers are available.  */

static void
pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
           struct aarch64_call_info *info, struct type *type,
           struct value *arg)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int len = TYPE_LENGTH (type);
  enum type_code typecode = TYPE_CODE (type);
  int regnum = AARCH64_X0_REGNUM + info->ngrn;
  const bfd_byte *buf = value_contents (arg);

  info->argnum++;

  while (len > 0)
    {
      int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
      CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
                                                   byte_order);

      /* Adjust sub-word struct/union args when big-endian.  */
      if (byte_order == BFD_ENDIAN_BIG
          && partial_len < X_REGISTER_SIZE
          && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
        regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);

      if (aarch64_debug)
        {
          debug_printf ("arg %d in %s = 0x%s\n", info->argnum,
                        gdbarch_register_name (gdbarch, regnum),
                        phex (regval, X_REGISTER_SIZE));
        }
      regcache_cooked_write_unsigned (regcache, regnum, regval);
      len -= partial_len;
      buf += partial_len;
      regnum++;
    }
}
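
/* Worked example for the big-endian adjustment above (illustrative): for a
   3-byte struct passed on a big-endian target, partial_len == 3, so the
   value is shifted left by (8 - 3) * 8 == 40 bits, placing its bytes at
   the most significant end of the X register.  */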

/* Attempt to marshall a value in a V register.  Return 1 if
   successful, or 0 if insufficient registers are available.  This
   function, unlike the equivalent pass_in_x () function, does not
   handle arguments spread across multiple registers.  */

static int
pass_in_v (struct gdbarch *gdbarch,
           struct regcache *regcache,
           struct aarch64_call_info *info,
           int len, const bfd_byte *buf)
{
  if (info->nsrn < 8)
    {
      int regnum = AARCH64_V0_REGNUM + info->nsrn;
      /* Enough space for a full vector register.  */
      gdb_byte reg[register_size (gdbarch, regnum)];
      gdb_assert (len <= sizeof (reg));

      info->argnum++;
      info->nsrn++;

      memset (reg, 0, sizeof (reg));
      /* PCS C.1, the argument is allocated to the least significant
         bits of V register.  */
      memcpy (reg, buf, len);
      regcache->cooked_write (regnum, reg);

      if (aarch64_debug)
        {
          debug_printf ("arg %d in %s\n", info->argnum,
                        gdbarch_register_name (gdbarch, regnum));
        }
      return 1;
    }
  info->nsrn = 8;
  return 0;
}

/* Marshall an argument onto the stack.  */

static void
pass_on_stack (struct aarch64_call_info *info, struct type *type,
               struct value *arg)
{
  const bfd_byte *buf = value_contents (arg);
  int len = TYPE_LENGTH (type);
  int align;
  stack_item_t item;

  info->argnum++;

  align = aarch64_type_align (type);

  /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
     natural alignment of the argument's type.  */
  align = align_up (align, 8);

  /* The AArch64 PCS requires at most doubleword alignment.  */
  if (align > 16)
    align = 16;

  if (aarch64_debug)
    {
      debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
                    info->nsaa);
    }

  item.len = len;
  item.data = buf;
  VEC_safe_push (stack_item_t, info->si, &item);

  info->nsaa += len;
  if (info->nsaa & (align - 1))
    {
      /* Push stack alignment padding.  */
      int pad = align - (info->nsaa & (align - 1));

      item.len = pad;
      item.data = NULL;

      VEC_safe_push (stack_item_t, info->si, &item);
      info->nsaa += pad;
    }
}

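/* Padding example (illustrative): stacking a 12-byte struct whose natural
   alignment is 4 uses align == align_up (4, 8) == 8; nsaa goes from 0 to
   12, 12 & 7 == 4, so a 4-byte padding item is pushed and nsaa ends up
   at 16.  */
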
/* Marshall an argument into a sequence of one or more consecutive X
   registers or, if insufficient X registers are available, then onto
   the stack.  */

static void
pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
                    struct aarch64_call_info *info, struct type *type,
                    struct value *arg)
{
  int len = TYPE_LENGTH (type);
  int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;

  /* PCS C.13 - Pass in registers if we have enough spare.  */
  if (info->ngrn + nregs <= 8)
    {
      pass_in_x (gdbarch, regcache, info, type, arg);
      info->ngrn += nregs;
    }
  else
    {
      info->ngrn = 8;
      pass_on_stack (info, type, arg);
    }
}

/* Pass a value, which is of type arg_type, in a V register.  Assumes value
   is an aapcs_is_vfp_call_or_return_candidate and there are enough spare V
   registers.  A return value of false is an error state, as the value will
   have been partially passed to the stack.  */

static bool
pass_in_v_vfp_candidate (struct gdbarch *gdbarch, struct regcache *regcache,
                         struct aarch64_call_info *info, struct type *arg_type,
                         struct value *arg)
{
  switch (TYPE_CODE (arg_type))
    {
    case TYPE_CODE_FLT:
      return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type),
                        value_contents (arg));
      break;

    case TYPE_CODE_COMPLEX:
      {
        const bfd_byte *buf = value_contents (arg);
        struct type *target_type = check_typedef (TYPE_TARGET_TYPE (arg_type));

        if (!pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type),
                        buf))
          return false;

        return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type),
                          buf + TYPE_LENGTH (target_type));
      }

    case TYPE_CODE_ARRAY:
      if (TYPE_VECTOR (arg_type))
        return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type),
                          value_contents (arg));
      /* fall through.  */

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      for (int i = 0; i < TYPE_NFIELDS (arg_type); i++)
        {
          struct value *field = value_primitive_field (arg, 0, i, arg_type);
          struct type *field_type = check_typedef (value_type (field));

          if (!pass_in_v_vfp_candidate (gdbarch, regcache, info, field_type,
                                        field))
            return false;
        }
      return true;

    default:
      return false;
    }
}

/* Implement the "push_dummy_call" gdbarch method.  */

static CORE_ADDR
aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
                         struct regcache *regcache, CORE_ADDR bp_addr,
                         int nargs,
                         struct value **args, CORE_ADDR sp, int struct_return,
                         CORE_ADDR struct_addr)
{
  int argnum;
  struct aarch64_call_info info;
  struct type *func_type;
  struct type *return_type;
  int lang_struct_return;

  memset (&info, 0, sizeof (info));

  /* We need to know what the type of the called function is in order
     to determine the number of named/anonymous arguments for the
     actual argument placement, and the return type in order to handle
     return value correctly.

     The generic code above us views the decision of return in memory
     or return in registers as a two stage process.  The language
     handler is consulted first and may decide to return in memory (e.g.
     class with copy constructor returned by value), this will cause
     the generic code to allocate space AND insert an initial leading
     argument.

     If the language code does not decide to pass in memory then the
     target code is consulted.

     If the language code decides to pass in memory we want to move
     the pointer inserted as the initial argument from the argument
     list and into X8, the conventional AArch64 struct return pointer
     register.

     This is slightly awkward, ideally the flag "lang_struct_return"
     would be passed to the target's implementation of push_dummy_call.
     Rather than change the target interface we call the language code
     directly ourselves.  */

  func_type = check_typedef (value_type (function));

  /* Dereference function pointer types.  */
  if (TYPE_CODE (func_type) == TYPE_CODE_PTR)
    func_type = TYPE_TARGET_TYPE (func_type);

  gdb_assert (TYPE_CODE (func_type) == TYPE_CODE_FUNC
              || TYPE_CODE (func_type) == TYPE_CODE_METHOD);

  /* If language_pass_by_reference () returned true we will have been
     given an additional initial argument, a hidden pointer to the
     return slot in memory.  */
  return_type = TYPE_TARGET_TYPE (func_type);
  lang_struct_return = language_pass_by_reference (return_type);

  /* Set the return address.  For the AArch64, the return breakpoint
     is always at BP_ADDR.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);

  /* If we were given an initial argument for the return slot because
     lang_struct_return was true, lose it.  */
  if (lang_struct_return)
    {
      args++;
      nargs--;
    }

  /* The struct_return pointer occupies X8.  */
  if (struct_return || lang_struct_return)
    {
      if (aarch64_debug)
        {
          debug_printf ("struct return in %s = 0x%s\n",
                        gdbarch_register_name (gdbarch,
                                               AARCH64_STRUCT_RETURN_REGNUM),
                        paddress (gdbarch, struct_addr));
        }
      regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
                                      struct_addr);
    }

  for (argnum = 0; argnum < nargs; argnum++)
    {
      struct value *arg = args[argnum];
      struct type *arg_type, *fundamental_type;
      int len, elements;

      arg_type = check_typedef (value_type (arg));
      len = TYPE_LENGTH (arg_type);

      /* If arg can be passed in v registers as per the AAPCS64, then do so
         if there are enough spare registers.  */
      if (aapcs_is_vfp_call_or_return_candidate (arg_type, &elements,
                                                 &fundamental_type))
        {
          if (info.nsrn + elements <= 8)
            {
              /* We know that we have sufficient registers available therefore
                 this will never need to fall back to the stack.  */
              if (!pass_in_v_vfp_candidate (gdbarch, regcache, &info, arg_type,
                                            arg))
                gdb_assert_not_reached ("Failed to push args");
            }
          else
            {
              info.nsrn = 8;
              pass_on_stack (&info, arg_type, arg);
            }
          continue;
        }

      switch (TYPE_CODE (arg_type))
        {
        case TYPE_CODE_INT:
        case TYPE_CODE_BOOL:
        case TYPE_CODE_CHAR:
        case TYPE_CODE_RANGE:
        case TYPE_CODE_ENUM:
          if (len < 4)
            {
              /* Promote to 32 bit integer.  */
              if (TYPE_UNSIGNED (arg_type))
                arg_type = builtin_type (gdbarch)->builtin_uint32;
              else
                arg_type = builtin_type (gdbarch)->builtin_int32;
              arg = value_cast (arg_type, arg);
            }
          pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;

        case TYPE_CODE_STRUCT:
        case TYPE_CODE_ARRAY:
        case TYPE_CODE_UNION:
          if (len > 16)
            {
              /* PCS B.7 Aggregates larger than 16 bytes are passed by
                 invisible reference.  */

              /* Allocate aligned storage.  */
              sp = align_down (sp - len, 16);

              /* Write the real data into the stack.  */
              write_memory (sp, value_contents (arg), len);

              /* Construct the indirection.  */
              arg_type = lookup_pointer_type (arg_type);
              arg = value_from_pointer (arg_type, sp);
              pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
            }
          else
            /* PCS C.15 / C.18 multiple values pass.  */
            pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;

        default:
          pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;
        }
    }

  /* Make sure stack retains 16 byte alignment.  */
  if (info.nsaa & 15)
    sp -= 16 - (info.nsaa & 15);

  while (!VEC_empty (stack_item_t, info.si))
    {
      stack_item_t *si = VEC_last (stack_item_t, info.si);

      sp -= si->len;
      if (si->data != NULL)
        write_memory (sp, si->data, si->len);
      VEC_pop (stack_item_t, info.si);
    }

  VEC_free (stack_item_t, info.si);

  /* Finally, update the SP register.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);

  return sp;
}

/* Implement the "frame_align" gdbarch method.  */

static CORE_ADDR
aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
{
  /* Align the stack to sixteen bytes.  */
  return sp & ~(CORE_ADDR) 15;
}

1703/* Return the type for an AdvSISD Q register. */
1704
1705static struct type *
1706aarch64_vnq_type (struct gdbarch *gdbarch)
1707{
1708 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1709
1710 if (tdep->vnq_type == NULL)
1711 {
1712 struct type *t;
1713 struct type *elem;
1714
1715 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
1716 TYPE_CODE_UNION);
1717
1718 elem = builtin_type (gdbarch)->builtin_uint128;
1719 append_composite_type_field (t, "u", elem);
1720
1721 elem = builtin_type (gdbarch)->builtin_int128;
1722 append_composite_type_field (t, "s", elem);
1723
1724 tdep->vnq_type = t;
1725 }
1726
1727 return tdep->vnq_type;
1728}
1729
1730/* Return the type for an AdvSISD D register. */
1731
1732static struct type *
1733aarch64_vnd_type (struct gdbarch *gdbarch)
1734{
1735 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1736
1737 if (tdep->vnd_type == NULL)
1738 {
1739 struct type *t;
1740 struct type *elem;
1741
1742 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
1743 TYPE_CODE_UNION);
1744
1745 elem = builtin_type (gdbarch)->builtin_double;
1746 append_composite_type_field (t, "f", elem);
1747
1748 elem = builtin_type (gdbarch)->builtin_uint64;
1749 append_composite_type_field (t, "u", elem);
1750
1751 elem = builtin_type (gdbarch)->builtin_int64;
1752 append_composite_type_field (t, "s", elem);
1753
1754 tdep->vnd_type = t;
1755 }
1756
1757 return tdep->vnd_type;
1758}
1759
1760/* Return the type for an AdvSISD S register. */
1761
1762static struct type *
1763aarch64_vns_type (struct gdbarch *gdbarch)
1764{
1765 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1766
1767 if (tdep->vns_type == NULL)
1768 {
1769 struct type *t;
1770 struct type *elem;
1771
1772 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
1773 TYPE_CODE_UNION);
1774
1775 elem = builtin_type (gdbarch)->builtin_float;
1776 append_composite_type_field (t, "f", elem);
1777
1778 elem = builtin_type (gdbarch)->builtin_uint32;
1779 append_composite_type_field (t, "u", elem);
1780
1781 elem = builtin_type (gdbarch)->builtin_int32;
1782 append_composite_type_field (t, "s", elem);
1783
1784 tdep->vns_type = t;
1785 }
1786
1787 return tdep->vns_type;
1788}
1789
1790/* Return the type for an AdvSISD H register. */
1791
1792static struct type *
1793aarch64_vnh_type (struct gdbarch *gdbarch)
1794{
1795 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1796
1797 if (tdep->vnh_type == NULL)
1798 {
1799 struct type *t;
1800 struct type *elem;
1801
1802 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
1803 TYPE_CODE_UNION);
1804
1805 elem = builtin_type (gdbarch)->builtin_uint16;
1806 append_composite_type_field (t, "u", elem);
1807
1808 elem = builtin_type (gdbarch)->builtin_int16;
1809 append_composite_type_field (t, "s", elem);
1810
1811 tdep->vnh_type = t;
1812 }
1813
1814 return tdep->vnh_type;
1815}
1816
1817/* Return the type for an AdvSISD B register. */
1818
1819static struct type *
1820aarch64_vnb_type (struct gdbarch *gdbarch)
1821{
1822 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1823
1824 if (tdep->vnb_type == NULL)
1825 {
1826 struct type *t;
1827 struct type *elem;
1828
1829 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
1830 TYPE_CODE_UNION);
1831
1832 elem = builtin_type (gdbarch)->builtin_uint8;
1833 append_composite_type_field (t, "u", elem);
1834
1835 elem = builtin_type (gdbarch)->builtin_int8;
1836 append_composite_type_field (t, "s", elem);
1837
1838 tdep->vnb_type = t;
1839 }
1840
1841 return tdep->vnb_type;
1842}
1843
63bad7b6
AH
1844/* Return the type for an AdvSISD V register. */
1845
1846static struct type *
1847aarch64_vnv_type (struct gdbarch *gdbarch)
1848{
1849 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1850
1851 if (tdep->vnv_type == NULL)
1852 {
1853 struct type *t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnv",
1854 TYPE_CODE_UNION);
1855
1856 append_composite_type_field (t, "d", aarch64_vnd_type (gdbarch));
1857 append_composite_type_field (t, "s", aarch64_vns_type (gdbarch));
1858 append_composite_type_field (t, "h", aarch64_vnh_type (gdbarch));
1859 append_composite_type_field (t, "b", aarch64_vnb_type (gdbarch));
1860 append_composite_type_field (t, "q", aarch64_vnq_type (gdbarch));
1861
1862 tdep->vnv_type = t;
1863 }
1864
1865 return tdep->vnv_type;
1866}
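/* With the composite types above, the vector pseudo registers can be
   inspected field by field from the CLI; e.g. (an illustrative session,
   not captured from a real target):
     (gdb) print $d0.f
   displays D0 as an IEEE double, while $d0.u shows the same bits as a
   64-bit unsigned integer.  */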
1867
1868/* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
1869
1870static int
1871aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
1872{
1873 if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
1874 return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;
1875
1876 if (reg == AARCH64_DWARF_SP)
1877 return AARCH64_SP_REGNUM;
1878
1879 if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
1880 return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;
1881
1882 if (reg == AARCH64_DWARF_SVE_VG)
1883 return AARCH64_SVE_VG_REGNUM;
1884
1885 if (reg == AARCH64_DWARF_SVE_FFR)
1886 return AARCH64_SVE_FFR_REGNUM;
1887
1888 if (reg >= AARCH64_DWARF_SVE_P0 && reg <= AARCH64_DWARF_SVE_P0 + 15)
1889 return AARCH64_SVE_P0_REGNUM + reg - AARCH64_DWARF_SVE_P0;
1890
1891 if (reg >= AARCH64_DWARF_SVE_Z0 && reg <= AARCH64_DWARF_SVE_Z0 + 15)
1892 return AARCH64_SVE_Z0_REGNUM + reg - AARCH64_DWARF_SVE_Z0;
1893
1894 return -1;
1895}
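/* As a worked example, under the AAPCS64 DWARF numbering used above
   (x0-x30 are 0-30, SP is 31, v0-v31 start at 64), DWARF register 0
   maps to x0 and DWARF register 64 maps to v0; unknown numbers yield
   -1 so the caller can diagnose them.  */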
1896
1897/* Implement the "print_insn" gdbarch method. */
1898
1899static int
1900aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
1901{
1902 info->symbols = NULL;
1903 return default_print_insn (memaddr, info);
1904}
1905
1906/* AArch64 BRK software debug mode instruction.
1907 Note that AArch64 code is always little-endian.
1908 1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000. */
1909constexpr gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
1910
1911typedef BP_MANIPULATION (aarch64_default_breakpoint) aarch64_breakpoint;
1912
1913/* Extract from an array REGS containing the (raw) register state a
1914 function return value of type TYPE, and copy that, in virtual
1915 format, into VALBUF. */
1916
1917static void
1918aarch64_extract_return_value (struct type *type, struct regcache *regs,
1919 gdb_byte *valbuf)
1920{
1921 struct gdbarch *gdbarch = regs->arch ();
1922 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1923 int elements;
1924 struct type *fundamental_type;
1925
1926 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
1927 &fundamental_type))
1928 {
1929 int len = TYPE_LENGTH (fundamental_type);
1930
1931 for (int i = 0; i < elements; i++)
1932 {
1933 int regno = AARCH64_V0_REGNUM + i;
1934 /* Enough space for a full vector register. */
1935 gdb_byte buf[register_size (gdbarch, regno)];
1936 gdb_assert (len <= sizeof (buf));
1937
1938 if (aarch64_debug)
1939 {
1940 debug_printf ("read HFA or HVA return value element %d from %s\n",
1941 i + 1,
1942 gdbarch_register_name (gdbarch, regno));
1943 }
1944 regs->cooked_read (regno, buf);
1945
1946 memcpy (valbuf, buf, len);
1947 valbuf += len;
1948 }
1949 }
1950 else if (TYPE_CODE (type) == TYPE_CODE_INT
1951 || TYPE_CODE (type) == TYPE_CODE_CHAR
1952 || TYPE_CODE (type) == TYPE_CODE_BOOL
1953 || TYPE_CODE (type) == TYPE_CODE_PTR
1954 || TYPE_IS_REFERENCE (type)
1955 || TYPE_CODE (type) == TYPE_CODE_ENUM)
1956 {
1957 /* If the type is a plain integer, then the access is
1958 straightforward.  Otherwise we have to play around a bit
1959 more. */
1960 int len = TYPE_LENGTH (type);
1961 int regno = AARCH64_X0_REGNUM;
1962 ULONGEST tmp;
1963
1964 while (len > 0)
1965 {
1966 /* By using store_unsigned_integer we avoid having to do
1967 anything special for small big-endian values. */
1968 regcache_cooked_read_unsigned (regs, regno++, &tmp);
1969 store_unsigned_integer (valbuf,
1970 (len > X_REGISTER_SIZE
1971 ? X_REGISTER_SIZE : len), byte_order, tmp);
1972 len -= X_REGISTER_SIZE;
1973 valbuf += X_REGISTER_SIZE;
1974 }
1975 }
1976 else
1977 {
1978 /* For a structure or union the behaviour is as if the value had
1979 been stored to word-aligned memory and then loaded into
1980 registers with 64-bit load instruction(s). */
1981 int len = TYPE_LENGTH (type);
1982 int regno = AARCH64_X0_REGNUM;
1983 bfd_byte buf[X_REGISTER_SIZE];
1984
1985 while (len > 0)
1986 {
1987 regs->cooked_read (regno++, buf);
1988 memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
1989 len -= X_REGISTER_SIZE;
1990 valbuf += X_REGISTER_SIZE;
1991 }
1992 }
1993}
1994
1995
1996/* Will a function return an aggregate type in memory or in a
1997 register? Return 0 if an aggregate type can be returned in a
1998 register, 1 if it must be returned in memory. */
1999
2000static int
2001aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
2002{
2003 type = check_typedef (type);
2004 int elements;
2005 struct type *fundamental_type;
2006
2007 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2008 &fundamental_type))
2009 {
2010 /* v0-v7 are used to return values, with one register allocated
2011 per member; an HFA or HVA has at most four members. */
2012 return 0;
2013 }
2014
2015 if (TYPE_LENGTH (type) > 16)
2016 {
2017 /* PCS B.6 Aggregates larger than 16 bytes are passed by
2018 invisible reference. */
2019
2020 return 1;
2021 }
2022
2023 return 0;
2024}
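/* Two illustrative cases for the rules above: a struct of four doubles
   is an HFA and can come back in v0-v3, whereas a 24-byte struct of
   ints is over the 16-byte limit and is returned through an invisible
   reference (PCS B.6).  */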
2025
2026/* Write into appropriate registers a function return value of type
2027 TYPE, given in virtual format. */
2028
2029static void
2030aarch64_store_return_value (struct type *type, struct regcache *regs,
2031 const gdb_byte *valbuf)
2032{
2033 struct gdbarch *gdbarch = regs->arch ();
2034 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2035 int elements;
2036 struct type *fundamental_type;
2037
2038 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2039 &fundamental_type))
2040 {
2041 int len = TYPE_LENGTH (fundamental_type);
2042
2043 for (int i = 0; i < elements; i++)
2044 {
2045 int regno = AARCH64_V0_REGNUM + i;
2046 /* Enough space for a full vector register. */
2047 gdb_byte tmpbuf[register_size (gdbarch, regno)];
2048 gdb_assert (len <= sizeof (tmpbuf));
2049
2050 if (aarch64_debug)
2051 {
2052 debug_printf ("write HFA or HVA return value element %d to %s\n",
2053 i + 1,
2054 gdbarch_register_name (gdbarch, regno));
2055 }
2056
2057 memcpy (tmpbuf, valbuf,
2058 len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
2059 regs->cooked_write (regno, tmpbuf);
2060 valbuf += len;
2061 }
2062 }
2063 else if (TYPE_CODE (type) == TYPE_CODE_INT
2064 || TYPE_CODE (type) == TYPE_CODE_CHAR
2065 || TYPE_CODE (type) == TYPE_CODE_BOOL
2066 || TYPE_CODE (type) == TYPE_CODE_PTR
2067 || TYPE_IS_REFERENCE (type)
2068 || TYPE_CODE (type) == TYPE_CODE_ENUM)
2069 {
2070 if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
2071 {
2072 /* Values of one word or less are zero/sign-extended and
2073 returned in x0. */
2074 bfd_byte tmpbuf[X_REGISTER_SIZE];
2075 LONGEST val = unpack_long (type, valbuf);
2076
2077 store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
2078 regs->cooked_write (AARCH64_X0_REGNUM, tmpbuf);
2079 }
2080 else
2081 {
2082 /* Integral values greater than one word are stored in
2083 consecutive registers starting with x0.  This will always
2084 be a multiple of the register size. */
2085 int len = TYPE_LENGTH (type);
2086 int regno = AARCH64_X0_REGNUM;
2087
2088 while (len > 0)
2089 {
2090 regs->cooked_write (regno++, valbuf);
2091 len -= X_REGISTER_SIZE;
2092 valbuf += X_REGISTER_SIZE;
2093 }
2094 }
2095 }
2096 else
2097 {
2098 /* For a structure or union the behaviour is as if the value had
2099 been stored to word-aligned memory and then loaded into
2100 registers with 64-bit load instruction(s). */
2101 int len = TYPE_LENGTH (type);
2102 int regno = AARCH64_X0_REGNUM;
2103 bfd_byte tmpbuf[X_REGISTER_SIZE];
2104
2105 while (len > 0)
2106 {
2107 memcpy (tmpbuf, valbuf,
2108 len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2109 regs->cooked_write (regno++, tmpbuf);
2110 len -= X_REGISTER_SIZE;
2111 valbuf += X_REGISTER_SIZE;
2112 }
2113 }
2114}
2115
2116/* Implement the "return_value" gdbarch method. */
2117
2118static enum return_value_convention
2119aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
2120 struct type *valtype, struct regcache *regcache,
2121 gdb_byte *readbuf, const gdb_byte *writebuf)
2122{
2123
2124 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
2125 || TYPE_CODE (valtype) == TYPE_CODE_UNION
2126 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
2127 {
2128 if (aarch64_return_in_memory (gdbarch, valtype))
2129 {
2130 if (aarch64_debug)
2131 debug_printf ("return value in memory\n");
2132 return RETURN_VALUE_STRUCT_CONVENTION;
2133 }
2134 }
2135
2136 if (writebuf)
2137 aarch64_store_return_value (valtype, regcache, writebuf);
2138
2139 if (readbuf)
2140 aarch64_extract_return_value (valtype, regcache, readbuf);
2141
2142 if (aarch64_debug)
2143 debug_printf ("return value in registers\n");
2144
2145 return RETURN_VALUE_REGISTER_CONVENTION;
2146}
2147
2148/* Implement the "get_longjmp_target" gdbarch method. */
2149
2150static int
2151aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2152{
2153 CORE_ADDR jb_addr;
2154 gdb_byte buf[X_REGISTER_SIZE];
2155 struct gdbarch *gdbarch = get_frame_arch (frame);
2156 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2157 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2158
2159 jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
2160
2161 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
2162 X_REGISTER_SIZE))
2163 return 0;
2164
2165 *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
2166 return 1;
2167}
2168
2169/* Implement the "gen_return_address" gdbarch method. */
2170
2171static void
2172aarch64_gen_return_address (struct gdbarch *gdbarch,
2173 struct agent_expr *ax, struct axs_value *value,
2174 CORE_ADDR scope)
2175{
2176 value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
2177 value->kind = axs_lvalue_register;
2178 value->u.reg = AARCH64_LR_REGNUM;
2179}
2180\f
2181
2182/* Return the pseudo register name corresponding to register regnum. */
2183
2184static const char *
2185aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
2186{
2187 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2188
2189 static const char *const q_name[] =
2190 {
2191 "q0", "q1", "q2", "q3",
2192 "q4", "q5", "q6", "q7",
2193 "q8", "q9", "q10", "q11",
2194 "q12", "q13", "q14", "q15",
2195 "q16", "q17", "q18", "q19",
2196 "q20", "q21", "q22", "q23",
2197 "q24", "q25", "q26", "q27",
2198 "q28", "q29", "q30", "q31",
2199 };
2200
2201 static const char *const d_name[] =
2202 {
2203 "d0", "d1", "d2", "d3",
2204 "d4", "d5", "d6", "d7",
2205 "d8", "d9", "d10", "d11",
2206 "d12", "d13", "d14", "d15",
2207 "d16", "d17", "d18", "d19",
2208 "d20", "d21", "d22", "d23",
2209 "d24", "d25", "d26", "d27",
2210 "d28", "d29", "d30", "d31",
2211 };
2212
2213 static const char *const s_name[] =
2214 {
2215 "s0", "s1", "s2", "s3",
2216 "s4", "s5", "s6", "s7",
2217 "s8", "s9", "s10", "s11",
2218 "s12", "s13", "s14", "s15",
2219 "s16", "s17", "s18", "s19",
2220 "s20", "s21", "s22", "s23",
2221 "s24", "s25", "s26", "s27",
2222 "s28", "s29", "s30", "s31",
2223 };
2224
2225 static const char *const h_name[] =
2226 {
2227 "h0", "h1", "h2", "h3",
2228 "h4", "h5", "h6", "h7",
2229 "h8", "h9", "h10", "h11",
2230 "h12", "h13", "h14", "h15",
2231 "h16", "h17", "h18", "h19",
2232 "h20", "h21", "h22", "h23",
2233 "h24", "h25", "h26", "h27",
2234 "h28", "h29", "h30", "h31",
2235 };
2236
2237 static const char *const b_name[] =
2238 {
2239 "b0", "b1", "b2", "b3",
2240 "b4", "b5", "b6", "b7",
2241 "b8", "b9", "b10", "b11",
2242 "b12", "b13", "b14", "b15",
2243 "b16", "b17", "b18", "b19",
2244 "b20", "b21", "b22", "b23",
2245 "b24", "b25", "b26", "b27",
2246 "b28", "b29", "b30", "b31",
2247 };
2248
2249 regnum -= gdbarch_num_regs (gdbarch);
2250
2251 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2252 return q_name[regnum - AARCH64_Q0_REGNUM];
2253
2254 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2255 return d_name[regnum - AARCH64_D0_REGNUM];
2256
2257 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2258 return s_name[regnum - AARCH64_S0_REGNUM];
2259
2260 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2261 return h_name[regnum - AARCH64_H0_REGNUM];
2262
2263 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2264 return b_name[regnum - AARCH64_B0_REGNUM];
2265
2266 if (tdep->has_sve ())
2267 {
2268 static const char *const sve_v_name[] =
2269 {
2270 "v0", "v1", "v2", "v3",
2271 "v4", "v5", "v6", "v7",
2272 "v8", "v9", "v10", "v11",
2273 "v12", "v13", "v14", "v15",
2274 "v16", "v17", "v18", "v19",
2275 "v20", "v21", "v22", "v23",
2276 "v24", "v25", "v26", "v27",
2277 "v28", "v29", "v30", "v31",
2278 };
2279
2280 if (regnum >= AARCH64_SVE_V0_REGNUM
2281 && regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2282 return sve_v_name[regnum - AARCH64_SVE_V0_REGNUM];
2283 }
2284
2285 internal_error (__FILE__, __LINE__,
2286 _("aarch64_pseudo_register_name: bad register number %d"),
2287 regnum);
2288}
2289
2290/* Implement the "pseudo_register_type" tdesc_arch_data method. */
2291
2292static struct type *
2293aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
2294{
2295 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2296
2297 regnum -= gdbarch_num_regs (gdbarch);
2298
2299 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2300 return aarch64_vnq_type (gdbarch);
2301
2302 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2303 return aarch64_vnd_type (gdbarch);
2304
2305 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2306 return aarch64_vns_type (gdbarch);
2307
2308 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2309 return aarch64_vnh_type (gdbarch);
2310
2311 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2312 return aarch64_vnb_type (gdbarch);
2313
2314 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2315 && regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2316 return aarch64_vnv_type (gdbarch);
2317
2318 internal_error (__FILE__, __LINE__,
2319 _("aarch64_pseudo_register_type: bad register number %d"),
2320 regnum);
2321}
2322
2323/* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2324
2325static int
2326aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
2327 struct reggroup *group)
2328{
2329 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2330
2331 regnum -= gdbarch_num_regs (gdbarch);
2332
2333 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2334 return group == all_reggroup || group == vector_reggroup;
2335 else if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2336 return (group == all_reggroup || group == vector_reggroup
2337 || group == float_reggroup);
2338 else if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2339 return (group == all_reggroup || group == vector_reggroup
2340 || group == float_reggroup);
2341 else if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2342 return group == all_reggroup || group == vector_reggroup;
2343 else if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2344 return group == all_reggroup || group == vector_reggroup;
2345 else if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2346 && regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2347 return group == all_reggroup || group == vector_reggroup;
2348
2349 return group == all_reggroup;
2350}
2351
2352/* Helper for aarch64_pseudo_read_value. */
2353
2354static struct value *
2355aarch64_pseudo_read_value_1 (struct gdbarch *gdbarch,
2356 readable_regcache *regcache, int regnum_offset,
2357 int regsize, struct value *result_value)
2358{
2359 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
2360
2361 /* Enough space for a full vector register. */
2362 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2363 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2364
2365 if (regcache->raw_read (v_regnum, reg_buf) != REG_VALID)
2366 mark_value_bytes_unavailable (result_value, 0,
2367 TYPE_LENGTH (value_type (result_value)));
2368 else
2369 memcpy (value_contents_raw (result_value), reg_buf, regsize);
2370
2371 return result_value;
2372}
2373
2374/* Implement the "pseudo_register_read_value" gdbarch method. */
2375
2376static struct value *
2377aarch64_pseudo_read_value (struct gdbarch *gdbarch, readable_regcache *regcache,
2378 int regnum)
2379{
2380 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2381 struct value *result_value = allocate_value (register_type (gdbarch, regnum));
2382
2383 VALUE_LVAL (result_value) = lval_register;
2384 VALUE_REGNUM (result_value) = regnum;
2385
2386 regnum -= gdbarch_num_regs (gdbarch);
2387
2388 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2389 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2390 regnum - AARCH64_Q0_REGNUM,
2391 Q_REGISTER_SIZE, result_value);
2392
2393 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2394 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2395 regnum - AARCH64_D0_REGNUM,
2396 D_REGISTER_SIZE, result_value);
2397
2398 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2399 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2400 regnum - AARCH64_S0_REGNUM,
2401 S_REGISTER_SIZE, result_value);
2402
2403 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2404 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2405 regnum - AARCH64_H0_REGNUM,
2406 H_REGISTER_SIZE, result_value);
2407
2408 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2409 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2410 regnum - AARCH64_B0_REGNUM,
2411 B_REGISTER_SIZE, result_value);
2412
2413 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2414 && regnum < AARCH64_SVE_V0_REGNUM + 32)
2415 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2416 regnum - AARCH64_SVE_V0_REGNUM,
2417 V_REGISTER_SIZE, result_value);
2418
2419 gdb_assert_not_reached ("regnum out of bounds");
2420}
2421
2422/* Helper for aarch64_pseudo_write. */
2423
2424static void
2425aarch64_pseudo_write_1 (struct gdbarch *gdbarch, struct regcache *regcache,
2426 int regnum_offset, int regsize, const gdb_byte *buf)
2427{
2428 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
2429
2430 /* Enough space for a full vector register. */
2431 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2432 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2433
2434 /* Ensure the register buffer is zero.  We want gdb writes of the
2435 various 'scalar' pseudo registers to behave like architectural
2436 writes: register-width bytes are written and the remainder is set
2437 to zero. */
2438 memset (reg_buf, 0, register_size (gdbarch, AARCH64_V0_REGNUM));
2439
2440 memcpy (reg_buf, buf, regsize);
2441 regcache->raw_write (v_regnum, reg_buf);
2442}
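/* For example, writing the 4-byte pseudo register s0 through this
   helper updates the low 4 bytes of v0 and zeroes the rest, mirroring
   an architectural write to the scalar register.  */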
2443
2444/* Implement the "pseudo_register_write" gdbarch method. */
2445
2446static void
2447aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2448 int regnum, const gdb_byte *buf)
2449{
2450 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2451 regnum -= gdbarch_num_regs (gdbarch);
2452
2453 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2454 return aarch64_pseudo_write_1 (gdbarch, regcache,
2455 regnum - AARCH64_Q0_REGNUM, Q_REGISTER_SIZE,
2456 buf);
2457
2458 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2459 return aarch64_pseudo_write_1 (gdbarch, regcache,
2460 regnum - AARCH64_D0_REGNUM, D_REGISTER_SIZE,
2461 buf);
2462
2463 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2464 return aarch64_pseudo_write_1 (gdbarch, regcache,
2465 regnum - AARCH64_S0_REGNUM, S_REGISTER_SIZE,
2466 buf);
2467
2468 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2469 return aarch64_pseudo_write_1 (gdbarch, regcache,
2470 regnum - AARCH64_H0_REGNUM, H_REGISTER_SIZE,
2471 buf);
2472
2473 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2474 return aarch64_pseudo_write_1 (gdbarch, regcache,
2475 regnum - AARCH64_B0_REGNUM, B_REGISTER_SIZE,
2476 buf);
2477
2478 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2479 && regnum < AARCH64_SVE_V0_REGNUM + 32)
2480 return aarch64_pseudo_write_1 (gdbarch, regcache,
2481 regnum - AARCH64_SVE_V0_REGNUM,
2482 V_REGISTER_SIZE, buf);
2483
2484 gdb_assert_not_reached ("regnum out of bounds");
2485}
2486
2487/* Callback function for user_reg_add. */
2488
2489static struct value *
2490value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
2491{
2492 const int *reg_p = (const int *) baton;
2493
2494 return value_of_register (*reg_p, frame);
2495}
2496\f
2497
2498/* Implement the "software_single_step" gdbarch method, needed to
2499 single step through atomic sequences on AArch64. */
2500
2501static std::vector<CORE_ADDR>
2502aarch64_software_single_step (struct regcache *regcache)
2503{
2504 struct gdbarch *gdbarch = regcache->arch ();
2505 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2506 const int insn_size = 4;
2507 const int atomic_sequence_length = 16; /* Instruction sequence length. */
2508 CORE_ADDR pc = regcache_read_pc (regcache);
2509 CORE_ADDR breaks[2] = { CORE_ADDR_MAX, CORE_ADDR_MAX };
2510 CORE_ADDR loc = pc;
2511 CORE_ADDR closing_insn = 0;
2512 uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
2513 byte_order_for_code);
2514 int index;
2515 int insn_count;
2516 int bc_insn_count = 0; /* Conditional branch instruction count. */
2517 int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed). */
2518 aarch64_inst inst;
2519
2520 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2521 return {};
2522
2523 /* Look for a Load Exclusive instruction which begins the sequence. */
2524 if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
2525 return {};
2526
2527 for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
2528 {
9404b58f
KM
2529 loc += insn_size;
2530 insn = read_memory_unsigned_integer (loc, insn_size,
2531 byte_order_for_code);
2532
2533 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2534 return {};
2535 /* Check if the instruction is a conditional branch. */
2536 if (inst.opcode->iclass == condbranch)
2537 {
2538 gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);
2539
2540 if (bc_insn_count >= 1)
2541 return {};
2542
2543 /* It is, so we'll try to set a breakpoint at the destination. */
2544 breaks[1] = loc + inst.operands[0].imm.value;
2545
2546 bc_insn_count++;
2547 last_breakpoint++;
2548 }
2549
2550 /* Look for the Store Exclusive which closes the atomic sequence. */
2551 if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
2552 {
2553 closing_insn = loc;
2554 break;
2555 }
2556 }
2557
2558 /* We didn't find a closing Store Exclusive instruction, fall back. */
2559 if (!closing_insn)
2560 return {};
2561
2562 /* Insert breakpoint after the end of the atomic sequence. */
2563 breaks[0] = loc + insn_size;
2564
2565 /* Check for duplicated breakpoints, and also check that the second
2566 breakpoint is not within the atomic sequence. */
2567 if (last_breakpoint
2568 && (breaks[1] == breaks[0]
2569 || (breaks[1] >= pc && breaks[1] <= closing_insn)))
2570 last_breakpoint = 0;
2571
2572 std::vector<CORE_ADDR> next_pcs;
2573
2574 /* Insert the breakpoint at the end of the sequence, and one at the
2575 destination of the conditional branch, if it exists. */
2576 for (index = 0; index <= last_breakpoint; index++)
2577 next_pcs.push_back (breaks[index]);
2578
2579 return next_pcs;
2580}
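/* An illustrative sequence this handles (not from a real trace):
     ldaxr w1, [x0]      ; load exclusive opens the sequence
     add   w1, w1, #1
     stlxr w2, w1, [x0]  ; store exclusive closes it
   Trapping between the exclusive pair would make the store exclusive
   fail on every attempt, so the breakpoint goes after the stlxr (and,
   for a conditional branch inside the sequence, at its target).  */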
2581
2582struct aarch64_displaced_step_closure : public displaced_step_closure
2583{
2584 /* True when a conditional instruction, such as B.COND, TBZ, etc.,
2585 is being displaced stepped. */
2586 int cond = 0;
2587
2588 /* PC adjustment offset after displaced stepping. */
2589 int32_t pc_adjust = 0;
2590};
2591
2592/* Data when visiting instructions for displaced stepping. */
2593
2594struct aarch64_displaced_step_data
2595{
2596 struct aarch64_insn_data base;
2597
2598 /* The address at which the instruction will be executed. */
2599 CORE_ADDR new_addr;
2600 /* Buffer of instructions to be copied to NEW_ADDR to execute. */
2601 uint32_t insn_buf[DISPLACED_MODIFIED_INSNS];
2602 /* Number of instructions in INSN_BUF. */
2603 unsigned insn_count;
2604 /* Registers when doing displaced stepping. */
2605 struct regcache *regs;
2606
2607 aarch64_displaced_step_closure *dsc;
2608};
2609
2610/* Implementation of aarch64_insn_visitor method "b". */
2611
2612static void
2613aarch64_displaced_step_b (const int is_bl, const int32_t offset,
2614 struct aarch64_insn_data *data)
2615{
2616 struct aarch64_displaced_step_data *dsd
2617 = (struct aarch64_displaced_step_data *) data;
2618 int64_t new_offset = data->insn_addr - dsd->new_addr + offset;
2619
2620 if (can_encode_int32 (new_offset, 28))
2621 {
2622 /* Emit B rather than BL, because executing BL on a new address
2623 will get the wrong address into LR. In order to avoid this,
2624 we emit B, and update LR if the instruction is BL. */
2625 emit_b (dsd->insn_buf, 0, new_offset);
2626 dsd->insn_count++;
2627 }
2628 else
2629 {
2630 /* Write NOP. */
2631 emit_nop (dsd->insn_buf);
2632 dsd->insn_count++;
2633 dsd->dsc->pc_adjust = offset;
2634 }
2635
2636 if (is_bl)
2637 {
2638 /* Update LR. */
2639 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
2640 data->insn_addr + 4);
2641 }
2642}
2643
2644/* Implementation of aarch64_insn_visitor method "b_cond". */
2645
2646static void
2647aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
2648 struct aarch64_insn_data *data)
2649{
2650 struct aarch64_displaced_step_data *dsd
2651 = (struct aarch64_displaced_step_data *) data;
2652
2653 /* GDB has to fix up the PC after displaced stepping this instruction
2654 differently, according to whether the condition is true or false.
2655 Instead of checking COND against the condition flags, we can use
2656 the following instructions, and GDB can tell how to fix up PC
2657 according to the PC value.
2658
2659 B.COND TAKEN ; If cond is true, then jump to TAKEN.
2660 INSN1 ;
2661 TAKEN:
2662 INSN2
2663 */
2664
2665 emit_bcond (dsd->insn_buf, cond, 8);
2666 dsd->dsc->cond = 1;
2667 dsd->dsc->pc_adjust = offset;
2668 dsd->insn_count = 1;
2669}
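/* The 8-byte branch offset above is what aarch64_displaced_step_fixup
   relies on: landing at TO + 8 means the B.COND was taken (condition
   true), landing at TO + 4 means execution fell through (condition
   false).  */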
2670
2671/* Dynamically allocate a new register. If we know the register
2672 statically, we should make it a global as above instead of using this
2673 helper function. */
2674
2675static struct aarch64_register
2676aarch64_register (unsigned num, int is64)
2677{
2678 return (struct aarch64_register) { num, is64 };
2679}
2680
2681/* Implementation of aarch64_insn_visitor method "cb". */
2682
2683static void
2684aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
2685 const unsigned rn, int is64,
2686 struct aarch64_insn_data *data)
2687{
2688 struct aarch64_displaced_step_data *dsd
2689 = (struct aarch64_displaced_step_data *) data;
2690
2691 /* The offset is out of range for a compare and branch
2692 instruction. We can use the following instructions instead:
2693
2694 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
2695 INSN1 ;
2696 TAKEN:
2697 INSN2
2698 */
2699 emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
2700 dsd->insn_count = 1;
2701 dsd->dsc->cond = 1;
2702 dsd->dsc->pc_adjust = offset;
2703}
2704
2705/* Implementation of aarch64_insn_visitor method "tb". */
2706
2707static void
2708aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
2709 const unsigned rt, unsigned bit,
2710 struct aarch64_insn_data *data)
2711{
2712 struct aarch64_displaced_step_data *dsd
2713 = (struct aarch64_displaced_step_data *) data;
2714
2715 /* The offset is out of range for a test bit and branch
2716 instruction.  We can use the following instructions instead:
2717
2718 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
2719 INSN1 ;
2720 TAKEN:
2721 INSN2
2722
2723 */
2724 emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
2725 dsd->insn_count = 1;
2726 dsd->dsc->cond = 1;
2727 dsd->dsc->pc_adjust = offset;
2728}
2729
2730/* Implementation of aarch64_insn_visitor method "adr". */
2731
2732static void
2733aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
2734 const int is_adrp, struct aarch64_insn_data *data)
2735{
2736 struct aarch64_displaced_step_data *dsd
2737 = (struct aarch64_displaced_step_data *) data;
2738 /* We know exactly the address the ADR{P,} instruction will compute.
2739 We can just write it to the destination register. */
2740 CORE_ADDR address = data->insn_addr + offset;
2741
2742 if (is_adrp)
2743 {
2744 /* Clear the lower 12 bits of the offset to get the 4K page. */
2745 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2746 address & ~0xfff);
2747 }
2748 else
2749 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2750 address);
2751
2752 dsd->dsc->pc_adjust = 4;
2753 emit_nop (dsd->insn_buf);
2754 dsd->insn_count = 1;
2755}
2756
2757/* Implementation of aarch64_insn_visitor method "ldr_literal". */
2758
2759static void
2760aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
2761 const unsigned rt, const int is64,
2762 struct aarch64_insn_data *data)
2763{
2764 struct aarch64_displaced_step_data *dsd
2765 = (struct aarch64_displaced_step_data *) data;
2766 CORE_ADDR address = data->insn_addr + offset;
2767 struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };
2768
2769 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
2770 address);
2771
2772 if (is_sw)
2773 dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
2774 aarch64_register (rt, 1), zero);
2775 else
2776 dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
2777 aarch64_register (rt, 1), zero);
2778
2779 dsd->dsc->pc_adjust = 4;
2780}
2781
2782/* Implementation of aarch64_insn_visitor method "others". */
2783
2784static void
2785aarch64_displaced_step_others (const uint32_t insn,
2786 struct aarch64_insn_data *data)
2787{
2788 struct aarch64_displaced_step_data *dsd
2789 = (struct aarch64_displaced_step_data *) data;
2790
2791 aarch64_emit_insn (dsd->insn_buf, insn);
2792 dsd->insn_count = 1;
2793
2794 if ((insn & 0xfffffc1f) == 0xd65f0000)
2795 {
2796 /* RET */
2797 dsd->dsc->pc_adjust = 0;
2798 }
2799 else
2800 dsd->dsc->pc_adjust = 4;
2801}
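/* Note the RET special case above: RET takes its new PC from a
   register, so the PC left behind by executing it in the scratch pad
   is already correct and PC_ADJUST is left at zero.  */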
2802
2803static const struct aarch64_insn_visitor visitor =
2804{
2805 aarch64_displaced_step_b,
2806 aarch64_displaced_step_b_cond,
2807 aarch64_displaced_step_cb,
2808 aarch64_displaced_step_tb,
2809 aarch64_displaced_step_adr,
2810 aarch64_displaced_step_ldr_literal,
2811 aarch64_displaced_step_others,
2812};
2813
2814/* Implement the "displaced_step_copy_insn" gdbarch method. */
2815
2816struct displaced_step_closure *
2817aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
2818 CORE_ADDR from, CORE_ADDR to,
2819 struct regcache *regs)
2820{
2821 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2822 uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
2823 struct aarch64_displaced_step_data dsd;
2824 aarch64_inst inst;
2825
2826 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2827 return NULL;
2828
2829 /* Look for a Load Exclusive instruction which begins the sequence. */
2830 if (inst.opcode->iclass == ldstexcl && bit (insn, 22))
2831 {
2832 /* We can't displaced step atomic sequences. */
2833 return NULL;
2834 }
2835
2836 std::unique_ptr<aarch64_displaced_step_closure> dsc
2837 (new aarch64_displaced_step_closure);
2838 dsd.base.insn_addr = from;
2839 dsd.new_addr = to;
2840 dsd.regs = regs;
2841 dsd.dsc = dsc.get ();
2842 dsd.insn_count = 0;
2843 aarch64_relocate_instruction (insn, &visitor,
2844 (struct aarch64_insn_data *) &dsd);
2845 gdb_assert (dsd.insn_count <= DISPLACED_MODIFIED_INSNS);
2846
2847 if (dsd.insn_count != 0)
2848 {
2849 int i;
2850
2851 /* Instruction can be relocated to scratch pad. Copy
2852 relocated instruction(s) there. */
2853 for (i = 0; i < dsd.insn_count; i++)
2854 {
2855 if (debug_displaced)
2856 {
2857 debug_printf ("displaced: writing insn ");
2858 debug_printf ("%.8x", dsd.insn_buf[i]);
2859 debug_printf (" at %s\n", paddress (gdbarch, to + i * 4));
2860 }
2861 write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
2862 (ULONGEST) dsd.insn_buf[i]);
2863 }
2864 }
2865 else
2866 {
2867 dsc = NULL;
2868 }
2869
2870 return dsc.release ();
2871}
2872
2873/* Implement the "displaced_step_fixup" gdbarch method. */
2874
2875void
2876aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
2877 struct displaced_step_closure *dsc_,
2878 CORE_ADDR from, CORE_ADDR to,
2879 struct regcache *regs)
2880{
2881 aarch64_displaced_step_closure *dsc = (aarch64_displaced_step_closure *) dsc_;
2882
2883 if (dsc->cond)
2884 {
2885 ULONGEST pc;
2886
2887 regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc);
2888 if (pc - to == 8)
2889 {
2890 /* Condition is true. */
2891 }
2892 else if (pc - to == 4)
2893 {
2894 /* Condition is false. */
2895 dsc->pc_adjust = 4;
2896 }
2897 else
2898 gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
2899 }
2900
2901 if (dsc->pc_adjust != 0)
2902 {
2903 if (debug_displaced)
2904 {
2905 debug_printf ("displaced: fixup: set PC to %s:%d\n",
2906 paddress (gdbarch, from), dsc->pc_adjust);
2907 }
2908 regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
2909 from + dsc->pc_adjust);
2910 }
2911}
2912
2913/* Implement the "displaced_step_hw_singlestep" gdbarch method. */
2914
2915int
2916aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch,
2917 struct displaced_step_closure *closure)
2918{
2919 return 1;
2920}
2921
2922/* Get the correct target description for the given VQ value.
2923 If VQ is zero then it is assumed SVE is not supported.
2924 (It is not possible to set VQ to zero on an SVE system). */
2925
2926const target_desc *
2927aarch64_read_description (uint64_t vq)
2928{
2929 if (vq > AARCH64_MAX_SVE_VQ)
2930 error (_("VQ is %" PRIu64 ", maximum supported value is %d"), vq,
2931 AARCH64_MAX_SVE_VQ);
2932
2933 struct target_desc *tdesc = tdesc_aarch64_list[vq];
2934
2935 if (tdesc == NULL)
2936 {
2937 tdesc = aarch64_create_target_description (vq);
2938 tdesc_aarch64_list[vq] = tdesc;
2939 }
2940
2941 return tdesc;
2942}
2943
2944/* Return the VQ used when creating the target description TDESC. */
2945
2946static uint64_t
2947aarch64_get_tdesc_vq (const struct target_desc *tdesc)
2948{
2949 const struct tdesc_feature *feature_sve;
2950
2951 if (!tdesc_has_registers (tdesc))
2952 return 0;
2953
2954 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
2955
2956 if (feature_sve == nullptr)
2957 return 0;
2958
2959 uint64_t vl = tdesc_register_bitsize (feature_sve,
2960 aarch64_sve_register_names[0]) / 8;
2961 return sve_vq_from_vl (vl);
2962}
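/* A worked example of the VQ/VL relationship used above: VQ counts
   128-bit quadwords in an SVE vector, so a target whose z0 is 512 bits
   wide has a VL of 64 bytes and therefore a VQ of 4.  */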
2963
2964
2965/* Initialize the current architecture based on INFO. If possible,
2966 re-use an architecture from ARCHES, which is a list of
2967 architectures already created during this debugging session.
2968
2969 Called e.g. at program startup, when reading a core file, and when
2970 reading a binary file. */
2971
2972static struct gdbarch *
2973aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
2974{
2975 struct gdbarch_tdep *tdep;
2976 struct gdbarch *gdbarch;
2977 struct gdbarch_list *best_arch;
2978 struct tdesc_arch_data *tdesc_data = NULL;
2979 const struct target_desc *tdesc = info.target_desc;
2980 int i;
2981 int valid_p = 1;
2982 const struct tdesc_feature *feature_core;
2983 const struct tdesc_feature *feature_fpu;
2984 const struct tdesc_feature *feature_sve;
2985 int num_regs = 0;
2986 int num_pseudo_regs = 0;
2987
2988 /* Ensure we always have a target description. */
2989 if (!tdesc_has_registers (tdesc))
2990 tdesc = aarch64_read_description (0);
2991 gdb_assert (tdesc);
2992
2993 feature_core = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");
2994 feature_fpu = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
2995 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
2996
2997 if (feature_core == NULL)
2998 return NULL;
2999
3000 tdesc_data = tdesc_data_alloc ();
3001
3002 /* Validate the description provides the mandatory core R registers
3003 and allocate their numbers. */
3004 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
3005 valid_p &= tdesc_numbered_register (feature_core, tdesc_data,
3006 AARCH64_X0_REGNUM + i,
3007 aarch64_r_register_names[i]);
3008
3009 num_regs = AARCH64_X0_REGNUM + i;
3010
3011 /* Add the V registers. */
3012 if (feature_fpu != NULL)
07b287a0 3013 {
3014 if (feature_sve != NULL)
3015 error (_("Program contains both fpu and SVE features."));
3016
3017 /* Validate the description provides the mandatory V registers
3018 and allocate their numbers. */
3019 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
3020 valid_p &= tdesc_numbered_register (feature_fpu, tdesc_data,
3021 AARCH64_V0_REGNUM + i,
3022 aarch64_v_register_names[i]);
3023
3024 num_regs = AARCH64_V0_REGNUM + i;
3025 }
3026
3027 /* Add the SVE registers. */
3028 if (feature_sve != NULL)
3029 {
3030 /* Validate the description provides the mandatory SVE registers
3031 and allocate their numbers. */
3032 for (i = 0; i < ARRAY_SIZE (aarch64_sve_register_names); i++)
3033 valid_p &= tdesc_numbered_register (feature_sve, tdesc_data,
3034 AARCH64_SVE_Z0_REGNUM + i,
3035 aarch64_sve_register_names[i]);
3036
3037 num_regs = AARCH64_SVE_Z0_REGNUM + i;
3038 num_pseudo_regs += 32; /* add the Vn register pseudos. */
3039 }
3040
3041 if (feature_fpu != NULL || feature_sve != NULL)
3042 {
3043 num_pseudo_regs += 32; /* add the Qn scalar register pseudos */
3044 num_pseudo_regs += 32; /* add the Dn scalar register pseudos */
3045 num_pseudo_regs += 32; /* add the Sn scalar register pseudos */
3046 num_pseudo_regs += 32; /* add the Hn scalar register pseudos */
3047 num_pseudo_regs += 32; /* add the Bn scalar register pseudos */
3048 }
3049
3050 if (!valid_p)
3051 {
3052 tdesc_data_cleanup (tdesc_data);
3053 return NULL;
3054 }
3055
3056 /* AArch64 code is always little-endian. */
3057 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
3058
3059 /* If there is already a candidate, use it. */
3060 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
3061 best_arch != NULL;
3062 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
3063 {
3064 /* Found a match. */
3065 break;
3066 }
3067
3068 if (best_arch != NULL)
3069 {
3070 if (tdesc_data != NULL)
3071 tdesc_data_cleanup (tdesc_data);
3072 return best_arch->gdbarch;
3073 }
3074
3075 tdep = XCNEW (struct gdbarch_tdep);
3076 gdbarch = gdbarch_alloc (&info, tdep);
3077
3078 /* This should be low enough for everything. */
3079 tdep->lowest_pc = 0x20;
3080 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
3081 tdep->jb_elt_size = 8;
3082 tdep->vq = aarch64_get_tdesc_vq (tdesc);
3083
3084 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
3085 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
3086
3087 /* Frame handling. */
3088 set_gdbarch_dummy_id (gdbarch, aarch64_dummy_id);
3089 set_gdbarch_unwind_pc (gdbarch, aarch64_unwind_pc);
3090 set_gdbarch_unwind_sp (gdbarch, aarch64_unwind_sp);
3091
3092 /* Advance PC across function entry code. */
3093 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
3094
3095 /* The stack grows downward. */
3096 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
3097
3098 /* Breakpoint manipulation. */
3099 set_gdbarch_breakpoint_kind_from_pc (gdbarch,
3100 aarch64_breakpoint::kind_from_pc);
3101 set_gdbarch_sw_breakpoint_from_kind (gdbarch,
3102 aarch64_breakpoint::bp_from_kind);
3103 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
3104 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
3105
3106 /* Information about registers, etc. */
3107 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
3108 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
3109 set_gdbarch_num_regs (gdbarch, num_regs);
3110
3111 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
3112 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
3113 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
3114 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
3115 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
3116 set_tdesc_pseudo_register_reggroup_p (gdbarch,
3117 aarch64_pseudo_register_reggroup_p);
3118
3119 /* ABI */
3120 set_gdbarch_short_bit (gdbarch, 16);
3121 set_gdbarch_int_bit (gdbarch, 32);
3122 set_gdbarch_float_bit (gdbarch, 32);
3123 set_gdbarch_double_bit (gdbarch, 64);
3124 set_gdbarch_long_double_bit (gdbarch, 128);
3125 set_gdbarch_long_bit (gdbarch, 64);
3126 set_gdbarch_long_long_bit (gdbarch, 64);
3127 set_gdbarch_ptr_bit (gdbarch, 64);
3128 set_gdbarch_char_signed (gdbarch, 0);
3129 set_gdbarch_wchar_signed (gdbarch, 0);
3130 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
3131 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
3132 set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);
3133
3134 /* Internal <-> external register number maps. */
3135 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
3136
3137 /* Returning results. */
3138 set_gdbarch_return_value (gdbarch, aarch64_return_value);
3139
3140 /* Disassembly. */
3141 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
3142
3143 /* Virtual tables. */
3144 set_gdbarch_vbit_in_delta (gdbarch, 1);
3145
3146 /* Hook in the ABI-specific overrides, if they have been registered. */
3147 info.target_desc = tdesc;
3148 info.tdesc_data = tdesc_data;
3149 gdbarch_init_osabi (info, gdbarch);
3150
3151 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
3152
3153 /* Add some default predicates. */
3154 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
3155 dwarf2_append_unwinders (gdbarch);
3156 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
3157
3158 frame_base_set_default (gdbarch, &aarch64_normal_base);
3159
3160 /* Now we have tuned the configuration, set a few final things,
3161 based on what the OS ABI has told us. */
3162
3163 if (tdep->jb_pc >= 0)
3164 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
3165
3166 set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);
3167
3168 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
3169
3170 /* Add standard register aliases. */
3171 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
3172 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
3173 value_of_aarch64_user_reg,
3174 &aarch64_register_aliases[i].regnum);
3175
3176 return gdbarch;
3177}
3178
3179static void
3180aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
3181{
3182 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3183
3184 if (tdep == NULL)
3185 return;
3186
3187 fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
3188 paddress (gdbarch, tdep->lowest_pc));
3189}
3190
3191#if GDB_SELF_TEST
3192namespace selftests
3193{
3194static void aarch64_process_record_test (void);
3195}
3196#endif
3197
3198void
3199_initialize_aarch64_tdep (void)
3200{
3201 gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
3202 aarch64_dump_tdep);
3203
3204 /* Debug this file's internals. */
3205 add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
3206Set AArch64 debugging."), _("\
3207Show AArch64 debugging."), _("\
3208When on, AArch64 specific debugging is enabled."),
3209 NULL,
3210 show_aarch64_debug,
3211 &setdebuglist, &showdebuglist);
3212
3213#if GDB_SELF_TEST
3214 selftests::register_test ("aarch64-analyze-prologue",
3215 selftests::aarch64_analyze_prologue_test);
3216 selftests::register_test ("aarch64-process-record",
3217 selftests::aarch64_process_record_test);
3218 selftests::record_xml_tdesc ("aarch64.xml",
3219 aarch64_create_target_description (0));
3220#endif
3221}
3222
3223/* AArch64 process record-replay related structures, defines etc. */
3224
3225#define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
3226 do \
3227 { \
3228 unsigned int reg_len = LENGTH; \
3229 if (reg_len) \
3230 { \
3231 REGS = XNEWVEC (uint32_t, reg_len); \
3232 memcpy(&REGS[0], &RECORD_BUF[0], sizeof(uint32_t)*LENGTH); \
3233 } \
3234 } \
3235 while (0)
3236
3237#define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
3238 do \
3239 { \
3240 unsigned int mem_len = LENGTH; \
3241 if (mem_len) \
3242 { \
3243 MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
3244 memcpy(&MEMS->len, &RECORD_BUF[0], \
3245 sizeof(struct aarch64_mem_r) * LENGTH); \
3246 } \
3247 } \
3248 while (0)
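/* Both macros copy a handler's stack-allocated scratch buffer into a
   freshly XNEWVEC'd array owned by the insn_decode_record, so a record
   handler only has to fill RECORD_BUF and set the counts before handing
   the data over.  */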
3249
3250/* AArch64 record/replay structures and enumerations. */
3251
3252struct aarch64_mem_r
3253{
3254 uint64_t len; /* Record length. */
3255 uint64_t addr; /* Memory address. */
3256};
3257
3258enum aarch64_record_result
3259{
3260 AARCH64_RECORD_SUCCESS,
3261 AARCH64_RECORD_UNSUPPORTED,
3262 AARCH64_RECORD_UNKNOWN
3263};
3264
3265typedef struct insn_decode_record_t
3266{
3267 struct gdbarch *gdbarch;
3268 struct regcache *regcache;
3269 CORE_ADDR this_addr; /* Address of insn to be recorded. */
3270 uint32_t aarch64_insn; /* Insn to be recorded. */
3271 uint32_t mem_rec_count; /* Count of memory records. */
3272 uint32_t reg_rec_count; /* Count of register records. */
3273 uint32_t *aarch64_regs; /* Registers to be recorded. */
3274 struct aarch64_mem_r *aarch64_mems; /* Memory locations to be recorded. */
3275} insn_decode_record;
3276
3277/* Record handler for data processing - register instructions. */
3278
3279static unsigned int
3280aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
3281{
3282 uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
3283 uint32_t record_buf[4];
3284
3285 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3286 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3287 insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);
3288
3289 if (!bit (aarch64_insn_r->aarch64_insn, 28))
3290 {
3291 uint8_t setflags;
3292
3293 /* Logical (shifted register). */
3294 if (insn_bits24_27 == 0x0a)
3295 setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
3296 /* Add/subtract. */
3297 else if (insn_bits24_27 == 0x0b)
3298 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3299 else
3300 return AARCH64_RECORD_UNKNOWN;
3301
3302 record_buf[0] = reg_rd;
3303 aarch64_insn_r->reg_rec_count = 1;
3304 if (setflags)
3305 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3306 }
3307 else
3308 {
3309 if (insn_bits24_27 == 0x0b)
3310 {
3311 /* Data-processing (3 source). */
3312 record_buf[0] = reg_rd;
3313 aarch64_insn_r->reg_rec_count = 1;
3314 }
3315 else if (insn_bits24_27 == 0x0a)
3316 {
3317 if (insn_bits21_23 == 0x00)
3318 {
3319 /* Add/subtract (with carry). */
3320 record_buf[0] = reg_rd;
3321 aarch64_insn_r->reg_rec_count = 1;
3322 if (bit (aarch64_insn_r->aarch64_insn, 29))
3323 {
3324 record_buf[1] = AARCH64_CPSR_REGNUM;
3325 aarch64_insn_r->reg_rec_count = 2;
3326 }
3327 }
3328 else if (insn_bits21_23 == 0x02)
3329 {
3330 /* Conditional compare (register) and conditional compare
3331 (immediate) instructions. */
3332 record_buf[0] = AARCH64_CPSR_REGNUM;
3333 aarch64_insn_r->reg_rec_count = 1;
3334 }
3335 else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
3336 {
3337 /* Conditional select. */
3338 /* Data-processing (2 source). */
3339 /* Data-processing (1 source). */
3340 record_buf[0] = reg_rd;
3341 aarch64_insn_r->reg_rec_count = 1;
3342 }
3343 else
3344 return AARCH64_RECORD_UNKNOWN;
3345 }
3346 }
3347
3348 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3349 record_buf);
3350 return AARCH64_RECORD_SUCCESS;
3351}
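/* For example (illustrative): "adds x0, x1, x2" is an add/subtract with
   the setflags bit set, so the handler above records both x0 and the
   CPSR, while a plain "add x0, x1, x2" records only x0.  */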
3352
3353/* Record handler for data processing - immediate instructions. */
3354
3355static unsigned int
3356aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
3357{
3358 uint8_t reg_rd, insn_bit23, insn_bits24_27, setflags;
3359 uint32_t record_buf[4];
3360
3361 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3362 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3363 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3364
3365 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
3366 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
3367 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
3368 {
3369 record_buf[0] = reg_rd;
3370 aarch64_insn_r->reg_rec_count = 1;
3371 }
3372 else if (insn_bits24_27 == 0x01)
3373 {
3374 /* Add/Subtract (immediate). */
3375 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3376 record_buf[0] = reg_rd;
3377 aarch64_insn_r->reg_rec_count = 1;
3378 if (setflags)
3379 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3380 }
3381 else if (insn_bits24_27 == 0x02 && !insn_bit23)
3382 {
3383 /* Logical (immediate). */
3384 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
3385 record_buf[0] = reg_rd;
3386 aarch64_insn_r->reg_rec_count = 1;
3387 if (setflags)
3388 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3389 }
3390 else
3391 return AARCH64_RECORD_UNKNOWN;
3392
3393 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3394 record_buf);
3395 return AARCH64_RECORD_SUCCESS;
3396}
3397
3398/* Record handler for branch, exception generation and system instructions. */
3399
3400static unsigned int
3401aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
3402{
3403 struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
3404 uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
3405 uint32_t record_buf[4];
3406
3407 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3408 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3409 insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3410
3411 if (insn_bits28_31 == 0x0d)
3412 {
3413 /* Exception generation instructions. */
3414 if (insn_bits24_27 == 0x04)
3415 {
3416 if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
3417 && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
3418 && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
3419 {
3420 ULONGEST svc_number;
3421
3422 regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
3423 &svc_number);
3424 return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
3425 svc_number);
3426 }
3427 else
3428 return AARCH64_RECORD_UNSUPPORTED;
3429 }
3430 /* System instructions. */
3431 else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
3432 {
3433 uint32_t reg_rt, reg_crn;
3434
3435 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3436 reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3437
3438 /* Record rt in case of sysl and mrs instructions. */
3439 if (bit (aarch64_insn_r->aarch64_insn, 21))
3440 {
3441 record_buf[0] = reg_rt;
3442 aarch64_insn_r->reg_rec_count = 1;
3443 }
3444 /* Record cpsr for hint and msr(immediate) instructions. */
3445 else if (reg_crn == 0x02 || reg_crn == 0x04)
3446 {
3447 record_buf[0] = AARCH64_CPSR_REGNUM;
3448 aarch64_insn_r->reg_rec_count = 1;
3449 }
3450 }
3451 /* Unconditional branch (register). */
3452 else if ((insn_bits24_27 & 0x0e) == 0x06)
3453 {
3454 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3455 if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
3456 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3457 }
3458 else
3459 return AARCH64_RECORD_UNKNOWN;
3460 }
3461 /* Unconditional branch (immediate). */
3462 else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
3463 {
3464 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3465 if (bit (aarch64_insn_r->aarch64_insn, 31))
3466 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3467 }
3468 else
3469 /* Compare & branch (immediate), Test & branch (immediate) and
3470 Conditional branch (immediate). */
3471 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3472
3473 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3474 record_buf);
3475 return AARCH64_RECORD_SUCCESS;
3476}
3477
3478/* Record handler for advanced SIMD load and store instructions. */
3479
3480static unsigned int
3481aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
3482{
3483 CORE_ADDR address;
3484 uint64_t addr_offset = 0;
3485 uint32_t record_buf[24];
3486 uint64_t record_buf_mem[24];
3487 uint32_t reg_rn, reg_rt;
3488 uint32_t reg_index = 0, mem_index = 0;
3489 uint8_t opcode_bits, size_bits;
3490
3491 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3492 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3493 size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3494 opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3495 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);
3496
3497 if (record_debug)
3498 debug_printf ("Process record: Advanced SIMD load/store\n");
3499
3500 /* Load/store single structure. */
3501 if (bit (aarch64_insn_r->aarch64_insn, 24))
3502 {
3503 uint8_t sindex, scale, selem, esize, replicate = 0;
3504 scale = opcode_bits >> 2;
3505 selem = ((opcode_bits & 0x02) |
3506 bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
3507 switch (scale)
3508 {
3509 case 1:
3510 if (size_bits & 0x01)
3511 return AARCH64_RECORD_UNKNOWN;
3512 break;
3513 case 2:
3514 if ((size_bits >> 1) & 0x01)
3515 return AARCH64_RECORD_UNKNOWN;
3516 if (size_bits & 0x01)
3517 {
3518 if (!((opcode_bits >> 1) & 0x01))
3519 scale = 3;
3520 else
3521 return AARCH64_RECORD_UNKNOWN;
3522 }
3523 break;
3524 case 3:
3525 if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
3526 {
3527 scale = size_bits;
3528 replicate = 1;
3529 break;
3530 }
3531 else
3532 return AARCH64_RECORD_UNKNOWN;
3533 default:
3534 break;
3535 }
3536 esize = 8 << scale;
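      /* For example (editorial illustration): "ld1 {v0.s}[2], [x0]" has
         opcode_bits 0x8, so scale is 2 and esize is 32; each element
         transferred therefore covers four bytes of memory. */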
      if (replicate)
        for (sindex = 0; sindex < selem; sindex++)
          {
            record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
            reg_rt = (reg_rt + 1) % 32;
          }
      else
        {
          for (sindex = 0; sindex < selem; sindex++)
            {
              if (bit (aarch64_insn_r->aarch64_insn, 22))
                record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
              else
                {
                  record_buf_mem[mem_index++] = esize / 8;
                  record_buf_mem[mem_index++] = address + addr_offset;
                }
              addr_offset = addr_offset + (esize / 8);
              reg_rt = (reg_rt + 1) % 32;
            }
        }
    }
  /* Load/store multiple structure. */
  else
    {
      uint8_t selem, esize, rpt, elements;
      uint8_t eindex, rindex;

      esize = 8 << size_bits;
      if (bit (aarch64_insn_r->aarch64_insn, 30))
        elements = 128 / esize;
      else
        elements = 64 / esize;

      switch (opcode_bits)
        {
        /* LD/ST4 (4 registers). */
        case 0:
          rpt = 1;
          selem = 4;
          break;
        /* LD/ST1 (4 registers). */
        case 2:
          rpt = 4;
          selem = 1;
          break;
        /* LD/ST3 (3 registers). */
        case 4:
          rpt = 1;
          selem = 3;
          break;
        /* LD/ST1 (3 registers). */
        case 6:
          rpt = 3;
          selem = 1;
          break;
        /* LD/ST1 (1 register). */
        case 7:
          rpt = 1;
          selem = 1;
          break;
        /* LD/ST2 (2 registers). */
        case 8:
          rpt = 1;
          selem = 2;
          break;
        /* LD/ST1 (2 registers). */
        case 10:
          rpt = 2;
          selem = 1;
          break;
        default:
          return AARCH64_RECORD_UNSUPPORTED;
        }
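      /* Illustration (editorial): "ld4 {v0.8h-v3.8h}, [x0]" has size_bits
         0x1 and opcode_bits 0, giving esize 16, elements 8, rpt 1 and
         selem 4; the loops below then record v0-v3 for a load, or 64
         bytes of memory for a store. */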
      for (rindex = 0; rindex < rpt; rindex++)
        for (eindex = 0; eindex < elements; eindex++)
          {
            uint8_t reg_tt, sindex;
            reg_tt = (reg_rt + rindex) % 32;
            for (sindex = 0; sindex < selem; sindex++)
              {
                if (bit (aarch64_insn_r->aarch64_insn, 22))
                  record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
                else
                  {
                    record_buf_mem[mem_index++] = esize / 8;
                    record_buf_mem[mem_index++] = address + addr_offset;
                  }
                addr_offset = addr_offset + (esize / 8);
                reg_tt = (reg_tt + 1) % 32;
              }
          }
    }

  if (bit (aarch64_insn_r->aarch64_insn, 23))
    record_buf[reg_index++] = reg_rn;

  aarch64_insn_r->reg_rec_count = reg_index;
  aarch64_insn_r->mem_rec_count = mem_index / 2;
  MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
             record_buf_mem);
  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
             record_buf);
  return AARCH64_RECORD_SUCCESS;
}

/* Record handler for load and store instructions. */

static unsigned int
aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
{
  uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
  uint8_t insn_bit23, insn_bit21;
  uint8_t opc, size_bits, ld_flag, vector_flag;
  uint32_t reg_rn, reg_rt, reg_rt2;
  uint64_t datasize, offset;
  uint32_t record_buf[8];
  uint64_t record_buf_mem[8];
  CORE_ADDR address;

  insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
  insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
  insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
  ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
  vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
  reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
  reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
  size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);

  /* Load/store exclusive. */
  if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
    {
      if (record_debug)
        debug_printf ("Process record: load/store exclusive\n");

      if (ld_flag)
        {
          record_buf[0] = reg_rt;
          aarch64_insn_r->reg_rec_count = 1;
          if (insn_bit21)
            {
              record_buf[1] = reg_rt2;
              aarch64_insn_r->reg_rec_count = 2;
            }
        }
      else
        {
          if (insn_bit21)
            datasize = (8 << size_bits) * 2;
          else
            datasize = (8 << size_bits);
          regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
                                      &address);
          record_buf_mem[0] = datasize / 8;
          record_buf_mem[1] = address;
          aarch64_insn_r->mem_rec_count = 1;
          if (!insn_bit23)
            {
              /* Save register rs. */
              record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
              aarch64_insn_r->reg_rec_count = 1;
            }
        }
    }
  /* Load register (literal) instructions decoding. */
  else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
    {
      if (record_debug)
        debug_printf ("Process record: load register (literal)\n");
      if (vector_flag)
        record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
      else
        record_buf[0] = reg_rt;
      aarch64_insn_r->reg_rec_count = 1;
    }
  /* All types of load/store pair instructions decoding. */
  else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
    {
      if (record_debug)
        debug_printf ("Process record: load/store pair\n");

      if (ld_flag)
        {
          if (vector_flag)
            {
              record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
              record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
            }
          else
            {
              record_buf[0] = reg_rt;
              record_buf[1] = reg_rt2;
            }
          aarch64_insn_r->reg_rec_count = 2;
        }
      else
        {
          uint16_t imm7_off;
          imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
          if (!vector_flag)
            size_bits = size_bits >> 1;
          datasize = 8 << (2 + size_bits);
          offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
          offset = offset << (2 + size_bits);
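          /* Example (editorial): "stp x0, x1, [sp, #-16]!" encodes imm7
             as 0x7e (-2); the two's complement above yields 2, which the
             shift then scales to a byte offset of 16. */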
          regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
                                      &address);
          if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
            {
              if (imm7_off & 0x40)
                address = address - offset;
              else
                address = address + offset;
            }

          record_buf_mem[0] = datasize / 8;
          record_buf_mem[1] = address;
          record_buf_mem[2] = datasize / 8;
          record_buf_mem[3] = address + (datasize / 8);
          aarch64_insn_r->mem_rec_count = 2;
        }
      if (bit (aarch64_insn_r->aarch64_insn, 23))
        record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
    }
  /* Load/store register (unsigned immediate) instructions. */
  else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
    {
      opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
      if (!(opc >> 1))
        {
          if (opc & 0x01)
            ld_flag = 0x01;
          else
            ld_flag = 0x0;
        }
      else
        {
          if (size_bits == 0x3 && vector_flag == 0x0 && opc == 0x2)
            {
              /* PRFM (immediate). */
              return AARCH64_RECORD_SUCCESS;
            }
          else if (size_bits == 0x2 && vector_flag == 0x0 && opc == 0x2)
            {
              /* LDRSW (immediate). */
              ld_flag = 0x1;
            }
          else
            {
              if (opc & 0x01)
                ld_flag = 0x01;
              else
                ld_flag = 0x0;
            }
        }

      if (record_debug)
        {
          debug_printf ("Process record: load/store (unsigned immediate):"
                        " size %x V %d opc %x\n", size_bits, vector_flag,
                        opc);
        }

      if (!ld_flag)
        {
          offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
          datasize = 8 << size_bits;
          regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
                                      &address);
          offset = offset << size_bits;
          address = address + offset;

          record_buf_mem[0] = datasize >> 3;
          record_buf_mem[1] = address;
          aarch64_insn_r->mem_rec_count = 1;
        }
      else
        {
          if (vector_flag)
            record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
          else
            record_buf[0] = reg_rt;
          aarch64_insn_r->reg_rec_count = 1;
        }
    }
  /* Load/store register (register offset) instructions. */
  else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
           && insn_bits10_11 == 0x02 && insn_bit21)
    {
      if (record_debug)
        debug_printf ("Process record: load/store (register offset)\n");
      opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
      if (!(opc >> 1))
        {
          if (opc & 0x01)
            ld_flag = 0x01;
          else
            ld_flag = 0x0;
        }
      else
        {
          if (size_bits != 0x03)
            ld_flag = 0x01;
          else
            return AARCH64_RECORD_UNKNOWN;
        }

      if (!ld_flag)
        {
          ULONGEST reg_rm_val;

          regcache_raw_read_unsigned (aarch64_insn_r->regcache,
                                      bits (aarch64_insn_r->aarch64_insn,
                                            16, 20), &reg_rm_val);
          if (bit (aarch64_insn_r->aarch64_insn, 12))
            offset = reg_rm_val << size_bits;
          else
            offset = reg_rm_val;
          datasize = 8 << size_bits;
          regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
                                      &address);
          address = address + offset;
          record_buf_mem[0] = datasize >> 3;
          record_buf_mem[1] = address;
          aarch64_insn_r->mem_rec_count = 1;
        }
      else
        {
          if (vector_flag)
            record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
          else
            record_buf[0] = reg_rt;
          aarch64_insn_r->reg_rec_count = 1;
        }
    }
  /* Load/store register (immediate and unprivileged) instructions. */
  else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
           && !insn_bit21)
    {
      if (record_debug)
        {
          debug_printf ("Process record: load/store "
                        "(immediate and unprivileged)\n");
        }
      opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
      if (!(opc >> 1))
        {
          if (opc & 0x01)
            ld_flag = 0x01;
          else
            ld_flag = 0x0;
        }
      else
        {
          if (size_bits != 0x03)
            ld_flag = 0x01;
          else
            return AARCH64_RECORD_UNKNOWN;
        }

      if (!ld_flag)
        {
          uint16_t imm9_off;
          imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
          offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
          datasize = 8 << size_bits;
          regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
                                      &address);
          if (insn_bits10_11 != 0x01)
            {
              if (imm9_off & 0x0100)
                address = address - offset;
              else
                address = address + offset;
            }
          record_buf_mem[0] = datasize >> 3;
          record_buf_mem[1] = address;
          aarch64_insn_r->mem_rec_count = 1;
        }
      else
        {
          if (vector_flag)
            record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
          else
            record_buf[0] = reg_rt;
          aarch64_insn_r->reg_rec_count = 1;
        }
      if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
        record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
    }
  /* Advanced SIMD load/store instructions. */
  else
    return aarch64_record_asimd_load_store (aarch64_insn_r);

  MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
             record_buf_mem);
  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
             record_buf);
  return AARCH64_RECORD_SUCCESS;
}

/* Record handler for data processing SIMD and floating point instructions. */

static unsigned int
aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
{
  uint8_t insn_bit21, opcode, rmode, reg_rd;
  uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
  uint8_t insn_bits11_14;
  uint32_t record_buf[2];

  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
  insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
  insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
  insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
  opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
  rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
  reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);

  if (record_debug)
    debug_printf ("Process record: data processing SIMD/FP: ");

  if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
    {
      /* Floating point - fixed point conversion instructions. */
      if (!insn_bit21)
        {
          if (record_debug)
            debug_printf ("FP - fixed point conversion");

          if ((opcode >> 1) == 0x0 && rmode == 0x03)
            record_buf[0] = reg_rd;
          else
            record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
        }
      /* Floating point - conditional compare instructions. */
      else if (insn_bits10_11 == 0x01)
        {
          if (record_debug)
            debug_printf ("FP - conditional compare");

          record_buf[0] = AARCH64_CPSR_REGNUM;
        }
      /* Floating point - data processing (2-source) and
         conditional select instructions. */
      else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
        {
          if (record_debug)
            debug_printf ("FP - DP (2-source)");

          record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
        }
      else if (insn_bits10_11 == 0x00)
        {
          /* Floating point - immediate instructions. */
          if ((insn_bits12_15 & 0x01) == 0x01
              || (insn_bits12_15 & 0x07) == 0x04)
            {
              if (record_debug)
                debug_printf ("FP - immediate");
              record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
            }
          /* Floating point - compare instructions. */
          else if ((insn_bits12_15 & 0x03) == 0x02)
            {
              if (record_debug)
                debug_printf ("FP - compare");
              record_buf[0] = AARCH64_CPSR_REGNUM;
            }
          /* Floating point - integer conversions instructions. */
          else if (insn_bits12_15 == 0x00)
            {
              /* Convert float to integer instruction. */
              if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
                {
                  if (record_debug)
                    debug_printf ("float to int conversion");

                  record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
                }
              /* Convert integer to float instruction. */
              else if ((opcode >> 1) == 0x01 && !rmode)
                {
                  if (record_debug)
                    debug_printf ("int to float conversion");

                  record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
                }
              /* Move float to integer instruction. */
              else if ((opcode >> 1) == 0x03)
                {
                  if (record_debug)
                    debug_printf ("move float to int");

                  if (!(opcode & 0x01))
                    record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
                  else
                    record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
                }
              else
                return AARCH64_RECORD_UNKNOWN;
            }
          else
            return AARCH64_RECORD_UNKNOWN;
        }
      else
        return AARCH64_RECORD_UNKNOWN;
    }
  else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
    {
      if (record_debug)
        debug_printf ("SIMD copy");

      /* Advanced SIMD copy instructions. */
      if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
          && !bit (aarch64_insn_r->aarch64_insn, 15)
          && bit (aarch64_insn_r->aarch64_insn, 10))
        {
          if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
            record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
          else
            record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
        }
      else
        record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
    }
  /* All remaining floating point or advanced SIMD instructions. */
  else
    {
      if (record_debug)
        debug_printf ("all remain");

      record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
    }

  if (record_debug)
    debug_printf ("\n");

  /* Every path above records exactly one destination register. */
  aarch64_insn_r->reg_rec_count++;
  gdb_assert (aarch64_insn_r->reg_rec_count == 1);
  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
             record_buf);
  return AARCH64_RECORD_SUCCESS;
}

/* Decode an instruction's type and invoke the matching record handler. */

static unsigned int
aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
{
  uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;

  ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
  ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
  ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
  ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);

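  /* Bits 25-28 are the top-level encoding-group field ("op0") of the A64
     ISA, so the tests below mirror the main decode table of the Arm ARM:
     op0 = 100x is data processing (immediate), 101x covers branches,
     exceptions and system instructions, x1x0 covers loads and stores,
     x101 is data processing (register) and x111 is SIMD/FP.  (Editorial
     summary; the handlers do the detailed decoding.)  */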
  /* Data processing - immediate instructions. */
  if (!ins_bit26 && !ins_bit27 && ins_bit28)
    return aarch64_record_data_proc_imm (aarch64_insn_r);

  /* Branch, exception generation and system instructions. */
  if (ins_bit26 && !ins_bit27 && ins_bit28)
    return aarch64_record_branch_except_sys (aarch64_insn_r);

  /* Load and store instructions. */
  if (!ins_bit25 && ins_bit27)
    return aarch64_record_load_store (aarch64_insn_r);

  /* Data processing - register instructions. */
  if (ins_bit25 && !ins_bit26 && ins_bit27)
    return aarch64_record_data_proc_reg (aarch64_insn_r);

  /* Data processing - SIMD and floating point instructions. */
  if (ins_bit25 && ins_bit26 && ins_bit27)
    return aarch64_record_data_proc_simd_fp (aarch64_insn_r);

  return AARCH64_RECORD_UNSUPPORTED;
}

/* Cleans up local record registers and memory allocations. */

static void
deallocate_reg_mem (insn_decode_record *record)
{
  xfree (record->aarch64_regs);
  xfree (record->aarch64_mems);
}

#if GDB_SELF_TEST
namespace selftests {

static void
aarch64_process_record_test (void)
{
  struct gdbarch_info info;
  uint32_t ret;

  gdbarch_info_init (&info);
  info.bfd_arch_info = bfd_scan_arch ("aarch64");

  struct gdbarch *gdbarch = gdbarch_find_by_info (info);
  SELF_CHECK (gdbarch != NULL);

  insn_decode_record aarch64_record;

  memset (&aarch64_record, 0, sizeof (insn_decode_record));
  aarch64_record.regcache = NULL;
  aarch64_record.this_addr = 0;
  aarch64_record.gdbarch = gdbarch;

  /* 20 00 80 f9	prfm	pldl1keep, [x1] */
  aarch64_record.aarch64_insn = 0xf9800020;
  ret = aarch64_record_decode_insn_handler (&aarch64_record);
  SELF_CHECK (ret == AARCH64_RECORD_SUCCESS);
  SELF_CHECK (aarch64_record.reg_rec_count == 0);
  SELF_CHECK (aarch64_record.mem_rec_count == 0);
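
  /* Editorial addition, a sketch of a second check: a branch-with-link
     must record both PC and LR and touch no memory.  The encoding is
     "bl" with imm26 = 1, i.e. a branch to the next instruction.  */
  /* 01 00 00 94	bl	0x4 */
  aarch64_record.aarch64_insn = 0x94000001;
  ret = aarch64_record_decode_insn_handler (&aarch64_record);
  SELF_CHECK (ret == AARCH64_RECORD_SUCCESS);
  SELF_CHECK (aarch64_record.reg_rec_count == 2);
  SELF_CHECK (aarch64_record.mem_rec_count == 0);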

  deallocate_reg_mem (&aarch64_record);
}

} // namespace selftests
#endif /* GDB_SELF_TEST */

/* Parse the current instruction and record the values of the registers and
   memory that will be changed by the current instruction to
   record_arch_list.  Return -1 if something is wrong.  */

int
aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
                        CORE_ADDR insn_addr)
{
  uint32_t rec_no = 0;
  uint8_t insn_size = 4;
  uint32_t ret = 0;
  gdb_byte buf[insn_size];
  insn_decode_record aarch64_record;

  memset (&buf[0], 0, insn_size);
  memset (&aarch64_record, 0, sizeof (insn_decode_record));
  target_read_memory (insn_addr, &buf[0], insn_size);
  aarch64_record.aarch64_insn
    = (uint32_t) extract_unsigned_integer (&buf[0],
                                           insn_size,
                                           gdbarch_byte_order (gdbarch));
  aarch64_record.regcache = regcache;
  aarch64_record.this_addr = insn_addr;
  aarch64_record.gdbarch = gdbarch;

  ret = aarch64_record_decode_insn_handler (&aarch64_record);
  if (ret == AARCH64_RECORD_UNSUPPORTED)
    {
      printf_unfiltered (_("Process record does not support instruction "
                           "0x%0x at address %s.\n"),
                         aarch64_record.aarch64_insn,
                         paddress (gdbarch, insn_addr));
      ret = -1;
    }

  if (0 == ret)
    {
      /* Record registers. */
      record_full_arch_list_add_reg (aarch64_record.regcache,
                                     AARCH64_PC_REGNUM);
      /* Always record register CPSR. */
      record_full_arch_list_add_reg (aarch64_record.regcache,
                                     AARCH64_CPSR_REGNUM);
      if (aarch64_record.aarch64_regs)
        for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
          if (record_full_arch_list_add_reg (aarch64_record.regcache,
                                             aarch64_record.aarch64_regs[rec_no]))
            ret = -1;

      /* Record memories. */
      if (aarch64_record.aarch64_mems)
        for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
          if (record_full_arch_list_add_mem
              ((CORE_ADDR) aarch64_record.aarch64_mems[rec_no].addr,
               aarch64_record.aarch64_mems[rec_no].len))
            ret = -1;

      if (record_full_arch_list_add_end ())
        ret = -1;
    }

  deallocate_reg_mem (&aarch64_record);
  return ret;
}