/* Common target dependent code for GDB on AArch64 systems.

   Copyright (C) 2009-2018 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"

#include "frame.h"
#include "inferior.h"
#include "gdbcmd.h"
#include "gdbcore.h"
#include "dis-asm.h"
#include "regcache.h"
#include "reggroups.h"
#include "value.h"
#include "arch-utils.h"
#include "osabi.h"
#include "frame-unwind.h"
#include "frame-base.h"
#include "trad-frame.h"
#include "objfiles.h"
#include "dwarf2-frame.h"
#include "gdbtypes.h"
#include "prologue-value.h"
#include "target-descriptions.h"
#include "user-regs.h"
#include "language.h"
#include "infcall.h"
#include "ax.h"
#include "ax-gdb.h"
#include "selftest.h"

#include "aarch64-tdep.h"

#include "elf-bfd.h"
#include "elf/aarch64.h"

#include "vec.h"

#include "record.h"
#include "record-full.h"
#include "arch/aarch64-insn.h"

#include "opcode/aarch64.h"
#include <algorithm>

#define submask(x) ((1L << ((x) + 1)) - 1)
#define bit(obj,st) (((obj) >> (st)) & 1)
#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
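
/* For instance, bits (insn, 5, 9) extracts the five-bit field occupying
   bits 5 through 9 (inclusive) of INSN, and bit (insn, 31) tests the
   top bit of a 32-bit instruction word.  */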

/* Pseudo register base numbers.  */
#define AARCH64_Q0_REGNUM 0
#define AARCH64_D0_REGNUM (AARCH64_Q0_REGNUM + AARCH64_D_REGISTER_COUNT)
#define AARCH64_S0_REGNUM (AARCH64_D0_REGNUM + 32)
#define AARCH64_H0_REGNUM (AARCH64_S0_REGNUM + 32)
#define AARCH64_B0_REGNUM (AARCH64_H0_REGNUM + 32)
#define AARCH64_SVE_V0_REGNUM (AARCH64_B0_REGNUM + 32)

/* A Homogeneous Floating-Point or Short-Vector Aggregate may have at most
   four members.  */
#define HA_MAX_NUM_FLDS 4

/* All possible aarch64 target descriptors.  */
struct target_desc *tdesc_aarch64_list[AARCH64_MAX_SVE_VQ + 1];

/* The standard register names, and all the valid aliases for them.  */
static const struct
{
  const char *const name;
  int regnum;
} aarch64_register_aliases[] =
{
  /* 64-bit register names.  */
  {"fp", AARCH64_FP_REGNUM},
  {"lr", AARCH64_LR_REGNUM},
  {"sp", AARCH64_SP_REGNUM},

  /* 32-bit register names.  */
  {"w0", AARCH64_X0_REGNUM + 0},
  {"w1", AARCH64_X0_REGNUM + 1},
  {"w2", AARCH64_X0_REGNUM + 2},
  {"w3", AARCH64_X0_REGNUM + 3},
  {"w4", AARCH64_X0_REGNUM + 4},
  {"w5", AARCH64_X0_REGNUM + 5},
  {"w6", AARCH64_X0_REGNUM + 6},
  {"w7", AARCH64_X0_REGNUM + 7},
  {"w8", AARCH64_X0_REGNUM + 8},
  {"w9", AARCH64_X0_REGNUM + 9},
  {"w10", AARCH64_X0_REGNUM + 10},
  {"w11", AARCH64_X0_REGNUM + 11},
  {"w12", AARCH64_X0_REGNUM + 12},
  {"w13", AARCH64_X0_REGNUM + 13},
  {"w14", AARCH64_X0_REGNUM + 14},
  {"w15", AARCH64_X0_REGNUM + 15},
  {"w16", AARCH64_X0_REGNUM + 16},
  {"w17", AARCH64_X0_REGNUM + 17},
  {"w18", AARCH64_X0_REGNUM + 18},
  {"w19", AARCH64_X0_REGNUM + 19},
  {"w20", AARCH64_X0_REGNUM + 20},
  {"w21", AARCH64_X0_REGNUM + 21},
  {"w22", AARCH64_X0_REGNUM + 22},
  {"w23", AARCH64_X0_REGNUM + 23},
  {"w24", AARCH64_X0_REGNUM + 24},
  {"w25", AARCH64_X0_REGNUM + 25},
  {"w26", AARCH64_X0_REGNUM + 26},
  {"w27", AARCH64_X0_REGNUM + 27},
  {"w28", AARCH64_X0_REGNUM + 28},
  {"w29", AARCH64_X0_REGNUM + 29},
  {"w30", AARCH64_X0_REGNUM + 30},

  /* specials */
  {"ip0", AARCH64_X0_REGNUM + 16},
  {"ip1", AARCH64_X0_REGNUM + 17}
};

/* The required core 'R' registers.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM!  */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  "pc", "cpsr"
};

/* The FP/SIMD 'V' registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM!  */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  "fpsr",
  "fpcr"
};

/* The SVE 'Z' and 'P' registers.  */
static const char *const aarch64_sve_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_SVE_Z0_REGNUM!  */
  "z0", "z1", "z2", "z3",
  "z4", "z5", "z6", "z7",
  "z8", "z9", "z10", "z11",
  "z12", "z13", "z14", "z15",
  "z16", "z17", "z18", "z19",
  "z20", "z21", "z22", "z23",
  "z24", "z25", "z26", "z27",
  "z28", "z29", "z30", "z31",
  "fpsr", "fpcr",
  "p0", "p1", "p2", "p3",
  "p4", "p5", "p6", "p7",
  "p8", "p9", "p10", "p11",
  "p12", "p13", "p14", "p15",
  "ffr", "vg"
};

/* AArch64 prologue cache structure.  */
struct aarch64_prologue_cache
{
  /* The program counter at the start of the function.  It is used to
     identify this frame as a prologue frame.  */
  CORE_ADDR func;

  /* The program counter at the time this frame was created; i.e. where
     this function was called from.  It is used to identify this frame as a
     stub frame.  */
  CORE_ADDR prev_pc;

  /* The stack pointer at the time this frame was created; i.e. the
     caller's stack pointer when this function was called.  It is used
     to identify this frame.  */
  CORE_ADDR prev_sp;

  /* Is the target available to read from?  */
  int available_p;

  /* The frame base for this frame is just prev_sp - frame size.
     FRAMESIZE is the distance from the frame pointer to the
     initial stack pointer.  */
  int framesize;

  /* The register used to hold the frame pointer for this frame.  */
  int framereg;

  /* Saved register offsets.  */
  struct trad_frame_saved_reg *saved_regs;
};

static void
show_aarch64_debug (struct ui_file *file, int from_tty,
                    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
}

namespace {

/* Abstract instruction reader.  */

class abstract_instruction_reader
{
public:
  /* Read in one instruction.  */
  virtual ULONGEST read (CORE_ADDR memaddr, int len,
                         enum bfd_endian byte_order) = 0;
};

/* Instruction reader from real target.  */

class instruction_reader : public abstract_instruction_reader
{
public:
  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    override
  {
    return read_code_unsigned_integer (memaddr, len, byte_order);
  }
};

} // namespace
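
/* Reads are routed through this abstraction so that the prologue
   analyzer below can be driven both from live target memory
   (instruction_reader above) and from hand-written instruction arrays
   in the GDB_SELF_TEST code further down.  */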

/* Analyze a prologue, looking for a recognizable stack frame
   and frame pointer.  Scan until we encounter a store that could
   clobber the stack frame unexpectedly, or an unknown instruction.  */
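
/* For instance, given the common frame-establishing sequence

     stp x29, x30, [sp, #-272]!
     mov x29, sp

   the loop below tracks the stack pointer into x29, records x29/x30 as
   saved at offsets -272/-264 from the caller's SP, and the caller ends
   up with framereg == AARCH64_FP_REGNUM and framesize == 272.  This
   exact sequence is exercised by aarch64_analyze_prologue_test below.  */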

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
                          CORE_ADDR start, CORE_ADDR limit,
                          struct aarch64_prologue_cache *cache,
                          abstract_instruction_reader& reader)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  int i;
  /* Track X registers and D registers in prologue.  */
  pv_t regs[AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT];

  for (i = 0; i < AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT; i++)
    regs[i] = pv_register (i, 0);
  pv_area stack (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));

  for (; start < limit; start += 4)
    {
      uint32_t insn;
      aarch64_inst inst;

      insn = reader.read (start, 4, byte_order_for_code);

      if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
        break;

      if (inst.opcode->iclass == addsub_imm
          && (inst.opcode->op == OP_ADD
              || strcmp ("sub", inst.opcode->name) == 0))
        {
          unsigned rd = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].reg.regno;

          gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM);

          if (inst.opcode->op == OP_ADD)
            {
              regs[rd] = pv_add_constant (regs[rn],
                                          inst.operands[2].imm.value);
            }
          else
            {
              regs[rd] = pv_add_constant (regs[rn],
                                          -inst.operands[2].imm.value);
            }
        }
      else if (inst.opcode->iclass == pcreladdr
               && inst.operands[1].type == AARCH64_OPND_ADDR_ADRP)
        {
          gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);

          regs[inst.operands[0].reg.regno] = pv_unknown ();
        }
      else if (inst.opcode->iclass == branch_imm)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == condbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == branch_reg)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == compbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->op == OP_MOVZ)
        {
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
          regs[inst.operands[0].reg.regno] = pv_unknown ();
        }
      else if (inst.opcode->iclass == log_shift
               && strcmp (inst.opcode->name, "orr") == 0)
        {
          unsigned rd = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].reg.regno;
          unsigned rm = inst.operands[2].reg.regno;

          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT);

          if (inst.operands[2].shifter.amount == 0
              && rn == AARCH64_SP_REGNUM)
            regs[rd] = regs[rm];
          else
            {
              if (aarch64_debug)
                {
                  debug_printf ("aarch64: prologue analysis gave up "
                                "addr=%s opcode=0x%x (orr x register)\n",
                                core_addr_to_string_nz (start), insn);
                }
              break;
            }
        }
      else if (inst.opcode->op == OP_STUR)
        {
          unsigned rt = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].addr.base_regno;
          int is64
            = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);

          gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
          gdb_assert (!inst.operands[1].addr.offset.is_reg);

          stack.store (pv_add_constant (regs[rn],
                                        inst.operands[1].addr.offset.imm),
                       is64 ? 8 : 4, regs[rt]);
        }
      else if ((inst.opcode->iclass == ldstpair_off
                || (inst.opcode->iclass == ldstpair_indexed
                    && inst.operands[2].addr.preind))
               && strcmp ("stp", inst.opcode->name) == 0)
        {
          /* STP with addressing mode Pre-indexed and Base register.  */
          unsigned rt1;
          unsigned rt2;
          unsigned rn = inst.operands[2].addr.base_regno;
          int32_t imm = inst.operands[2].addr.offset.imm;

          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
                      || inst.operands[0].type == AARCH64_OPND_Ft);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2
                      || inst.operands[1].type == AARCH64_OPND_Ft2);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7);
          gdb_assert (!inst.operands[2].addr.offset.is_reg);

          /* If recording this store would invalidate the store area
             (perhaps because rn is not known) then we should abandon
             further prologue analysis.  */
          if (stack.store_would_trash (pv_add_constant (regs[rn], imm)))
            break;

          if (stack.store_would_trash (pv_add_constant (regs[rn], imm + 8)))
            break;

          rt1 = inst.operands[0].reg.regno;
          rt2 = inst.operands[1].reg.regno;
          if (inst.operands[0].type == AARCH64_OPND_Ft)
            {
              /* Only bottom 64-bit of each V register (D register) need
                 to be preserved.  */
              gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
              rt1 += AARCH64_X_REGISTER_COUNT;
              rt2 += AARCH64_X_REGISTER_COUNT;
            }

          stack.store (pv_add_constant (regs[rn], imm), 8,
                       regs[rt1]);
          stack.store (pv_add_constant (regs[rn], imm + 8), 8,
                       regs[rt2]);

          if (inst.operands[2].addr.writeback)
            regs[rn] = pv_add_constant (regs[rn], imm);
        }
      else if ((inst.opcode->iclass == ldst_imm9 /* Signed immediate.  */
                || (inst.opcode->iclass == ldst_pos /* Unsigned immediate.  */
                    && (inst.opcode->op == OP_STR_POS
                        || inst.opcode->op == OP_STRF_POS)))
               && inst.operands[1].addr.base_regno == AARCH64_SP_REGNUM
               && strcmp ("str", inst.opcode->name) == 0)
        {
          /* STR (immediate) */
          unsigned int rt = inst.operands[0].reg.regno;
          int32_t imm = inst.operands[1].addr.offset.imm;
          unsigned int rn = inst.operands[1].addr.base_regno;
          bool is64
            = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
                      || inst.operands[0].type == AARCH64_OPND_Ft);

          if (inst.operands[0].type == AARCH64_OPND_Ft)
            {
              /* Only bottom 64-bit of each V register (D register) need
                 to be preserved.  */
              gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
              rt += AARCH64_X_REGISTER_COUNT;
            }

          stack.store (pv_add_constant (regs[rn], imm),
                       is64 ? 8 : 4, regs[rt]);
          if (inst.operands[1].addr.writeback)
            regs[rn] = pv_add_constant (regs[rn], imm);
        }
      else if (inst.opcode->iclass == testbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else
        {
          if (aarch64_debug)
            {
              debug_printf ("aarch64: prologue analysis gave up addr=%s"
                            " opcode=0x%x\n",
                            core_addr_to_string_nz (start), insn);
            }
          break;
        }
    }

  if (cache == NULL)
    return start;

  if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Frame pointer is fp.  Frame size is constant.  */
      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = -regs[AARCH64_FP_REGNUM].k;
    }
  else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Try the stack pointer.  */
      cache->framesize = -regs[AARCH64_SP_REGNUM].k;
      cache->framereg = AARCH64_SP_REGNUM;
    }
  else
    {
      /* We're just out of luck.  We don't know where the frame is.  */
      cache->framereg = -1;
      cache->framesize = 0;
    }

  for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
    {
      CORE_ADDR offset;

      if (stack.find_reg (gdbarch, i, &offset))
        cache->saved_regs[i].addr = offset;
    }

  for (i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
    {
      int regnum = gdbarch_num_regs (gdbarch);
      CORE_ADDR offset;

      if (stack.find_reg (gdbarch, i + AARCH64_X_REGISTER_COUNT,
                          &offset))
        cache->saved_regs[i + regnum + AARCH64_D0_REGNUM].addr = offset;
    }

  return start;
}

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
                          CORE_ADDR start, CORE_ADDR limit,
                          struct aarch64_prologue_cache *cache)
{
  instruction_reader reader;

  return aarch64_analyze_prologue (gdbarch, start, limit, cache,
                                   reader);
}

#if GDB_SELF_TEST

namespace selftests {

/* Instruction reader from manually cooked instruction sequences.  */

class instruction_reader_test : public abstract_instruction_reader
{
public:
  template<size_t SIZE>
  explicit instruction_reader_test (const uint32_t (&insns)[SIZE])
  : m_insns (insns), m_insns_size (SIZE)
  {}

  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    override
  {
    SELF_CHECK (len == 4);
    SELF_CHECK (memaddr % 4 == 0);
    SELF_CHECK (memaddr / 4 < m_insns_size);

    return m_insns[memaddr / 4];
  }

private:
  const uint32_t *m_insns;
  size_t m_insns_size;
};

static void
aarch64_analyze_prologue_test (void)
{
  struct gdbarch_info info;

  gdbarch_info_init (&info);
  info.bfd_arch_info = bfd_scan_arch ("aarch64");

  struct gdbarch *gdbarch = gdbarch_find_by_info (info);
  SELF_CHECK (gdbarch != NULL);

  /* Test the simple prologue in which frame pointer is used.  */
  {
    struct aarch64_prologue_cache cache;
    cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);

    static const uint32_t insns[] = {
      0xa9af7bfd, /* stp     x29, x30, [sp,#-272]! */
      0x910003fd, /* mov     x29, sp */
      0x97ffffe6, /* bl      0x400580 */
    };
    instruction_reader_test reader (insns);

    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
    SELF_CHECK (end == 4 * 2);

    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 272);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
        if (i == AARCH64_FP_REGNUM)
          SELF_CHECK (cache.saved_regs[i].addr == -272);
        else if (i == AARCH64_LR_REGNUM)
          SELF_CHECK (cache.saved_regs[i].addr == -264);
        else
          SELF_CHECK (cache.saved_regs[i].addr == -1);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
        int regnum = gdbarch_num_regs (gdbarch);

        SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
                    == -1);
      }
  }

  /* Test a prologue in which STR is used and frame pointer is not
     used.  */
  {
    struct aarch64_prologue_cache cache;
    cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);

    static const uint32_t insns[] = {
      0xf81d0ff3, /* str     x19, [sp, #-48]! */
      0xb9002fe0, /* str     w0, [sp, #44] */
      0xf90013e1, /* str     x1, [sp, #32] */
      0xfd000fe0, /* str     d0, [sp, #24] */
      0xaa0203f3, /* mov     x19, x2 */
      0xf94013e0, /* ldr     x0, [sp, #32] */
    };
    instruction_reader_test reader (insns);

    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    SELF_CHECK (end == 4 * 5);

    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 48);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
        if (i == 1)
          SELF_CHECK (cache.saved_regs[i].addr == -16);
        else if (i == 19)
          SELF_CHECK (cache.saved_regs[i].addr == -48);
        else
          SELF_CHECK (cache.saved_regs[i].addr == -1);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
        int regnum = gdbarch_num_regs (gdbarch);

        if (i == 0)
          SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
                      == -24);
        else
          SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
                      == -1);
      }
  }
}
} // namespace selftests
#endif /* GDB_SELF_TEST */

/* Implement the "skip_prologue" gdbarch method.  */

static CORE_ADDR
aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  CORE_ADDR func_addr, limit_pc;

  /* See if we can determine the end of the prologue via the symbol
     table.  If so, then return either PC, or the PC after the
     prologue, whichever is greater.  */
  if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
    {
      CORE_ADDR post_prologue_pc
        = skip_prologue_using_sal (gdbarch, func_addr);

      if (post_prologue_pc != 0)
        return std::max (pc, post_prologue_pc);
    }

  /* Can't determine prologue from the symbol table, need to examine
     instructions.  */

  /* Find an upper limit on the function prologue using the debug
     information.  If the debug information could not be used to
     provide that bound, then use an arbitrary large number as the
     upper bound.  */
  limit_pc = skip_prologue_using_sal (gdbarch, pc);
  if (limit_pc == 0)
    limit_pc = pc + 128;  /* Magic.  */

  /* Try disassembling prologue.  */
  return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
}
683/* Scan the function prologue for THIS_FRAME and populate the prologue
684 cache CACHE. */
685
686static void
687aarch64_scan_prologue (struct frame_info *this_frame,
688 struct aarch64_prologue_cache *cache)
689{
690 CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
691 CORE_ADDR prologue_start;
692 CORE_ADDR prologue_end;
693 CORE_ADDR prev_pc = get_frame_pc (this_frame);
694 struct gdbarch *gdbarch = get_frame_arch (this_frame);
695
db634143
PL
696 cache->prev_pc = prev_pc;
697
07b287a0
MS
698 /* Assume we do not find a frame. */
699 cache->framereg = -1;
700 cache->framesize = 0;
701
702 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
703 &prologue_end))
704 {
705 struct symtab_and_line sal = find_pc_line (prologue_start, 0);
706
707 if (sal.line == 0)
708 {
709 /* No line info so use the current PC. */
710 prologue_end = prev_pc;
711 }
712 else if (sal.end < prologue_end)
713 {
714 /* The next line begins after the function end. */
715 prologue_end = sal.end;
716 }
717
325fac50 718 prologue_end = std::min (prologue_end, prev_pc);
07b287a0
MS
719 aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
720 }
721 else
722 {
723 CORE_ADDR frame_loc;
07b287a0
MS
724
725 frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
726 if (frame_loc == 0)
727 return;
728
729 cache->framereg = AARCH64_FP_REGNUM;
730 cache->framesize = 16;
731 cache->saved_regs[29].addr = 0;
732 cache->saved_regs[30].addr = 8;
733 }
734}

/* Fill in *CACHE with information about the prologue of *THIS_FRAME.  This
   function may throw an exception if the inferior's registers or memory is
   not available.  */

static void
aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
                               struct aarch64_prologue_cache *cache)
{
  CORE_ADDR unwound_fp;
  int reg;

  aarch64_scan_prologue (this_frame, cache);

  if (cache->framereg == -1)
    return;

  unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
  if (unwound_fp == 0)
    return;

  cache->prev_sp = unwound_fp + cache->framesize;

  /* Calculate actual addresses of saved registers using offsets
     determined by aarch64_analyze_prologue.  */
  for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
    if (trad_frame_addr_p (cache->saved_regs, reg))
      cache->saved_regs[reg].addr += cache->prev_sp;

  cache->func = get_frame_func (this_frame);

  cache->available_p = 1;
}

/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  TRY
    {
      aarch64_make_prologue_cache_1 (this_frame, cache);
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
        throw_exception (ex);
    }
  END_CATCH

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
                                           void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  /* Halt the backtrace at "_start".  */
  if (cache->prev_pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
    return UNWIND_OUTERMOST;

  /* We've hit a wall, stop.  */
  if (cache->prev_sp == 0)
    return UNWIND_OUTERMOST;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a normal frame is the current function's starting
   PC and the caller's SP when we were called.  */

static void
aarch64_prologue_this_id (struct frame_info *this_frame,
                          void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    *this_id = frame_id_build_unavailable_stack (cache->func);
  else
    *this_id = frame_id_build (cache->prev_sp, cache->func);
}

/* Implement the "prev_register" frame_unwind method.  */

static struct value *
aarch64_prologue_prev_register (struct frame_info *this_frame,
                                void **this_cache, int prev_regnum)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  /* If we are asked to unwind the PC, then we need to return the LR
     instead.  The prologue may save PC, but it will point into this
     frame's prologue, not the next frame's resume location.  */
  if (prev_regnum == AARCH64_PC_REGNUM)
    {
      CORE_ADDR lr;

      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      return frame_unwind_got_constant (this_frame, prev_regnum, lr);
    }

  /* SP is generally not saved to the stack, but this frame is
     identified by the next frame's stack pointer at the time of the
     call.  The value was already reconstructed into PREV_SP.  */
  /*
         +----------+  ^
         | saved lr |  |
      +->| saved fp |--+
      |  |          |
      |  |          |     <- Previous SP
      |  +----------+
      |  | saved lr |
      +--| saved fp |<- FP
         |          |
         |          |<- SP
         +----------+  */
  if (prev_regnum == AARCH64_SP_REGNUM)
    return frame_unwind_got_constant (this_frame, prev_regnum,
                                      cache->prev_sp);

  return trad_frame_get_prev_register (this_frame, cache->saved_regs,
                                       prev_regnum);
}

/* AArch64 prologue unwinder.  */
struct frame_unwind aarch64_prologue_unwind =
{
  NORMAL_FRAME,
  aarch64_prologue_frame_unwind_stop_reason,
  aarch64_prologue_this_id,
  aarch64_prologue_prev_register,
  NULL,
  default_frame_sniffer
};

/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  TRY
    {
      cache->prev_sp = get_frame_register_unsigned (this_frame,
                                                    AARCH64_SP_REGNUM);
      cache->prev_pc = get_frame_pc (this_frame);
      cache->available_p = 1;
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
        throw_exception (ex);
    }
  END_CATCH

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame,
                                       void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a stub frame is the current SP and LR.  */

static void
aarch64_stub_this_id (struct frame_info *this_frame,
                      void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (cache->available_p)
    *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
  else
    *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
}

/* Implement the "sniffer" frame_unwind method.  */

static int
aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
                             struct frame_info *this_frame,
                             void **this_prologue_cache)
{
  CORE_ADDR addr_in_block;
  gdb_byte dummy[4];

  addr_in_block = get_frame_address_in_block (this_frame);
  if (in_plt_section (addr_in_block)
      /* We also use the stub unwinder if the target memory is unreadable
         to avoid having the prologue unwinder trying to read it.  */
      || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
    return 1;

  return 0;
}

/* AArch64 stub unwinder.  */
struct frame_unwind aarch64_stub_unwind =
{
  NORMAL_FRAME,
  aarch64_stub_frame_unwind_stop_reason,
  aarch64_stub_this_id,
  aarch64_prologue_prev_register,
  NULL,
  aarch64_stub_unwind_sniffer
};
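
/* The stub unwinder claims a frame when the PC lies in a PLT stub or in
   unreadable memory (see aarch64_stub_unwind_sniffer above); for such
   frames the caller's SP and PC are taken straight from the register
   state rather than from a scanned prologue.  */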

/* Return the frame base address of *THIS_FRAME.  */

static CORE_ADDR
aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  return cache->prev_sp - cache->framesize;
}

/* AArch64 default frame base information.  */
struct frame_base aarch64_normal_base =
{
  &aarch64_prologue_unwind,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base
};

/* Assuming THIS_FRAME is a dummy, return the frame ID of that
   dummy frame.  The frame ID's base needs to match the TOS value
   saved by save_dummy_frame_tos () and returned from
   aarch64_push_dummy_call, and the PC needs to match the dummy
   frame's breakpoint.  */

static struct frame_id
aarch64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  return frame_id_build (get_frame_register_unsigned (this_frame,
                                                      AARCH64_SP_REGNUM),
                         get_frame_pc (this_frame));
}

/* Implement the "unwind_pc" gdbarch method.  */

static CORE_ADDR
aarch64_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  CORE_ADDR pc
    = frame_unwind_register_unsigned (this_frame, AARCH64_PC_REGNUM);

  return pc;
}

/* Implement the "unwind_sp" gdbarch method.  */

static CORE_ADDR
aarch64_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  return frame_unwind_register_unsigned (this_frame, AARCH64_SP_REGNUM);
}

/* Return the value of the REGNUM register in the previous frame of
   *THIS_FRAME.  */

static struct value *
aarch64_dwarf2_prev_register (struct frame_info *this_frame,
                              void **this_cache, int regnum)
{
  CORE_ADDR lr;

  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      return frame_unwind_got_constant (this_frame, regnum, lr);

    default:
      internal_error (__FILE__, __LINE__,
                      _("Unexpected register %d"), regnum);
    }
}

/* Implement the "init_reg" dwarf2_frame_ops method.  */

static void
aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
                               struct dwarf2_frame_state_reg *reg,
                               struct frame_info *this_frame)
{
  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      reg->how = DWARF2_FRAME_REG_FN;
      reg->loc.fn = aarch64_dwarf2_prev_register;
      break;
    case AARCH64_SP_REGNUM:
      reg->how = DWARF2_FRAME_REG_CFA;
      break;
    }
}

/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

typedef struct
{
  /* Value to pass on stack.  It can be NULL if this item is for stack
     padding.  */
  const gdb_byte *data;

  /* Size in bytes of value to pass on stack.  */
  int len;
} stack_item_t;

DEF_VEC_O (stack_item_t);

/* Return the alignment (in bytes) of the given type.  */

static int
aarch64_type_align (struct type *t)
{
  int n;
  int align;
  int falign;

  t = check_typedef (t);
  switch (TYPE_CODE (t))
    {
    default:
      /* Should never happen.  */
      internal_error (__FILE__, __LINE__, _("unknown type alignment"));
      return 4;

    case TYPE_CODE_PTR:
    case TYPE_CODE_ENUM:
    case TYPE_CODE_INT:
    case TYPE_CODE_FLT:
    case TYPE_CODE_SET:
    case TYPE_CODE_RANGE:
    case TYPE_CODE_BITSTRING:
    case TYPE_CODE_REF:
    case TYPE_CODE_RVALUE_REF:
    case TYPE_CODE_CHAR:
    case TYPE_CODE_BOOL:
      return TYPE_LENGTH (t);

    case TYPE_CODE_ARRAY:
      if (TYPE_VECTOR (t))
        {
          /* Use the natural alignment for vector types (the same for
             scalar type), but the maximum alignment is 128-bit.  */
          if (TYPE_LENGTH (t) > 16)
            return 16;
          else
            return TYPE_LENGTH (t);
        }
      else
        return aarch64_type_align (TYPE_TARGET_TYPE (t));

    case TYPE_CODE_COMPLEX:
      return aarch64_type_align (TYPE_TARGET_TYPE (t));

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      align = 1;
      for (n = 0; n < TYPE_NFIELDS (t); n++)
        {
          falign = aarch64_type_align (TYPE_FIELD_TYPE (t, n));
          if (falign > align)
            align = falign;
        }
      return align;
    }
}
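
/* For example, under these rules "struct { char c; double d; }" aligns
   to 8 bytes (its strictest member), while a 32-byte vector type is
   capped at the 16-byte maximum.  */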

/* Worker function for aapcs_is_vfp_call_or_return_candidate.

   Return the number of registers required, or -1 on failure.

   When encountering a base element, if FUNDAMENTAL_TYPE is not set then set it
   to the element, else fail if the type of this element does not match the
   existing value.  */

static int
aapcs_is_vfp_call_or_return_candidate_1 (struct type *type,
                                         struct type **fundamental_type)
{
  if (type == nullptr)
    return -1;

  switch (TYPE_CODE (type))
    {
    case TYPE_CODE_FLT:
      if (TYPE_LENGTH (type) > 16)
        return -1;

      if (*fundamental_type == nullptr)
        *fundamental_type = type;
      else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
               || TYPE_CODE (type) != TYPE_CODE (*fundamental_type))
        return -1;

      return 1;

    case TYPE_CODE_COMPLEX:
      {
        struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
        if (TYPE_LENGTH (target_type) > 16)
          return -1;

        if (*fundamental_type == nullptr)
          *fundamental_type = target_type;
        else if (TYPE_LENGTH (target_type) != TYPE_LENGTH (*fundamental_type)
                 || TYPE_CODE (target_type) != TYPE_CODE (*fundamental_type))
          return -1;

        return 2;
      }

    case TYPE_CODE_ARRAY:
      {
        if (TYPE_VECTOR (type))
          {
            if (TYPE_LENGTH (type) != 8 && TYPE_LENGTH (type) != 16)
              return -1;

            if (*fundamental_type == nullptr)
              *fundamental_type = type;
            else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
                     || TYPE_CODE (type) != TYPE_CODE (*fundamental_type))
              return -1;

            return 1;
          }
        else
          {
            struct type *target_type = TYPE_TARGET_TYPE (type);
            int count = aapcs_is_vfp_call_or_return_candidate_1
                          (target_type, fundamental_type);

            if (count == -1)
              return count;
1222 count *= TYPE_LENGTH (type);
1223 return count;
1224 }
1225 }
1226
1227 case TYPE_CODE_STRUCT:
1228 case TYPE_CODE_UNION:
1229 {
1230 int count = 0;
1231
1232 for (int i = 0; i < TYPE_NFIELDS (type); i++)
1233 {
1234 struct type *member = check_typedef (TYPE_FIELD_TYPE (type, i));
1235
1236 int sub_count = aapcs_is_vfp_call_or_return_candidate_1
1237 (member, fundamental_type);
1238 if (sub_count == -1)
1239 return -1;
1240 count += sub_count;
1241 }
1242 return count;
1243 }
1244
1245 default:
1246 break;
1247 }
1248
1249 return -1;
1250}
1251
1252/* Return true if an argument, whose type is described by TYPE, can be passed or
1253 returned in simd/fp registers, providing enough parameter passing registers
1254 are available. This is as described in the AAPCS64.
1255
1256 Upon successful return, *COUNT returns the number of needed registers,
1257 *FUNDAMENTAL_TYPE contains the type of those registers.
1258
1259 Candidate as per the AAPCS64 5.4.2.C is either a:
1260 - float.
1261 - short-vector.
1262 - HFA (Homogeneous Floating-point Aggregate, 4.3.5.1). A Composite type where
1263 all the members are floats and has at most 4 members.
1264 - HVA (Homogeneous Short-vector Aggregate, 4.3.5.2). A Composite type where
1265 all the members are short vectors and has at most 4 members.
1266 - Complex (7.1.1)
1267
1268 Note that HFAs and HVAs can include nested structures and arrays. */
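
/* For instance, "struct { float x, y, z; }" is an HFA: every member is a
   float, so *FUNDAMENTAL_TYPE is float and *COUNT is 3, and the value can
   travel in three consecutive V registers.  A struct mixing a float with
   a double, or one containing five floats, is not a candidate.  */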

static bool
aapcs_is_vfp_call_or_return_candidate (struct type *type, int *count,
                                       struct type **fundamental_type)
{
  if (type == nullptr)
    return false;

  *fundamental_type = nullptr;

  int ag_count = aapcs_is_vfp_call_or_return_candidate_1 (type,
                                                          fundamental_type);

  if (ag_count > 0 && ag_count <= HA_MAX_NUM_FLDS)
    {
      *count = ag_count;
      return true;
    }
  else
    return false;
}

/* AArch64 function call information structure.  */
struct aarch64_call_info
{
  /* the current argument number.  */
  unsigned argnum;

  /* The next general purpose register number, equivalent to NGRN as
     described in the AArch64 Procedure Call Standard.  */
  unsigned ngrn;

  /* The next SIMD and floating point register number, equivalent to
     NSRN as described in the AArch64 Procedure Call Standard.  */
  unsigned nsrn;

  /* The next stacked argument address, equivalent to NSAA as
     described in the AArch64 Procedure Call Standard.  */
  unsigned nsaa;

  /* Stack item vector.  */
  VEC(stack_item_t) *si;
};

/* Pass a value in a sequence of consecutive X registers.  The caller
   is responsible for ensuring sufficient registers are available.  */

static void
pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
           struct aarch64_call_info *info, struct type *type,
           struct value *arg)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int len = TYPE_LENGTH (type);
  enum type_code typecode = TYPE_CODE (type);
  int regnum = AARCH64_X0_REGNUM + info->ngrn;
  const bfd_byte *buf = value_contents (arg);

  info->argnum++;

  while (len > 0)
    {
      int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
      CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
                                                   byte_order);

      /* Adjust sub-word struct/union args when big-endian.  */
      if (byte_order == BFD_ENDIAN_BIG
          && partial_len < X_REGISTER_SIZE
          && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
        regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);

      if (aarch64_debug)
        {
          debug_printf ("arg %d in %s = 0x%s\n", info->argnum,
                        gdbarch_register_name (gdbarch, regnum),
                        phex (regval, X_REGISTER_SIZE));
        }
      regcache_cooked_write_unsigned (regcache, regnum, regval);
      len -= partial_len;
      buf += partial_len;
      regnum++;
    }
}
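
/* Note the big-endian adjustment above: a 2-byte struct passed on a
   big-endian target is shifted into the most significant bytes of the
   X register, which is how the PCS lays out sub-word aggregates there.  */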

/* Attempt to marshall a value in a V register.  Return 1 if
   successful, or 0 if insufficient registers are available.  This
   function, unlike the equivalent pass_in_x() function, does not
   handle arguments spread across multiple registers.  */

static int
pass_in_v (struct gdbarch *gdbarch,
           struct regcache *regcache,
           struct aarch64_call_info *info,
           int len, const bfd_byte *buf)
{
  if (info->nsrn < 8)
    {
      int regnum = AARCH64_V0_REGNUM + info->nsrn;
      gdb_byte reg[V_REGISTER_SIZE];

      info->argnum++;
      info->nsrn++;

      memset (reg, 0, sizeof (reg));
      /* PCS C.1, the argument is allocated to the least significant
         bits of V register.  */
      memcpy (reg, buf, len);
      regcache->cooked_write (regnum, reg);

      if (aarch64_debug)
        {
          debug_printf ("arg %d in %s\n", info->argnum,
                        gdbarch_register_name (gdbarch, regnum));
        }
      return 1;
    }
  info->nsrn = 8;
  return 0;
}

/* Marshall an argument onto the stack.  */

static void
pass_on_stack (struct aarch64_call_info *info, struct type *type,
               struct value *arg)
{
  const bfd_byte *buf = value_contents (arg);
  int len = TYPE_LENGTH (type);
  int align;
  stack_item_t item;

  info->argnum++;

  align = aarch64_type_align (type);

  /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
     natural alignment of the argument's type.  */
  align = align_up (align, 8);

  /* The AArch64 PCS requires at most doubleword alignment.  */
  if (align > 16)
    align = 16;

  if (aarch64_debug)
    {
      debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
                    info->nsaa);
    }

  item.len = len;
  item.data = buf;
  VEC_safe_push (stack_item_t, info->si, &item);

  info->nsaa += len;
  if (info->nsaa & (align - 1))
    {
      /* Push stack alignment padding.  */
      int pad = align - (info->nsaa & (align - 1));

      item.len = pad;
      item.data = NULL;

      VEC_safe_push (stack_item_t, info->si, &item);
      info->nsaa += pad;
    }
}
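
/* For example, pushing a 12-byte argument whose type has 8-byte
   alignment leaves NSAA at 12, so a 4-byte padding item is pushed and
   the next stack argument starts at offset 16.  */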

/* Marshall an argument into a sequence of one or more consecutive X
   registers or, if insufficient X registers are available, then onto
   the stack.  */

static void
pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
                    struct aarch64_call_info *info, struct type *type,
                    struct value *arg)
{
  int len = TYPE_LENGTH (type);
  int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;

  /* PCS C.13 - Pass in registers if we have enough spare.  */
  if (info->ngrn + nregs <= 8)
    {
      pass_in_x (gdbarch, regcache, info, type, arg);
      info->ngrn += nregs;
    }
  else
    {
      info->ngrn = 8;
      pass_on_stack (info, type, arg);
    }
}

/* Pass a value, which is of type ARG_TYPE, in a V register.  Assumes the
   value is an aapcs_is_vfp_call_or_return_candidate and that there are
   enough spare V registers.  A return value of false is an error state,
   as the value will have been partially passed to the stack.  */

static bool
pass_in_v_vfp_candidate (struct gdbarch *gdbarch, struct regcache *regcache,
                         struct aarch64_call_info *info, struct type *arg_type,
                         struct value *arg)
{
  switch (TYPE_CODE (arg_type))
    {
    case TYPE_CODE_FLT:
      return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type),
                        value_contents (arg));
      break;

    case TYPE_CODE_COMPLEX:
      {
        const bfd_byte *buf = value_contents (arg);
        struct type *target_type = check_typedef (TYPE_TARGET_TYPE (arg_type));

        if (!pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type),
                        buf))
          return false;

        return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type),
                          buf + TYPE_LENGTH (target_type));
      }

    case TYPE_CODE_ARRAY:
      if (TYPE_VECTOR (arg_type))
        return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type),
                          value_contents (arg));
      /* fall through.  */

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      for (int i = 0; i < TYPE_NFIELDS (arg_type); i++)
        {
          struct value *field = value_primitive_field (arg, 0, i, arg_type);
          struct type *field_type = check_typedef (value_type (field));

          if (!pass_in_v_vfp_candidate (gdbarch, regcache, info, field_type,
                                        field))
            return false;
        }
      return true;

    default:
      return false;
    }
}

/* Implement the "push_dummy_call" gdbarch method.  */

static CORE_ADDR
aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
                         struct regcache *regcache, CORE_ADDR bp_addr,
                         int nargs,
                         struct value **args, CORE_ADDR sp, int struct_return,
                         CORE_ADDR struct_addr)
{
  int argnum;
  struct aarch64_call_info info;
  struct type *func_type;
  struct type *return_type;
  int lang_struct_return;

  memset (&info, 0, sizeof (info));

  /* We need to know what the type of the called function is in order
     to determine the number of named/anonymous arguments for the
     actual argument placement, and the return type in order to handle
     return value correctly.

     The generic code above us views the decision of return in memory
     or return in registers as a two stage process.  The language
     handler is consulted first and may decide to return in memory (eg
     class with copy constructor returned by value), this will cause
     the generic code to allocate space AND insert an initial leading
     argument.

     If the language code does not decide to pass in memory then the
     target code is consulted.

     If the language code decides to pass in memory we want to move
     the pointer inserted as the initial argument from the argument
     list and into X8, the conventional AArch64 struct return pointer
     register.

     This is slightly awkward, ideally the flag "lang_struct_return"
     would be passed to the target's implementation of push_dummy_call.
     Rather than change the target interface we call the language code
     directly ourselves.  */

  func_type = check_typedef (value_type (function));

  /* Dereference function pointer types.  */
  if (TYPE_CODE (func_type) == TYPE_CODE_PTR)
    func_type = TYPE_TARGET_TYPE (func_type);

  gdb_assert (TYPE_CODE (func_type) == TYPE_CODE_FUNC
              || TYPE_CODE (func_type) == TYPE_CODE_METHOD);

  /* If language_pass_by_reference () returned true we will have been
     given an additional initial argument, a hidden pointer to the
     return slot in memory.  */
  return_type = TYPE_TARGET_TYPE (func_type);
  lang_struct_return = language_pass_by_reference (return_type);

  /* Set the return address.  For the AArch64, the return breakpoint
     is always at BP_ADDR.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);

  /* If we were given an initial argument for the return slot because
     lang_struct_return was true, lose it.  */
  if (lang_struct_return)
    {
      args++;
      nargs--;
    }

  /* The struct_return pointer occupies X8.  */
  if (struct_return || lang_struct_return)
    {
      if (aarch64_debug)
        {
          debug_printf ("struct return in %s = 0x%s\n",
                        gdbarch_register_name (gdbarch,
                                               AARCH64_STRUCT_RETURN_REGNUM),
                        paddress (gdbarch, struct_addr));
        }
      regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
                                      struct_addr);
    }

  for (argnum = 0; argnum < nargs; argnum++)
    {
      struct value *arg = args[argnum];
      struct type *arg_type, *fundamental_type;
      int len, elements;

      arg_type = check_typedef (value_type (arg));
      len = TYPE_LENGTH (arg_type);

      /* If arg can be passed in v registers as per the AAPCS64, then do so
         if there are enough spare registers.  */
      if (aapcs_is_vfp_call_or_return_candidate (arg_type, &elements,
                                                 &fundamental_type))
        {
          if (info.nsrn + elements <= 8)
            {
              /* We know that we have sufficient registers available therefore
                 this will never need to fallback to the stack.  */
              if (!pass_in_v_vfp_candidate (gdbarch, regcache, &info, arg_type,
                                            arg))
                gdb_assert_not_reached ("Failed to push args");
            }
          else
            {
              info.nsrn = 8;
              pass_on_stack (&info, arg_type, arg);
            }
          continue;
        }

      switch (TYPE_CODE (arg_type))
        {
        case TYPE_CODE_INT:
        case TYPE_CODE_BOOL:
        case TYPE_CODE_CHAR:
        case TYPE_CODE_RANGE:
        case TYPE_CODE_ENUM:
          if (len < 4)
            {
              /* Promote to 32 bit integer.  */
              if (TYPE_UNSIGNED (arg_type))
                arg_type = builtin_type (gdbarch)->builtin_uint32;
              else
                arg_type = builtin_type (gdbarch)->builtin_int32;
              arg = value_cast (arg_type, arg);
            }
          pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;

        case TYPE_CODE_STRUCT:
        case TYPE_CODE_ARRAY:
        case TYPE_CODE_UNION:
          if (len > 16)
            {
              /* PCS B.7 Aggregates larger than 16 bytes are passed by
                 invisible reference.  */

              /* Allocate aligned storage.  */
              sp = align_down (sp - len, 16);

              /* Write the real data into the stack.  */
              write_memory (sp, value_contents (arg), len);

              /* Construct the indirection.  */
              arg_type = lookup_pointer_type (arg_type);
              arg = value_from_pointer (arg_type, sp);
              pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
            }
          else
            /* PCS C.15 / C.18 multiple values pass.  */
            pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;

        default:
          pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;
        }
    }

  /* Make sure stack retains 16 byte alignment.  */
  if (info.nsaa & 15)
    sp -= 16 - (info.nsaa & 15);

  while (!VEC_empty (stack_item_t, info.si))
    {
      stack_item_t *si = VEC_last (stack_item_t, info.si);

      sp -= si->len;
      if (si->data != NULL)
        write_memory (sp, si->data, si->len);
      VEC_pop (stack_item_t, info.si);
    }

  VEC_free (stack_item_t, info.si);

  /* Finally, update the SP register.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);

  return sp;
}
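
/* As an illustration of the placement rules above (hypothetical call,
   not from the original sources): for "f (int n, double d, struct big s)"
   where s is 32 bytes, n goes in x0 (a char or short would first be
   promoted to a 32-bit int), d is a VFP candidate and goes in v0, and s
   exceeds 16 bytes so it is copied to 16-byte-aligned stack storage and
   the pointer to that copy is passed in x1.  */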

/* Implement the "frame_align" gdbarch method.  */

static CORE_ADDR
aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
{
  /* Align the stack to sixteen bytes.  */
  return sp & ~(CORE_ADDR) 15;
}

/* Return the type for an AdvSISD Q register.  */

static struct type *
aarch64_vnq_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnq_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint128;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int128;
      append_composite_type_field (t, "s", elem);

      tdep->vnq_type = t;
    }

  return tdep->vnq_type;
}

/* Return the type for an AdvSISD D register.  */

static struct type *
aarch64_vnd_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnd_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_double;
      append_composite_type_field (t, "f", elem);

      elem = builtin_type (gdbarch)->builtin_uint64;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int64;
      append_composite_type_field (t, "s", elem);

      tdep->vnd_type = t;
    }

  return tdep->vnd_type;
}

/* Return the type for an AdvSISD S register.  */

static struct type *
aarch64_vns_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vns_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_float;
      append_composite_type_field (t, "f", elem);

      elem = builtin_type (gdbarch)->builtin_uint32;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int32;
      append_composite_type_field (t, "s", elem);

      tdep->vns_type = t;
    }

  return tdep->vns_type;
}

/* Return the type for an AdvSISD H register.  */

static struct type *
aarch64_vnh_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnh_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint16;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int16;
      append_composite_type_field (t, "s", elem);

      tdep->vnh_type = t;
    }

  return tdep->vnh_type;
}

/* Return the type for an AdvSISD B register.  */

static struct type *
aarch64_vnb_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnb_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint8;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int8;
      append_composite_type_field (t, "s", elem);

      tdep->vnb_type = t;
    }

  return tdep->vnb_type;
}
1849
1850/* Return the type for an AdvSIMD V register.  */
1851
1852static struct type *
1853aarch64_vnv_type (struct gdbarch *gdbarch)
1854{
1855 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1856
1857 if (tdep->vnv_type == NULL)
1858 {
1859 struct type *t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnv",
1860 TYPE_CODE_UNION);
1861
1862 append_composite_type_field (t, "d", aarch64_vnd_type (gdbarch));
1863 append_composite_type_field (t, "s", aarch64_vns_type (gdbarch));
1864 append_composite_type_field (t, "h", aarch64_vnh_type (gdbarch));
1865 append_composite_type_field (t, "b", aarch64_vnb_type (gdbarch));
1866 append_composite_type_field (t, "q", aarch64_vnq_type (gdbarch));
1867
1868 tdep->vnv_type = t;
1869 }
1870
1871 return tdep->vnv_type;
1872}
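/* Illustrative note (added, not in the original source): the unions
   built above let a user view one vector register through several
   lenses.  A hypothetical session might look like

     (gdb) print $d0.f     -- low 64 bits of v0 viewed as a double
     (gdb) print $v0.q.u   -- all 128 bits viewed as an unsigned int

   The field names follow the composite types constructed above; the
   commands are an assumed example, not captured GDB output.  */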
1873
1874/* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
1875
1876static int
1877aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
1878{
1879 if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
1880 return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;
1881
1882 if (reg == AARCH64_DWARF_SP)
1883 return AARCH64_SP_REGNUM;
1884
1885 if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
1886 return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;
1887
1888 if (reg == AARCH64_DWARF_SVE_VG)
1889 return AARCH64_SVE_VG_REGNUM;
1890
1891 if (reg == AARCH64_DWARF_SVE_FFR)
1892 return AARCH64_SVE_FFR_REGNUM;
1893
1894 if (reg >= AARCH64_DWARF_SVE_P0 && reg <= AARCH64_DWARF_SVE_P0 + 15)
1895 return AARCH64_SVE_P0_REGNUM + reg - AARCH64_DWARF_SVE_P0;
1896
1897 if (reg >= AARCH64_DWARF_SVE_Z0 && reg <= AARCH64_DWARF_SVE_Z0 + 15)
1898 return AARCH64_SVE_Z0_REGNUM + reg - AARCH64_DWARF_SVE_Z0;
1899
1900 return -1;
1901}
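/* Worked example (added for exposition): DWARF register
   AARCH64_DWARF_X0 + 5 maps to x5, AARCH64_DWARF_SP maps to sp, and
   AARCH64_DWARF_V0 + 31 maps to v31; any number outside the ranges
   handled above returns -1, telling the DWARF reader that GDB does
   not know the register.  */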
1902
1903/* Implement the "print_insn" gdbarch method. */
1904
1905static int
1906aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
1907{
1908 info->symbols = NULL;
6394c606 1909 return default_print_insn (memaddr, info);
1910}
1911
1912/* AArch64 BRK software debug mode instruction.
1913 Note that AArch64 code is always little-endian.
1914 1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000. */
04180708 1915constexpr gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
07b287a0 1916
04180708 1917typedef BP_MANIPULATION (aarch64_default_breakpoint) aarch64_breakpoint;
1918
1919/* Extract from an array REGS containing the (raw) register state a
1920 function return value of type TYPE, and copy that, in virtual
1921 format, into VALBUF. */
1922
1923static void
1924aarch64_extract_return_value (struct type *type, struct regcache *regs,
1925 gdb_byte *valbuf)
1926{
ac7936df 1927 struct gdbarch *gdbarch = regs->arch ();
07b287a0 1928 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1929 int elements;
1930 struct type *fundamental_type;
07b287a0 1931
1932 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
1933 &fundamental_type))
07b287a0 1934 {
1935 int len = TYPE_LENGTH (fundamental_type);
1936
1937 for (int i = 0; i < elements; i++)
1938 {
1939 int regno = AARCH64_V0_REGNUM + i;
1940 bfd_byte buf[V_REGISTER_SIZE];
1941
1942 if (aarch64_debug)
1943 {
1944 debug_printf ("read HFA or HVA return value element %d from %s\n",
1945 i + 1,
1946 gdbarch_register_name (gdbarch, regno));
1947 }
1948 regs->cooked_read (regno, buf);
07b287a0 1949
1950 memcpy (valbuf, buf, len);
1951 valbuf += len;
1952 }
1953 }
1954 else if (TYPE_CODE (type) == TYPE_CODE_INT
1955 || TYPE_CODE (type) == TYPE_CODE_CHAR
1956 || TYPE_CODE (type) == TYPE_CODE_BOOL
1957 || TYPE_CODE (type) == TYPE_CODE_PTR
aa006118 1958 || TYPE_IS_REFERENCE (type)
1959 || TYPE_CODE (type) == TYPE_CODE_ENUM)
1960 {
2131 /* If the type is a plain integer, then the access is
2132 straightforward. Otherwise we have to play around a bit
1963 more. */
1964 int len = TYPE_LENGTH (type);
1965 int regno = AARCH64_X0_REGNUM;
1966 ULONGEST tmp;
1967
1968 while (len > 0)
1969 {
1970 /* By using store_unsigned_integer we avoid having to do
1971 anything special for small big-endian values. */
1972 regcache_cooked_read_unsigned (regs, regno++, &tmp);
1973 store_unsigned_integer (valbuf,
1974 (len > X_REGISTER_SIZE
1975 ? X_REGISTER_SIZE : len), byte_order, tmp);
1976 len -= X_REGISTER_SIZE;
1977 valbuf += X_REGISTER_SIZE;
1978 }
1979 }
1980 else
1981 {
1982 /* For a structure or union the behaviour is as if the value had
1983 been stored to word-aligned memory and then loaded into
1984 registers with 64-bit load instruction(s). */
1985 int len = TYPE_LENGTH (type);
1986 int regno = AARCH64_X0_REGNUM;
1987 bfd_byte buf[X_REGISTER_SIZE];
1988
1989 while (len > 0)
1990 {
dca08e1f 1991 regs->cooked_read (regno++, buf);
1992 memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
1993 len -= X_REGISTER_SIZE;
1994 valbuf += X_REGISTER_SIZE;
1995 }
1996 }
1997}
1998
1999
2000/* Will a function return an aggregate type in memory or in a
2001 register? Return 0 if an aggregate type can be returned in a
2002 register, 1 if it must be returned in memory. */
2003
2004static int
2005aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
2006{
f168693b 2007 type = check_typedef (type);
2008 int elements;
2009 struct type *fundamental_type;
07b287a0 2010
2011 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2012 &fundamental_type))
07b287a0 2013 {
2014 /* v0-v7 are used to return values, and one register is allocated
2015 for each member. However, an HFA or HVA has at most four members. */
2016 return 0;
2017 }
2018
2019 if (TYPE_LENGTH (type) > 16)
2020 {
2021 /* PCS B.6 Aggregates larger than 16 bytes are passed by
2022 invisible reference. */
2023
2024 return 1;
2025 }
2026
2027 return 0;
2028}
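/* Illustrative examples (added; behavior per AAPCS64):

     struct { double x, y, z; }  -- HFA of three doubles, returned in
                                    v0-v2, so this function returns 0.
     struct { uint64_t a, b; }   -- 16 bytes, returned in x0/x1 => 0.
     struct { char buf[24]; }    -- more than 16 bytes and not an
                                    HFA/HVA, returned in memory => 1.  */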
2029
2030/* Write into appropriate registers a function return value of type
2031 TYPE, given in virtual format. */
2032
2033static void
2034aarch64_store_return_value (struct type *type, struct regcache *regs,
2035 const gdb_byte *valbuf)
2036{
ac7936df 2037 struct gdbarch *gdbarch = regs->arch ();
07b287a0 2038 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2039 int elements;
2040 struct type *fundamental_type;
07b287a0 2041
2042 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2043 &fundamental_type))
07b287a0 2044 {
2045 int len = TYPE_LENGTH (fundamental_type);
2046
2047 for (int i = 0; i < elements; i++)
2048 {
2049 int regno = AARCH64_V0_REGNUM + i;
2050 bfd_byte tmpbuf[V_REGISTER_SIZE];
2051
2052 if (aarch64_debug)
2053 {
2054 debug_printf ("write HFA or HVA return value element %d to %s\n",
2055 i + 1,
2056 gdbarch_register_name (gdbarch, regno));
2057 }
07b287a0 2058
2059 memcpy (tmpbuf, valbuf,
2060 len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
2061 regs->cooked_write (regno, tmpbuf);
2062 valbuf += len;
2063 }
2064 }
2065 else if (TYPE_CODE (type) == TYPE_CODE_INT
2066 || TYPE_CODE (type) == TYPE_CODE_CHAR
2067 || TYPE_CODE (type) == TYPE_CODE_BOOL
2068 || TYPE_CODE (type) == TYPE_CODE_PTR
aa006118 2069 || TYPE_IS_REFERENCE (type)
2070 || TYPE_CODE (type) == TYPE_CODE_ENUM)
2071 {
2072 if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
2073 {
2074 /* Values of one word or less are zero/sign-extended and
2075 returned in X0. */
2076 bfd_byte tmpbuf[X_REGISTER_SIZE];
2077 LONGEST val = unpack_long (type, valbuf);
2078
2079 store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
b66f5587 2080 regs->cooked_write (AARCH64_X0_REGNUM, tmpbuf);
2081 }
2082 else
2083 {
2084 /* Integral values greater than one word are stored in
2085 consecutive registers starting with X0. This will always
2086 be a multiple of the register size. */
2087 int len = TYPE_LENGTH (type);
2088 int regno = AARCH64_X0_REGNUM;
2089
2090 while (len > 0)
2091 {
b66f5587 2092 regs->cooked_write (regno++, valbuf);
2093 len -= X_REGISTER_SIZE;
2094 valbuf += X_REGISTER_SIZE;
2095 }
2096 }
2097 }
2098 else
2099 {
2100 /* For a structure or union the behaviour is as if the value had
2101 been stored to word-aligned memory and then loaded into
2102 registers with 64-bit load instruction(s). */
2103 int len = TYPE_LENGTH (type);
2104 int regno = AARCH64_X0_REGNUM;
2105 bfd_byte tmpbuf[X_REGISTER_SIZE];
2106
2107 while (len > 0)
2108 {
2109 memcpy (tmpbuf, valbuf,
2110 len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
b66f5587 2111 regs->cooked_write (regno++, tmpbuf);
2112 len -= X_REGISTER_SIZE;
2113 valbuf += X_REGISTER_SIZE;
2114 }
2115 }
2116}
2117
2118/* Implement the "return_value" gdbarch method. */
2119
2120static enum return_value_convention
2121aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
2122 struct type *valtype, struct regcache *regcache,
2123 gdb_byte *readbuf, const gdb_byte *writebuf)
2124{
2125
2126 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
2127 || TYPE_CODE (valtype) == TYPE_CODE_UNION
2128 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
2129 {
2130 if (aarch64_return_in_memory (gdbarch, valtype))
2131 {
2132 if (aarch64_debug)
b277c936 2133 debug_printf ("return value in memory\n");
2134 return RETURN_VALUE_STRUCT_CONVENTION;
2135 }
2136 }
2137
2138 if (writebuf)
2139 aarch64_store_return_value (valtype, regcache, writebuf);
2140
2141 if (readbuf)
2142 aarch64_extract_return_value (valtype, regcache, readbuf);
2143
2144 if (aarch64_debug)
b277c936 2145 debug_printf ("return value in registers\n");
2146
2147 return RETURN_VALUE_REGISTER_CONVENTION;
2148}
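/* Illustrative note (added): for a C function returning
   struct { float a, b; }, the struct is an HFA of two floats, so the
   code above reports the register convention and the extract/store
   helpers move one member through each of v0 and v1.  */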
2149
2150/* Implement the "get_longjmp_target" gdbarch method. */
2151
2152static int
2153aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2154{
2155 CORE_ADDR jb_addr;
2156 gdb_byte buf[X_REGISTER_SIZE];
2157 struct gdbarch *gdbarch = get_frame_arch (frame);
2158 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2159 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2160
2161 jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
2162
2163 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
2164 X_REGISTER_SIZE))
2165 return 0;
2166
2167 *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
2168 return 1;
2169}
2170
2171/* Implement the "gen_return_address" gdbarch method. */
2172
2173static void
2174aarch64_gen_return_address (struct gdbarch *gdbarch,
2175 struct agent_expr *ax, struct axs_value *value,
2176 CORE_ADDR scope)
2177{
2178 value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
2179 value->kind = axs_lvalue_register;
2180 value->u.reg = AARCH64_LR_REGNUM;
2181}
2182\f
2183
2184/* Return the pseudo register name corresponding to register regnum. */
2185
2186static const char *
2187aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
2188{
2189 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2190
2191 static const char *const q_name[] =
2192 {
2193 "q0", "q1", "q2", "q3",
2194 "q4", "q5", "q6", "q7",
2195 "q8", "q9", "q10", "q11",
2196 "q12", "q13", "q14", "q15",
2197 "q16", "q17", "q18", "q19",
2198 "q20", "q21", "q22", "q23",
2199 "q24", "q25", "q26", "q27",
2200 "q28", "q29", "q30", "q31",
2201 };
2202
2203 static const char *const d_name[] =
2204 {
2205 "d0", "d1", "d2", "d3",
2206 "d4", "d5", "d6", "d7",
2207 "d8", "d9", "d10", "d11",
2208 "d12", "d13", "d14", "d15",
2209 "d16", "d17", "d18", "d19",
2210 "d20", "d21", "d22", "d23",
2211 "d24", "d25", "d26", "d27",
2212 "d28", "d29", "d30", "d31",
2213 };
2214
2215 static const char *const s_name[] =
2216 {
2217 "s0", "s1", "s2", "s3",
2218 "s4", "s5", "s6", "s7",
2219 "s8", "s9", "s10", "s11",
2220 "s12", "s13", "s14", "s15",
2221 "s16", "s17", "s18", "s19",
2222 "s20", "s21", "s22", "s23",
2223 "s24", "s25", "s26", "s27",
2224 "s28", "s29", "s30", "s31",
2225 };
2226
2227 static const char *const h_name[] =
2228 {
2229 "h0", "h1", "h2", "h3",
2230 "h4", "h5", "h6", "h7",
2231 "h8", "h9", "h10", "h11",
2232 "h12", "h13", "h14", "h15",
2233 "h16", "h17", "h18", "h19",
2234 "h20", "h21", "h22", "h23",
2235 "h24", "h25", "h26", "h27",
2236 "h28", "h29", "h30", "h31",
2237 };
2238
2239 static const char *const b_name[] =
2240 {
2241 "b0", "b1", "b2", "b3",
2242 "b4", "b5", "b6", "b7",
2243 "b8", "b9", "b10", "b11",
2244 "b12", "b13", "b14", "b15",
2245 "b16", "b17", "b18", "b19",
2246 "b20", "b21", "b22", "b23",
2247 "b24", "b25", "b26", "b27",
2248 "b28", "b29", "b30", "b31",
2249 };
2250
2251 regnum -= gdbarch_num_regs (gdbarch);
2252
2253 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2254 return q_name[regnum - AARCH64_Q0_REGNUM];
2255
2256 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2257 return d_name[regnum - AARCH64_D0_REGNUM];
2258
2259 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2260 return s_name[regnum - AARCH64_S0_REGNUM];
2261
2262 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2263 return h_name[regnum - AARCH64_H0_REGNUM];
2264
2265 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2266 return b_name[regnum - AARCH64_B0_REGNUM];
2267
2268 if (tdep->has_sve ())
2269 {
2270 static const char *const sve_v_name[] =
2271 {
2272 "v0", "v1", "v2", "v3",
2273 "v4", "v5", "v6", "v7",
2274 "v8", "v9", "v10", "v11",
2275 "v12", "v13", "v14", "v15",
2276 "v16", "v17", "v18", "v19",
2277 "v20", "v21", "v22", "v23",
2278 "v24", "v25", "v26", "v27",
2279 "v28", "v29", "v30", "v31",
2280 };
2281
2282 if (regnum >= AARCH64_SVE_V0_REGNUM
2283 && regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2284 return sve_v_name[regnum - AARCH64_SVE_V0_REGNUM];
2285 }
2286
2287 internal_error (__FILE__, __LINE__,
2288 _("aarch64_pseudo_register_name: bad register number %d"),
2289 regnum);
2290}
2291
2292/* Implement the "pseudo_register_type" tdesc_arch_data method. */
2293
2294static struct type *
2295aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
2296{
2297 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2298
2299 regnum -= gdbarch_num_regs (gdbarch);
2300
2301 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2302 return aarch64_vnq_type (gdbarch);
2303
2304 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2305 return aarch64_vnd_type (gdbarch);
2306
2307 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2308 return aarch64_vns_type (gdbarch);
2309
2310 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2311 return aarch64_vnh_type (gdbarch);
2312
2313 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2314 return aarch64_vnb_type (gdbarch);
2315
2316 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2317 && regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2318 return aarch64_vnv_type (gdbarch);
2319
2320 internal_error (__FILE__, __LINE__,
2321 _("aarch64_pseudo_register_type: bad register number %d"),
2322 regnum);
2323}
2324
2325/* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2326
2327static int
2328aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
2329 struct reggroup *group)
2330{
2331 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2332
2333 regnum -= gdbarch_num_regs (gdbarch);
2334
2335 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2336 return group == all_reggroup || group == vector_reggroup;
2337 else if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2338 return (group == all_reggroup || group == vector_reggroup
2339 || group == float_reggroup);
2340 else if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2341 return (group == all_reggroup || group == vector_reggroup
2342 || group == float_reggroup);
2343 else if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2344 return group == all_reggroup || group == vector_reggroup;
2345 else if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2346 return group == all_reggroup || group == vector_reggroup;
2347 else if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2348 && regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2349 return group == all_reggroup || group == vector_reggroup;
2350
2351 return group == all_reggroup;
2352}
2353
2354/* Helper for aarch64_pseudo_read_value. */
2355
2356static struct value *
2357aarch64_pseudo_read_value_1 (struct gdbarch *gdbarch,
2358 readable_regcache *regcache, int regnum_offset,
2359 int regsize, struct value *result_value)
2360{
2361 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
2362
2363 /* Enough space for a full vector register. */
2364 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2365 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2366
2367 if (regcache->raw_read (v_regnum, reg_buf) != REG_VALID)
2368 mark_value_bytes_unavailable (result_value, 0,
2369 TYPE_LENGTH (value_type (result_value)));
2370 else
2371 memcpy (value_contents_raw (result_value), reg_buf, regsize);
63bad7b6 2372
2373 return result_value;
2374 }
2375
2376/* Implement the "pseudo_register_read_value" gdbarch method. */
2377
2378static struct value *
3c5cd5c3 2379aarch64_pseudo_read_value (struct gdbarch *gdbarch, readable_regcache *regcache,
2380 int regnum)
2381{
63bad7b6 2382 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3c5cd5c3 2383 struct value *result_value = allocate_value (register_type (gdbarch, regnum));
07b287a0 2384
2385 VALUE_LVAL (result_value) = lval_register;
2386 VALUE_REGNUM (result_value) = regnum;
2387
2388 regnum -= gdbarch_num_regs (gdbarch);
2389
2390 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2391 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2392 regnum - AARCH64_Q0_REGNUM,
3c5cd5c3 2393 Q_REGISTER_SIZE, result_value);
2394
2395 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2396 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2397 regnum - AARCH64_D0_REGNUM,
3c5cd5c3 2398 D_REGISTER_SIZE, result_value);
2399
2400 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2401 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2402 regnum - AARCH64_S0_REGNUM,
3c5cd5c3 2403 S_REGISTER_SIZE, result_value);
2404
2405 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2406 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2407 regnum - AARCH64_H0_REGNUM,
3c5cd5c3 2408 H_REGISTER_SIZE, result_value);
2409
2410 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2411 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2412 regnum - AARCH64_B0_REGNUM,
3c5cd5c3 2413 B_REGISTER_SIZE, result_value);
07b287a0 2414
2415 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2416 && regnum < AARCH64_SVE_V0_REGNUM + 32)
2417 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2418 regnum - AARCH64_SVE_V0_REGNUM,
2419 V_REGISTER_SIZE, result_value);
2420
2421 gdb_assert_not_reached ("regnum out of bounds");
2422}
2423
3c5cd5c3 2424/* Helper for aarch64_pseudo_write. */
2425
2426static void
2427aarch64_pseudo_write_1 (struct gdbarch *gdbarch, struct regcache *regcache,
2428 int regnum_offset, int regsize, const gdb_byte *buf)
07b287a0 2429{
3c5cd5c3 2430 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
07b287a0 2431
2432 /* Enough space for a full vector register. */
2433 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2434 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2435
2436 /* Ensure the register buffer is zero. We want gdb writes of the
2437 various 'scalar' pseudo registers to behave like architectural
2438 writes: register width bytes are written, the remainder are set
2439 to zero. */
63bad7b6 2440 memset (reg_buf, 0, register_size (gdbarch, AARCH64_V0_REGNUM));
07b287a0 2441
2442 memcpy (reg_buf, buf, regsize);
2443 regcache->raw_write (v_regnum, reg_buf);
2444}
2445
2446/* Implement the "pseudo_register_write" gdbarch method. */
2447
2448static void
2449aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2450 int regnum, const gdb_byte *buf)
2451{
63bad7b6 2452 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2453 regnum -= gdbarch_num_regs (gdbarch);
2454
2455 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2456 return aarch64_pseudo_write_1 (gdbarch, regcache,
2457 regnum - AARCH64_Q0_REGNUM, Q_REGISTER_SIZE,
2458 buf);
2459
2460 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2461 return aarch64_pseudo_write_1 (gdbarch, regcache,
2462 regnum - AARCH64_D0_REGNUM, D_REGISTER_SIZE,
2463 buf);
2464
2465 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2466 return aarch64_pseudo_write_1 (gdbarch, regcache,
2467 regnum - AARCH64_S0_REGNUM, S_REGISTER_SIZE,
2468 buf);
2469
2470 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2471 return aarch64_pseudo_write_1 (gdbarch, regcache,
2472 regnum - AARCH64_H0_REGNUM, H_REGISTER_SIZE,
2473 buf);
2474
2475 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2476 return aarch64_pseudo_write_1 (gdbarch, regcache,
2477 regnum - AARCH64_B0_REGNUM, B_REGISTER_SIZE,
2478 buf);
2479
2480 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2481 && regnum < AARCH64_SVE_V0_REGNUM + 32)
2482 return aarch64_pseudo_write_1 (gdbarch, regcache,
2483 regnum - AARCH64_SVE_V0_REGNUM,
2484 V_REGISTER_SIZE, buf);
2485
2486 gdb_assert_not_reached ("regnum out of bounds");
2487}
2488
2489/* Callback function for user_reg_add. */
2490
2491static struct value *
2492value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
2493{
9a3c8263 2494 const int *reg_p = (const int *) baton;
2495
2496 return value_of_register (*reg_p, frame);
2497}
2498\f
2499
2500/* Implement the "software_single_step" gdbarch method, needed to
2501 single step through atomic sequences on AArch64. */
2502
a0ff9e1a 2503static std::vector<CORE_ADDR>
f5ea389a 2504aarch64_software_single_step (struct regcache *regcache)
9404b58f 2505{
ac7936df 2506 struct gdbarch *gdbarch = regcache->arch ();
2507 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2508 const int insn_size = 4;
2509 const int atomic_sequence_length = 16; /* Instruction sequence length. */
0187a92f 2510 CORE_ADDR pc = regcache_read_pc (regcache);
70ab8ccd 2511 CORE_ADDR breaks[2] = { CORE_ADDR_MAX, CORE_ADDR_MAX };
2512 CORE_ADDR loc = pc;
2513 CORE_ADDR closing_insn = 0;
2514 uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
2515 byte_order_for_code);
2516 int index;
2517 int insn_count;
2518 int bc_insn_count = 0; /* Conditional branch instruction count. */
2519 int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed). */
2520 aarch64_inst inst;
2521
561a72d4 2522 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
a0ff9e1a 2523 return {};
2524
2525 /* Look for a Load Exclusive instruction which begins the sequence. */
f77ee802 2526 if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
a0ff9e1a 2527 return {};
2528
2529 for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
2530 {
2531 loc += insn_size;
2532 insn = read_memory_unsigned_integer (loc, insn_size,
2533 byte_order_for_code);
2534
561a72d4 2535 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
a0ff9e1a 2536 return {};
9404b58f 2537 /* Check if the instruction is a conditional branch. */
f77ee802 2538 if (inst.opcode->iclass == condbranch)
9404b58f 2539 {
2540 gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);
2541
9404b58f 2542 if (bc_insn_count >= 1)
a0ff9e1a 2543 return {};
2544
2545 /* It is, so we'll try to set a breakpoint at the destination. */
f77ee802 2546 breaks[1] = loc + inst.operands[0].imm.value;
9404b58f
KM
2547
2548 bc_insn_count++;
2549 last_breakpoint++;
2550 }
2551
2552 /* Look for the Store Exclusive which closes the atomic sequence. */
f77ee802 2553 if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
2554 {
2555 closing_insn = loc;
2556 break;
2557 }
2558 }
2559
2560 /* We didn't find a closing Store Exclusive instruction, fall back. */
2561 if (!closing_insn)
a0ff9e1a 2562 return {};
2563
2564 /* Insert breakpoint after the end of the atomic sequence. */
2565 breaks[0] = loc + insn_size;
2566
2567 /* Check for duplicated breakpoints, and also check that the second
2568 breakpoint is not within the atomic sequence. */
2569 if (last_breakpoint
2570 && (breaks[1] == breaks[0]
2571 || (breaks[1] >= pc && breaks[1] <= closing_insn)))
2572 last_breakpoint = 0;
2573
2574 std::vector<CORE_ADDR> next_pcs;
2575
2576 /* Insert the breakpoint at the end of the sequence, and one at the
2577 destination of the conditional branch, if it exists. */
2578 for (index = 0; index <= last_breakpoint; index++)
a0ff9e1a 2579 next_pcs.push_back (breaks[index]);
9404b58f 2580
93f9a11f 2581 return next_pcs;
2582}
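/* Illustrative example (added) of the kind of sequence the code above
   handles; the guest code is hypothetical:

     again:
       ldaxr  x1, [x0]        ; Load Exclusive opens the sequence.
       cmp    x1, x2
       b.ne   out             ; breaks[1] is set to `out'.
       stlxr  w3, x4, [x0]    ; Store Exclusive closes the sequence.
     out:                     ; breaks[0] is the insn after the STLXR.

   Single-stepping each instruction would clear the exclusive monitor,
   so GDB instead continues to a breakpoint placed beyond the Store
   Exclusive (and at the conditional branch target, if one was seen).  */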
2583
cfba9872 2584struct aarch64_displaced_step_closure : public displaced_step_closure
2585{
2586 /* It is true when a conditional instruction, such as B.COND or TBZ,
2587 is being displaced stepped. */
cfba9872 2588 int cond = 0;
2589
2590 /* PC adjustment offset after displaced stepping. */
cfba9872 2591 int32_t pc_adjust = 0;
2592};
2593
2594/* Data when visiting instructions for displaced stepping. */
2595
2596struct aarch64_displaced_step_data
2597{
2598 struct aarch64_insn_data base;
2599
2600 /* The address where the instruction will be executed. */
2601 CORE_ADDR new_addr;
2602 /* Buffer of instructions to be copied to NEW_ADDR to execute. */
2603 uint32_t insn_buf[DISPLACED_MODIFIED_INSNS];
2604 /* Number of instructions in INSN_BUF. */
2605 unsigned insn_count;
2606 /* Registers when doing displaced stepping. */
2607 struct regcache *regs;
2608
cfba9872 2609 aarch64_displaced_step_closure *dsc;
2610};
2611
2612/* Implementation of aarch64_insn_visitor method "b". */
2613
2614static void
2615aarch64_displaced_step_b (const int is_bl, const int32_t offset,
2616 struct aarch64_insn_data *data)
2617{
2618 struct aarch64_displaced_step_data *dsd
2619 = (struct aarch64_displaced_step_data *) data;
2ac09a5b 2620 int64_t new_offset = data->insn_addr - dsd->new_addr + offset;
2621
2622 if (can_encode_int32 (new_offset, 28))
2623 {
2624 /* Emit B rather than BL, because executing BL on a new address
2625 will get the wrong address into LR. In order to avoid this,
2626 we emit B, and update LR if the instruction is BL. */
2627 emit_b (dsd->insn_buf, 0, new_offset);
2628 dsd->insn_count++;
2629 }
2630 else
2631 {
2632 /* Write NOP. */
2633 emit_nop (dsd->insn_buf);
2634 dsd->insn_count++;
2635 dsd->dsc->pc_adjust = offset;
2636 }
2637
2638 if (is_bl)
2639 {
2640 /* Update LR. */
2641 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
2642 data->insn_addr + 4);
2643 }
2644}
2645
2646/* Implementation of aarch64_insn_visitor method "b_cond". */
2647
2648static void
2649aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
2650 struct aarch64_insn_data *data)
2651{
2652 struct aarch64_displaced_step_data *dsd
2653 = (struct aarch64_displaced_step_data *) data;
2654
2655 /* GDB has to fix up the PC after displaced stepping this instruction
2656 differently, according to whether the condition is true or false.
2657 Instead of checking COND against the condition flags, we can use
2658 the following instruction sequence, and GDB can tell how to fix up
2659 the PC from the resulting PC value.
2660
2661 B.COND TAKEN ; If cond is true, then jump to TAKEN.
2662 INSN1 ;
2663 TAKEN:
2664 INSN2
2665 */
2666
2667 emit_bcond (dsd->insn_buf, cond, 8);
2668 dsd->dsc->cond = 1;
2669 dsd->dsc->pc_adjust = offset;
2670 dsd->insn_count = 1;
2671}
2672
2673/* Dynamically allocate a new register. If we know the register
2674 statically, we should make it a global as above instead of using this
2675 helper function. */
2676
2677static struct aarch64_register
2678aarch64_register (unsigned num, int is64)
2679{
2680 return (struct aarch64_register) { num, is64 };
2681}
2682
2683/* Implementation of aarch64_insn_visitor method "cb". */
2684
2685static void
2686aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
2687 const unsigned rn, int is64,
2688 struct aarch64_insn_data *data)
2689{
2690 struct aarch64_displaced_step_data *dsd
2691 = (struct aarch64_displaced_step_data *) data;
2692
2693 /* The offset is out of range for a compare and branch
2694 instruction. We can use the following instructions instead:
2695
2696 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
2697 INSN1 ;
2698 TAKEN:
2699 INSN2
2700 */
2701 emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
2702 dsd->insn_count = 1;
2703 dsd->dsc->cond = 1;
2704 dsd->dsc->pc_adjust = offset;
2705}
2706
2707/* Implementation of aarch64_insn_visitor method "tb". */
2708
2709static void
2710aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
2711 const unsigned rt, unsigned bit,
2712 struct aarch64_insn_data *data)
2713{
2714 struct aarch64_displaced_step_data *dsd
2715 = (struct aarch64_displaced_step_data *) data;
2716
2717 /* The offset is out of range for a test bit and branch
2718 instruction. We can use the following instructions instead:
2719
2720 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
2721 INSN1 ;
2722 TAKEN:
2723 INSN2
2724
2725 */
2726 emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
2727 dsd->insn_count = 1;
2728 dsd->dsc->cond = 1;
2729 dsd->dsc->pc_adjust = offset;
2730}
2731
2732/* Implementation of aarch64_insn_visitor method "adr". */
2733
2734static void
2735aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
2736 const int is_adrp, struct aarch64_insn_data *data)
2737{
2738 struct aarch64_displaced_step_data *dsd
2739 = (struct aarch64_displaced_step_data *) data;
2740 /* We know exactly the address the ADR{P,} instruction will compute.
2741 We can just write it to the destination register. */
2742 CORE_ADDR address = data->insn_addr + offset;
2743
2744 if (is_adrp)
2745 {
2746 /* Clear the lower 12 bits of the offset to get the 4K page. */
2747 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2748 address & ~0xfff);
2749 }
2750 else
2751 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2752 address);
2753
2754 dsd->dsc->pc_adjust = 4;
2755 emit_nop (dsd->insn_buf);
2756 dsd->insn_count = 1;
2757}
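/* Worked example (added): for a hypothetical `adrp x3, sym' at
   original address 0x400abc whose decoded offset is 0x3000, the code
   above writes (0x400abc + 0x3000) & ~0xfff = 0x403000 directly to
   x3 and runs a NOP in the scratch pad, matching what the instruction
   would have produced at its original location.  */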
2758
2759/* Implementation of aarch64_insn_visitor method "ldr_literal". */
2760
2761static void
2762aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
2763 const unsigned rt, const int is64,
2764 struct aarch64_insn_data *data)
2765{
2766 struct aarch64_displaced_step_data *dsd
2767 = (struct aarch64_displaced_step_data *) data;
2768 CORE_ADDR address = data->insn_addr + offset;
2769 struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };
2770
2771 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
2772 address);
2773
2774 if (is_sw)
2775 dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
2776 aarch64_register (rt, 1), zero);
2777 else
2778 dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
2779 aarch64_register (rt, 1), zero);
2780
2781 dsd->dsc->pc_adjust = 4;
2782}
2783
2784/* Implementation of aarch64_insn_visitor method "others". */
2785
2786static void
2787aarch64_displaced_step_others (const uint32_t insn,
2788 struct aarch64_insn_data *data)
2789{
2790 struct aarch64_displaced_step_data *dsd
2791 = (struct aarch64_displaced_step_data *) data;
2792
e1c587c3 2793 aarch64_emit_insn (dsd->insn_buf, insn);
2794 dsd->insn_count = 1;
2795
2796 if ((insn & 0xfffffc1f) == 0xd65f0000)
2797 {
2798 /* RET */
2799 dsd->dsc->pc_adjust = 0;
2800 }
2801 else
2802 dsd->dsc->pc_adjust = 4;
2803}
2804
2805static const struct aarch64_insn_visitor visitor =
2806{
2807 aarch64_displaced_step_b,
2808 aarch64_displaced_step_b_cond,
2809 aarch64_displaced_step_cb,
2810 aarch64_displaced_step_tb,
2811 aarch64_displaced_step_adr,
2812 aarch64_displaced_step_ldr_literal,
2813 aarch64_displaced_step_others,
2814};
2815
2816/* Implement the "displaced_step_copy_insn" gdbarch method. */
2817
2818struct displaced_step_closure *
2819aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
2820 CORE_ADDR from, CORE_ADDR to,
2821 struct regcache *regs)
2822{
2823 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2824 uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
2825 struct aarch64_displaced_step_data dsd;
2826 aarch64_inst inst;
2827
561a72d4 2828 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
c86a40c6 2829 return NULL;
2830
2831 /* Look for a Load Exclusive instruction which begins the sequence. */
c86a40c6 2832 if (inst.opcode->iclass == ldstexcl && bit (insn, 22))
2833 {
2834 /* We can't displaced step atomic sequences. */
2835 return NULL;
2836 }
2837
2838 std::unique_ptr<aarch64_displaced_step_closure> dsc
2839 (new aarch64_displaced_step_closure);
2840 dsd.base.insn_addr = from;
2841 dsd.new_addr = to;
2842 dsd.regs = regs;
cfba9872 2843 dsd.dsc = dsc.get ();
034f1a81 2844 dsd.insn_count = 0;
2845 aarch64_relocate_instruction (insn, &visitor,
2846 (struct aarch64_insn_data *) &dsd);
2847 gdb_assert (dsd.insn_count <= DISPLACED_MODIFIED_INSNS);
2848
2849 if (dsd.insn_count != 0)
2850 {
2851 int i;
2852
2853 /* Instruction can be relocated to scratch pad. Copy
2854 relocated instruction(s) there. */
2855 for (i = 0; i < dsd.insn_count; i++)
2856 {
2857 if (debug_displaced)
2858 {
2859 debug_printf ("displaced: writing insn ");
2860 debug_printf ("%.8x", dsd.insn_buf[i]);
2861 debug_printf (" at %s\n", paddress (gdbarch, to + i * 4));
2862 }
2863 write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
2864 (ULONGEST) dsd.insn_buf[i]);
2865 }
2866 }
2867 else
2868 {
2869 dsc = NULL;
2870 }
2871
cfba9872 2872 return dsc.release ();
2873}
2874
2875/* Implement the "displaced_step_fixup" gdbarch method. */
2876
2877void
2878aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
cfba9872 2879 struct displaced_step_closure *dsc_,
2880 CORE_ADDR from, CORE_ADDR to,
2881 struct regcache *regs)
2882{
2883 aarch64_displaced_step_closure *dsc = (aarch64_displaced_step_closure *) dsc_;
2884
2885 if (dsc->cond)
2886 {
2887 ULONGEST pc;
2888
2889 regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc);
2890 if (pc - to == 8)
2891 {
2892 /* Condition is true. */
2893 }
2894 else if (pc - to == 4)
2895 {
2896 /* Condition is false. */
2897 dsc->pc_adjust = 4;
2898 }
2899 else
2900 gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
2901 }
2902
2903 if (dsc->pc_adjust != 0)
2904 {
2905 if (debug_displaced)
2906 {
2907 debug_printf ("displaced: fixup: set PC to %s:%d\n",
2908 paddress (gdbarch, from), dsc->pc_adjust);
2909 }
2910 regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
2911 from + dsc->pc_adjust);
2912 }
2913}
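/* Worked example (added): a displaced-stepped B.EQ uses the scratch
   sequence emitted by aarch64_displaced_step_b_cond, so execution
   stops either 8 bytes past TO (condition true; the PC is fixed up by
   the branch offset saved in pc_adjust) or 4 bytes past TO (condition
   false; pc_adjust becomes 4 and the PC is set to FROM + 4).  */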
2914
2915/* Implement the "displaced_step_hw_singlestep" gdbarch method. */
2916
2917int
2918aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch,
2919 struct displaced_step_closure *closure)
2920{
2921 return 1;
2922}
2923
2924/* Get the correct target description for the given VQ value.
2925 If VQ is zero then it is assumed SVE is not supported.
2926 (It is not possible to set VQ to zero on an SVE system). */
2927
2928const target_desc *
39bfb937 2929aarch64_read_description (uint64_t vq)
da434ccb 2930{
95228a0d 2931 if (vq > AARCH64_MAX_SVE_VQ)
39bfb937 2932 error (_("VQ is %" PRIu64 ", maximum supported value is %d"), vq,
2933 AARCH64_MAX_SVE_VQ);
2934
2935 struct target_desc *tdesc = tdesc_aarch64_list[vq];
da434ccb 2936
2937 if (tdesc == NULL)
2938 {
2939 tdesc = aarch64_create_target_description (vq);
2940 tdesc_aarch64_list[vq] = tdesc;
2941 }
da434ccb 2942
95228a0d 2943 return tdesc;
2944}
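/* Usage sketch (added): aarch64_read_description (0) yields the base
   description with no SVE feature.  A caller that read a 32-byte
   vector length from the target would pass sve_vq_from_vl (32), i.e.
   a VQ of 2, and receive (and cache) a description whose Z registers
   are 256 bits wide.  */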
2945
2946/* Return the VQ used when creating the target description TDESC. */
2947
1332a140 2948static uint64_t
2949aarch64_get_tdesc_vq (const struct target_desc *tdesc)
2950{
2951 const struct tdesc_feature *feature_sve;
2952
2953 if (!tdesc_has_registers (tdesc))
2954 return 0;
2955
2956 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
2957
2958 if (feature_sve == nullptr)
2959 return 0;
2960
2961 uint64_t vl = tdesc_register_bitsize (feature_sve,
2962 aarch64_sve_register_names[0]) / 8;
2963 return sve_vq_from_vl (vl);
2964}
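/* For example (added): a description whose z0 register is 512 bits
   wide gives vl = 64 bytes, so this returns a VQ of 4 (four 128-bit
   quadwords per vector register).  */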
2965
2966
2967/* Initialize the current architecture based on INFO. If possible,
2968 re-use an architecture from ARCHES, which is a list of
2969 architectures already created during this debugging session.
2970
2971 Called e.g. at program startup, when reading a core file, and when
2972 reading a binary file. */
2973
2974static struct gdbarch *
2975aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
2976{
2977 struct gdbarch_tdep *tdep;
2978 struct gdbarch *gdbarch;
2979 struct gdbarch_list *best_arch;
2980 struct tdesc_arch_data *tdesc_data = NULL;
2981 const struct target_desc *tdesc = info.target_desc;
2982 int i;
07b287a0 2983 int valid_p = 1;
2984 const struct tdesc_feature *feature_core;
2985 const struct tdesc_feature *feature_fpu;
2986 const struct tdesc_feature *feature_sve;
2987 int num_regs = 0;
2988 int num_pseudo_regs = 0;
2989
ba2d2bb2 2990 /* Ensure we always have a target description. */
07b287a0 2991 if (!tdesc_has_registers (tdesc))
ba2d2bb2 2992 tdesc = aarch64_read_description (0);
2993 gdb_assert (tdesc);
2994
2995 feature_core = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");
2996 feature_fpu = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
2997 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
07b287a0 2998
ba2d2bb2 2999 if (feature_core == NULL)
3000 return NULL;
3001
3002 tdesc_data = tdesc_data_alloc ();
3003
ba2d2bb2 3004 /* Validate the description provides the mandatory core R registers
3005 and allocate their numbers. */
3006 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
3007 valid_p &= tdesc_numbered_register (feature_core, tdesc_data,
3008 AARCH64_X0_REGNUM + i,
3009 aarch64_r_register_names[i]);
3010
3011 num_regs = AARCH64_X0_REGNUM + i;
3012
3013 /* Add the V registers. */
3014 if (feature_fpu != NULL)
07b287a0 3015 {
3016 if (feature_sve != NULL)
3017 error (_("Program contains both fpu and SVE features."));
3018
3019 /* Validate the description provides the mandatory V registers
3020 and allocate their numbers. */
07b287a0 3021 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
3022 valid_p &= tdesc_numbered_register (feature_fpu, tdesc_data,
3023 AARCH64_V0_REGNUM + i,
3024 aarch64_v_register_names[i]);
3025
3026 num_regs = AARCH64_V0_REGNUM + i;
ba2d2bb2 3027 }
07b287a0 3028
3029 /* Add the SVE registers. */
3030 if (feature_sve != NULL)
3031 {
3032 /* Validate the description provides the mandatory SVE registers
3033 and allocate their numbers. */
3034 for (i = 0; i < ARRAY_SIZE (aarch64_sve_register_names); i++)
3035 valid_p &= tdesc_numbered_register (feature_sve, tdesc_data,
3036 AARCH64_SVE_Z0_REGNUM + i,
3037 aarch64_sve_register_names[i]);
3038
3039 num_regs = AARCH64_SVE_Z0_REGNUM + i;
3040 num_pseudo_regs += 32; /* add the Vn register pseudos. */
3041 }
3042
3043 if (feature_fpu != NULL || feature_sve != NULL)
3044 {
3045 num_pseudo_regs += 32; /* add the Qn scalar register pseudos */
3046 num_pseudo_regs += 32; /* add the Dn scalar register pseudos */
3047 num_pseudo_regs += 32; /* add the Sn scalar register pseudos */
3048 num_pseudo_regs += 32; /* add the Hn scalar register pseudos */
3049 num_pseudo_regs += 32; /* add the Bn scalar register pseudos */
3050 }
3051
3052 if (!valid_p)
3053 {
3054 tdesc_data_cleanup (tdesc_data);
3055 return NULL;
3056 }
3057
3058 /* AArch64 code is always little-endian. */
3059 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
3060
3061 /* If there is already a candidate, use it. */
3062 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
3063 best_arch != NULL;
3064 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
3065 {
3066 /* Found a match. */
3067 break;
3068 }
3069
3070 if (best_arch != NULL)
3071 {
3072 if (tdesc_data != NULL)
3073 tdesc_data_cleanup (tdesc_data);
3074 return best_arch->gdbarch;
3075 }
3076
8d749320 3077 tdep = XCNEW (struct gdbarch_tdep);
3078 gdbarch = gdbarch_alloc (&info, tdep);
3079
3080 /* This should be low enough for everything. */
3081 tdep->lowest_pc = 0x20;
3082 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
3083 tdep->jb_elt_size = 8;
ba2d2bb2 3084 tdep->vq = aarch64_get_tdesc_vq (tdesc);
3085
3086 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
3087 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
3088
3089 /* Frame handling. */
3090 set_gdbarch_dummy_id (gdbarch, aarch64_dummy_id);
3091 set_gdbarch_unwind_pc (gdbarch, aarch64_unwind_pc);
3092 set_gdbarch_unwind_sp (gdbarch, aarch64_unwind_sp);
3093
3094 /* Advance PC across function entry code. */
3095 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
3096
3097 /* The stack grows downward. */
3098 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
3099
3100 /* Breakpoint manipulation. */
3101 set_gdbarch_breakpoint_kind_from_pc (gdbarch,
3102 aarch64_breakpoint::kind_from_pc);
3103 set_gdbarch_sw_breakpoint_from_kind (gdbarch,
3104 aarch64_breakpoint::bp_from_kind);
07b287a0 3105 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
9404b58f 3106 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
3107
3108 /* Information about registers, etc. */
3109 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
3110 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
3111 set_gdbarch_num_regs (gdbarch, num_regs);
3112
3113 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
3114 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
3115 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
3116 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
3117 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
3118 set_tdesc_pseudo_register_reggroup_p (gdbarch,
3119 aarch64_pseudo_register_reggroup_p);
3120
3121 /* ABI */
3122 set_gdbarch_short_bit (gdbarch, 16);
3123 set_gdbarch_int_bit (gdbarch, 32);
3124 set_gdbarch_float_bit (gdbarch, 32);
3125 set_gdbarch_double_bit (gdbarch, 64);
3126 set_gdbarch_long_double_bit (gdbarch, 128);
3127 set_gdbarch_long_bit (gdbarch, 64);
3128 set_gdbarch_long_long_bit (gdbarch, 64);
3129 set_gdbarch_ptr_bit (gdbarch, 64);
3130 set_gdbarch_char_signed (gdbarch, 0);
53375380 3131 set_gdbarch_wchar_signed (gdbarch, 0);
3132 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
3133 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
3134 set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);
3135
3136 /* Internal <-> external register number maps. */
3137 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
3138
3139 /* Returning results. */
3140 set_gdbarch_return_value (gdbarch, aarch64_return_value);
3141
3142 /* Disassembly. */
3143 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
3144
3145 /* Virtual tables. */
3146 set_gdbarch_vbit_in_delta (gdbarch, 1);
3147
3148 /* Hook in the ABI-specific overrides, if they have been registered. */
3149 info.target_desc = tdesc;
0dba2a6c 3150 info.tdesc_data = tdesc_data;
3151 gdbarch_init_osabi (info, gdbarch);
3152
3153 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
3154
3155 /* Add some default predicates. */
3156 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
3157 dwarf2_append_unwinders (gdbarch);
3158 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
3159
3160 frame_base_set_default (gdbarch, &aarch64_normal_base);
3161
3162 /* Now we have tuned the configuration, set a few final things,
3163 based on what the OS ABI has told us. */
3164
3165 if (tdep->jb_pc >= 0)
3166 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
3167
3168 set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);
3169
3170 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
3171
3172 /* Add standard register aliases. */
3173 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
3174 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
3175 value_of_aarch64_user_reg,
3176 &aarch64_register_aliases[i].regnum);
3177
3178 return gdbarch;
3179}
3180
3181static void
3182aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
3183{
3184 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3185
3186 if (tdep == NULL)
3187 return;
3188
3189 fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
3190 paddress (gdbarch, tdep->lowest_pc));
3191}
3192
0d4c07af 3193#if GDB_SELF_TEST
3194namespace selftests
3195{
3196static void aarch64_process_record_test (void);
3197}
0d4c07af 3198#endif
1e2b521d 3199
3200void
3201_initialize_aarch64_tdep (void)
3202{
3203 gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
3204 aarch64_dump_tdep);
3205
3206 /* Debug this file's internals. */
3207 add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
3208Set AArch64 debugging."), _("\
3209Show AArch64 debugging."), _("\
3210When on, AArch64 specific debugging is enabled."),
3211 NULL,
3212 show_aarch64_debug,
3213 &setdebuglist, &showdebuglist);
3214
3215#if GDB_SELF_TEST
3216 selftests::register_test ("aarch64-analyze-prologue",
3217 selftests::aarch64_analyze_prologue_test);
3218 selftests::register_test ("aarch64-process-record",
3219 selftests::aarch64_process_record_test);
6654d750 3220 selftests::record_xml_tdesc ("aarch64.xml",
95228a0d 3221 aarch64_create_target_description (0));
4d9a9006 3222#endif
07b287a0 3223}
3224
3225/* AArch64 process record-replay related structures, defines etc. */
3226
3227#define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
3228 do \
3229 { \
3230 unsigned int reg_len = LENGTH; \
3231 if (reg_len) \
3232 { \
3233 REGS = XNEWVEC (uint32_t, reg_len); \
3234 memcpy(&REGS[0], &RECORD_BUF[0], sizeof(uint32_t)*LENGTH); \
3235 } \
3236 } \
3237 while (0)
3238
3239#define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
3240 do \
3241 { \
3242 unsigned int mem_len = LENGTH; \
3243 if (mem_len) \
3244 { \
3245 MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
3246 memcpy(&MEMS->len, &RECORD_BUF[0], \
3247 sizeof(struct aarch64_mem_r) * LENGTH); \
3248 } \
3249 } \
3250 while (0)
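/* Usage sketch (added): a record handler typically fills a local
   record_buf[] with the numbers of the registers an instruction
   writes, sets reg_rec_count, and then does

     REG_ALLOC (aarch64_insn_r->aarch64_regs,
                aarch64_insn_r->reg_rec_count, record_buf);

   which heap-allocates and copies the list for the record target;
   MEM_ALLOC does the same for the length/address pairs describing
   memory writes.  */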
3251
3252/* AArch64 record/replay structures and enumerations. */
3253
3254struct aarch64_mem_r
3255{
3256 uint64_t len; /* Record length. */
3257 uint64_t addr; /* Memory address. */
3258};
3259
3260enum aarch64_record_result
3261{
3262 AARCH64_RECORD_SUCCESS,
3263 AARCH64_RECORD_UNSUPPORTED,
3264 AARCH64_RECORD_UNKNOWN
3265};
3266
3267typedef struct insn_decode_record_t
3268{
3269 struct gdbarch *gdbarch;
3270 struct regcache *regcache;
3271 CORE_ADDR this_addr; /* Address of insn to be recorded. */
3272 uint32_t aarch64_insn; /* Insn to be recorded. */
3273 uint32_t mem_rec_count; /* Count of memory records. */
3274 uint32_t reg_rec_count; /* Count of register records. */
3275 uint32_t *aarch64_regs; /* Registers to be recorded. */
3276 struct aarch64_mem_r *aarch64_mems; /* Memory locations to be recorded. */
3277} insn_decode_record;
3278
3279/* Record handler for data processing - register instructions. */
3280
3281static unsigned int
3282aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
3283{
3284 uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
3285 uint32_t record_buf[4];
3286
3287 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3288 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3289 insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);
3290
3291 if (!bit (aarch64_insn_r->aarch64_insn, 28))
3292 {
3293 uint8_t setflags;
3294
3295 /* Logical (shifted register). */
3296 if (insn_bits24_27 == 0x0a)
3297 setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
3298 /* Add/subtract. */
3299 else if (insn_bits24_27 == 0x0b)
3300 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3301 else
3302 return AARCH64_RECORD_UNKNOWN;
3303
3304 record_buf[0] = reg_rd;
3305 aarch64_insn_r->reg_rec_count = 1;
3306 if (setflags)
3307 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3308 }
3309 else
3310 {
3311 if (insn_bits24_27 == 0x0b)
3312 {
3313 /* Data-processing (3 source). */
3314 record_buf[0] = reg_rd;
3315 aarch64_insn_r->reg_rec_count = 1;
3316 }
3317 else if (insn_bits24_27 == 0x0a)
3318 {
3319 if (insn_bits21_23 == 0x00)
3320 {
3321 /* Add/subtract (with carry). */
3322 record_buf[0] = reg_rd;
3323 aarch64_insn_r->reg_rec_count = 1;
3324 if (bit (aarch64_insn_r->aarch64_insn, 29))
3325 {
3326 record_buf[1] = AARCH64_CPSR_REGNUM;
3327 aarch64_insn_r->reg_rec_count = 2;
3328 }
3329 }
3330 else if (insn_bits21_23 == 0x02)
3331 {
3332 /* Conditional compare (register) and conditional compare
3333 (immediate) instructions. */
3334 record_buf[0] = AARCH64_CPSR_REGNUM;
3335 aarch64_insn_r->reg_rec_count = 1;
3336 }
3337 else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
3338 {
3339 /* Conditional select. */
3340 /* Data-processing (2 source). */
3341 /* Data-processing (1 source). */
3342 record_buf[0] = reg_rd;
3343 aarch64_insn_r->reg_rec_count = 1;
3344 }
3345 else
3346 return AARCH64_RECORD_UNKNOWN;
3347 }
3348 }
3349
3350 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3351 record_buf);
3352 return AARCH64_RECORD_SUCCESS;
3353}
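/* Worked example (added): for `adds x1, x2, x3' (add/subtract,
   shifted register, with S set), bits 24-27 are 0x0b and bit 29 is
   set, so the handler above records both x1 and the CPSR as the
   registers the instruction modifies.  */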
3354
3355/* Record handler for data processing - immediate instructions. */
3356
3357static unsigned int
3358aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
3359{
78cc6c2d 3360 uint8_t reg_rd, insn_bit23, insn_bits24_27, setflags;
3361 uint32_t record_buf[4];
3362
3363 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3364 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3365 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3366
3367 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
3368 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
3369 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
3370 {
3371 record_buf[0] = reg_rd;
3372 aarch64_insn_r->reg_rec_count = 1;
3373 }
3374 else if (insn_bits24_27 == 0x01)
3375 {
3376 /* Add/Subtract (immediate). */
3377 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3378 record_buf[0] = reg_rd;
3379 aarch64_insn_r->reg_rec_count = 1;
3380 if (setflags)
3381 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3382 }
3383 else if (insn_bits24_27 == 0x02 && !insn_bit23)
3384 {
3385 /* Logical (immediate). */
3386 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
3387 record_buf[0] = reg_rd;
3388 aarch64_insn_r->reg_rec_count = 1;
3389 if (setflags)
3390 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3391 }
3392 else
3393 return AARCH64_RECORD_UNKNOWN;
3394
3395 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3396 record_buf);
3397 return AARCH64_RECORD_SUCCESS;
3398}
3399
3400/* Record handler for branch, exception generation and system instructions. */
3401
3402static unsigned int
3403aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
3404{
3405 struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
3406 uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
3407 uint32_t record_buf[4];
3408
3409 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3410 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3411 insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3412
3413 if (insn_bits28_31 == 0x0d)
3414 {
3415 /* Exception generation instructions. */
3416 if (insn_bits24_27 == 0x04)
3417 {
3418 if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
3419 && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
3420 && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
3421 {
3422 ULONGEST svc_number;
3423
3424 regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
3425 &svc_number);
3426 return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
3427 svc_number);
3428 }
3429 else
3430 return AARCH64_RECORD_UNSUPPORTED;
3431 }
3432 /* System instructions. */
3433 else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
3434 {
3435 uint32_t reg_rt, reg_crn;
3436
3437 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3438 reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3439
3440 /* Record rt in case of sysl and mrs instructions. */
3441 if (bit (aarch64_insn_r->aarch64_insn, 21))
3442 {
3443 record_buf[0] = reg_rt;
3444 aarch64_insn_r->reg_rec_count = 1;
3445 }
3446 /* Record cpsr for hint and msr(immediate) instructions. */
3447 else if (reg_crn == 0x02 || reg_crn == 0x04)
3448 {
3449 record_buf[0] = AARCH64_CPSR_REGNUM;
3450 aarch64_insn_r->reg_rec_count = 1;
3451 }
3452 }
3453 /* Unconditional branch (register). */
3454 else if ((insn_bits24_27 & 0x0e) == 0x06)
3455 {
3456 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3457 if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
3458 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3459 }
3460 else
3461 return AARCH64_RECORD_UNKNOWN;
3462 }
3463 /* Unconditional branch (immediate). */
3464 else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
3465 {
3466 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3467 if (bit (aarch64_insn_r->aarch64_insn, 31))
3468 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3469 }
3470 else
3471 /* Compare & branch (immediate), Test & branch (immediate) and
3472 Conditional branch (immediate). */
3473 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3474
3475 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3476 record_buf);
3477 return AARCH64_RECORD_SUCCESS;
3478}
3479
3480/* Record handler for advanced SIMD load and store instructions. */
3481
3482static unsigned int
3483aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
3484{
3485 CORE_ADDR address;
3486 uint64_t addr_offset = 0;
3487 uint32_t record_buf[24];
3488 uint64_t record_buf_mem[24];
3489 uint32_t reg_rn, reg_rt;
3490 uint32_t reg_index = 0, mem_index = 0;
3491 uint8_t opcode_bits, size_bits;
3492
3493 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3494 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3495 size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3496 opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3497 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);
3498
3499 if (record_debug)
b277c936 3500 debug_printf ("Process record: Advanced SIMD load/store\n");
3501
3502 /* Load/store single structure. */
3503 if (bit (aarch64_insn_r->aarch64_insn, 24))
3504 {
3505 uint8_t sindex, scale, selem, esize, replicate = 0;
3506 scale = opcode_bits >> 2;
3507 selem = ((opcode_bits & 0x02) |
3508 bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
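      /* selem is the number of registers in one structure (1-4):
	 opcode bit 1 supplies the high bit and the R bit (21) the low
	 bit of selem - 1, while scale comes from the top two opcode
	 bits.  */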
      switch (scale)
	{
	case 1:
	  if (size_bits & 0x01)
	    return AARCH64_RECORD_UNKNOWN;
	  break;
	case 2:
	  if ((size_bits >> 1) & 0x01)
	    return AARCH64_RECORD_UNKNOWN;
	  if (size_bits & 0x01)
	    {
	      if (!((opcode_bits >> 1) & 0x01))
		scale = 3;
	      else
		return AARCH64_RECORD_UNKNOWN;
	    }
	  break;
	case 3:
	  if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
	    {
	      scale = size_bits;
	      replicate = 1;
	      break;
	    }
	  else
	    return AARCH64_RECORD_UNKNOWN;
	default:
	  break;
	}
      esize = 8 << scale;
      if (replicate)
	for (sindex = 0; sindex < selem; sindex++)
	  {
	    record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
	    reg_rt = (reg_rt + 1) % 32;
	  }
      else
	{
	  for (sindex = 0; sindex < selem; sindex++)
	    {
	      if (bit (aarch64_insn_r->aarch64_insn, 22))
		record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
	      else
		{
		  record_buf_mem[mem_index++] = esize / 8;
		  record_buf_mem[mem_index++] = address + addr_offset;
		}
	      addr_offset = addr_offset + (esize / 8);
	      reg_rt = (reg_rt + 1) % 32;
	    }
	}
    }
  /* Load/store multiple structure.  */
  else
    {
      uint8_t selem, esize, rpt, elements;
      uint8_t eindex, rindex;

      esize = 8 << size_bits;
      if (bit (aarch64_insn_r->aarch64_insn, 30))
	elements = 128 / esize;
      else
	elements = 64 / esize;

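      /* Map the opcode field to rpt (how many consecutive register
	 blocks the instruction transfers) and selem (how many
	 registers form one structure), per the LDn/STn
	 multiple-structures encodings.  */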
      switch (opcode_bits)
	{
	/* LD/ST4 (4 registers).  */
	case 0:
	  rpt = 1;
	  selem = 4;
	  break;
	/* LD/ST1 (4 registers).  */
	case 2:
	  rpt = 4;
	  selem = 1;
	  break;
	/* LD/ST3 (3 registers).  */
	case 4:
	  rpt = 1;
	  selem = 3;
	  break;
	/* LD/ST1 (3 registers).  */
	case 6:
	  rpt = 3;
	  selem = 1;
	  break;
	/* LD/ST1 (1 register).  */
	case 7:
	  rpt = 1;
	  selem = 1;
	  break;
	/* LD/ST2 (2 registers).  */
	case 8:
	  rpt = 1;
	  selem = 2;
	  break;
	/* LD/ST1 (2 registers).  */
	case 10:
	  rpt = 2;
	  selem = 1;
	  break;
	default:
	  return AARCH64_RECORD_UNSUPPORTED;
	  break;
	}
      for (rindex = 0; rindex < rpt; rindex++)
	for (eindex = 0; eindex < elements; eindex++)
	  {
	    uint8_t reg_tt, sindex;
	    reg_tt = (reg_rt + rindex) % 32;
	    for (sindex = 0; sindex < selem; sindex++)
	      {
		if (bit (aarch64_insn_r->aarch64_insn, 22))
		  record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
		else
		  {
		    record_buf_mem[mem_index++] = esize / 8;
		    record_buf_mem[mem_index++] = address + addr_offset;
		  }
		addr_offset = addr_offset + (esize / 8);
		reg_tt = (reg_tt + 1) % 32;
	      }
	  }
    }

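  /* Post-indexed addressing (bit 23) writes the updated address back
     to the base register, so record Rn too.  */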
  if (bit (aarch64_insn_r->aarch64_insn, 23))
    record_buf[reg_index++] = reg_rn;

  aarch64_insn_r->reg_rec_count = reg_index;
  aarch64_insn_r->mem_rec_count = mem_index / 2;
  MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
	     record_buf_mem);
  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}

/* Record handler for load and store instructions.  */

static unsigned int
aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
{
  uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
  uint8_t insn_bit23, insn_bit21;
  uint8_t opc, size_bits, ld_flag, vector_flag;
  uint32_t reg_rn, reg_rt, reg_rt2;
  uint64_t datasize, offset;
  uint32_t record_buf[8];
  uint64_t record_buf_mem[8];
  CORE_ADDR address;

  insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
  insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
  insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
  ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
  vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
  reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
  reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
  size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);

  /* Load/store exclusive.  */
  if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
    {
      if (record_debug)
	debug_printf ("Process record: load/store exclusive\n");

      if (ld_flag)
	{
	  record_buf[0] = reg_rt;
	  aarch64_insn_r->reg_rec_count = 1;
	  if (insn_bit21)
	    {
	      record_buf[1] = reg_rt2;
	      aarch64_insn_r->reg_rec_count = 2;
	    }
	}
      else
	{
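	  /* Bit 21 selects the pair variants (STXP/STLXP), which store
	     two registers and so double the recorded transfer size.  */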
	  if (insn_bit21)
	    datasize = (8 << size_bits) * 2;
	  else
	    datasize = (8 << size_bits);
	  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
				      &address);
	  record_buf_mem[0] = datasize / 8;
	  record_buf_mem[1] = address;
	  aarch64_insn_r->mem_rec_count = 1;
	  if (!insn_bit23)
	    {
	      /* Save register rs.  */
	      record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	}
    }
  /* Load register (literal) instructions decoding.  */
  else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
    {
      if (record_debug)
	debug_printf ("Process record: load register (literal)\n");

      if (vector_flag)
	record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
      else
	record_buf[0] = reg_rt;
      aarch64_insn_r->reg_rec_count = 1;
    }
  /* All types of load/store pair instructions decoding.  */
  else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
    {
      if (record_debug)
	debug_printf ("Process record: load/store pair\n");

      if (ld_flag)
	{
	  if (vector_flag)
	    {
	      record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
	      record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
	    }
	  else
	    {
	      record_buf[0] = reg_rt;
	      record_buf[1] = reg_rt2;
	    }
	  aarch64_insn_r->reg_rec_count = 2;
	}
      else
	{
	  uint16_t imm7_off;
	  imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
	  if (!vector_flag)
	    size_bits = size_bits >> 1;
	  datasize = 8 << (2 + size_bits);
	  offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
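	  /* imm7 is a signed 7-bit field; bit 6 is the sign.  When it
	     is set, take the two's-complement magnitude (e.g. 0x7f
	     yields 1) and subtract it from the base address below.  */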
	  offset = offset << (2 + size_bits);
	  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
				      &address);
	  if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
	    {
	      if (imm7_off & 0x40)
		address = address - offset;
	      else
		address = address + offset;
	    }

	  record_buf_mem[0] = datasize / 8;
	  record_buf_mem[1] = address;
	  record_buf_mem[2] = datasize / 8;
	  record_buf_mem[3] = address + (datasize / 8);
	  aarch64_insn_r->mem_rec_count = 2;
	}
      if (bit (aarch64_insn_r->aarch64_insn, 23))
	record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
    }
  /* Load/store register (unsigned immediate) instructions.  */
  else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
    {
      opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
      if (!(opc >> 1))
	{
	  if (opc & 0x01)
	    ld_flag = 0x01;
	  else
	    ld_flag = 0x0;
	}
      else
	{
	  if (size_bits == 0x3 && vector_flag == 0x0 && opc == 0x2)
	    {
	      /* PRFM (immediate).  */
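	      /* Prefetch is only a hint and changes no architectural
		 state, so there is nothing to record.  */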
	      return AARCH64_RECORD_SUCCESS;
	    }
	  else if (size_bits == 0x2 && vector_flag == 0x0 && opc == 0x2)
	    {
	      /* LDRSW (immediate).  */
	      ld_flag = 0x1;
	    }
	  else
	    {
	      if (opc & 0x01)
		ld_flag = 0x01;
	      else
		ld_flag = 0x0;
	    }
	}

      if (record_debug)
	{
	  debug_printf ("Process record: load/store (unsigned immediate):"
			" size %x V %d opc %x\n", size_bits, vector_flag,
			opc);
	}

      if (!ld_flag)
	{
	  offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
	  datasize = 8 << size_bits;
	  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
				      &address);
	  offset = offset << size_bits;
	  address = address + offset;

	  record_buf_mem[0] = datasize >> 3;
	  record_buf_mem[1] = address;
	  aarch64_insn_r->mem_rec_count = 1;
	}
      else
	{
	  if (vector_flag)
	    record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
	  else
	    record_buf[0] = reg_rt;
	  aarch64_insn_r->reg_rec_count = 1;
	}
    }
  /* Load/store register (register offset) instructions.  */
  else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
	   && insn_bits10_11 == 0x02 && insn_bit21)
    {
      if (record_debug)
	debug_printf ("Process record: load/store (register offset)\n");

      opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
      if (!(opc >> 1))
	{
	  if (opc & 0x01)
	    ld_flag = 0x01;
	  else
	    ld_flag = 0x0;
	}
      else
	{
	  if (size_bits != 0x03)
	    ld_flag = 0x01;
	  else
	    return AARCH64_RECORD_UNKNOWN;
	}

      if (!ld_flag)
	{
	  ULONGEST reg_rm_val;

	  regcache_raw_read_unsigned (aarch64_insn_r->regcache,
		     bits (aarch64_insn_r->aarch64_insn, 16, 20), &reg_rm_val);
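	  /* The S bit (bit 12) scales the index register by the access
	     size when set.  */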
	  if (bit (aarch64_insn_r->aarch64_insn, 12))
	    offset = reg_rm_val << size_bits;
	  else
	    offset = reg_rm_val;
	  datasize = 8 << size_bits;
	  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
				      &address);
	  address = address + offset;
	  record_buf_mem[0] = datasize >> 3;
	  record_buf_mem[1] = address;
	  aarch64_insn_r->mem_rec_count = 1;
	}
      else
	{
	  if (vector_flag)
	    record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
	  else
	    record_buf[0] = reg_rt;
	  aarch64_insn_r->reg_rec_count = 1;
	}
    }
  /* Load/store register (immediate and unprivileged) instructions.  */
  else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
	   && !insn_bit21)
    {
      if (record_debug)
	{
	  debug_printf ("Process record: load/store "
			"(immediate and unprivileged)\n");
	}
      opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
      if (!(opc >> 1))
	{
	  if (opc & 0x01)
	    ld_flag = 0x01;
	  else
	    ld_flag = 0x0;
	}
      else
	{
	  if (size_bits != 0x03)
	    ld_flag = 0x01;
	  else
	    return AARCH64_RECORD_UNKNOWN;
	}

      if (!ld_flag)
	{
	  uint16_t imm9_off;
	  imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
	  offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
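	  /* Same sign-magnitude trick as the pair case: bit 8 is the
	     sign of the 9-bit immediate, and the magnitude is applied
	     to the base address below.  */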
	  datasize = 8 << size_bits;
	  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
				      &address);
	  if (insn_bits10_11 != 0x01)
	    {
	      if (imm9_off & 0x0100)
		address = address - offset;
	      else
		address = address + offset;
	    }
	  record_buf_mem[0] = datasize >> 3;
	  record_buf_mem[1] = address;
	  aarch64_insn_r->mem_rec_count = 1;
	}
      else
	{
	  if (vector_flag)
	    record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
	  else
	    record_buf[0] = reg_rt;
	  aarch64_insn_r->reg_rec_count = 1;
	}
      if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
	record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
    }
  /* Advanced SIMD load/store instructions.  */
  else
    return aarch64_record_asimd_load_store (aarch64_insn_r);

  MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
	     record_buf_mem);
  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}

/* Record handler for data processing SIMD and floating point
   instructions.  */

static unsigned int
aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
{
  uint8_t insn_bit21, opcode, rmode, reg_rd;
  uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
  uint8_t insn_bits11_14;
  uint32_t record_buf[2];

  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
  insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
  insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
  insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
  opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
  rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
  reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);

  if (record_debug)
    debug_printf ("Process record: data processing SIMD/FP: ");

  if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
    {
      /* Floating point - fixed point conversion instructions.  */
      if (!insn_bit21)
	{
	  if (record_debug)
	    debug_printf ("FP - fixed point conversion");

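	  /* FCVTZ[SU] (rmode 11, opcode 00x) write a general register;
	     the [SU]CVTF conversions write a SIMD&FP register.  */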
	  if ((opcode >> 1) == 0x0 && rmode == 0x03)
	    record_buf[0] = reg_rd;
	  else
	    record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
	}
      /* Floating point - conditional compare instructions.  */
      else if (insn_bits10_11 == 0x01)
	{
	  if (record_debug)
	    debug_printf ("FP - conditional compare");

	  record_buf[0] = AARCH64_CPSR_REGNUM;
	}
      /* Floating point - data processing (2-source) and
	 conditional select instructions.  */
      else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
	{
	  if (record_debug)
	    debug_printf ("FP - DP (2-source)");

	  record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
	}
      else if (insn_bits10_11 == 0x00)
	{
	  /* Floating point - immediate instructions.  */
	  if ((insn_bits12_15 & 0x01) == 0x01
	      || (insn_bits12_15 & 0x07) == 0x04)
	    {
	      if (record_debug)
		debug_printf ("FP - immediate");
	      record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
	    }
	  /* Floating point - compare instructions.  */
	  else if ((insn_bits12_15 & 0x03) == 0x02)
	    {
	      if (record_debug)
		debug_printf ("FP - compare");
	      record_buf[0] = AARCH64_CPSR_REGNUM;
	    }
	  /* Floating point - integer conversions instructions.  */
	  else if (insn_bits12_15 == 0x00)
	    {
	      /* Convert float to integer instruction.  */
	      if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
		{
		  if (record_debug)
		    debug_printf ("float to int conversion");

		  record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
		}
	      /* Convert integer to float instruction.  */
	      else if ((opcode >> 1) == 0x01 && !rmode)
		{
		  if (record_debug)
		    debug_printf ("int to float conversion");

		  record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
		}
	      /* Move float to integer instruction.  */
	      else if ((opcode >> 1) == 0x03)
		{
		  if (record_debug)
		    debug_printf ("move float to int");

		  if (!(opcode & 0x01))
		    record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
		  else
		    record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
		}
	      else
		return AARCH64_RECORD_UNKNOWN;
	    }
	  else
	    return AARCH64_RECORD_UNKNOWN;
	}
      else
	return AARCH64_RECORD_UNKNOWN;
    }
  else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
    {
      if (record_debug)
	debug_printf ("SIMD copy");

      /* Advanced SIMD copy instructions.  */
      if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
	  && !bit (aarch64_insn_r->aarch64_insn, 15)
	  && bit (aarch64_insn_r->aarch64_insn, 10))
	{
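	  /* An imm4 field of 0101 (SMOV) or 0111 (UMOV) moves a vector
	     element to a general register; every other copy targets a
	     vector register.  */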
	  if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
	    record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
	  else
	    record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
	}
      else
	record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
    }
  /* All remaining floating point or advanced SIMD instructions.  */
  else
    {
      if (record_debug)
	debug_printf ("all remain");

      record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
    }

  if (record_debug)
    debug_printf ("\n");

  aarch64_insn_r->reg_rec_count++;
  gdb_assert (aarch64_insn_r->reg_rec_count == 1);
  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}

/* Decode the type of an instruction and invoke its record handler.  */

static unsigned int
aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
{
  uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;

  ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
  ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
  ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
  ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
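
  /* Bits 25-28 form the op0 field that selects the top-level encoding
     class in the A64 instruction set.  */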

  /* Data processing - immediate instructions.  */
  if (!ins_bit26 && !ins_bit27 && ins_bit28)
    return aarch64_record_data_proc_imm (aarch64_insn_r);

  /* Branch, exception generation and system instructions.  */
  if (ins_bit26 && !ins_bit27 && ins_bit28)
    return aarch64_record_branch_except_sys (aarch64_insn_r);

  /* Load and store instructions.  */
  if (!ins_bit25 && ins_bit27)
    return aarch64_record_load_store (aarch64_insn_r);

  /* Data processing - register instructions.  */
  if (ins_bit25 && !ins_bit26 && ins_bit27)
    return aarch64_record_data_proc_reg (aarch64_insn_r);

  /* Data processing - SIMD and floating point instructions.  */
  if (ins_bit25 && ins_bit26 && ins_bit27)
    return aarch64_record_data_proc_simd_fp (aarch64_insn_r);

  return AARCH64_RECORD_UNSUPPORTED;
}

/* Cleans up local record registers and memory allocations.  */

static void
deallocate_reg_mem (insn_decode_record *record)
{
  xfree (record->aarch64_regs);
  xfree (record->aarch64_mems);
}

#if GDB_SELF_TEST
namespace selftests {

static void
aarch64_process_record_test (void)
{
  struct gdbarch_info info;
  uint32_t ret;

  gdbarch_info_init (&info);
  info.bfd_arch_info = bfd_scan_arch ("aarch64");

  struct gdbarch *gdbarch = gdbarch_find_by_info (info);
  SELF_CHECK (gdbarch != NULL);

  insn_decode_record aarch64_record;

  memset (&aarch64_record, 0, sizeof (insn_decode_record));
  aarch64_record.regcache = NULL;
  aarch64_record.this_addr = 0;
  aarch64_record.gdbarch = gdbarch;

  /* 20 00 80 f9 prfm pldl1keep, [x1]  */
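  /* This exercises the PRFM fast path above: a prefetch hint must
     decode successfully yet record no register or memory writes.  */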
  aarch64_record.aarch64_insn = 0xf9800020;
  ret = aarch64_record_decode_insn_handler (&aarch64_record);
  SELF_CHECK (ret == AARCH64_RECORD_SUCCESS);
  SELF_CHECK (aarch64_record.reg_rec_count == 0);
  SELF_CHECK (aarch64_record.mem_rec_count == 0);

  deallocate_reg_mem (&aarch64_record);
}

} // namespace selftests
#endif /* GDB_SELF_TEST */

/* Parse the current instruction and record the values of the
   registers and memory that will be changed by the current
   instruction to record_arch_list.  Return -1 if something is
   wrong.  */

int
aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
			CORE_ADDR insn_addr)
{
  uint32_t rec_no = 0;
  uint8_t insn_size = 4;
  uint32_t ret = 0;
  gdb_byte buf[insn_size];
  insn_decode_record aarch64_record;

  memset (&buf[0], 0, insn_size);
  memset (&aarch64_record, 0, sizeof (insn_decode_record));
  target_read_memory (insn_addr, &buf[0], insn_size);
  aarch64_record.aarch64_insn
    = (uint32_t) extract_unsigned_integer (&buf[0],
					   insn_size,
					   gdbarch_byte_order (gdbarch));
  aarch64_record.regcache = regcache;
  aarch64_record.this_addr = insn_addr;
  aarch64_record.gdbarch = gdbarch;

  ret = aarch64_record_decode_insn_handler (&aarch64_record);
  if (ret == AARCH64_RECORD_UNSUPPORTED)
    {
      printf_unfiltered (_("Process record does not support instruction "
			   "0x%0x at address %s.\n"),
			 aarch64_record.aarch64_insn,
			 paddress (gdbarch, insn_addr));
      ret = -1;
    }

  if (0 == ret)
    {
      /* Record registers.  */
      record_full_arch_list_add_reg (aarch64_record.regcache,
				     AARCH64_PC_REGNUM);
      /* Always record register CPSR.  */
      record_full_arch_list_add_reg (aarch64_record.regcache,
				     AARCH64_CPSR_REGNUM);
      if (aarch64_record.aarch64_regs)
	for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
	  if (record_full_arch_list_add_reg (aarch64_record.regcache,
					     aarch64_record.aarch64_regs[rec_no]))
	    ret = -1;

      /* Record memories.  */
      if (aarch64_record.aarch64_mems)
	for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
	  if (record_full_arch_list_add_mem
	      ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
	       aarch64_record.aarch64_mems[rec_no].len))
	    ret = -1;

      if (record_full_arch_list_add_end ())
	ret = -1;
    }

  deallocate_reg_mem (&aarch64_record);
  return ret;
}
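
/* A minimal sketch of how this entry point is typically wired up,
   assuming the usual OS-specific gdbarch/ABI initialisation context
   (not shown in this excerpt):

     set_gdbarch_process_record (gdbarch, aarch64_process_record);

   record-full then replays the register and memory journal built
   above when the user runs the inferior in reverse.  */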