/* Common target dependent code for GDB on AArch64 systems.

   Copyright (C) 2009-2017 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"

#include "frame.h"
#include "inferior.h"
#include "gdbcmd.h"
#include "gdbcore.h"
#include "dis-asm.h"
#include "regcache.h"
#include "reggroups.h"
#include "value.h"
#include "arch-utils.h"
#include "osabi.h"
#include "frame-unwind.h"
#include "frame-base.h"
#include "trad-frame.h"
#include "objfiles.h"
#include "dwarf2-frame.h"
#include "gdbtypes.h"
#include "prologue-value.h"
#include "target-descriptions.h"
#include "user-regs.h"
#include "language.h"
#include "infcall.h"
#include "ax.h"
#include "ax-gdb.h"
#include "selftest.h"

#include "aarch64-tdep.h"

#include "elf-bfd.h"
#include "elf/aarch64.h"

#include "vec.h"

#include "record.h"
#include "record-full.h"

#include "features/aarch64.c"

#include "arch/aarch64-insn.h"

#include "opcode/aarch64.h"
#include <algorithm>

#define submask(x) ((1L << ((x) + 1)) - 1)
#define bit(obj,st) (((obj) >> (st)) & 1)
#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
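
/* For example, bits (insn, 5, 9) extracts the five-bit field occupying
   bits [9:5] of INSN (the Rn slot in many AArch64 encodings), and
   bit (insn, 31) tests the top bit, the sf/size bit in many
   data-processing encodings.  Illustrative only; the meaning of a
   field depends on the particular encoding.  */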

/* Pseudo register base numbers.  */
#define AARCH64_Q0_REGNUM 0
#define AARCH64_D0_REGNUM (AARCH64_Q0_REGNUM + AARCH64_D_REGISTER_COUNT)
#define AARCH64_S0_REGNUM (AARCH64_D0_REGNUM + 32)
#define AARCH64_H0_REGNUM (AARCH64_S0_REGNUM + 32)
#define AARCH64_B0_REGNUM (AARCH64_H0_REGNUM + 32)
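
/* With 32 registers in each bank, the numbering above works out to
   Q0-Q31 at offsets 0-31, D0-D31 at 32-63, S0-S31 at 64-95, H0-H31 at
   96-127 and B0-B31 from 128, all relative to the first pseudo
   register number (gdbarch_num_regs).  */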

/* The standard register names, and all the valid aliases for them.  */
static const struct
{
  const char *const name;
  int regnum;
} aarch64_register_aliases[] =
{
  /* 64-bit register names.  */
  {"fp", AARCH64_FP_REGNUM},
  {"lr", AARCH64_LR_REGNUM},
  {"sp", AARCH64_SP_REGNUM},

  /* 32-bit register names.  */
  {"w0", AARCH64_X0_REGNUM + 0},
  {"w1", AARCH64_X0_REGNUM + 1},
  {"w2", AARCH64_X0_REGNUM + 2},
  {"w3", AARCH64_X0_REGNUM + 3},
  {"w4", AARCH64_X0_REGNUM + 4},
  {"w5", AARCH64_X0_REGNUM + 5},
  {"w6", AARCH64_X0_REGNUM + 6},
  {"w7", AARCH64_X0_REGNUM + 7},
  {"w8", AARCH64_X0_REGNUM + 8},
  {"w9", AARCH64_X0_REGNUM + 9},
  {"w10", AARCH64_X0_REGNUM + 10},
  {"w11", AARCH64_X0_REGNUM + 11},
  {"w12", AARCH64_X0_REGNUM + 12},
  {"w13", AARCH64_X0_REGNUM + 13},
  {"w14", AARCH64_X0_REGNUM + 14},
  {"w15", AARCH64_X0_REGNUM + 15},
  {"w16", AARCH64_X0_REGNUM + 16},
  {"w17", AARCH64_X0_REGNUM + 17},
  {"w18", AARCH64_X0_REGNUM + 18},
  {"w19", AARCH64_X0_REGNUM + 19},
  {"w20", AARCH64_X0_REGNUM + 20},
  {"w21", AARCH64_X0_REGNUM + 21},
  {"w22", AARCH64_X0_REGNUM + 22},
  {"w23", AARCH64_X0_REGNUM + 23},
  {"w24", AARCH64_X0_REGNUM + 24},
  {"w25", AARCH64_X0_REGNUM + 25},
  {"w26", AARCH64_X0_REGNUM + 26},
  {"w27", AARCH64_X0_REGNUM + 27},
  {"w28", AARCH64_X0_REGNUM + 28},
  {"w29", AARCH64_X0_REGNUM + 29},
  {"w30", AARCH64_X0_REGNUM + 30},

  /* specials */
  {"ip0", AARCH64_X0_REGNUM + 16},
  {"ip1", AARCH64_X0_REGNUM + 17}
};

/* The required core 'R' registers.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM!  */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  "pc", "cpsr"
};

/* The FP/SIMD 'V' registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM!  */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  "fpsr",
  "fpcr"
};

/* AArch64 prologue cache structure.  */
struct aarch64_prologue_cache
{
  /* The program counter at the start of the function.  It is used to
     identify this frame as a prologue frame.  */
  CORE_ADDR func;

  /* The program counter at the time this frame was created; i.e. where
     this function was called from.  It is used to identify this frame as a
     stub frame.  */
  CORE_ADDR prev_pc;

  /* The stack pointer at the time this frame was created; i.e. the
     caller's stack pointer when this function was called.  It is used
     to identify this frame.  */
  CORE_ADDR prev_sp;

  /* Is the target available to read from?  */
  int available_p;

  /* The frame base for this frame is just prev_sp - frame size.
     FRAMESIZE is the distance from the frame pointer to the
     initial stack pointer.  */
  int framesize;

  /* The register used to hold the frame pointer for this frame.  */
  int framereg;

  /* Saved register offsets.  */
  struct trad_frame_saved_reg *saved_regs;
};

static void
show_aarch64_debug (struct ui_file *file, int from_tty,
                    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
}
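
/* The flag printed above is toggled with "set debug aarch64 on|off" at
   the GDB prompt; the corresponding setshow command is registered
   elsewhere in this file.  */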

namespace {

/* Abstract instruction reader.  */

class abstract_instruction_reader
{
public:
  /* Read in one instruction.  */
  virtual ULONGEST read (CORE_ADDR memaddr, int len,
                         enum bfd_endian byte_order) = 0;
};

/* Instruction reader from real target.  */

class instruction_reader : public abstract_instruction_reader
{
 public:
  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
  {
    return read_code_unsigned_integer (memaddr, len, byte_order);
  }
};

} // namespace
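
/* The reader abstraction lets the prologue analyzer below run either
   against live target memory (instruction_reader) or against canned
   instruction sequences in the self tests (see instruction_reader_test
   under GDB_SELF_TEST).  */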

/* Analyze a prologue, looking for a recognizable stack frame
   and frame pointer.  Scan until we encounter a store that could
   clobber the stack frame unexpectedly, or an unknown instruction.  */

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
                          CORE_ADDR start, CORE_ADDR limit,
                          struct aarch64_prologue_cache *cache,
                          abstract_instruction_reader& reader)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  int i;
  /* Track X registers and D registers in prologue.  */
  pv_t regs[AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT];
  struct pv_area *stack;
  struct cleanup *back_to;

  for (i = 0; i < AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT; i++)
    regs[i] = pv_register (i, 0);
  stack = make_pv_area (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));
  back_to = make_cleanup_free_pv_area (stack);

  for (; start < limit; start += 4)
    {
      uint32_t insn;
      aarch64_inst inst;

      insn = reader.read (start, 4, byte_order_for_code);

      if (aarch64_decode_insn (insn, &inst, 1) != 0)
        break;

      if (inst.opcode->iclass == addsub_imm
          && (inst.opcode->op == OP_ADD
              || strcmp ("sub", inst.opcode->name) == 0))
        {
          unsigned rd = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].reg.regno;

          gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM);

          if (inst.opcode->op == OP_ADD)
            {
              regs[rd] = pv_add_constant (regs[rn],
                                          inst.operands[2].imm.value);
            }
          else
            {
              regs[rd] = pv_add_constant (regs[rn],
                                          -inst.operands[2].imm.value);
            }
        }
      else if (inst.opcode->iclass == pcreladdr
               && inst.operands[1].type == AARCH64_OPND_ADDR_ADRP)
        {
          gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);

          regs[inst.operands[0].reg.regno] = pv_unknown ();
        }
      else if (inst.opcode->iclass == branch_imm)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == condbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == branch_reg)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == compbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->op == OP_MOVZ)
        {
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
          regs[inst.operands[0].reg.regno] = pv_unknown ();
        }
      else if (inst.opcode->iclass == log_shift
               && strcmp (inst.opcode->name, "orr") == 0)
        {
          unsigned rd = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].reg.regno;
          unsigned rm = inst.operands[2].reg.regno;

          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT);

          if (inst.operands[2].shifter.amount == 0
              && rn == AARCH64_SP_REGNUM)
            regs[rd] = regs[rm];
          else
            {
              if (aarch64_debug)
                {
                  debug_printf ("aarch64: prologue analysis gave up "
                                "addr=%s opcode=0x%x (orr x register)\n",
                                core_addr_to_string_nz (start), insn);
                }
              break;
            }
        }
      else if (inst.opcode->op == OP_STUR)
        {
          unsigned rt = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].addr.base_regno;
          int is64
            = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);

          gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
          gdb_assert (!inst.operands[1].addr.offset.is_reg);

          pv_area_store (stack,
                         pv_add_constant (regs[rn],
                                          inst.operands[1].addr.offset.imm),
                         is64 ? 8 : 4, regs[rt]);
        }
      else if ((inst.opcode->iclass == ldstpair_off
                || (inst.opcode->iclass == ldstpair_indexed
                    && inst.operands[2].addr.preind))
               && strcmp ("stp", inst.opcode->name) == 0)
        {
          /* STP with addressing mode Pre-indexed and Base register.  */
          unsigned rt1;
          unsigned rt2;
          unsigned rn = inst.operands[2].addr.base_regno;
          int32_t imm = inst.operands[2].addr.offset.imm;

          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
                      || inst.operands[0].type == AARCH64_OPND_Ft);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2
                      || inst.operands[1].type == AARCH64_OPND_Ft2);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7);
          gdb_assert (!inst.operands[2].addr.offset.is_reg);

          /* If recording this store would invalidate the store area
             (perhaps because rn is not known) then we should abandon
             further prologue analysis.  */
          if (pv_area_store_would_trash (stack,
                                         pv_add_constant (regs[rn], imm)))
            break;

          if (pv_area_store_would_trash (stack,
                                         pv_add_constant (regs[rn], imm + 8)))
            break;

          rt1 = inst.operands[0].reg.regno;
          rt2 = inst.operands[1].reg.regno;
          if (inst.operands[0].type == AARCH64_OPND_Ft)
            {
              /* Only bottom 64-bit of each V register (D register) need
                 to be preserved.  */
              gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
              rt1 += AARCH64_X_REGISTER_COUNT;
              rt2 += AARCH64_X_REGISTER_COUNT;
            }

          pv_area_store (stack, pv_add_constant (regs[rn], imm), 8,
                         regs[rt1]);
          pv_area_store (stack, pv_add_constant (regs[rn], imm + 8), 8,
                         regs[rt2]);

          if (inst.operands[2].addr.writeback)
            regs[rn] = pv_add_constant (regs[rn], imm);

        }
      else if ((inst.opcode->iclass == ldst_imm9 /* Signed immediate.  */
                || (inst.opcode->iclass == ldst_pos /* Unsigned immediate.  */
                    && (inst.opcode->op == OP_STR_POS
                        || inst.opcode->op == OP_STRF_POS)))
               && inst.operands[1].addr.base_regno == AARCH64_SP_REGNUM
               && strcmp ("str", inst.opcode->name) == 0)
        {
          /* STR (immediate) */
          unsigned int rt = inst.operands[0].reg.regno;
          int32_t imm = inst.operands[1].addr.offset.imm;
          unsigned int rn = inst.operands[1].addr.base_regno;
          bool is64
            = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
                      || inst.operands[0].type == AARCH64_OPND_Ft);

          if (inst.operands[0].type == AARCH64_OPND_Ft)
            {
              /* Only bottom 64-bit of each V register (D register) need
                 to be preserved.  */
              gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
              rt += AARCH64_X_REGISTER_COUNT;
            }

          pv_area_store (stack, pv_add_constant (regs[rn], imm),
                         is64 ? 8 : 4, regs[rt]);
          if (inst.operands[1].addr.writeback)
            regs[rn] = pv_add_constant (regs[rn], imm);
        }
      else if (inst.opcode->iclass == testbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else
        {
          if (aarch64_debug)
            {
              debug_printf ("aarch64: prologue analysis gave up addr=%s"
                            " opcode=0x%x\n",
                            core_addr_to_string_nz (start), insn);
            }
          break;
        }
    }

  if (cache == NULL)
    {
      do_cleanups (back_to);
      return start;
    }

  if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Frame pointer is fp.  Frame size is constant.  */
      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = -regs[AARCH64_FP_REGNUM].k;
    }
  else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Try the stack pointer.  */
      cache->framesize = -regs[AARCH64_SP_REGNUM].k;
      cache->framereg = AARCH64_SP_REGNUM;
    }
  else
    {
      /* We're just out of luck.  We don't know where the frame is.  */
      cache->framereg = -1;
      cache->framesize = 0;
    }

  for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
    {
      CORE_ADDR offset;

      if (pv_area_find_reg (stack, gdbarch, i, &offset))
        cache->saved_regs[i].addr = offset;
    }

  for (i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
    {
      int regnum = gdbarch_num_regs (gdbarch);
      CORE_ADDR offset;

      if (pv_area_find_reg (stack, gdbarch, i + AARCH64_X_REGISTER_COUNT,
                            &offset))
        cache->saved_regs[i + regnum + AARCH64_D0_REGNUM].addr = offset;
    }

  do_cleanups (back_to);
  return start;
}
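
/* A worked example: for the common prologue

     stp x29, x30, [sp, #-32]!
     mov x29, sp

   the STP records x29 and x30 at offsets -32 and -24 below the entry
   SP and its writeback leaves regs[sp] = sp - 32; "mov x29, sp"
   assembles to "add x29, sp, #0" and is handled by the addsub_imm arm
   above, leaving regs[x29] = sp - 32 as well.  The analysis then
   reports framereg = x29 (FP) and framesize = 32.  */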

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
                          CORE_ADDR start, CORE_ADDR limit,
                          struct aarch64_prologue_cache *cache)
{
  instruction_reader reader;

  return aarch64_analyze_prologue (gdbarch, start, limit, cache,
                                   reader);
}

#if GDB_SELF_TEST

namespace selftests {

/* Instruction reader from manually cooked instruction sequences.  */

class instruction_reader_test : public abstract_instruction_reader
{
public:
  template<size_t SIZE>
  explicit instruction_reader_test (const uint32_t (&insns)[SIZE])
    : m_insns (insns), m_insns_size (SIZE)
  {}

  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
  {
    SELF_CHECK (len == 4);
    SELF_CHECK (memaddr % 4 == 0);
    SELF_CHECK (memaddr / 4 < m_insns_size);

    return m_insns[memaddr / 4];
  }

private:
  const uint32_t *m_insns;
  size_t m_insns_size;
};

static void
aarch64_analyze_prologue_test (void)
{
  struct gdbarch_info info;

  gdbarch_info_init (&info);
  info.bfd_arch_info = bfd_scan_arch ("aarch64");

  struct gdbarch *gdbarch = gdbarch_find_by_info (info);
  SELF_CHECK (gdbarch != NULL);

  /* Test the simple prologue in which frame pointer is used.  */
  {
    struct aarch64_prologue_cache cache;
    cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);

    static const uint32_t insns[] = {
      0xa9af7bfd, /* stp     x29, x30, [sp,#-272]! */
      0x910003fd, /* mov     x29, sp */
      0x97ffffe6, /* bl      0x400580 */
    };
    instruction_reader_test reader (insns);

    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
    SELF_CHECK (end == 4 * 2);

    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 272);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
        if (i == AARCH64_FP_REGNUM)
          SELF_CHECK (cache.saved_regs[i].addr == -272);
        else if (i == AARCH64_LR_REGNUM)
          SELF_CHECK (cache.saved_regs[i].addr == -264);
        else
          SELF_CHECK (cache.saved_regs[i].addr == -1);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
        int regnum = gdbarch_num_regs (gdbarch);

        SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
                    == -1);
      }
  }

  /* Test a prologue in which STR is used and frame pointer is not
     used.  */
  {
    struct aarch64_prologue_cache cache;
    cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);

    static const uint32_t insns[] = {
      0xf81d0ff3, /* str     x19, [sp, #-48]! */
      0xb9002fe0, /* str     w0, [sp, #44] */
      0xf90013e1, /* str     x1, [sp, #32] */
      0xfd000fe0, /* str     d0, [sp, #24] */
      0xaa0203f3, /* mov     x19, x2 */
      0xf94013e0, /* ldr     x0, [sp, #32] */
    };
    instruction_reader_test reader (insns);

    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    SELF_CHECK (end == 4 * 5);

    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 48);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
        if (i == 1)
          SELF_CHECK (cache.saved_regs[i].addr == -16);
        else if (i == 19)
          SELF_CHECK (cache.saved_regs[i].addr == -48);
        else
          SELF_CHECK (cache.saved_regs[i].addr == -1);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
        int regnum = gdbarch_num_regs (gdbarch);

        if (i == 0)
          SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
                      == -24);
        else
          SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
                      == -1);
      }
  }
}
} // namespace selftests
#endif /* GDB_SELF_TEST */
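
/* These unit tests run as part of "maintenance selftest" in a GDB that
   was configured with self tests enabled.  */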

/* Implement the "skip_prologue" gdbarch method.  */

static CORE_ADDR
aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  CORE_ADDR func_addr, limit_pc;

  /* See if we can determine the end of the prologue via the symbol
     table.  If so, then return either PC, or the PC after the
     prologue, whichever is greater.  */
  if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
    {
      CORE_ADDR post_prologue_pc
        = skip_prologue_using_sal (gdbarch, func_addr);

      if (post_prologue_pc != 0)
        return std::max (pc, post_prologue_pc);
    }

  /* Can't determine prologue from the symbol table, need to examine
     instructions.  */

  /* Find an upper limit on the function prologue using the debug
     information.  If the debug information could not be used to
     provide that bound, then use an arbitrary large number as the
     upper bound.  */
  limit_pc = skip_prologue_using_sal (gdbarch, pc);
  if (limit_pc == 0)
    limit_pc = pc + 128;	/* Magic.  */

  /* Try disassembling prologue.  */
  return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
}

/* Scan the function prologue for THIS_FRAME and populate the prologue
   cache CACHE.  */

static void
aarch64_scan_prologue (struct frame_info *this_frame,
                       struct aarch64_prologue_cache *cache)
{
  CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
  CORE_ADDR prologue_start;
  CORE_ADDR prologue_end;
  CORE_ADDR prev_pc = get_frame_pc (this_frame);
  struct gdbarch *gdbarch = get_frame_arch (this_frame);

  cache->prev_pc = prev_pc;

  /* Assume we do not find a frame.  */
  cache->framereg = -1;
  cache->framesize = 0;

  if (find_pc_partial_function (block_addr, NULL, &prologue_start,
                                &prologue_end))
    {
      struct symtab_and_line sal = find_pc_line (prologue_start, 0);

      if (sal.line == 0)
        {
          /* No line info so use the current PC.  */
          prologue_end = prev_pc;
        }
      else if (sal.end < prologue_end)
        {
          /* The next line begins after the function end.  */
          prologue_end = sal.end;
        }

      prologue_end = std::min (prologue_end, prev_pc);
      aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
    }
  else
    {
      CORE_ADDR frame_loc;

      frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
      if (frame_loc == 0)
        return;

      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = 16;
      cache->saved_regs[29].addr = 0;
      cache->saved_regs[30].addr = 8;
    }
}

/* Fill in *CACHE with information about the prologue of *THIS_FRAME.  This
   function may throw an exception if the inferior's registers or memory is
   not available.  */

static void
aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
                               struct aarch64_prologue_cache *cache)
{
  CORE_ADDR unwound_fp;
  int reg;

  aarch64_scan_prologue (this_frame, cache);

  if (cache->framereg == -1)
    return;

  unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
  if (unwound_fp == 0)
    return;

  cache->prev_sp = unwound_fp + cache->framesize;

  /* Calculate actual addresses of saved registers using offsets
     determined by aarch64_analyze_prologue.  */
  for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
    if (trad_frame_addr_p (cache->saved_regs, reg))
      cache->saved_regs[reg].addr += cache->prev_sp;

  cache->func = get_frame_func (this_frame);

  cache->available_p = 1;
}

/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  TRY
    {
      aarch64_make_prologue_cache_1 (this_frame, cache);
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
        throw_exception (ex);
    }
  END_CATCH

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
                                           void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  /* Halt the backtrace at "_start".  */
  if (cache->prev_pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
    return UNWIND_OUTERMOST;

  /* We've hit a wall, stop.  */
  if (cache->prev_sp == 0)
    return UNWIND_OUTERMOST;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a normal frame is the current function's starting
   PC and the caller's SP when we were called.  */

static void
aarch64_prologue_this_id (struct frame_info *this_frame,
                          void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    *this_id = frame_id_build_unavailable_stack (cache->func);
  else
    *this_id = frame_id_build (cache->prev_sp, cache->func);
}

/* Implement the "prev_register" frame_unwind method.  */

static struct value *
aarch64_prologue_prev_register (struct frame_info *this_frame,
                                void **this_cache, int prev_regnum)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  /* If we are asked to unwind the PC, then we need to return the LR
     instead.  The prologue may save PC, but it will point into this
     frame's prologue, not the next frame's resume location.  */
  if (prev_regnum == AARCH64_PC_REGNUM)
    {
      CORE_ADDR lr;

      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      return frame_unwind_got_constant (this_frame, prev_regnum, lr);
    }

  /* SP is generally not saved to the stack, but this frame is
     identified by the next frame's stack pointer at the time of the
     call.  The value was already reconstructed into PREV_SP.  */
  /*
         +----------+  ^
         | saved lr |  |
      +->| saved fp |--+
      |  |          |
      |  |          |     <- Previous SP
      |  +----------+
      |  | saved lr |
      +--| saved fp |<- FP
         |          |
         |          |<- SP
         +----------+  */
  if (prev_regnum == AARCH64_SP_REGNUM)
    return frame_unwind_got_constant (this_frame, prev_regnum,
                                      cache->prev_sp);

  return trad_frame_get_prev_register (this_frame, cache->saved_regs,
                                       prev_regnum);
}

/* AArch64 prologue unwinder.  */
struct frame_unwind aarch64_prologue_unwind =
{
  NORMAL_FRAME,
  aarch64_prologue_frame_unwind_stop_reason,
  aarch64_prologue_this_id,
  aarch64_prologue_prev_register,
  NULL,
  default_frame_sniffer
};

/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  TRY
    {
      cache->prev_sp = get_frame_register_unsigned (this_frame,
                                                    AARCH64_SP_REGNUM);
      cache->prev_pc = get_frame_pc (this_frame);
      cache->available_p = 1;
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
        throw_exception (ex);
    }
  END_CATCH

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame,
                                       void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a stub frame is the current SP and LR.  */

static void
aarch64_stub_this_id (struct frame_info *this_frame,
                      void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (cache->available_p)
    *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
  else
    *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
}

/* Implement the "sniffer" frame_unwind method.  */

static int
aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
                             struct frame_info *this_frame,
                             void **this_prologue_cache)
{
  CORE_ADDR addr_in_block;
  gdb_byte dummy[4];

  addr_in_block = get_frame_address_in_block (this_frame);
  if (in_plt_section (addr_in_block)
      /* We also use the stub unwinder if the target memory is unreadable
         to avoid having the prologue unwinder trying to read it.  */
      || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
    return 1;

  return 0;
}

/* AArch64 stub unwinder.  */
struct frame_unwind aarch64_stub_unwind =
{
  NORMAL_FRAME,
  aarch64_stub_frame_unwind_stop_reason,
  aarch64_stub_this_id,
  aarch64_prologue_prev_register,
  NULL,
  aarch64_stub_unwind_sniffer
};

/* Return the frame base address of *THIS_FRAME.  */

static CORE_ADDR
aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  return cache->prev_sp - cache->framesize;
}

/* AArch64 default frame base information.  */
struct frame_base aarch64_normal_base =
{
  &aarch64_prologue_unwind,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base
};

/* Assuming THIS_FRAME is a dummy, return the frame ID of that
   dummy frame.  The frame ID's base needs to match the TOS value
   saved by save_dummy_frame_tos () and returned from
   aarch64_push_dummy_call, and the PC needs to match the dummy
   frame's breakpoint.  */

static struct frame_id
aarch64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  return frame_id_build (get_frame_register_unsigned (this_frame,
                                                      AARCH64_SP_REGNUM),
                         get_frame_pc (this_frame));
}

/* Implement the "unwind_pc" gdbarch method.  */

static CORE_ADDR
aarch64_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  CORE_ADDR pc
    = frame_unwind_register_unsigned (this_frame, AARCH64_PC_REGNUM);

  return pc;
}

/* Implement the "unwind_sp" gdbarch method.  */

static CORE_ADDR
aarch64_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  return frame_unwind_register_unsigned (this_frame, AARCH64_SP_REGNUM);
}

/* Return the value of the REGNUM register in the previous frame of
   *THIS_FRAME.  */

static struct value *
aarch64_dwarf2_prev_register (struct frame_info *this_frame,
                              void **this_cache, int regnum)
{
  CORE_ADDR lr;

  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      return frame_unwind_got_constant (this_frame, regnum, lr);

    default:
      internal_error (__FILE__, __LINE__,
                      _("Unexpected register %d"), regnum);
    }
}

/* Implement the "init_reg" dwarf2_frame_ops method.  */

static void
aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
                               struct dwarf2_frame_state_reg *reg,
                               struct frame_info *this_frame)
{
  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      reg->how = DWARF2_FRAME_REG_FN;
      reg->loc.fn = aarch64_dwarf2_prev_register;
      break;
    case AARCH64_SP_REGNUM:
      reg->how = DWARF2_FRAME_REG_CFA;
      break;
    }
}

/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

typedef struct
{
  /* Value to pass on stack.  It can be NULL if this item is for stack
     padding.  */
  const gdb_byte *data;

  /* Size in bytes of value to pass on stack.  */
  int len;
} stack_item_t;

DEF_VEC_O (stack_item_t);

/* Return the alignment (in bytes) of the given type.  */

static int
aarch64_type_align (struct type *t)
{
  int n;
  int align;
  int falign;

  t = check_typedef (t);
  switch (TYPE_CODE (t))
    {
    default:
      /* Should never happen.  */
      internal_error (__FILE__, __LINE__, _("unknown type alignment"));
      return 4;

    case TYPE_CODE_PTR:
    case TYPE_CODE_ENUM:
    case TYPE_CODE_INT:
    case TYPE_CODE_FLT:
    case TYPE_CODE_SET:
    case TYPE_CODE_RANGE:
    case TYPE_CODE_BITSTRING:
    case TYPE_CODE_REF:
    case TYPE_CODE_RVALUE_REF:
    case TYPE_CODE_CHAR:
    case TYPE_CODE_BOOL:
      return TYPE_LENGTH (t);

    case TYPE_CODE_ARRAY:
      if (TYPE_VECTOR (t))
        {
          /* Use the natural alignment for vector types (the same for
             scalar type), but the maximum alignment is 128-bit.  */
          if (TYPE_LENGTH (t) > 16)
            return 16;
          else
            return TYPE_LENGTH (t);
        }
      else
        return aarch64_type_align (TYPE_TARGET_TYPE (t));
    case TYPE_CODE_COMPLEX:
      return aarch64_type_align (TYPE_TARGET_TYPE (t));

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      align = 1;
      for (n = 0; n < TYPE_NFIELDS (t); n++)
        {
          falign = aarch64_type_align (TYPE_FIELD_TYPE (t, n));
          if (falign > align)
            align = falign;
        }
      return align;
    }
}
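
/* For example, struct { int32_t a; int64_t b; } reports the alignment
   of its most-aligned field, 8 bytes, while a 32-byte vector is capped
   at the 16-byte (128-bit) maximum above.  */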

/* Return 1 if *TY is a homogeneous floating-point aggregate or
   homogeneous short-vector aggregate as defined in the AAPCS64 ABI
   document; otherwise return 0.  */

static int
is_hfa_or_hva (struct type *ty)
{
  switch (TYPE_CODE (ty))
    {
    case TYPE_CODE_ARRAY:
      {
        struct type *target_ty = TYPE_TARGET_TYPE (ty);

        if (TYPE_VECTOR (ty))
          return 0;

        if (TYPE_LENGTH (ty) <= 4 /* HFA or HVA has at most 4 members.  */
            && (TYPE_CODE (target_ty) == TYPE_CODE_FLT /* HFA */
                || (TYPE_CODE (target_ty) == TYPE_CODE_ARRAY /* HVA */
                    && TYPE_VECTOR (target_ty))))
          return 1;
        break;
      }

    case TYPE_CODE_UNION:
    case TYPE_CODE_STRUCT:
      {
        /* HFA or HVA has at most four members.  */
        if (TYPE_NFIELDS (ty) > 0 && TYPE_NFIELDS (ty) <= 4)
          {
            struct type *member0_type;

            member0_type = check_typedef (TYPE_FIELD_TYPE (ty, 0));
            if (TYPE_CODE (member0_type) == TYPE_CODE_FLT
                || (TYPE_CODE (member0_type) == TYPE_CODE_ARRAY
                    && TYPE_VECTOR (member0_type)))
              {
                int i;

                for (i = 0; i < TYPE_NFIELDS (ty); i++)
                  {
                    struct type *member1_type;

                    member1_type = check_typedef (TYPE_FIELD_TYPE (ty, i));
                    if (TYPE_CODE (member0_type) != TYPE_CODE (member1_type)
                        || (TYPE_LENGTH (member0_type)
                            != TYPE_LENGTH (member1_type)))
                      return 0;
                  }
                return 1;
              }
          }
        return 0;
      }

    default:
      break;
    }

  return 0;
}
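
/* For instance, struct { float x, y, z; } is an HFA of three floats
   and travels in consecutive SIMD registers (s0-s2 as an argument),
   whereas struct { float x; double y; } mixes base types and is not
   an HFA.  */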

/* AArch64 function call information structure.  */
struct aarch64_call_info
{
  /* The current argument number.  */
  unsigned argnum;

  /* The next general purpose register number, equivalent to NGRN as
     described in the AArch64 Procedure Call Standard.  */
  unsigned ngrn;

  /* The next SIMD and floating point register number, equivalent to
     NSRN as described in the AArch64 Procedure Call Standard.  */
  unsigned nsrn;

  /* The next stacked argument address, equivalent to NSAA as
     described in the AArch64 Procedure Call Standard.  */
  unsigned nsaa;

  /* Stack item vector.  */
  VEC(stack_item_t) *si;
};

/* Pass a value in a sequence of consecutive X registers.  The caller
   is responsible for ensuring sufficient registers are available.  */

static void
pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
           struct aarch64_call_info *info, struct type *type,
           struct value *arg)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int len = TYPE_LENGTH (type);
  enum type_code typecode = TYPE_CODE (type);
  int regnum = AARCH64_X0_REGNUM + info->ngrn;
  const bfd_byte *buf = value_contents (arg);

  info->argnum++;

  while (len > 0)
    {
      int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
      CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
                                                   byte_order);

      /* Adjust sub-word struct/union args when big-endian.  */
      if (byte_order == BFD_ENDIAN_BIG
          && partial_len < X_REGISTER_SIZE
          && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
        regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);

      if (aarch64_debug)
        {
          debug_printf ("arg %d in %s = 0x%s\n", info->argnum,
                        gdbarch_register_name (gdbarch, regnum),
                        phex (regval, X_REGISTER_SIZE));
        }
      regcache_cooked_write_unsigned (regcache, regnum, regval);
      len -= partial_len;
      buf += partial_len;
      regnum++;
    }
}
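
/* E.g. a 16-byte struct passed by value with NGRN = 0 is split across
   x0 and x1; the caller (pass_in_x_or_stack below) accounts for both
   registers by bumping info->ngrn by the register count.  */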

/* Attempt to marshall a value in a V register.  Return 1 if
   successful, or 0 if insufficient registers are available.  This
   function, unlike the equivalent pass_in_x() function does not
   handle arguments spread across multiple registers.  */

static int
pass_in_v (struct gdbarch *gdbarch,
           struct regcache *regcache,
           struct aarch64_call_info *info,
           int len, const bfd_byte *buf)
{
  if (info->nsrn < 8)
    {
      int regnum = AARCH64_V0_REGNUM + info->nsrn;
      gdb_byte reg[V_REGISTER_SIZE];

      info->argnum++;
      info->nsrn++;

      memset (reg, 0, sizeof (reg));
      /* PCS C.1, the argument is allocated to the least significant
         bits of V register.  */
      memcpy (reg, buf, len);
      regcache_cooked_write (regcache, regnum, reg);

      if (aarch64_debug)
        {
          debug_printf ("arg %d in %s\n", info->argnum,
                        gdbarch_register_name (gdbarch, regnum));
        }
      return 1;
    }
  info->nsrn = 8;
  return 0;
}

/* Marshall an argument onto the stack.  */

static void
pass_on_stack (struct aarch64_call_info *info, struct type *type,
               struct value *arg)
{
  const bfd_byte *buf = value_contents (arg);
  int len = TYPE_LENGTH (type);
  int align;
  stack_item_t item;

  info->argnum++;

  align = aarch64_type_align (type);

  /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
     Natural alignment of the argument's type.  */
  align = align_up (align, 8);

  /* The AArch64 PCS requires at most doubleword alignment.  */
  if (align > 16)
    align = 16;

  if (aarch64_debug)
    {
      debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
                    info->nsaa);
    }

  item.len = len;
  item.data = buf;
  VEC_safe_push (stack_item_t, info->si, &item);

  info->nsaa += len;
  if (info->nsaa & (align - 1))
    {
      /* Push stack alignment padding.  */
      int pad = align - (info->nsaa & (align - 1));

      item.len = pad;
      item.data = NULL;

      VEC_safe_push (stack_item_t, info->si, &item);
      info->nsaa += pad;
    }
}
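
/* E.g. a 12-byte argument landing at NSAA = 0 pushes 12 bytes of data
   followed by a 4-byte NULL padding item, so the next stacked argument
   stays 8-byte aligned.  */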

/* Marshall an argument into a sequence of one or more consecutive X
   registers or, if insufficient X registers are available then onto
   the stack.  */

static void
pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
                    struct aarch64_call_info *info, struct type *type,
                    struct value *arg)
{
  int len = TYPE_LENGTH (type);
  int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;

  /* PCS C.13 - Pass in registers if we have enough spare */
  if (info->ngrn + nregs <= 8)
    {
      pass_in_x (gdbarch, regcache, info, type, arg);
      info->ngrn += nregs;
    }
  else
    {
      info->ngrn = 8;
      pass_on_stack (info, type, arg);
    }
}

/* Pass a value in a V register, or on the stack if insufficient are
   available.  */

static void
pass_in_v_or_stack (struct gdbarch *gdbarch,
                    struct regcache *regcache,
                    struct aarch64_call_info *info,
                    struct type *type,
                    struct value *arg)
{
  if (!pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (type),
                  value_contents (arg)))
    pass_on_stack (info, type, arg);
}

/* Implement the "push_dummy_call" gdbarch method.  */

static CORE_ADDR
aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
                         struct regcache *regcache, CORE_ADDR bp_addr,
                         int nargs,
                         struct value **args, CORE_ADDR sp, int struct_return,
                         CORE_ADDR struct_addr)
{
  int argnum;
  struct aarch64_call_info info;
  struct type *func_type;
  struct type *return_type;
  int lang_struct_return;

  memset (&info, 0, sizeof (info));

  /* We need to know what the type of the called function is in order
     to determine the number of named/anonymous arguments for the
     actual argument placement, and the return type in order to handle
     return value correctly.

     The generic code above us views the decision of return in memory
     or return in registers as a two stage processes.  The language
     handler is consulted first and may decide to return in memory (eg
     class with copy constructor returned by value), this will cause
     the generic code to allocate space AND insert an initial leading
     argument.

     If the language code does not decide to pass in memory then the
     target code is consulted.

     If the language code decides to pass in memory we want to move
     the pointer inserted as the initial argument from the argument
     list and into X8, the conventional AArch64 struct return pointer
     register.

     This is slightly awkward, ideally the flag "lang_struct_return"
     would be passed to the targets implementation of push_dummy_call.
     Rather than change the target interface we call the language code
     directly ourselves.  */

  func_type = check_typedef (value_type (function));

  /* Dereference function pointer types.  */
  if (TYPE_CODE (func_type) == TYPE_CODE_PTR)
    func_type = TYPE_TARGET_TYPE (func_type);

  gdb_assert (TYPE_CODE (func_type) == TYPE_CODE_FUNC
              || TYPE_CODE (func_type) == TYPE_CODE_METHOD);

  /* If language_pass_by_reference () returned true we will have been
     given an additional initial argument, a hidden pointer to the
     return slot in memory.  */
  return_type = TYPE_TARGET_TYPE (func_type);
  lang_struct_return = language_pass_by_reference (return_type);

  /* Set the return address.  For the AArch64, the return breakpoint
     is always at BP_ADDR.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);

  /* If we were given an initial argument for the return slot because
     lang_struct_return was true, lose it.  */
  if (lang_struct_return)
    {
      args++;
      nargs--;
    }

  /* The struct_return pointer occupies X8.  */
  if (struct_return || lang_struct_return)
    {
      if (aarch64_debug)
        {
          debug_printf ("struct return in %s = 0x%s\n",
                        gdbarch_register_name (gdbarch,
                                               AARCH64_STRUCT_RETURN_REGNUM),
                        paddress (gdbarch, struct_addr));
        }
      regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
                                      struct_addr);
    }

  for (argnum = 0; argnum < nargs; argnum++)
    {
      struct value *arg = args[argnum];
      struct type *arg_type;
      int len;

      arg_type = check_typedef (value_type (arg));
      len = TYPE_LENGTH (arg_type);

      switch (TYPE_CODE (arg_type))
        {
        case TYPE_CODE_INT:
        case TYPE_CODE_BOOL:
        case TYPE_CODE_CHAR:
        case TYPE_CODE_RANGE:
        case TYPE_CODE_ENUM:
          if (len < 4)
            {
              /* Promote to 32 bit integer.  */
              if (TYPE_UNSIGNED (arg_type))
                arg_type = builtin_type (gdbarch)->builtin_uint32;
              else
                arg_type = builtin_type (gdbarch)->builtin_int32;
              arg = value_cast (arg_type, arg);
            }
          pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;

        case TYPE_CODE_COMPLEX:
          if (info.nsrn <= 6)
            {
              const bfd_byte *buf = value_contents (arg);
              struct type *target_type =
                check_typedef (TYPE_TARGET_TYPE (arg_type));

              pass_in_v (gdbarch, regcache, &info,
                         TYPE_LENGTH (target_type), buf);
              pass_in_v (gdbarch, regcache, &info,
                         TYPE_LENGTH (target_type),
                         buf + TYPE_LENGTH (target_type));
            }
          else
            {
              info.nsrn = 8;
              pass_on_stack (&info, arg_type, arg);
            }
          break;
        case TYPE_CODE_FLT:
          pass_in_v_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;

        case TYPE_CODE_STRUCT:
        case TYPE_CODE_ARRAY:
        case TYPE_CODE_UNION:
          if (is_hfa_or_hva (arg_type))
            {
              int elements = TYPE_NFIELDS (arg_type);

              /* Homogeneous Aggregates */
              if (info.nsrn + elements < 8)
                {
                  int i;

                  for (i = 0; i < elements; i++)
                    {
                      /* We know that we have sufficient registers
                         available therefore this will never fallback
                         to the stack.  */
                      struct value *field =
                        value_primitive_field (arg, 0, i, arg_type);
                      struct type *field_type =
                        check_typedef (value_type (field));

                      pass_in_v_or_stack (gdbarch, regcache, &info,
                                          field_type, field);
                    }
                }
              else
                {
                  info.nsrn = 8;
                  pass_on_stack (&info, arg_type, arg);
                }
            }
          else if (TYPE_CODE (arg_type) == TYPE_CODE_ARRAY
                   && TYPE_VECTOR (arg_type) && (len == 16 || len == 8))
            {
              /* Short vector types are passed in V registers.  */
              pass_in_v_or_stack (gdbarch, regcache, &info, arg_type, arg);
            }
          else if (len > 16)
            {
              /* PCS B.7 Aggregates larger than 16 bytes are passed by
                 invisible reference.  */

              /* Allocate aligned storage.  */
              sp = align_down (sp - len, 16);

              /* Write the real data into the stack.  */
              write_memory (sp, value_contents (arg), len);

              /* Construct the indirection.  */
              arg_type = lookup_pointer_type (arg_type);
              arg = value_from_pointer (arg_type, sp);
              pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
            }
          else
            /* PCS C.15 / C.18 multiple values pass.  */
            pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;

        default:
          pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;
        }
    }

  /* Make sure stack retains 16 byte alignment.  */
  if (info.nsaa & 15)
    sp -= 16 - (info.nsaa & 15);

  while (!VEC_empty (stack_item_t, info.si))
    {
      stack_item_t *si = VEC_last (stack_item_t, info.si);

      sp -= si->len;
      if (si->data != NULL)
        write_memory (sp, si->data, si->len);
      VEC_pop (stack_item_t, info.si);
    }

  VEC_free (stack_item_t, info.si);

  /* Finally, update the SP register.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);

  return sp;
}

/* Implement the "frame_align" gdbarch method.  */

static CORE_ADDR
aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
{
  /* Align the stack to sixteen bytes.  */
  return sp & ~(CORE_ADDR) 15;
}
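
/* E.g. an incoming SP of 0x7ffffffff8c7 is rounded down to
   0x7ffffffff8c0.  */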

/* Return the type for an AdvSIMD Q register.  */

static struct type *
aarch64_vnq_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnq_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint128;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int128;
      append_composite_type_field (t, "s", elem);

      tdep->vnq_type = t;
    }

  return tdep->vnq_type;
}

/* Return the type for an AdvSIMD D register.  */

static struct type *
aarch64_vnd_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnd_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_double;
      append_composite_type_field (t, "f", elem);

      elem = builtin_type (gdbarch)->builtin_uint64;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int64;
      append_composite_type_field (t, "s", elem);

      tdep->vnd_type = t;
    }

  return tdep->vnd_type;
}

/* Return the type for an AdvSIMD S register.  */

static struct type *
aarch64_vns_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vns_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_float;
      append_composite_type_field (t, "f", elem);

      elem = builtin_type (gdbarch)->builtin_uint32;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int32;
      append_composite_type_field (t, "s", elem);

      tdep->vns_type = t;
    }

  return tdep->vns_type;
}

/* Return the type for an AdvSIMD H register.  */

static struct type *
aarch64_vnh_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnh_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint16;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int16;
      append_composite_type_field (t, "s", elem);

      tdep->vnh_type = t;
    }

  return tdep->vnh_type;
}

/* Return the type for an AdvSIMD B register.  */

static struct type *
aarch64_vnb_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnb_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint8;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int8;
      append_composite_type_field (t, "s", elem);

      tdep->vnb_type = t;
    }

  return tdep->vnb_type;
}

/* Implement the "dwarf2_reg_to_regnum" gdbarch method.  */

static int
aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
{
  if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
    return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;

  if (reg == AARCH64_DWARF_SP)
    return AARCH64_SP_REGNUM;

  if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
    return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;

  return -1;
}
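
/* Under the AArch64 DWARF register numbering this maps DWARF registers
   0-30 to x0-x30, 31 to sp and 64-95 to v0-v31; any other number
   yields -1.  */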

/* Implement the "print_insn" gdbarch method.  */

static int
aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
{
  info->symbols = NULL;
  return default_print_insn (memaddr, info);
}

/* AArch64 BRK software debug mode instruction.
   Note that AArch64 code is always little-endian.
   1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000.  */
constexpr gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};

typedef BP_MANIPULATION (aarch64_default_breakpoint) aarch64_breakpoint;
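
/* The array above is the encoding of "brk #0" (0xd4200000) laid out
   little-endian, hence the byte order 00 00 20 d4.  */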
07b287a0
MS
1788
1789/* Extract from an array REGS containing the (raw) register state a
1790 function return value of type TYPE, and copy that, in virtual
1791 format, into VALBUF. */
1792
1793static void
1794aarch64_extract_return_value (struct type *type, struct regcache *regs,
1795 gdb_byte *valbuf)
1796{
1797 struct gdbarch *gdbarch = get_regcache_arch (regs);
1798 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1799
1800 if (TYPE_CODE (type) == TYPE_CODE_FLT)
1801 {
1802 bfd_byte buf[V_REGISTER_SIZE];
1803 int len = TYPE_LENGTH (type);
1804
1805 regcache_cooked_read (regs, AARCH64_V0_REGNUM, buf);
1806 memcpy (valbuf, buf, len);
1807 }
1808 else if (TYPE_CODE (type) == TYPE_CODE_INT
1809 || TYPE_CODE (type) == TYPE_CODE_CHAR
1810 || TYPE_CODE (type) == TYPE_CODE_BOOL
1811 || TYPE_CODE (type) == TYPE_CODE_PTR
aa006118 1812 || TYPE_IS_REFERENCE (type)
07b287a0
MS
1813 || TYPE_CODE (type) == TYPE_CODE_ENUM)
1814 {
1815 /* If the the type is a plain integer, then the access is
1816 straight-forward. Otherwise we have to play around a bit
1817 more. */
1818 int len = TYPE_LENGTH (type);
1819 int regno = AARCH64_X0_REGNUM;
1820 ULONGEST tmp;
1821
1822 while (len > 0)
1823 {
1824 /* By using store_unsigned_integer we avoid having to do
1825 anything special for small big-endian values. */
1826 regcache_cooked_read_unsigned (regs, regno++, &tmp);
1827 store_unsigned_integer (valbuf,
1828 (len > X_REGISTER_SIZE
1829 ? X_REGISTER_SIZE : len), byte_order, tmp);
1830 len -= X_REGISTER_SIZE;
1831 valbuf += X_REGISTER_SIZE;
1832 }
1833 }
1834 else if (TYPE_CODE (type) == TYPE_CODE_COMPLEX)
1835 {
1836 int regno = AARCH64_V0_REGNUM;
1837 bfd_byte buf[V_REGISTER_SIZE];
1838 struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
1839 int len = TYPE_LENGTH (target_type);
1840
1841 regcache_cooked_read (regs, regno, buf);
1842 memcpy (valbuf, buf, len);
1843 valbuf += len;
1844 regcache_cooked_read (regs, regno + 1, buf);
1845 memcpy (valbuf, buf, len);
1846 valbuf += len;
1847 }
cd635f74 1848 else if (is_hfa_or_hva (type))
07b287a0
MS
1849 {
1850 int elements = TYPE_NFIELDS (type);
1851 struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
1852 int len = TYPE_LENGTH (member_type);
1853 int i;
1854
1855 for (i = 0; i < elements; i++)
1856 {
1857 int regno = AARCH64_V0_REGNUM + i;
1858 bfd_byte buf[V_REGISTER_SIZE];
1859
1860 if (aarch64_debug)
1861 {
1862 debug_printf ("read HFA or HVA return value element %d from %s\n",
1863 i + 1,
1864 gdbarch_register_name (gdbarch, regno));
1865 }
1866 regcache_cooked_read (regs, regno, buf);
1867
1868 memcpy (valbuf, buf, len);
1869 valbuf += len;
1870 }
1871 }
1872 else if (TYPE_CODE (type) == TYPE_CODE_ARRAY && TYPE_VECTOR (type)
1873 && (TYPE_LENGTH (type) == 16 || TYPE_LENGTH (type) == 8))
1874 {
1875 /* A short vector is returned in a V register. */
1876 gdb_byte buf[V_REGISTER_SIZE];
1877
1878 regcache_cooked_read (regs, AARCH64_V0_REGNUM, buf);
1879 memcpy (valbuf, buf, TYPE_LENGTH (type));
1880 }
1881 else
1882 {
1883 /* For a structure or union the behaviour is as if the value had
1884 been stored to word-aligned memory and then loaded into
1885 registers with 64-bit load instruction(s). */
1886 int len = TYPE_LENGTH (type);
1887 int regno = AARCH64_X0_REGNUM;
1888 bfd_byte buf[X_REGISTER_SIZE];
1889
1890 while (len > 0)
1891 {
1892 regcache_cooked_read (regs, regno++, buf);
1893 memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
1894 len -= X_REGISTER_SIZE;
1895 valbuf += X_REGISTER_SIZE;
1896 }
1897 }
1898}
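
/* Illustrative example (not from the original source): for a
   12-byte struct { int a, b, c; } return value, the loop above
   reads x0 and copies all 8 bytes, then reads x1 and copies the
   remaining 4 bytes into VALBUF.  */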
1899
1900
1901/* Will a function return an aggregate type in memory or in a
1902 register? Return 0 if an aggregate type can be returned in a
1903 register, 1 if it must be returned in memory. */
1904
1905static int
1906aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
1907{
1908 type = check_typedef (type);
1909
1910 if (is_hfa_or_hva (type))
1911 {
1912 /* v0-v7 are used to return values and one register is allocated
1913 for one member. However, HFA or HVA has at most four members. */
1914 return 0;
1915 }
1916
1917 if (TYPE_LENGTH (type) > 16)
1918 {
1919 /* PCS B.6 Aggregates larger than 16 bytes are passed by
1920 invisible reference. */
1921
1922 return 1;
1923 }
1924
1925 return 0;
1926}
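
/* Illustrative examples (not from the original source):
   struct { float x, y, z; } is an HFA of three floats, so it comes
   back in v0-v2 and this function returns 0;
   struct { char c[24]; } is larger than 16 bytes, so it is returned
   via invisible reference and this function returns 1;
   struct { long a, b; } is exactly 16 bytes and is returned in
   x0/x1.  */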
1927
1928/* Write into appropriate registers a function return value of type
1929 TYPE, given in virtual format. */
1930
1931static void
1932aarch64_store_return_value (struct type *type, struct regcache *regs,
1933 const gdb_byte *valbuf)
1934{
1935 struct gdbarch *gdbarch = get_regcache_arch (regs);
1936 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1937
1938 if (TYPE_CODE (type) == TYPE_CODE_FLT)
1939 {
1940 bfd_byte buf[V_REGISTER_SIZE];
1941 int len = TYPE_LENGTH (type);
1942
1943 memcpy (buf, valbuf, len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
1944 regcache_cooked_write (regs, AARCH64_V0_REGNUM, buf);
1945 }
1946 else if (TYPE_CODE (type) == TYPE_CODE_INT
1947 || TYPE_CODE (type) == TYPE_CODE_CHAR
1948 || TYPE_CODE (type) == TYPE_CODE_BOOL
1949 || TYPE_CODE (type) == TYPE_CODE_PTR
1950 || TYPE_IS_REFERENCE (type)
1951 || TYPE_CODE (type) == TYPE_CODE_ENUM)
1952 {
1953 if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
1954 {
1955 /* Values of one word or less are zero/sign-extended and
1956 returned in x0. */
1957 bfd_byte tmpbuf[X_REGISTER_SIZE];
1958 LONGEST val = unpack_long (type, valbuf);
1959
1960 store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
1961 regcache_cooked_write (regs, AARCH64_X0_REGNUM, tmpbuf);
1962 }
1963 else
1964 {
1965 /* Integral values greater than one word are stored in
1966 consecutive registers starting with x0.  This will always
1967 be a multiple of the register size. */
1968 int len = TYPE_LENGTH (type);
1969 int regno = AARCH64_X0_REGNUM;
1970
1971 while (len > 0)
1972 {
1973 regcache_cooked_write (regs, regno++, valbuf);
1974 len -= X_REGISTER_SIZE;
1975 valbuf += X_REGISTER_SIZE;
1976 }
1977 }
1978 }
1979 else if (is_hfa_or_hva (type))
1980 {
1981 int elements = TYPE_NFIELDS (type);
1982 struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
1983 int len = TYPE_LENGTH (member_type);
1984 int i;
1985
1986 for (i = 0; i < elements; i++)
1987 {
1988 int regno = AARCH64_V0_REGNUM + i;
1989 bfd_byte tmpbuf[V_REGISTER_SIZE];
1990
1991 if (aarch64_debug)
1992 {
1993 debug_printf ("write HFA or HVA return value element %d to %s\n",
1994 i + 1,
1995 gdbarch_register_name (gdbarch, regno));
1996 }
1997
1998 memcpy (tmpbuf, valbuf, len);
1999 regcache_cooked_write (regs, regno, tmpbuf);
2000 valbuf += len;
2001 }
2002 }
2003 else if (TYPE_CODE (type) == TYPE_CODE_ARRAY && TYPE_VECTOR (type)
2004 && (TYPE_LENGTH (type) == 8 || TYPE_LENGTH (type) == 16))
2005 {
2006 /* Short vector. */
2007 gdb_byte buf[V_REGISTER_SIZE];
2008
2009 memcpy (buf, valbuf, TYPE_LENGTH (type));
2010 regcache_cooked_write (regs, AARCH64_V0_REGNUM, buf);
2011 }
2012 else
2013 {
2014 /* For a structure or union the behaviour is as if the value had
2015 been stored to word-aligned memory and then loaded into
2016 registers with 64-bit load instruction(s). */
2017 int len = TYPE_LENGTH (type);
2018 int regno = AARCH64_X0_REGNUM;
2019 bfd_byte tmpbuf[X_REGISTER_SIZE];
2020
2021 while (len > 0)
2022 {
2023 memcpy (tmpbuf, valbuf,
2024 len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2025 regcache_cooked_write (regs, regno++, tmpbuf);
2026 len -= X_REGISTER_SIZE;
2027 valbuf += X_REGISTER_SIZE;
2028 }
2029 }
2030}
2031
2032/* Implement the "return_value" gdbarch method. */
2033
2034static enum return_value_convention
2035aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
2036 struct type *valtype, struct regcache *regcache,
2037 gdb_byte *readbuf, const gdb_byte *writebuf)
2038{
2039
2040 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
2041 || TYPE_CODE (valtype) == TYPE_CODE_UNION
2042 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
2043 {
2044 if (aarch64_return_in_memory (gdbarch, valtype))
2045 {
2046 if (aarch64_debug)
2047 debug_printf ("return value in memory\n");
2048 return RETURN_VALUE_STRUCT_CONVENTION;
2049 }
2050 }
2051
2052 if (writebuf)
2053 aarch64_store_return_value (valtype, regcache, writebuf);
2054
2055 if (readbuf)
2056 aarch64_extract_return_value (valtype, regcache, readbuf);
2057
2058 if (aarch64_debug)
2059 debug_printf ("return value in registers\n");
2060
2061 return RETURN_VALUE_REGISTER_CONVENTION;
2062}
2063
2064/* Implement the "get_longjmp_target" gdbarch method. */
2065
2066static int
2067aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2068{
2069 CORE_ADDR jb_addr;
2070 gdb_byte buf[X_REGISTER_SIZE];
2071 struct gdbarch *gdbarch = get_frame_arch (frame);
2072 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2073 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2074
2075 jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
2076
2077 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
2078 X_REGISTER_SIZE))
2079 return 0;
2080
2081 *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
2082 return 1;
2083}
2084
2085/* Implement the "gen_return_address" gdbarch method. */
2086
2087static void
2088aarch64_gen_return_address (struct gdbarch *gdbarch,
2089 struct agent_expr *ax, struct axs_value *value,
2090 CORE_ADDR scope)
2091{
2092 value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
2093 value->kind = axs_lvalue_register;
2094 value->u.reg = AARCH64_LR_REGNUM;
2095}
2096\f
2097
2098/* Return the pseudo register name corresponding to register regnum. */
2099
2100static const char *
2101aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
2102{
2103 static const char *const q_name[] =
2104 {
2105 "q0", "q1", "q2", "q3",
2106 "q4", "q5", "q6", "q7",
2107 "q8", "q9", "q10", "q11",
2108 "q12", "q13", "q14", "q15",
2109 "q16", "q17", "q18", "q19",
2110 "q20", "q21", "q22", "q23",
2111 "q24", "q25", "q26", "q27",
2112 "q28", "q29", "q30", "q31",
2113 };
2114
2115 static const char *const d_name[] =
2116 {
2117 "d0", "d1", "d2", "d3",
2118 "d4", "d5", "d6", "d7",
2119 "d8", "d9", "d10", "d11",
2120 "d12", "d13", "d14", "d15",
2121 "d16", "d17", "d18", "d19",
2122 "d20", "d21", "d22", "d23",
2123 "d24", "d25", "d26", "d27",
2124 "d28", "d29", "d30", "d31",
2125 };
2126
2127 static const char *const s_name[] =
2128 {
2129 "s0", "s1", "s2", "s3",
2130 "s4", "s5", "s6", "s7",
2131 "s8", "s9", "s10", "s11",
2132 "s12", "s13", "s14", "s15",
2133 "s16", "s17", "s18", "s19",
2134 "s20", "s21", "s22", "s23",
2135 "s24", "s25", "s26", "s27",
2136 "s28", "s29", "s30", "s31",
2137 };
2138
2139 static const char *const h_name[] =
2140 {
2141 "h0", "h1", "h2", "h3",
2142 "h4", "h5", "h6", "h7",
2143 "h8", "h9", "h10", "h11",
2144 "h12", "h13", "h14", "h15",
2145 "h16", "h17", "h18", "h19",
2146 "h20", "h21", "h22", "h23",
2147 "h24", "h25", "h26", "h27",
2148 "h28", "h29", "h30", "h31",
2149 };
2150
2151 static const char *const b_name[] =
2152 {
2153 "b0", "b1", "b2", "b3",
2154 "b4", "b5", "b6", "b7",
2155 "b8", "b9", "b10", "b11",
2156 "b12", "b13", "b14", "b15",
2157 "b16", "b17", "b18", "b19",
2158 "b20", "b21", "b22", "b23",
2159 "b24", "b25", "b26", "b27",
2160 "b28", "b29", "b30", "b31",
2161 };
2162
2163 regnum -= gdbarch_num_regs (gdbarch);
2164
2165 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2166 return q_name[regnum - AARCH64_Q0_REGNUM];
2167
2168 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2169 return d_name[regnum - AARCH64_D0_REGNUM];
2170
2171 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2172 return s_name[regnum - AARCH64_S0_REGNUM];
2173
2174 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2175 return h_name[regnum - AARCH64_H0_REGNUM];
2176
2177 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2178 return b_name[regnum - AARCH64_B0_REGNUM];
2179
2180 internal_error (__FILE__, __LINE__,
2181 _("aarch64_pseudo_register_name: bad register number %d"),
2182 regnum);
2183}
2184
2185/* Implement the "pseudo_register_type" tdesc_arch_data method. */
2186
2187static struct type *
2188aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
2189{
2190 regnum -= gdbarch_num_regs (gdbarch);
2191
2192 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2193 return aarch64_vnq_type (gdbarch);
2194
2195 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2196 return aarch64_vnd_type (gdbarch);
2197
2198 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2199 return aarch64_vns_type (gdbarch);
2200
2201 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2202 return aarch64_vnh_type (gdbarch);
2203
2204 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2205 return aarch64_vnb_type (gdbarch);
2206
2207 internal_error (__FILE__, __LINE__,
2208 _("aarch64_pseudo_register_type: bad register number %d"),
2209 regnum);
2210}
2211
2212/* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2213
2214static int
2215aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
2216 struct reggroup *group)
2217{
2218 regnum -= gdbarch_num_regs (gdbarch);
2219
2220 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2221 return group == all_reggroup || group == vector_reggroup;
2222 else if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2223 return (group == all_reggroup || group == vector_reggroup
2224 || group == float_reggroup);
2225 else if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2226 return (group == all_reggroup || group == vector_reggroup
2227 || group == float_reggroup);
2228 else if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2229 return group == all_reggroup || group == vector_reggroup;
2230 else if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2231 return group == all_reggroup || group == vector_reggroup;
2232
2233 return group == all_reggroup;
2234}
2235
2236/* Implement the "pseudo_register_read_value" gdbarch method. */
2237
2238static struct value *
2239aarch64_pseudo_read_value (struct gdbarch *gdbarch,
2240 struct regcache *regcache,
2241 int regnum)
2242{
2243 gdb_byte reg_buf[V_REGISTER_SIZE];
2244 struct value *result_value;
2245 gdb_byte *buf;
2246
2247 result_value = allocate_value (register_type (gdbarch, regnum));
2248 VALUE_LVAL (result_value) = lval_register;
2249 VALUE_REGNUM (result_value) = regnum;
2250 buf = value_contents_raw (result_value);
2251
2252 regnum -= gdbarch_num_regs (gdbarch);
2253
2254 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2255 {
2256 enum register_status status;
2257 unsigned v_regnum;
2258
2259 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2260 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2261 if (status != REG_VALID)
2262 mark_value_bytes_unavailable (result_value, 0,
2263 TYPE_LENGTH (value_type (result_value)));
2264 else
2265 memcpy (buf, reg_buf, Q_REGISTER_SIZE);
2266 return result_value;
2267 }
2268
2269 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2270 {
2271 enum register_status status;
2272 unsigned v_regnum;
2273
2274 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2275 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2276 if (status != REG_VALID)
2277 mark_value_bytes_unavailable (result_value, 0,
2278 TYPE_LENGTH (value_type (result_value)));
2279 else
2280 memcpy (buf, reg_buf, D_REGISTER_SIZE);
2281 return result_value;
2282 }
2283
2284 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2285 {
2286 enum register_status status;
2287 unsigned v_regnum;
2288
2289 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2290 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2291 if (status != REG_VALID)
2292 mark_value_bytes_unavailable (result_value, 0,
2293 TYPE_LENGTH (value_type (result_value)));
2294 else
2295 memcpy (buf, reg_buf, S_REGISTER_SIZE);
2296 return result_value;
2297 }
2298
2299 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2300 {
2301 enum register_status status;
2302 unsigned v_regnum;
2303
2304 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2305 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2306 if (status != REG_VALID)
2307 mark_value_bytes_unavailable (result_value, 0,
2308 TYPE_LENGTH (value_type (result_value)));
2309 else
2310 memcpy (buf, reg_buf, H_REGISTER_SIZE);
2311 return result_value;
2312 }
2313
2314 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2315 {
2316 enum register_status status;
2317 unsigned v_regnum;
2318
2319 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2320 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2321 if (status != REG_VALID)
2322 mark_value_bytes_unavailable (result_value, 0,
2323 TYPE_LENGTH (value_type (result_value)));
2324 else
2325 memcpy (buf, reg_buf, B_REGISTER_SIZE);
2326 return result_value;
2327 }
2328
2329 gdb_assert_not_reached ("regnum out of bounds");
2330}
2331
2332/* Implement the "pseudo_register_write" gdbarch method. */
2333
2334static void
2335aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2336 int regnum, const gdb_byte *buf)
2337{
2338 gdb_byte reg_buf[V_REGISTER_SIZE];
2339
2340 /* Ensure the register buffer is zero.  We want GDB writes to the
2341 various 'scalar' pseudo registers to behave like architectural
2342 writes: register-width bytes are written and the remainder is
2343 set to zero. */
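/* Illustrative example (not from the original source): writing the
   4-byte pseudo register s0 copies 4 bytes into this zeroed 16-byte
   buffer, so the raw write of v0 below also clears its upper 12
   bytes, just as an architectural write to s0 would.  */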
2344 memset (reg_buf, 0, sizeof (reg_buf));
2345
2346 regnum -= gdbarch_num_regs (gdbarch);
2347
2348 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2349 {
2350 /* pseudo Q registers */
2351 unsigned v_regnum;
2352
2353 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2354 memcpy (reg_buf, buf, Q_REGISTER_SIZE);
2355 regcache_raw_write (regcache, v_regnum, reg_buf);
2356 return;
2357 }
2358
2359 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2360 {
2361 /* pseudo D registers */
2362 unsigned v_regnum;
2363
2364 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2365 memcpy (reg_buf, buf, D_REGISTER_SIZE);
2366 regcache_raw_write (regcache, v_regnum, reg_buf);
2367 return;
2368 }
2369
2370 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2371 {
2372 unsigned v_regnum;
2373
2374 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2375 memcpy (reg_buf, buf, S_REGISTER_SIZE);
2376 regcache_raw_write (regcache, v_regnum, reg_buf);
2377 return;
2378 }
2379
2380 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2381 {
2382 /* pseudo H registers */
2383 unsigned v_regnum;
2384
2385 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2386 memcpy (reg_buf, buf, H_REGISTER_SIZE);
2387 regcache_raw_write (regcache, v_regnum, reg_buf);
2388 return;
2389 }
2390
2391 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2392 {
2393 /* pseudo B registers */
2394 unsigned v_regnum;
2395
2396 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2397 memcpy (reg_buf, buf, B_REGISTER_SIZE);
2398 regcache_raw_write (regcache, v_regnum, reg_buf);
2399 return;
2400 }
2401
2402 gdb_assert_not_reached ("regnum out of bounds");
2403}
2404
2405/* Callback function for user_reg_add. */
2406
2407static struct value *
2408value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
2409{
2410 const int *reg_p = (const int *) baton;
2411
2412 return value_of_register (*reg_p, frame);
2413}
2414\f
2415
2416/* Implement the "software_single_step" gdbarch method, needed to
2417 single step through atomic sequences on AArch64. */
2418
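/* An example of such a sequence (illustrative, not from the original
   source) is a canonical atomic increment:

       loop: ldaxr  w1, [x0]      ; load-exclusive opens the sequence
             add    w1, w1, #1
             stlxr  w2, w1, [x0]  ; store-exclusive closes it
             cbnz   w2, loop      ; retry branch

   Trapping in the middle of the loop can clear the exclusive monitor
   and make the store-exclusive fail forever, so the code below steps
   over the whole sequence and places a breakpoint after the closing
   store-exclusive instead.  */
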
a0ff9e1a 2419static std::vector<CORE_ADDR>
f5ea389a 2420aarch64_software_single_step (struct regcache *regcache)
9404b58f 2421{
0187a92f 2422 struct gdbarch *gdbarch = get_regcache_arch (regcache);
9404b58f
KM
2423 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2424 const int insn_size = 4;
2425 const int atomic_sequence_length = 16; /* Instruction sequence length. */
2426 CORE_ADDR pc = regcache_read_pc (regcache);
2427 CORE_ADDR breaks[2] = { -1, -1 };
2428 CORE_ADDR loc = pc;
2429 CORE_ADDR closing_insn = 0;
2430 uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
2431 byte_order_for_code);
2432 int index;
2433 int insn_count;
2434 int bc_insn_count = 0; /* Conditional branch instruction count. */
2435 int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed). */
2436 aarch64_inst inst;
2437
2438 if (aarch64_decode_insn (insn, &inst, 1) != 0)
2439 return {};
2440
2441 /* Look for a Load Exclusive instruction which begins the sequence. */
2442 if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
2443 return {};
2444
2445 for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
2446 {
2447 loc += insn_size;
2448 insn = read_memory_unsigned_integer (loc, insn_size,
2449 byte_order_for_code);
2450
2451 if (aarch64_decode_insn (insn, &inst, 1) != 0)
2452 return {};
2453 /* Check if the instruction is a conditional branch. */
2454 if (inst.opcode->iclass == condbranch)
2455 {
2456 gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);
2457
2458 if (bc_insn_count >= 1)
2459 return {};
2460
2461 /* It is, so we'll try to set a breakpoint at the destination. */
2462 breaks[1] = loc + inst.operands[0].imm.value;
2463
2464 bc_insn_count++;
2465 last_breakpoint++;
2466 }
2467
2468 /* Look for the Store Exclusive which closes the atomic sequence. */
2469 if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
2470 {
2471 closing_insn = loc;
2472 break;
2473 }
2474 }
2475
2476 /* We didn't find a closing Store Exclusive instruction, fall back. */
2477 if (!closing_insn)
2478 return {};
2479
2480 /* Insert breakpoint after the end of the atomic sequence. */
2481 breaks[0] = loc + insn_size;
2482
2483 /* Check for duplicated breakpoints, and also check that the second
2484 breakpoint is not within the atomic sequence. */
2485 if (last_breakpoint
2486 && (breaks[1] == breaks[0]
2487 || (breaks[1] >= pc && breaks[1] <= closing_insn)))
2488 last_breakpoint = 0;
2489
2490 std::vector<CORE_ADDR> next_pcs;
2491
2492 /* Insert the breakpoint at the end of the sequence, and one at the
2493 destination of the conditional branch, if it exists. */
2494 for (index = 0; index <= last_breakpoint; index++)
2495 next_pcs.push_back (breaks[index]);
2496
2497 return next_pcs;
2498}
2499
2500struct displaced_step_closure
2501{
2502 /* True when a conditional instruction, such as B.COND, TBZ, etc.,
2503 is being displaced stepped. */
2504 int cond;
2505
2506 /* PC adjustment offset after displaced stepping. */
2507 int32_t pc_adjust;
2508};
2509
2510/* Data when visiting instructions for displaced stepping. */
2511
2512struct aarch64_displaced_step_data
2513{
2514 struct aarch64_insn_data base;
2515
2516 /* The address at which the instruction will be executed. */
2517 CORE_ADDR new_addr;
2518 /* Buffer of instructions to be copied to NEW_ADDR to execute. */
2519 uint32_t insn_buf[DISPLACED_MODIFIED_INSNS];
2520 /* Number of instructions in INSN_BUF. */
2521 unsigned insn_count;
2522 /* Registers when doing displaced stepping. */
2523 struct regcache *regs;
2524
2525 struct displaced_step_closure *dsc;
2526};
2527
2528/* Implementation of aarch64_insn_visitor method "b". */
2529
2530static void
2531aarch64_displaced_step_b (const int is_bl, const int32_t offset,
2532 struct aarch64_insn_data *data)
2533{
2534 struct aarch64_displaced_step_data *dsd
2535 = (struct aarch64_displaced_step_data *) data;
2536 int64_t new_offset = data->insn_addr - dsd->new_addr + offset;
2537
2538 if (can_encode_int32 (new_offset, 28))
2539 {
2540 /* Emit B rather than BL, because executing BL on a new address
2541 will get the wrong address into LR. In order to avoid this,
2542 we emit B, and update LR if the instruction is BL. */
2543 emit_b (dsd->insn_buf, 0, new_offset);
2544 dsd->insn_count++;
2545 }
2546 else
2547 {
2548 /* Write NOP. */
2549 emit_nop (dsd->insn_buf);
2550 dsd->insn_count++;
2551 dsd->dsc->pc_adjust = offset;
2552 }
2553
2554 if (is_bl)
2555 {
2556 /* Update LR. */
2557 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
2558 data->insn_addr + 4);
2559 }
2560}
2561
2562/* Implementation of aarch64_insn_visitor method "b_cond". */
2563
2564static void
2565aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
2566 struct aarch64_insn_data *data)
2567{
2568 struct aarch64_displaced_step_data *dsd
2569 = (struct aarch64_displaced_step_data *) data;
2570
2571 /* GDB has to fix up the PC after displaced stepping this instruction
2572 differently according to whether the condition is true or false.
2573 Instead of checking COND against the condition flags, we can use
2574 the following instructions, and GDB can tell how to fix up the PC
2575 according to the PC value.
2576
2577 B.COND TAKEN ; If cond is true, then jump to TAKEN.
2578 INSN1 ;
2579 TAKEN:
2580 INSN2
2581 */
2582
2583 emit_bcond (dsd->insn_buf, cond, 8);
2584 dsd->dsc->cond = 1;
2585 dsd->dsc->pc_adjust = offset;
2586 dsd->insn_count = 1;
2587}
2588
2589/* Dynamically allocate a new register. If we know the register
2590 statically, we should make it a global as above instead of using this
2591 helper function. */
2592
2593static struct aarch64_register
2594aarch64_register (unsigned num, int is64)
2595{
2596 return (struct aarch64_register) { num, is64 };
2597}
2598
2599/* Implementation of aarch64_insn_visitor method "cb". */
2600
2601static void
2602aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
2603 const unsigned rn, int is64,
2604 struct aarch64_insn_data *data)
2605{
2606 struct aarch64_displaced_step_data *dsd
2607 = (struct aarch64_displaced_step_data *) data;
2608
2609 /* The offset is out of range for a compare and branch
2610 instruction. We can use the following instructions instead:
2611
2612 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
2613 INSN1 ;
2614 TAKEN:
2615 INSN2
2616 */
2617 emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
2618 dsd->insn_count = 1;
2619 dsd->dsc->cond = 1;
2620 dsd->dsc->pc_adjust = offset;
2621}
2622
2623/* Implementation of aarch64_insn_visitor method "tb". */
2624
2625static void
2626aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
2627 const unsigned rt, unsigned bit,
2628 struct aarch64_insn_data *data)
2629{
2630 struct aarch64_displaced_step_data *dsd
2631 = (struct aarch64_displaced_step_data *) data;
2632
2633 /* The offset is out of range for a test bit and branch
2634 instruction.  We can use the following instructions instead:
2635
2636 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
2637 INSN1 ;
2638 TAKEN:
2639 INSN2
2640
2641 */
2642 emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
2643 dsd->insn_count = 1;
2644 dsd->dsc->cond = 1;
2645 dsd->dsc->pc_adjust = offset;
2646}
2647
2648/* Implementation of aarch64_insn_visitor method "adr". */
2649
2650static void
2651aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
2652 const int is_adrp, struct aarch64_insn_data *data)
2653{
2654 struct aarch64_displaced_step_data *dsd
2655 = (struct aarch64_displaced_step_data *) data;
2656 /* We know exactly the address the ADR{P,} instruction will compute.
2657 We can just write it to the destination register. */
2658 CORE_ADDR address = data->insn_addr + offset;
2659
2660 if (is_adrp)
2661 {
2662 /* Clear the lower 12 bits of the offset to get the 4K page. */
2663 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2664 address & ~0xfff);
2665 }
2666 else
2667 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2668 address);
2669
2670 dsd->dsc->pc_adjust = 4;
2671 emit_nop (dsd->insn_buf);
2672 dsd->insn_count = 1;
2673}
2674
2675/* Implementation of aarch64_insn_visitor method "ldr_literal". */
2676
2677static void
2678aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
2679 const unsigned rt, const int is64,
2680 struct aarch64_insn_data *data)
2681{
2682 struct aarch64_displaced_step_data *dsd
2683 = (struct aarch64_displaced_step_data *) data;
2684 CORE_ADDR address = data->insn_addr + offset;
2685 struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };
2686
2687 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
2688 address);
2689
2690 if (is_sw)
2691 dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
2692 aarch64_register (rt, 1), zero);
2693 else
2694 dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
2695 aarch64_register (rt, 1), zero);
2696
2697 dsd->dsc->pc_adjust = 4;
2698}
2699
2700/* Implementation of aarch64_insn_visitor method "others". */
2701
2702static void
2703aarch64_displaced_step_others (const uint32_t insn,
2704 struct aarch64_insn_data *data)
2705{
2706 struct aarch64_displaced_step_data *dsd
2707 = (struct aarch64_displaced_step_data *) data;
2708
2709 aarch64_emit_insn (dsd->insn_buf, insn);
2710 dsd->insn_count = 1;
2711
2712 if ((insn & 0xfffffc1f) == 0xd65f0000)
2713 {
2714 /* RET */
2715 dsd->dsc->pc_adjust = 0;
2716 }
2717 else
2718 dsd->dsc->pc_adjust = 4;
2719}
2720
2721static const struct aarch64_insn_visitor visitor =
2722{
2723 aarch64_displaced_step_b,
2724 aarch64_displaced_step_b_cond,
2725 aarch64_displaced_step_cb,
2726 aarch64_displaced_step_tb,
2727 aarch64_displaced_step_adr,
2728 aarch64_displaced_step_ldr_literal,
2729 aarch64_displaced_step_others,
2730};
2731
2732/* Implement the "displaced_step_copy_insn" gdbarch method. */
2733
2734struct displaced_step_closure *
2735aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
2736 CORE_ADDR from, CORE_ADDR to,
2737 struct regcache *regs)
2738{
2739 struct displaced_step_closure *dsc = NULL;
2740 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2741 uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
2742 struct aarch64_displaced_step_data dsd;
2743 aarch64_inst inst;
2744
2745 if (aarch64_decode_insn (insn, &inst, 1) != 0)
2746 return NULL;
2747
2748 /* Look for a Load Exclusive instruction which begins the sequence. */
2749 if (inst.opcode->iclass == ldstexcl && bit (insn, 22))
2750 {
2751 /* We can't displaced step atomic sequences. */
2752 return NULL;
2753 }
2754
2755 dsc = XCNEW (struct displaced_step_closure);
2756 dsd.base.insn_addr = from;
2757 dsd.new_addr = to;
2758 dsd.regs = regs;
2759 dsd.dsc = dsc;
2760 dsd.insn_count = 0;
2761 aarch64_relocate_instruction (insn, &visitor,
2762 (struct aarch64_insn_data *) &dsd);
2763 gdb_assert (dsd.insn_count <= DISPLACED_MODIFIED_INSNS);
2764
2765 if (dsd.insn_count != 0)
2766 {
2767 int i;
2768
2769 /* Instruction can be relocated to scratch pad. Copy
2770 relocated instruction(s) there. */
2771 for (i = 0; i < dsd.insn_count; i++)
2772 {
2773 if (debug_displaced)
2774 {
2775 debug_printf ("displaced: writing insn ");
2776 debug_printf ("%.8x", dsd.insn_buf[i]);
2777 debug_printf (" at %s\n", paddress (gdbarch, to + i * 4));
2778 }
2779 write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
2780 (ULONGEST) dsd.insn_buf[i]);
2781 }
2782 }
2783 else
2784 {
2785 xfree (dsc);
2786 dsc = NULL;
2787 }
2788
2789 return dsc;
2790}
2791
2792/* Implement the "displaced_step_fixup" gdbarch method. */
2793
2794void
2795aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
2796 struct displaced_step_closure *dsc,
2797 CORE_ADDR from, CORE_ADDR to,
2798 struct regcache *regs)
2799{
2800 if (dsc->cond)
2801 {
2802 ULONGEST pc;
2803
2804 regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc);
2805 if (pc - to == 8)
2806 {
2807 /* Condition is true. */
2808 }
2809 else if (pc - to == 4)
2810 {
2811 /* Condition is false. */
2812 dsc->pc_adjust = 4;
2813 }
2814 else
2815 gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
2816 }
2817
2818 if (dsc->pc_adjust != 0)
2819 {
2820 if (debug_displaced)
2821 {
2822 debug_printf ("displaced: fixup: set PC to %s:%d\n",
2823 paddress (gdbarch, from), dsc->pc_adjust);
2824 }
2825 regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
2826 from + dsc->pc_adjust);
2827 }
2828}
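
/* Worked example (illustrative, not from the original source): for a
   B.EQ at FROM with branch offset 0x40, the copy routine above emits
   "b.eq <TO + 8>" at the scratch address TO and sets pc_adjust to
   0x40.  If the condition was true, the PC stops at TO + 8 and the
   fixup writes FROM + 0x40; if it was false, the PC stops at TO + 4,
   pc_adjust is rewritten to 4, and the fixup writes FROM + 4.  */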
2829
2830/* Implement the "displaced_step_hw_singlestep" gdbarch method. */
2831
2832int
2833aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch,
2834 struct displaced_step_closure *closure)
2835{
2836 return 1;
2837}
2838
2839/* Initialize the current architecture based on INFO. If possible,
2840 re-use an architecture from ARCHES, which is a list of
2841 architectures already created during this debugging session.
2842
2843 Called e.g. at program startup, when reading a core file, and when
2844 reading a binary file. */
2845
2846static struct gdbarch *
2847aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
2848{
2849 struct gdbarch_tdep *tdep;
2850 struct gdbarch *gdbarch;
2851 struct gdbarch_list *best_arch;
2852 struct tdesc_arch_data *tdesc_data = NULL;
2853 const struct target_desc *tdesc = info.target_desc;
2854 int i;
2855 int valid_p = 1;
2856 const struct tdesc_feature *feature;
2857 int num_regs = 0;
2858 int num_pseudo_regs = 0;
2859
2860 /* Ensure we always have a target descriptor. */
2861 if (!tdesc_has_registers (tdesc))
2862 tdesc = tdesc_aarch64;
2863
2864 gdb_assert (tdesc);
2865
2866 feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");
2867
2868 if (feature == NULL)
2869 return NULL;
2870
2871 tdesc_data = tdesc_data_alloc ();
2872
2873 /* Validate the descriptor provides the mandatory core R registers
2874 and allocate their numbers. */
2875 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
2876 valid_p &=
2877 tdesc_numbered_register (feature, tdesc_data, AARCH64_X0_REGNUM + i,
2878 aarch64_r_register_names[i]);
2879
2880 num_regs = AARCH64_X0_REGNUM + i;
2881
2882 /* Look for the V registers. */
2883 feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
2884 if (feature)
2885 {
2886 /* Validate the descriptor provides the mandatory V registers
2887 and allocate their numbers. */
2888 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
2889 valid_p &=
2890 tdesc_numbered_register (feature, tdesc_data, AARCH64_V0_REGNUM + i,
2891 aarch64_v_register_names[i]);
2892
2893 num_regs = AARCH64_V0_REGNUM + i;
2894
2895 num_pseudo_regs += 32; /* add the Qn scalar register pseudos */
2896 num_pseudo_regs += 32; /* add the Dn scalar register pseudos */
2897 num_pseudo_regs += 32; /* add the Sn scalar register pseudos */
2898 num_pseudo_regs += 32; /* add the Hn scalar register pseudos */
2899 num_pseudo_regs += 32; /* add the Bn scalar register pseudos */
2900 }
2901
2902 if (!valid_p)
2903 {
2904 tdesc_data_cleanup (tdesc_data);
2905 return NULL;
2906 }
2907
2908 /* AArch64 code is always little-endian. */
2909 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
2910
2911 /* If there is already a candidate, use it. */
2912 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
2913 best_arch != NULL;
2914 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
2915 {
2916 /* Found a match. */
2917 break;
2918 }
2919
2920 if (best_arch != NULL)
2921 {
2922 if (tdesc_data != NULL)
2923 tdesc_data_cleanup (tdesc_data);
2924 return best_arch->gdbarch;
2925 }
2926
2927 tdep = XCNEW (struct gdbarch_tdep);
2928 gdbarch = gdbarch_alloc (&info, tdep);
2929
2930 /* This should be low enough for everything. */
2931 tdep->lowest_pc = 0x20;
2932 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
2933 tdep->jb_elt_size = 8;
2934
2935 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
2936 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
2937
2938 /* Frame handling. */
2939 set_gdbarch_dummy_id (gdbarch, aarch64_dummy_id);
2940 set_gdbarch_unwind_pc (gdbarch, aarch64_unwind_pc);
2941 set_gdbarch_unwind_sp (gdbarch, aarch64_unwind_sp);
2942
2943 /* Advance PC across function entry code. */
2944 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
2945
2946 /* The stack grows downward. */
2947 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
2948
2949 /* Breakpoint manipulation. */
2950 set_gdbarch_breakpoint_kind_from_pc (gdbarch,
2951 aarch64_breakpoint::kind_from_pc);
2952 set_gdbarch_sw_breakpoint_from_kind (gdbarch,
2953 aarch64_breakpoint::bp_from_kind);
2954 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
2955 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
2956
2957 /* Information about registers, etc. */
2958 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
2959 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
2960 set_gdbarch_num_regs (gdbarch, num_regs);
2961
2962 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
2963 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
2964 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
2965 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
2966 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
2967 set_tdesc_pseudo_register_reggroup_p (gdbarch,
2968 aarch64_pseudo_register_reggroup_p);
2969
2970 /* ABI */
2971 set_gdbarch_short_bit (gdbarch, 16);
2972 set_gdbarch_int_bit (gdbarch, 32);
2973 set_gdbarch_float_bit (gdbarch, 32);
2974 set_gdbarch_double_bit (gdbarch, 64);
2975 set_gdbarch_long_double_bit (gdbarch, 128);
2976 set_gdbarch_long_bit (gdbarch, 64);
2977 set_gdbarch_long_long_bit (gdbarch, 64);
2978 set_gdbarch_ptr_bit (gdbarch, 64);
2979 set_gdbarch_char_signed (gdbarch, 0);
2980 set_gdbarch_wchar_signed (gdbarch, 0);
2981 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
2982 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
2983 set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);
2984
2985 /* Internal <-> external register number maps. */
2986 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
2987
2988 /* Returning results. */
2989 set_gdbarch_return_value (gdbarch, aarch64_return_value);
2990
2991 /* Disassembly. */
2992 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
2993
2994 /* Virtual tables. */
2995 set_gdbarch_vbit_in_delta (gdbarch, 1);
2996
2997 /* Hook in the ABI-specific overrides, if they have been registered. */
2998 info.target_desc = tdesc;
2999 info.tdesc_data = tdesc_data;
3000 gdbarch_init_osabi (info, gdbarch);
3001
3002 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
3003
3004 /* Add some default predicates. */
3005 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
3006 dwarf2_append_unwinders (gdbarch);
3007 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
3008
3009 frame_base_set_default (gdbarch, &aarch64_normal_base);
3010
3011 /* Now we have tuned the configuration, set a few final things,
3012 based on what the OS ABI has told us. */
3013
3014 if (tdep->jb_pc >= 0)
3015 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
3016
3017 set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);
3018
3019 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
3020
3021 /* Add standard register aliases. */
3022 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
3023 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
3024 value_of_aarch64_user_reg,
3025 &aarch64_register_aliases[i].regnum);
3026
3027 return gdbarch;
3028}
3029
3030static void
3031aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
3032{
3033 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3034
3035 if (tdep == NULL)
3036 return;
3037
3038 fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
3039 paddress (gdbarch, tdep->lowest_pc));
3040}
3041
3042 #if GDB_SELF_TEST
3043namespace selftests
3044{
3045static void aarch64_process_record_test (void);
3046}
3047 #endif
3048
3049void
3050_initialize_aarch64_tdep (void)
3051{
3052 gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
3053 aarch64_dump_tdep);
3054
3055 initialize_tdesc_aarch64 ();
3056
3057 /* Debug this file's internals. */
3058 add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
3059Set AArch64 debugging."), _("\
3060Show AArch64 debugging."), _("\
3061When on, AArch64 specific debugging is enabled."),
3062 NULL,
3063 show_aarch64_debug,
3064 &setdebuglist, &showdebuglist);
3065
3066#if GDB_SELF_TEST
3067 selftests::register_test ("aarch64-analyze-prologue",
3068 selftests::aarch64_analyze_prologue_test);
3069 selftests::register_test ("aarch64-process-record",
3070 selftests::aarch64_process_record_test);
3071 #endif
3072 }
3073
3074/* AArch64 process record-replay related structures, defines etc. */
3075
3076#define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
3077 do \
3078 { \
3079 unsigned int reg_len = LENGTH; \
3080 if (reg_len) \
3081 { \
3082 REGS = XNEWVEC (uint32_t, reg_len); \
3083 memcpy(&REGS[0], &RECORD_BUF[0], sizeof(uint32_t)*LENGTH); \
3084 } \
3085 } \
3086 while (0)
3087
3088#define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
3089 do \
3090 { \
3091 unsigned int mem_len = LENGTH; \
3092 if (mem_len) \
3093 { \
3094 MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
3095 memcpy(&MEMS->len, &RECORD_BUF[0], \
3096 sizeof(struct aarch64_mem_r) * LENGTH); \
3097 } \
3098 } \
3099 while (0)
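
/* Illustrative usage (not from the original source): a record handler
   that has collected reg_rec_count register numbers in record_buf
   calls
     REG_ALLOC (r->aarch64_regs, r->reg_rec_count, record_buf);
   (where "r" stands for the insn_decode_record pointer) to
   heap-allocate and fill the array of registers to be recorded;
   MEM_ALLOC does the same for {length, address} memory records.  */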
3100
3101/* AArch64 record/replay structures and enumerations. */
3102
3103struct aarch64_mem_r
3104{
3105 uint64_t len; /* Record length. */
3106 uint64_t addr; /* Memory address. */
3107};
3108
3109enum aarch64_record_result
3110{
3111 AARCH64_RECORD_SUCCESS,
3112 AARCH64_RECORD_UNSUPPORTED,
3113 AARCH64_RECORD_UNKNOWN
3114};
3115
3116typedef struct insn_decode_record_t
3117{
3118 struct gdbarch *gdbarch;
3119 struct regcache *regcache;
3120 CORE_ADDR this_addr; /* Address of insn to be recorded. */
3121 uint32_t aarch64_insn; /* Insn to be recorded. */
3122 uint32_t mem_rec_count; /* Count of memory records. */
3123 uint32_t reg_rec_count; /* Count of register records. */
3124 uint32_t *aarch64_regs; /* Registers to be recorded. */
3125 struct aarch64_mem_r *aarch64_mems; /* Memory locations to be recorded. */
3126} insn_decode_record;
3127
3128/* Record handler for data processing - register instructions. */
3129
3130static unsigned int
3131aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
3132{
3133 uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
3134 uint32_t record_buf[4];
3135
3136 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3137 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3138 insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);
3139
3140 if (!bit (aarch64_insn_r->aarch64_insn, 28))
3141 {
3142 uint8_t setflags;
3143
3144 /* Logical (shifted register). */
3145 if (insn_bits24_27 == 0x0a)
3146 setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
3147 /* Add/subtract. */
3148 else if (insn_bits24_27 == 0x0b)
3149 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3150 else
3151 return AARCH64_RECORD_UNKNOWN;
3152
3153 record_buf[0] = reg_rd;
3154 aarch64_insn_r->reg_rec_count = 1;
3155 if (setflags)
3156 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3157 }
3158 else
3159 {
3160 if (insn_bits24_27 == 0x0b)
3161 {
3162 /* Data-processing (3 source). */
3163 record_buf[0] = reg_rd;
3164 aarch64_insn_r->reg_rec_count = 1;
3165 }
3166 else if (insn_bits24_27 == 0x0a)
3167 {
3168 if (insn_bits21_23 == 0x00)
3169 {
3170 /* Add/subtract (with carry). */
3171 record_buf[0] = reg_rd;
3172 aarch64_insn_r->reg_rec_count = 1;
3173 if (bit (aarch64_insn_r->aarch64_insn, 29))
3174 {
3175 record_buf[1] = AARCH64_CPSR_REGNUM;
3176 aarch64_insn_r->reg_rec_count = 2;
3177 }
3178 }
3179 else if (insn_bits21_23 == 0x02)
3180 {
3181 /* Conditional compare (register) and conditional compare
3182 (immediate) instructions. */
3183 record_buf[0] = AARCH64_CPSR_REGNUM;
3184 aarch64_insn_r->reg_rec_count = 1;
3185 }
3186 else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
3187 {
3188 /* Conditional select. */
3189 /* Data-processing (2 source). */
3190 /* Data-processing (1 source). */
3191 record_buf[0] = reg_rd;
3192 aarch64_insn_r->reg_rec_count = 1;
3193 }
3194 else
3195 return AARCH64_RECORD_UNKNOWN;
3196 }
3197 }
3198
3199 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3200 record_buf);
3201 return AARCH64_RECORD_SUCCESS;
3202}
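
/* Worked example (illustrative, not from the original source):
   ADDS x0, x1, x2 has bit 28 clear and bits 24-27 equal to 0x0b
   (add/subtract, shifted register), and its S bit (bit 29) is set,
   so the handler above records both x0 and AARCH64_CPSR_REGNUM.  */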
3203
3204/* Record handler for data processing - immediate instructions. */
3205
3206static unsigned int
3207aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
3208{
3209 uint8_t reg_rd, insn_bit23, insn_bits24_27, setflags;
3210 uint32_t record_buf[4];
3211
3212 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3213 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3214 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3215
3216 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
3217 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
3218 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
3219 {
3220 record_buf[0] = reg_rd;
3221 aarch64_insn_r->reg_rec_count = 1;
3222 }
3223 else if (insn_bits24_27 == 0x01)
3224 {
3225 /* Add/Subtract (immediate). */
3226 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3227 record_buf[0] = reg_rd;
3228 aarch64_insn_r->reg_rec_count = 1;
3229 if (setflags)
3230 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3231 }
3232 else if (insn_bits24_27 == 0x02 && !insn_bit23)
3233 {
3234 /* Logical (immediate). */
3235 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
3236 record_buf[0] = reg_rd;
3237 aarch64_insn_r->reg_rec_count = 1;
3238 if (setflags)
3239 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3240 }
3241 else
3242 return AARCH64_RECORD_UNKNOWN;
3243
3244 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3245 record_buf);
3246 return AARCH64_RECORD_SUCCESS;
3247}
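
/* Worked example (illustrative, not from the original source):
   ADD w0, w1, #1 encodes as 0x11000420; bits 24-27 are 0x01, so it
   is Add/Subtract (immediate) with setflags = bit 29 = 0, and the
   handler above records only reg_rd = 0 (w0).  */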
3248
3249/* Record handler for branch, exception generation and system instructions. */
3250
3251static unsigned int
3252aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
3253{
3254 struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
3255 uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
3256 uint32_t record_buf[4];
3257
3258 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3259 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3260 insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3261
3262 if (insn_bits28_31 == 0x0d)
3263 {
3264 /* Exception generation instructions. */
3265 if (insn_bits24_27 == 0x04)
3266 {
3267 if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
3268 && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
3269 && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
3270 {
3271 ULONGEST svc_number;
3272
3273 regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
3274 &svc_number);
3275 return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
3276 svc_number);
3277 }
3278 else
3279 return AARCH64_RECORD_UNSUPPORTED;
3280 }
3281 /* System instructions. */
3282 else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
3283 {
3284 uint32_t reg_rt, reg_crn;
3285
3286 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3287 reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3288
3289 /* Record rt in case of sysl and mrs instructions. */
3290 if (bit (aarch64_insn_r->aarch64_insn, 21))
3291 {
3292 record_buf[0] = reg_rt;
3293 aarch64_insn_r->reg_rec_count = 1;
3294 }
3295 /* Record cpsr for hint and msr(immediate) instructions. */
3296 else if (reg_crn == 0x02 || reg_crn == 0x04)
3297 {
3298 record_buf[0] = AARCH64_CPSR_REGNUM;
3299 aarch64_insn_r->reg_rec_count = 1;
3300 }
3301 }
3302 /* Unconditional branch (register). */
3303 else if((insn_bits24_27 & 0x0e) == 0x06)
3304 {
3305 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3306 if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
3307 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3308 }
3309 else
3310 return AARCH64_RECORD_UNKNOWN;
3311 }
3312 /* Unconditional branch (immediate). */
3313 else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
3314 {
3315 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3316 if (bit (aarch64_insn_r->aarch64_insn, 31))
3317 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3318 }
3319 else
3320 /* Compare & branch (immediate), Test & branch (immediate) and
3321 Conditional branch (immediate). */
3322 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3323
3324 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3325 record_buf);
3326 return AARCH64_RECORD_SUCCESS;
3327}
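
/* Worked example (illustrative, not from the original source): for
   BL <label>, (insn_bits28_31 & 0x07) == 0x01 and
   (insn_bits24_27 & 0x0c) == 0x04, matching the unconditional branch
   (immediate) case above; bit 31 is set for BL (as opposed to B), so
   both the PC and the LR are recorded.  */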
3328
3329/* Record handler for advanced SIMD load and store instructions. */
3330
3331static unsigned int
3332aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
3333{
3334 CORE_ADDR address;
3335 uint64_t addr_offset = 0;
3336 uint32_t record_buf[24];
3337 uint64_t record_buf_mem[24];
3338 uint32_t reg_rn, reg_rt;
3339 uint32_t reg_index = 0, mem_index = 0;
3340 uint8_t opcode_bits, size_bits;
3341
3342 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3343 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3344 size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3345 opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3346 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);
3347
3348 if (record_debug)
3349 debug_printf ("Process record: Advanced SIMD load/store\n");
3350
3351 /* Load/store single structure. */
3352 if (bit (aarch64_insn_r->aarch64_insn, 24))
3353 {
3354 uint8_t sindex, scale, selem, esize, replicate = 0;
3355 scale = opcode_bits >> 2;
3356 selem = ((opcode_bits & 0x02) |
3357 bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
3358 switch (scale)
3359 {
3360 case 1:
3361 if (size_bits & 0x01)
3362 return AARCH64_RECORD_UNKNOWN;
3363 break;
3364 case 2:
3365 if ((size_bits >> 1) & 0x01)
3366 return AARCH64_RECORD_UNKNOWN;
3367 if (size_bits & 0x01)
3368 {
3369 if (!((opcode_bits >> 1) & 0x01))
3370 scale = 3;
3371 else
3372 return AARCH64_RECORD_UNKNOWN;
3373 }
3374 break;
3375 case 3:
3376 if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
3377 {
3378 scale = size_bits;
3379 replicate = 1;
3380 break;
3381 }
3382 else
3383 return AARCH64_RECORD_UNKNOWN;
3384 default:
3385 break;
3386 }
3387 esize = 8 << scale;
3388 if (replicate)
3389 for (sindex = 0; sindex < selem; sindex++)
3390 {
3391 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3392 reg_rt = (reg_rt + 1) % 32;
3393 }
3394 else
3395 {
3396 for (sindex = 0; sindex < selem; sindex++)
3397 {
3398 if (bit (aarch64_insn_r->aarch64_insn, 22))
3399 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3400 else
3401 {
3402 record_buf_mem[mem_index++] = esize / 8;
3403 record_buf_mem[mem_index++] = address + addr_offset;
3404 }
3405 addr_offset = addr_offset + (esize / 8);
3406 reg_rt = (reg_rt + 1) % 32;
3407 }
3408 }
3409 }
3410 /* Load/store multiple structure. */
3411 else
3412 {
3413 uint8_t selem, esize, rpt, elements;
3414 uint8_t eindex, rindex;
3415
3416 esize = 8 << size_bits;
3417 if (bit (aarch64_insn_r->aarch64_insn, 30))
3418 elements = 128 / esize;
3419 else
3420 elements = 64 / esize;
3421
3422 switch (opcode_bits)
3423 {
3424 /*LD/ST4 (4 Registers). */
3425 case 0:
3426 rpt = 1;
3427 selem = 4;
3428 break;
3429 /*LD/ST1 (4 Registers). */
3430 case 2:
3431 rpt = 4;
3432 selem = 1;
3433 break;
3434 /*LD/ST3 (3 Registers). */
3435 case 4:
3436 rpt = 1;
3437 selem = 3;
3438 break;
3439 /*LD/ST1 (3 Registers). */
3440 case 6:
3441 rpt = 3;
3442 selem = 1;
3443 break;
3444 /*LD/ST1 (1 Register). */
3445 case 7:
3446 rpt = 1;
3447 selem = 1;
3448 break;
3449 /*LD/ST2 (2 Registers). */
3450 case 8:
3451 rpt = 1;
3452 selem = 2;
3453 break;
3454 /*LD/ST1 (2 Registers). */
3455 case 10:
3456 rpt = 2;
3457 selem = 1;
3458 break;
3459 default:
3460 return AARCH64_RECORD_UNSUPPORTED;
3461 break;
3462 }
3463 for (rindex = 0; rindex < rpt; rindex++)
3464 for (eindex = 0; eindex < elements; eindex++)
3465 {
3466 uint8_t reg_tt, sindex;
3467 reg_tt = (reg_rt + rindex) % 32;
3468 for (sindex = 0; sindex < selem; sindex++)
3469 {
3470 if (bit (aarch64_insn_r->aarch64_insn, 22))
3471 record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
3472 else
3473 {
3474 record_buf_mem[mem_index++] = esize / 8;
3475 record_buf_mem[mem_index++] = address + addr_offset;
3476 }
3477 addr_offset = addr_offset + (esize / 8);
3478 reg_tt = (reg_tt + 1) % 32;
3479 }
3480 }
3481 }
3482
3483 if (bit (aarch64_insn_r->aarch64_insn, 23))
3484 record_buf[reg_index++] = reg_rn;
3485
3486 aarch64_insn_r->reg_rec_count = reg_index;
3487 aarch64_insn_r->mem_rec_count = mem_index / 2;
3488 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3489 record_buf_mem);
3490 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3491 record_buf);
3492 return AARCH64_RECORD_SUCCESS;
3493}
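
/* Worked example (illustrative, not from the original source):
   ST1 {v0.2d}, [x0] stores one register of two 64-bit elements:
   rpt = 1, selem = 1, esize = 64 and elements = 2, so the handler
   above emits two 8-byte memory records, at [x0] and [x0 + 8].  */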
3494
3495/* Record handler for load and store instructions. */
3496
3497static unsigned int
3498aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
3499{
3500 uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
3501 uint8_t insn_bit23, insn_bit21;
3502 uint8_t opc, size_bits, ld_flag, vector_flag;
3503 uint32_t reg_rn, reg_rt, reg_rt2;
3504 uint64_t datasize, offset;
3505 uint32_t record_buf[8];
3506 uint64_t record_buf_mem[8];
3507 CORE_ADDR address;
3508
3509 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3510 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3511 insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
3512 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3513 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3514 ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
3515 vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
3516 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3517 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3518 reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
3519 size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);
3520
3521 /* Load/store exclusive. */
3522 if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
3523 {
3524 if (record_debug)
3525 debug_printf ("Process record: load/store exclusive\n");
3526
3527 if (ld_flag)
3528 {
3529 record_buf[0] = reg_rt;
3530 aarch64_insn_r->reg_rec_count = 1;
3531 if (insn_bit21)
3532 {
3533 record_buf[1] = reg_rt2;
3534 aarch64_insn_r->reg_rec_count = 2;
3535 }
3536 }
3537 else
3538 {
3539 if (insn_bit21)
3540 datasize = (8 << size_bits) * 2;
3541 else
3542 datasize = (8 << size_bits);
3543 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3544 &address);
3545 record_buf_mem[0] = datasize / 8;
3546 record_buf_mem[1] = address;
3547 aarch64_insn_r->mem_rec_count = 1;
3548 if (!insn_bit23)
3549 {
3550 /* Save register rs. */
3551 record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
3552 aarch64_insn_r->reg_rec_count = 1;
3553 }
3554 }
3555 }
3556 /* Load register (literal) instructions decoding. */
3557 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
3558 {
3559 if (record_debug)
3560 debug_printf ("Process record: load register (literal)\n");
3561 if (vector_flag)
3562 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3563 else
3564 record_buf[0] = reg_rt;
3565 aarch64_insn_r->reg_rec_count = 1;
3566 }
3567 /* All types of load/store pair instructions decoding. */
3568 else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
3569 {
3570 if (record_debug)
3571 debug_printf ("Process record: load/store pair\n");
3572
3573 if (ld_flag)
3574 {
3575 if (vector_flag)
3576 {
3577 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3578 record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
3579 }
3580 else
3581 {
3582 record_buf[0] = reg_rt;
3583 record_buf[1] = reg_rt2;
3584 }
3585 aarch64_insn_r->reg_rec_count = 2;
3586 }
3587 else
3588 {
3589 uint16_t imm7_off;
3590 imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
3591 if (!vector_flag)
3592 size_bits = size_bits >> 1;
3593 datasize = 8 << (2 + size_bits);
3594 offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
3595 offset = offset << (2 + size_bits);
3596 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3597 &address);
3598 if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
3599 {
3600 if (imm7_off & 0x40)
3601 address = address - offset;
3602 else
3603 address = address + offset;
3604 }
3605
3606 record_buf_mem[0] = datasize / 8;
3607 record_buf_mem[1] = address;
3608 record_buf_mem[2] = datasize / 8;
3609 record_buf_mem[3] = address + (datasize / 8);
3610 aarch64_insn_r->mem_rec_count = 2;
3611 }
3612 if (bit (aarch64_insn_r->aarch64_insn, 23))
3613 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3614 }
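/* Worked example (illustrative, not from the original source) of the
   imm7 decoding above: STP x19, x20, [sp, #-16]! encodes
   imm7 = 0x7e (-2 scaled by 8); (~0x7e & 0x7f) + 1 = 2, shifted
   left by 2 + size_bits = 3, gives offset 16.  Bit 0x40 of imm7 is
   set, so the address is decremented, producing two 8-byte memory
   records at sp - 16 and sp - 8.  */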
3615 /* Load/store register (unsigned immediate) instructions. */
3616 else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
3617 {
3618 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3619 if (!(opc >> 1))
3620 {
3621 if (opc & 0x01)
3622 ld_flag = 0x01;
3623 else
3624 ld_flag = 0x0;
3625 }
3626 else
3627 {
3628 if (size_bits == 0x3 && vector_flag == 0x0 && opc == 0x2)
3629 {
3630 /* PRFM (immediate) */
3631 return AARCH64_RECORD_SUCCESS;
3632 }
3633 else if (size_bits == 0x2 && vector_flag == 0x0 && opc == 0x2)
3634 {
3635 /* LDRSW (immediate) */
3636 ld_flag = 0x1;
3637 }
33877125 3638 else
1e2b521d
YQ
3639 {
3640 if (opc & 0x01)
3641 ld_flag = 0x01;
3642 else
3643 ld_flag = 0x0;
3644 }
33877125 3645 }
99afc88b
OJ
3646
3647 if (record_debug)
3648 {
b277c936
PL
3649 debug_printf ("Process record: load/store (unsigned immediate):"
3650 " size %x V %d opc %x\n", size_bits, vector_flag,
3651 opc);
99afc88b
OJ
3652 }
3653
3654 if (!ld_flag)
3655 {
3656 offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
3657 datasize = 8 << size_bits;
3658 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3659 &address);
3660 offset = offset << size_bits;
3661 address = address + offset;
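          /* The 12-bit immediate is unsigned and size-scaled: e.g.
             STR x0, [x1, #8] encodes imm12 == 1 with size_bits == 0x3,
             so OFFSET becomes 1 << 3 == 8 bytes past the base in X1.  */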

          record_buf_mem[0] = datasize >> 3;
          record_buf_mem[1] = address;
          aarch64_insn_r->mem_rec_count = 1;
        }
      else
        {
          if (vector_flag)
            record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
          else
            record_buf[0] = reg_rt;
          aarch64_insn_r->reg_rec_count = 1;
        }
    }
  /* Load/store register (register offset) instructions.  */
  else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
           && insn_bits10_11 == 0x02 && insn_bit21)
    {
      if (record_debug)
        debug_printf ("Process record: load/store (register offset)\n");
      opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
      if (!(opc >> 1))
        {
          if (opc & 0x01)
            ld_flag = 0x01;
          else
            ld_flag = 0x0;
        }
      else
        {
          if (size_bits != 0x03)
            ld_flag = 0x01;
          else
            return AARCH64_RECORD_UNKNOWN;
        }

      if (!ld_flag)
        {
          ULONGEST reg_rm_val;

          regcache_raw_read_unsigned (aarch64_insn_r->regcache,
                                      bits (aarch64_insn_r->aarch64_insn,
                                            16, 20),
                                      &reg_rm_val);
          if (bit (aarch64_insn_r->aarch64_insn, 12))
            offset = reg_rm_val << size_bits;
          else
            offset = reg_rm_val;
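          /* Bit 12 is the shift (S) flag: when set, the index register is
             scaled by the access size, e.g. STR x0, [x1, x2, LSL #3]
             yields OFFSET = reg_rm_val << 3.  */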
          datasize = 8 << size_bits;
          regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
                                      &address);
          address = address + offset;
          record_buf_mem[0] = datasize >> 3;
          record_buf_mem[1] = address;
          aarch64_insn_r->mem_rec_count = 1;
        }
      else
        {
          if (vector_flag)
            record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
          else
            record_buf[0] = reg_rt;
          aarch64_insn_r->reg_rec_count = 1;
        }
    }
  /* Load/store register (immediate and unprivileged) instructions.  */
  else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
           && !insn_bit21)
    {
      if (record_debug)
        {
          debug_printf ("Process record: load/store "
                        "(immediate and unprivileged)\n");
        }
      opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
      if (!(opc >> 1))
        {
          if (opc & 0x01)
            ld_flag = 0x01;
          else
            ld_flag = 0x0;
        }
      else
        {
          if (size_bits != 0x03)
            ld_flag = 0x01;
          else
            return AARCH64_RECORD_UNKNOWN;
        }

      if (!ld_flag)
        {
          uint16_t imm9_off;
          imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
          offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
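          /* The 9-bit immediate is two's complement and unscaled: e.g.
             imm9_off == 0x1ff encodes -1, so OFFSET becomes 1 and the
             address below is moved back by a single byte.  */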
          datasize = 8 << size_bits;
          regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
                                      &address);
          if (insn_bits10_11 != 0x01)
            {
              if (imm9_off & 0x0100)
                address = address - offset;
              else
                address = address + offset;
            }
          record_buf_mem[0] = datasize >> 3;
          record_buf_mem[1] = address;
          aarch64_insn_r->mem_rec_count = 1;
        }
      else
        {
          if (vector_flag)
            record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
          else
            record_buf[0] = reg_rt;
          aarch64_insn_r->reg_rec_count = 1;
        }
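      /* Bits 10-11 select the addressing form: 0x01 is post-indexed and
         0x03 is pre-indexed; both write the updated address back to Rn,
         so Rn itself must be recorded too.  */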
      if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
        record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
    }
  /* Advanced SIMD load/store instructions.  */
  else
    return aarch64_record_asimd_load_store (aarch64_insn_r);

  MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
             record_buf_mem);
  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
             record_buf);
  return AARCH64_RECORD_SUCCESS;
}

/* Record handler for data processing SIMD and floating point instructions.  */

static unsigned int
aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
{
  uint8_t insn_bit21, opcode, rmode, reg_rd;
  uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
  uint8_t insn_bits11_14;
  uint32_t record_buf[2];

  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
  insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
  insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
  insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
  opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
  rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
  reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);

  if (record_debug)
    debug_printf ("Process record: data processing SIMD/FP: ");

  if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
    {
      /* Floating point - fixed point conversion instructions.  */
      if (!insn_bit21)
        {
          if (record_debug)
            debug_printf ("FP - fixed point conversion");

          if ((opcode >> 1) == 0x0 && rmode == 0x03)
            record_buf[0] = reg_rd;
          else
            record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
        }
      /* Floating point - conditional compare instructions.  */
      else if (insn_bits10_11 == 0x01)
        {
          if (record_debug)
            debug_printf ("FP - conditional compare");

          record_buf[0] = AARCH64_CPSR_REGNUM;
        }
      /* Floating point - data processing (2-source) and
         conditional select instructions.  */
      else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
        {
          if (record_debug)
            debug_printf ("FP - DP (2-source)");

          record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
        }
      else if (insn_bits10_11 == 0x00)
        {
          /* Floating point - immediate instructions.  */
          if ((insn_bits12_15 & 0x01) == 0x01
              || (insn_bits12_15 & 0x07) == 0x04)
            {
              if (record_debug)
                debug_printf ("FP - immediate");
              record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
            }
          /* Floating point - compare instructions.  */
          else if ((insn_bits12_15 & 0x03) == 0x02)
            {
              if (record_debug)
                debug_printf ("FP - compare");
              record_buf[0] = AARCH64_CPSR_REGNUM;
            }
          /* Floating point - integer conversions instructions.  */
          else if (insn_bits12_15 == 0x00)
            {
              /* Convert float to integer instruction.  */
              if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
                {
                  if (record_debug)
                    debug_printf ("float to int conversion");

                  record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
                }
              /* Convert integer to float instruction.  */
              else if ((opcode >> 1) == 0x01 && !rmode)
                {
                  if (record_debug)
                    debug_printf ("int to float conversion");

                  record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
                }
              /* Move float to integer instruction.  */
              else if ((opcode >> 1) == 0x03)
                {
                  if (record_debug)
                    debug_printf ("move float to int");

                  if (!(opcode & 0x01))
                    record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
                  else
                    record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
                }
              else
                return AARCH64_RECORD_UNKNOWN;
            }
          else
            return AARCH64_RECORD_UNKNOWN;
        }
      else
        return AARCH64_RECORD_UNKNOWN;
    }
  else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
    {
      if (record_debug)
        debug_printf ("SIMD copy");

      /* Advanced SIMD copy instructions.  */
      if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
          && !bit (aarch64_insn_r->aarch64_insn, 15)
          && bit (aarch64_insn_r->aarch64_insn, 10))
        {
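          /* imm4 (bits 11-14) values 0x05 and 0x07 are SMOV and UMOV,
             which copy a vector element into a general-purpose register;
             the remaining copy forms all write a SIMD register.  */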
          if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
            record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
          else
            record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
        }
      else
        record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
    }
  /* All remaining floating point or advanced SIMD instructions.  */
  else
    {
      if (record_debug)
        debug_printf ("all remain");

      record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
    }

  if (record_debug)
    debug_printf ("\n");

  aarch64_insn_r->reg_rec_count++;
  gdb_assert (aarch64_insn_r->reg_rec_count == 1);
  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
             record_buf);
  return AARCH64_RECORD_SUCCESS;
}

/* Decode the instruction type and invoke the matching record handler.  */

static unsigned int
aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
{
  uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;

  ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
  ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
  ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
  ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);

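  /* As a worked example: the PRFM encoding 0xf9800020 exercised by the
     self test below has bit 27 set and bit 25 clear, so it is routed to
     aarch64_record_load_store.  */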
  /* Data processing - immediate instructions.  */
  if (!ins_bit26 && !ins_bit27 && ins_bit28)
    return aarch64_record_data_proc_imm (aarch64_insn_r);

  /* Branch, exception generation and system instructions.  */
  if (ins_bit26 && !ins_bit27 && ins_bit28)
    return aarch64_record_branch_except_sys (aarch64_insn_r);

  /* Load and store instructions.  */
  if (!ins_bit25 && ins_bit27)
    return aarch64_record_load_store (aarch64_insn_r);

  /* Data processing - register instructions.  */
  if (ins_bit25 && !ins_bit26 && ins_bit27)
    return aarch64_record_data_proc_reg (aarch64_insn_r);

  /* Data processing - SIMD and floating point instructions.  */
  if (ins_bit25 && ins_bit26 && ins_bit27)
    return aarch64_record_data_proc_simd_fp (aarch64_insn_r);

  return AARCH64_RECORD_UNSUPPORTED;
}

/* Free the register and memory lists allocated for RECORD.  */

static void
deallocate_reg_mem (insn_decode_record *record)
{
  xfree (record->aarch64_regs);
  xfree (record->aarch64_mems);
}

#if GDB_SELF_TEST
namespace selftests {

static void
aarch64_process_record_test (void)
{
  struct gdbarch_info info;
  uint32_t ret;

  gdbarch_info_init (&info);
  info.bfd_arch_info = bfd_scan_arch ("aarch64");

  struct gdbarch *gdbarch = gdbarch_find_by_info (info);
  SELF_CHECK (gdbarch != NULL);

  insn_decode_record aarch64_record;

  memset (&aarch64_record, 0, sizeof (insn_decode_record));
  aarch64_record.regcache = NULL;
  aarch64_record.this_addr = 0;
  aarch64_record.gdbarch = gdbarch;

  /* 20 00 80 f9	prfm	pldl1keep, [x1]  */
  aarch64_record.aarch64_insn = 0xf9800020;
  ret = aarch64_record_decode_insn_handler (&aarch64_record);
  SELF_CHECK (ret == AARCH64_RECORD_SUCCESS);
  SELF_CHECK (aarch64_record.reg_rec_count == 0);
  SELF_CHECK (aarch64_record.mem_rec_count == 0);

  deallocate_reg_mem (&aarch64_record);
}

} // namespace selftests
#endif /* GDB_SELF_TEST */

/* Parse the current instruction and record the values of the registers and
   memory locations that it will change in "record_arch_list".  Return -1
   if something goes wrong, 0 otherwise.  */

int
aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
                        CORE_ADDR insn_addr)
{
  uint32_t rec_no = 0;
  uint8_t insn_size = 4;
  uint32_t ret = 0;
  gdb_byte buf[insn_size];
  insn_decode_record aarch64_record;

  memset (&buf[0], 0, insn_size);
  memset (&aarch64_record, 0, sizeof (insn_decode_record));
  target_read_memory (insn_addr, &buf[0], insn_size);
  aarch64_record.aarch64_insn
    = (uint32_t) extract_unsigned_integer (&buf[0],
                                           insn_size,
                                           gdbarch_byte_order (gdbarch));
  aarch64_record.regcache = regcache;
  aarch64_record.this_addr = insn_addr;
  aarch64_record.gdbarch = gdbarch;

  ret = aarch64_record_decode_insn_handler (&aarch64_record);
  if (ret == AARCH64_RECORD_UNSUPPORTED)
    {
      printf_unfiltered (_("Process record does not support instruction "
                           "0x%0x at address %s.\n"),
                         aarch64_record.aarch64_insn,
                         paddress (gdbarch, insn_addr));
      ret = -1;
    }

  if (0 == ret)
    {
      /* Record registers.  */
      record_full_arch_list_add_reg (aarch64_record.regcache,
                                     AARCH64_PC_REGNUM);
      /* Always record register CPSR.  */
      record_full_arch_list_add_reg (aarch64_record.regcache,
                                     AARCH64_CPSR_REGNUM);
      if (aarch64_record.aarch64_regs)
        for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
          if (record_full_arch_list_add_reg (aarch64_record.regcache,
                                             aarch64_record.aarch64_regs[rec_no]))
            ret = -1;

      /* Record memories.  */
      if (aarch64_record.aarch64_mems)
        for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
          if (record_full_arch_list_add_mem
              ((CORE_ADDR) aarch64_record.aarch64_mems[rec_no].addr,
               aarch64_record.aarch64_mems[rec_no].len))
            ret = -1;

      if (record_full_arch_list_add_end ())
        ret = -1;
    }

  deallocate_reg_mem (&aarch64_record);
  return ret;
}
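
/* Usage note (an editorial sketch, not part of the original source): this
   entry point only takes effect once it is installed as the gdbarch
   process_record hook, which is expected to happen via
   set_gdbarch_process_record in the AArch64 Linux OS ABI setup code;
   the record-full target then calls it once per executed instruction.  */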