/* Common target dependent code for GDB on AArch64 systems.

   Copyright (C) 2009-2018 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"

#include "frame.h"
#include "inferior.h"
#include "gdbcmd.h"
#include "gdbcore.h"
#include "dis-asm.h"
#include "regcache.h"
#include "reggroups.h"
#include "value.h"
#include "arch-utils.h"
#include "osabi.h"
#include "frame-unwind.h"
#include "frame-base.h"
#include "trad-frame.h"
#include "objfiles.h"
#include "dwarf2-frame.h"
#include "gdbtypes.h"
#include "prologue-value.h"
#include "target-descriptions.h"
#include "user-regs.h"
#include "language.h"
#include "infcall.h"
#include "ax.h"
#include "ax-gdb.h"
#include "selftest.h"

#include "aarch64-tdep.h"

#include "elf-bfd.h"
#include "elf/aarch64.h"

#include "vec.h"

#include "record.h"
#include "record-full.h"
#include "arch/aarch64-insn.h"

#include "opcode/aarch64.h"
#include <algorithm>

#define submask(x) ((1L << ((x) + 1)) - 1)
#define bit(obj,st) (((obj) >> (st)) & 1)
#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
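
/* For example, bits (insn, 0, 4) extracts the five-bit field in bit
   positions 4..0, which holds Rd in most AArch64 data-processing
   encodings, and bit (insn, 31) tests the most significant bit of the
   instruction word.  */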

/* Pseudo register base numbers.  */
#define AARCH64_Q0_REGNUM 0
#define AARCH64_D0_REGNUM (AARCH64_Q0_REGNUM + AARCH64_D_REGISTER_COUNT)
#define AARCH64_S0_REGNUM (AARCH64_D0_REGNUM + 32)
#define AARCH64_H0_REGNUM (AARCH64_S0_REGNUM + 32)
#define AARCH64_B0_REGNUM (AARCH64_H0_REGNUM + 32)
#define AARCH64_SVE_V0_REGNUM (AARCH64_B0_REGNUM + 32)

/* All possible aarch64 target descriptors.  */
struct target_desc *tdesc_aarch64_list[AARCH64_MAX_SVE_VQ + 1];

/* The standard register names, and all the valid aliases for them.  */
static const struct
{
  const char *const name;
  int regnum;
} aarch64_register_aliases[] =
{
  /* 64-bit register names.  */
  {"fp", AARCH64_FP_REGNUM},
  {"lr", AARCH64_LR_REGNUM},
  {"sp", AARCH64_SP_REGNUM},

  /* 32-bit register names.  */
  {"w0", AARCH64_X0_REGNUM + 0},
  {"w1", AARCH64_X0_REGNUM + 1},
  {"w2", AARCH64_X0_REGNUM + 2},
  {"w3", AARCH64_X0_REGNUM + 3},
  {"w4", AARCH64_X0_REGNUM + 4},
  {"w5", AARCH64_X0_REGNUM + 5},
  {"w6", AARCH64_X0_REGNUM + 6},
  {"w7", AARCH64_X0_REGNUM + 7},
  {"w8", AARCH64_X0_REGNUM + 8},
  {"w9", AARCH64_X0_REGNUM + 9},
  {"w10", AARCH64_X0_REGNUM + 10},
  {"w11", AARCH64_X0_REGNUM + 11},
  {"w12", AARCH64_X0_REGNUM + 12},
  {"w13", AARCH64_X0_REGNUM + 13},
  {"w14", AARCH64_X0_REGNUM + 14},
  {"w15", AARCH64_X0_REGNUM + 15},
  {"w16", AARCH64_X0_REGNUM + 16},
  {"w17", AARCH64_X0_REGNUM + 17},
  {"w18", AARCH64_X0_REGNUM + 18},
  {"w19", AARCH64_X0_REGNUM + 19},
  {"w20", AARCH64_X0_REGNUM + 20},
  {"w21", AARCH64_X0_REGNUM + 21},
  {"w22", AARCH64_X0_REGNUM + 22},
  {"w23", AARCH64_X0_REGNUM + 23},
  {"w24", AARCH64_X0_REGNUM + 24},
  {"w25", AARCH64_X0_REGNUM + 25},
  {"w26", AARCH64_X0_REGNUM + 26},
  {"w27", AARCH64_X0_REGNUM + 27},
  {"w28", AARCH64_X0_REGNUM + 28},
  {"w29", AARCH64_X0_REGNUM + 29},
  {"w30", AARCH64_X0_REGNUM + 30},

  /* Specials.  */
  {"ip0", AARCH64_X0_REGNUM + 16},
  {"ip1", AARCH64_X0_REGNUM + 17}
};

/* The required core 'R' registers.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM!  */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  "pc", "cpsr"
};

/* The FP/SIMD 'V' registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM!  */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  "fpsr",
  "fpcr"
};

/* The SVE 'Z' and 'P' registers.  */
static const char *const aarch64_sve_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_SVE_Z0_REGNUM!  */
  "z0", "z1", "z2", "z3",
  "z4", "z5", "z6", "z7",
  "z8", "z9", "z10", "z11",
  "z12", "z13", "z14", "z15",
  "z16", "z17", "z18", "z19",
  "z20", "z21", "z22", "z23",
  "z24", "z25", "z26", "z27",
  "z28", "z29", "z30", "z31",
  "fpsr", "fpcr",
  "p0", "p1", "p2", "p3",
  "p4", "p5", "p6", "p7",
  "p8", "p9", "p10", "p11",
  "p12", "p13", "p14", "p15",
  "ffr", "vg"
};

/* AArch64 prologue cache structure.  */
struct aarch64_prologue_cache
{
  /* The program counter at the start of the function.  It is used to
     identify this frame as a prologue frame.  */
  CORE_ADDR func;

  /* The program counter at the time this frame was created; i.e. where
     this function was called from.  It is used to identify this frame as a
     stub frame.  */
  CORE_ADDR prev_pc;

  /* The stack pointer at the time this frame was created; i.e. the
     caller's stack pointer when this function was called.  It is used
     to identify this frame.  */
  CORE_ADDR prev_sp;

  /* Is the target available to read from?  */
  int available_p;

  /* The frame base for this frame is just prev_sp - frame size.
     FRAMESIZE is the distance from the frame pointer to the
     initial stack pointer.  */
  int framesize;

  /* The register used to hold the frame pointer for this frame.  */
  int framereg;

  /* Saved register offsets.  */
  struct trad_frame_saved_reg *saved_regs;
};

static void
show_aarch64_debug (struct ui_file *file, int from_tty,
                    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
}

namespace {

/* Abstract instruction reader.  */

class abstract_instruction_reader
{
public:
  /* Read in one instruction.  */
  virtual ULONGEST read (CORE_ADDR memaddr, int len,
                         enum bfd_endian byte_order) = 0;
};

/* Instruction reader from real target.  */

class instruction_reader : public abstract_instruction_reader
{
public:
  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    override
  {
    return read_code_unsigned_integer (memaddr, len, byte_order);
  }
};

} // namespace

/* Analyze a prologue, looking for a recognizable stack frame
   and frame pointer.  Scan until we encounter a store that could
   clobber the stack frame unexpectedly, or an unknown instruction.  */
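
/* A typical prologue that this analysis recognizes looks like the
   following (illustrative only; the self tests below exercise real
   encodings of sequences like these):

       stp x29, x30, [sp, #-32]!   // push the frame record
       mov x29, sp                 // establish the frame pointer
       str x19, [sp, #16]          // save a callee-saved register

   Each X and D register is tracked as a prologue value, i.e. a
   symbolic "register at function entry plus constant" expression, so
   that the stores can later be matched back to the registers they
   saved.  */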

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
                          CORE_ADDR start, CORE_ADDR limit,
                          struct aarch64_prologue_cache *cache,
                          abstract_instruction_reader& reader)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  int i;
  /* Track X registers and D registers in prologue.  */
  pv_t regs[AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT];

  for (i = 0; i < AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT; i++)
    regs[i] = pv_register (i, 0);
  pv_area stack (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));

  for (; start < limit; start += 4)
    {
      uint32_t insn;
      aarch64_inst inst;

      insn = reader.read (start, 4, byte_order_for_code);

      if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
        break;

      if (inst.opcode->iclass == addsub_imm
          && (inst.opcode->op == OP_ADD
              || strcmp ("sub", inst.opcode->name) == 0))
        {
          unsigned rd = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].reg.regno;

          gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM);

          if (inst.opcode->op == OP_ADD)
            {
              regs[rd] = pv_add_constant (regs[rn],
                                          inst.operands[2].imm.value);
            }
          else
            {
              regs[rd] = pv_add_constant (regs[rn],
                                          -inst.operands[2].imm.value);
            }
        }
      else if (inst.opcode->iclass == pcreladdr
               && inst.operands[1].type == AARCH64_OPND_ADDR_ADRP)
        {
          gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);

          regs[inst.operands[0].reg.regno] = pv_unknown ();
        }
      else if (inst.opcode->iclass == branch_imm)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == condbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == branch_reg)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == compbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->op == OP_MOVZ)
        {
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
          regs[inst.operands[0].reg.regno] = pv_unknown ();
        }
      else if (inst.opcode->iclass == log_shift
               && strcmp (inst.opcode->name, "orr") == 0)
        {
          unsigned rd = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].reg.regno;
          unsigned rm = inst.operands[2].reg.regno;

          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT);

          if (inst.operands[2].shifter.amount == 0
              && rn == AARCH64_SP_REGNUM)
            regs[rd] = regs[rm];
          else
            {
              if (aarch64_debug)
                {
                  debug_printf ("aarch64: prologue analysis gave up "
                                "addr=%s opcode=0x%x (orr x register)\n",
                                core_addr_to_string_nz (start), insn);
                }
              break;
            }
        }
      else if (inst.opcode->op == OP_STUR)
        {
          unsigned rt = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].addr.base_regno;
          int is64
            = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);

          gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
          gdb_assert (!inst.operands[1].addr.offset.is_reg);

          stack.store (pv_add_constant (regs[rn],
                                        inst.operands[1].addr.offset.imm),
                       is64 ? 8 : 4, regs[rt]);
        }
      else if ((inst.opcode->iclass == ldstpair_off
                || (inst.opcode->iclass == ldstpair_indexed
                    && inst.operands[2].addr.preind))
               && strcmp ("stp", inst.opcode->name) == 0)
        {
          /* STP with addressing mode Pre-indexed and Base register.  */
          unsigned rt1;
          unsigned rt2;
          unsigned rn = inst.operands[2].addr.base_regno;
          int32_t imm = inst.operands[2].addr.offset.imm;

          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
                      || inst.operands[0].type == AARCH64_OPND_Ft);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2
                      || inst.operands[1].type == AARCH64_OPND_Ft2);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7);
          gdb_assert (!inst.operands[2].addr.offset.is_reg);

          /* If recording this store would invalidate the store area
             (perhaps because rn is not known) then we should abandon
             further prologue analysis.  */
          if (stack.store_would_trash (pv_add_constant (regs[rn], imm)))
            break;

          if (stack.store_would_trash (pv_add_constant (regs[rn], imm + 8)))
            break;

          rt1 = inst.operands[0].reg.regno;
          rt2 = inst.operands[1].reg.regno;
          if (inst.operands[0].type == AARCH64_OPND_Ft)
            {
              /* Only the bottom 64 bits of each V register (the D
                 register) need to be preserved.  */
              gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
              rt1 += AARCH64_X_REGISTER_COUNT;
              rt2 += AARCH64_X_REGISTER_COUNT;
            }

          stack.store (pv_add_constant (regs[rn], imm), 8,
                       regs[rt1]);
          stack.store (pv_add_constant (regs[rn], imm + 8), 8,
                       regs[rt2]);

          if (inst.operands[2].addr.writeback)
            regs[rn] = pv_add_constant (regs[rn], imm);
        }
      else if ((inst.opcode->iclass == ldst_imm9 /* Signed immediate.  */
                || (inst.opcode->iclass == ldst_pos /* Unsigned immediate.  */
                    && (inst.opcode->op == OP_STR_POS
                        || inst.opcode->op == OP_STRF_POS)))
               && inst.operands[1].addr.base_regno == AARCH64_SP_REGNUM
               && strcmp ("str", inst.opcode->name) == 0)
        {
          /* STR (immediate).  */
          unsigned int rt = inst.operands[0].reg.regno;
          int32_t imm = inst.operands[1].addr.offset.imm;
          unsigned int rn = inst.operands[1].addr.base_regno;
          bool is64
            = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
                      || inst.operands[0].type == AARCH64_OPND_Ft);

          if (inst.operands[0].type == AARCH64_OPND_Ft)
            {
              /* Only the bottom 64 bits of each V register (the D
                 register) need to be preserved.  */
              gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
              rt += AARCH64_X_REGISTER_COUNT;
            }

          stack.store (pv_add_constant (regs[rn], imm),
                       is64 ? 8 : 4, regs[rt]);
          if (inst.operands[1].addr.writeback)
            regs[rn] = pv_add_constant (regs[rn], imm);
        }
      else if (inst.opcode->iclass == testbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else
        {
          if (aarch64_debug)
            {
              debug_printf ("aarch64: prologue analysis gave up addr=%s"
                            " opcode=0x%x\n",
                            core_addr_to_string_nz (start), insn);
            }
          break;
        }
    }

  if (cache == NULL)
    return start;

  if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Frame pointer is fp.  Frame size is constant.  */
      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = -regs[AARCH64_FP_REGNUM].k;
    }
  else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Try the stack pointer.  */
      cache->framesize = -regs[AARCH64_SP_REGNUM].k;
      cache->framereg = AARCH64_SP_REGNUM;
    }
  else
    {
      /* We're just out of luck.  We don't know where the frame is.  */
      cache->framereg = -1;
      cache->framesize = 0;
    }

  for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
    {
      CORE_ADDR offset;

      if (stack.find_reg (gdbarch, i, &offset))
        cache->saved_regs[i].addr = offset;
    }

  for (i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
    {
      int regnum = gdbarch_num_regs (gdbarch);
      CORE_ADDR offset;

      if (stack.find_reg (gdbarch, i + AARCH64_X_REGISTER_COUNT,
                          &offset))
        cache->saved_regs[i + regnum + AARCH64_D0_REGNUM].addr = offset;
    }

  return start;
}

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
                          CORE_ADDR start, CORE_ADDR limit,
                          struct aarch64_prologue_cache *cache)
{
  instruction_reader reader;

  return aarch64_analyze_prologue (gdbarch, start, limit, cache,
                                   reader);
}

#if GDB_SELF_TEST

namespace selftests {

/* Instruction reader from manually cooked instruction sequences.  */

class instruction_reader_test : public abstract_instruction_reader
{
public:
  template<size_t SIZE>
  explicit instruction_reader_test (const uint32_t (&insns)[SIZE])
    : m_insns (insns), m_insns_size (SIZE)
  {}

  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    override
  {
    SELF_CHECK (len == 4);
    SELF_CHECK (memaddr % 4 == 0);
    SELF_CHECK (memaddr / 4 < m_insns_size);

    return m_insns[memaddr / 4];
  }

private:
  const uint32_t *m_insns;
  size_t m_insns_size;
};

static void
aarch64_analyze_prologue_test (void)
{
  struct gdbarch_info info;

  gdbarch_info_init (&info);
  info.bfd_arch_info = bfd_scan_arch ("aarch64");

  struct gdbarch *gdbarch = gdbarch_find_by_info (info);
  SELF_CHECK (gdbarch != NULL);

  /* Test the simple prologue in which frame pointer is used.  */
  {
    struct aarch64_prologue_cache cache;
    cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);

    static const uint32_t insns[] = {
      0xa9af7bfd, /* stp     x29, x30, [sp,#-272]! */
      0x910003fd, /* mov     x29, sp */
      0x97ffffe6, /* bl      0x400580 */
    };
    instruction_reader_test reader (insns);

    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
    SELF_CHECK (end == 4 * 2);

    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 272);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
        if (i == AARCH64_FP_REGNUM)
          SELF_CHECK (cache.saved_regs[i].addr == -272);
        else if (i == AARCH64_LR_REGNUM)
          SELF_CHECK (cache.saved_regs[i].addr == -264);
        else
          SELF_CHECK (cache.saved_regs[i].addr == -1);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
        int regnum = gdbarch_num_regs (gdbarch);

        SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
                    == -1);
      }
  }

  /* Test a prologue in which STR is used and frame pointer is not
     used.  */
  {
    struct aarch64_prologue_cache cache;
    cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);

    static const uint32_t insns[] = {
      0xf81d0ff3, /* str     x19, [sp, #-48]! */
      0xb9002fe0, /* str     w0, [sp, #44] */
      0xf90013e1, /* str     x1, [sp, #32] */
      0xfd000fe0, /* str     d0, [sp, #24] */
      0xaa0203f3, /* mov     x19, x2 */
      0xf94013e0, /* ldr     x0, [sp, #32] */
    };
    instruction_reader_test reader (insns);

    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    SELF_CHECK (end == 4 * 5);

    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 48);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
        if (i == 1)
          SELF_CHECK (cache.saved_regs[i].addr == -16);
        else if (i == 19)
          SELF_CHECK (cache.saved_regs[i].addr == -48);
        else
          SELF_CHECK (cache.saved_regs[i].addr == -1);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
        int regnum = gdbarch_num_regs (gdbarch);

        if (i == 0)
          SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
                      == -24);
        else
          SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
                      == -1);
      }
  }
}
} // namespace selftests
#endif /* GDB_SELF_TEST */

/* Implement the "skip_prologue" gdbarch method.  */

static CORE_ADDR
aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  CORE_ADDR func_addr, limit_pc;

  /* See if we can determine the end of the prologue via the symbol
     table.  If so, then return either PC, or the PC after the
     prologue, whichever is greater.  */
  if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
    {
      CORE_ADDR post_prologue_pc
        = skip_prologue_using_sal (gdbarch, func_addr);

      if (post_prologue_pc != 0)
        return std::max (pc, post_prologue_pc);
    }

  /* Can't determine prologue from the symbol table, need to examine
     instructions.  */

  /* Find an upper limit on the function prologue using the debug
     information.  If the debug information could not be used to
     provide that bound, then use an arbitrary large number as the
     upper bound.  */
  limit_pc = skip_prologue_using_sal (gdbarch, pc);
  if (limit_pc == 0)
    limit_pc = pc + 128;	/* Magic.  */

  /* Try disassembling prologue.  */
  return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
}

/* Scan the function prologue for THIS_FRAME and populate the prologue
   cache CACHE.  */

static void
aarch64_scan_prologue (struct frame_info *this_frame,
                       struct aarch64_prologue_cache *cache)
{
  CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
  CORE_ADDR prologue_start;
  CORE_ADDR prologue_end;
  CORE_ADDR prev_pc = get_frame_pc (this_frame);
  struct gdbarch *gdbarch = get_frame_arch (this_frame);

  cache->prev_pc = prev_pc;

  /* Assume we do not find a frame.  */
  cache->framereg = -1;
  cache->framesize = 0;

  if (find_pc_partial_function (block_addr, NULL, &prologue_start,
                                &prologue_end))
    {
      struct symtab_and_line sal = find_pc_line (prologue_start, 0);

      if (sal.line == 0)
        {
          /* No line info so use the current PC.  */
          prologue_end = prev_pc;
        }
      else if (sal.end < prologue_end)
        {
          /* The next line begins after the function end.  */
          prologue_end = sal.end;
        }

      prologue_end = std::min (prologue_end, prev_pc);
      aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
    }
  else
    {
      CORE_ADDR frame_loc;

      frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
      if (frame_loc == 0)
        return;

      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = 16;
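      /* With no symbol information, fall back to assuming a bare frame
         record: the caller's FP (x29) sits at offset 0 from the frame
         base and the saved LR (x30) at offset 8.  */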
      cache->saved_regs[29].addr = 0;
      cache->saved_regs[30].addr = 8;
    }
}

/* Fill in *CACHE with information about the prologue of *THIS_FRAME.  This
   function may throw an exception if the inferior's registers or memory is
   not available.  */

static void
aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
                               struct aarch64_prologue_cache *cache)
{
  CORE_ADDR unwound_fp;
  int reg;

  aarch64_scan_prologue (this_frame, cache);

  if (cache->framereg == -1)
    return;

  unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
  if (unwound_fp == 0)
    return;

  cache->prev_sp = unwound_fp + cache->framesize;

  /* Calculate actual addresses of saved registers using offsets
     determined by aarch64_analyze_prologue.  */
  for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
    if (trad_frame_addr_p (cache->saved_regs, reg))
      cache->saved_regs[reg].addr += cache->prev_sp;

  cache->func = get_frame_func (this_frame);

  cache->available_p = 1;
}

/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  TRY
    {
      aarch64_make_prologue_cache_1 (this_frame, cache);
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
        throw_exception (ex);
    }
  END_CATCH

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
                                           void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  /* Halt the backtrace at "_start".  */
  if (cache->prev_pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
    return UNWIND_OUTERMOST;

  /* We've hit a wall, stop.  */
  if (cache->prev_sp == 0)
    return UNWIND_OUTERMOST;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a normal frame is the current function's starting
   PC and the caller's SP when we were called.  */

static void
aarch64_prologue_this_id (struct frame_info *this_frame,
                          void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    *this_id = frame_id_build_unavailable_stack (cache->func);
  else
    *this_id = frame_id_build (cache->prev_sp, cache->func);
}

/* Implement the "prev_register" frame_unwind method.  */

static struct value *
aarch64_prologue_prev_register (struct frame_info *this_frame,
                                void **this_cache, int prev_regnum)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  /* If we are asked to unwind the PC, then we need to return the LR
     instead.  The prologue may save PC, but it will point into this
     frame's prologue, not the next frame's resume location.  */
  if (prev_regnum == AARCH64_PC_REGNUM)
    {
      CORE_ADDR lr;

      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      return frame_unwind_got_constant (this_frame, prev_regnum, lr);
    }

  /* SP is generally not saved to the stack, but this frame is
     identified by the next frame's stack pointer at the time of the
     call.  The value was already reconstructed into PREV_SP.  */
  /*
         +----------+  ^
         | saved lr |  |
      +->| saved fp |--+
      |  |          |
      |  |          | <- Previous SP
      |  +----------+
      |  | saved lr |
      +--| saved fp |<- FP
         |          |
         |          |<- SP
         +----------+  */
  if (prev_regnum == AARCH64_SP_REGNUM)
    return frame_unwind_got_constant (this_frame, prev_regnum,
                                      cache->prev_sp);

  return trad_frame_get_prev_register (this_frame, cache->saved_regs,
                                       prev_regnum);
}

/* AArch64 prologue unwinder.  */
struct frame_unwind aarch64_prologue_unwind =
{
  NORMAL_FRAME,
  aarch64_prologue_frame_unwind_stop_reason,
  aarch64_prologue_this_id,
  aarch64_prologue_prev_register,
  NULL,
  default_frame_sniffer
};

/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  TRY
    {
      cache->prev_sp = get_frame_register_unsigned (this_frame,
                                                    AARCH64_SP_REGNUM);
      cache->prev_pc = get_frame_pc (this_frame);
      cache->available_p = 1;
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
        throw_exception (ex);
    }
  END_CATCH

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame,
                                       void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a stub frame is the current SP and LR.  */

static void
aarch64_stub_this_id (struct frame_info *this_frame,
                      void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (cache->available_p)
    *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
  else
    *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
}

/* Implement the "sniffer" frame_unwind method.  */

static int
aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
                             struct frame_info *this_frame,
                             void **this_prologue_cache)
{
  CORE_ADDR addr_in_block;
  gdb_byte dummy[4];

  addr_in_block = get_frame_address_in_block (this_frame);
  if (in_plt_section (addr_in_block)
      /* We also use the stub unwinder if the target memory is
         unreadable, to avoid having the prologue unwinder try to
         read it.  */
      || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
    return 1;

  return 0;
}

/* AArch64 stub unwinder.  */
struct frame_unwind aarch64_stub_unwind =
{
  NORMAL_FRAME,
  aarch64_stub_frame_unwind_stop_reason,
  aarch64_stub_this_id,
  aarch64_prologue_prev_register,
  NULL,
  aarch64_stub_unwind_sniffer
};

/* Return the frame base address of *THIS_FRAME.  */

static CORE_ADDR
aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  return cache->prev_sp - cache->framesize;
}

/* AArch64 default frame base information.  */
struct frame_base aarch64_normal_base =
{
  &aarch64_prologue_unwind,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base
};

/* Assuming THIS_FRAME is a dummy, return the frame ID of that
   dummy frame.  The frame ID's base needs to match the TOS value
   saved by save_dummy_frame_tos () and returned from
   aarch64_push_dummy_call, and the PC needs to match the dummy
   frame's breakpoint.  */

static struct frame_id
aarch64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  return frame_id_build (get_frame_register_unsigned (this_frame,
                                                      AARCH64_SP_REGNUM),
                         get_frame_pc (this_frame));
}

/* Implement the "unwind_pc" gdbarch method.  */

static CORE_ADDR
aarch64_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  CORE_ADDR pc
    = frame_unwind_register_unsigned (this_frame, AARCH64_PC_REGNUM);

  return pc;
}

/* Implement the "unwind_sp" gdbarch method.  */

static CORE_ADDR
aarch64_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  return frame_unwind_register_unsigned (this_frame, AARCH64_SP_REGNUM);
}

/* Return the value of the REGNUM register in the previous frame of
   *THIS_FRAME.  */

static struct value *
aarch64_dwarf2_prev_register (struct frame_info *this_frame,
                              void **this_cache, int regnum)
{
  CORE_ADDR lr;

  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      return frame_unwind_got_constant (this_frame, regnum, lr);

    default:
      internal_error (__FILE__, __LINE__,
                      _("Unexpected register %d"), regnum);
    }
}

/* Implement the "init_reg" dwarf2_frame_ops method.  */

static void
aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
                               struct dwarf2_frame_state_reg *reg,
                               struct frame_info *this_frame)
{
  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      reg->how = DWARF2_FRAME_REG_FN;
      reg->loc.fn = aarch64_dwarf2_prev_register;
      break;
    case AARCH64_SP_REGNUM:
      reg->how = DWARF2_FRAME_REG_CFA;
      break;
    }
}

/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

typedef struct
{
  /* Value to pass on stack.  It can be NULL if this item is for stack
     padding.  */
  const gdb_byte *data;

  /* Size in bytes of value to pass on stack.  */
  int len;
} stack_item_t;

DEF_VEC_O (stack_item_t);

/* Return the alignment (in bytes) of the given type.  */

static int
aarch64_type_align (struct type *t)
{
  int n;
  int align;
  int falign;

  t = check_typedef (t);
  switch (TYPE_CODE (t))
    {
    default:
      /* Should never happen.  */
      internal_error (__FILE__, __LINE__, _("unknown type alignment"));
      return 4;

    case TYPE_CODE_PTR:
    case TYPE_CODE_ENUM:
    case TYPE_CODE_INT:
    case TYPE_CODE_FLT:
    case TYPE_CODE_SET:
    case TYPE_CODE_RANGE:
    case TYPE_CODE_BITSTRING:
    case TYPE_CODE_REF:
    case TYPE_CODE_RVALUE_REF:
    case TYPE_CODE_CHAR:
    case TYPE_CODE_BOOL:
      return TYPE_LENGTH (t);

    case TYPE_CODE_ARRAY:
      if (TYPE_VECTOR (t))
        {
          /* Use the natural alignment for vector types (the same as
             for the scalar element type), capped at 128 bits (16
             bytes).  */
          if (TYPE_LENGTH (t) > 16)
            return 16;
          else
            return TYPE_LENGTH (t);
        }
      else
        return aarch64_type_align (TYPE_TARGET_TYPE (t));
    case TYPE_CODE_COMPLEX:
      return aarch64_type_align (TYPE_TARGET_TYPE (t));

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      align = 1;
      for (n = 0; n < TYPE_NFIELDS (t); n++)
        {
          falign = aarch64_type_align (TYPE_FIELD_TYPE (t, n));
          if (falign > align)
            align = falign;
        }
      return align;
    }
}

/* Return 1 if *TY is a homogeneous floating-point aggregate or
   homogeneous short-vector aggregate as defined in the AAPCS64 ABI
   document; otherwise return 0.  */
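
/* For example (illustrative types only), "struct { double a, b, c; }"
   is an HFA: it has no more than four members and all of them are the
   same floating-point type.  By contrast, "struct { double a; float b; }"
   is not, because the member types differ.  */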

static int
is_hfa_or_hva (struct type *ty)
{
  switch (TYPE_CODE (ty))
    {
    case TYPE_CODE_ARRAY:
      {
        struct type *target_ty = TYPE_TARGET_TYPE (ty);

        if (TYPE_VECTOR (ty))
          return 0;

        if (TYPE_LENGTH (ty) <= 4 /* HFA or HVA has at most 4 members.  */
            && (TYPE_CODE (target_ty) == TYPE_CODE_FLT /* HFA */
                || (TYPE_CODE (target_ty) == TYPE_CODE_ARRAY /* HVA */
                    && TYPE_VECTOR (target_ty))))
          return 1;
        break;
      }

    case TYPE_CODE_UNION:
    case TYPE_CODE_STRUCT:
      {
        /* HFA or HVA has at most four members.  */
        if (TYPE_NFIELDS (ty) > 0 && TYPE_NFIELDS (ty) <= 4)
          {
            struct type *member0_type;

            member0_type = check_typedef (TYPE_FIELD_TYPE (ty, 0));
            if (TYPE_CODE (member0_type) == TYPE_CODE_FLT
                || (TYPE_CODE (member0_type) == TYPE_CODE_ARRAY
                    && TYPE_VECTOR (member0_type)))
              {
                int i;

                for (i = 0; i < TYPE_NFIELDS (ty); i++)
                  {
                    struct type *member1_type;

                    member1_type = check_typedef (TYPE_FIELD_TYPE (ty, i));
                    if (TYPE_CODE (member0_type) != TYPE_CODE (member1_type)
                        || (TYPE_LENGTH (member0_type)
                            != TYPE_LENGTH (member1_type)))
                      return 0;
                  }
                return 1;
              }
          }
        return 0;
      }

    default:
      break;
    }

  return 0;
}

/* AArch64 function call information structure.  */
struct aarch64_call_info
{
  /* The current argument number.  */
  unsigned argnum;

  /* The next general purpose register number, equivalent to NGRN as
     described in the AArch64 Procedure Call Standard.  */
  unsigned ngrn;

  /* The next SIMD and floating point register number, equivalent to
     NSRN as described in the AArch64 Procedure Call Standard.  */
  unsigned nsrn;

  /* The next stacked argument address, equivalent to NSAA as
     described in the AArch64 Procedure Call Standard.  */
  unsigned nsaa;

  /* Stack item vector.  */
  VEC(stack_item_t) *si;
};

/* Pass a value in a sequence of consecutive X registers.  The caller
   is responsible for ensuring sufficient registers are available.  */

static void
pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
           struct aarch64_call_info *info, struct type *type,
           struct value *arg)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int len = TYPE_LENGTH (type);
  enum type_code typecode = TYPE_CODE (type);
  int regnum = AARCH64_X0_REGNUM + info->ngrn;
  const bfd_byte *buf = value_contents (arg);

  info->argnum++;

  while (len > 0)
    {
      int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
      CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
                                                   byte_order);

      /* Adjust sub-word struct/union args when big-endian.  */
      if (byte_order == BFD_ENDIAN_BIG
          && partial_len < X_REGISTER_SIZE
          && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
        regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);

      if (aarch64_debug)
        {
          debug_printf ("arg %d in %s = 0x%s\n", info->argnum,
                        gdbarch_register_name (gdbarch, regnum),
                        phex (regval, X_REGISTER_SIZE));
        }
      regcache_cooked_write_unsigned (regcache, regnum, regval);
      len -= partial_len;
      buf += partial_len;
      regnum++;
    }
}
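
/* For example (illustrative only), a 16-byte struct passed by value
   with NGRN = 0 makes two trips through the loop above: the first
   eight bytes are written to X0 and the remaining eight to X1; the
   caller (pass_in_x_or_stack) then advances info->ngrn by two.  */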

/* Attempt to marshall a value in a V register.  Return 1 if
   successful, or 0 if insufficient registers are available.  This
   function, unlike the equivalent pass_in_x() function does not
   handle arguments spread across multiple registers.  */

static int
pass_in_v (struct gdbarch *gdbarch,
           struct regcache *regcache,
           struct aarch64_call_info *info,
           int len, const bfd_byte *buf)
{
  if (info->nsrn < 8)
    {
      int regnum = AARCH64_V0_REGNUM + info->nsrn;
      gdb_byte reg[V_REGISTER_SIZE];

      info->argnum++;
      info->nsrn++;

      memset (reg, 0, sizeof (reg));
      /* PCS C.1, the argument is allocated to the least significant
         bits of V register.  */
      memcpy (reg, buf, len);
      regcache->cooked_write (regnum, reg);

      if (aarch64_debug)
        {
          debug_printf ("arg %d in %s\n", info->argnum,
                        gdbarch_register_name (gdbarch, regnum));
        }
      return 1;
    }
  info->nsrn = 8;
  return 0;
}

/* Marshall an argument onto the stack.  */

static void
pass_on_stack (struct aarch64_call_info *info, struct type *type,
               struct value *arg)
{
  const bfd_byte *buf = value_contents (arg);
  int len = TYPE_LENGTH (type);
  int align;
  stack_item_t item;

  info->argnum++;

  align = aarch64_type_align (type);

  /* PCS C.17: the stack should be aligned to the larger of 8 bytes or
     the natural alignment of the argument's type.  */
  align = align_up (align, 8);

  /* The AArch64 PCS requires at most doubleword alignment.  */
  if (align > 16)
    align = 16;

  if (aarch64_debug)
    {
      debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
                    info->nsaa);
    }

  item.len = len;
  item.data = buf;
  VEC_safe_push (stack_item_t, info->si, &item);

  info->nsaa += len;
  if (info->nsaa & (align - 1))
    {
      /* Push stack alignment padding.  */
      int pad = align - (info->nsaa & (align - 1));

      item.len = pad;
      item.data = NULL;

      VEC_safe_push (stack_item_t, info->si, &item);
      info->nsaa += pad;
    }
}
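
/* For example (illustrative only), pushing a 12-byte struct whose
   alignment rounds up to 8 advances NSAA by 12 and then queues a
   4-byte padding item (data == NULL), leaving NSAA 8-byte aligned for
   the next stacked argument.  */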

/* Marshall an argument into a sequence of one or more consecutive X
   registers or, if insufficient X registers are available, onto the
   stack.  */

static void
pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
                    struct aarch64_call_info *info, struct type *type,
                    struct value *arg)
{
  int len = TYPE_LENGTH (type);
  int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;

  /* PCS C.13 - Pass in registers if we have enough spare.  */
  if (info->ngrn + nregs <= 8)
    {
      pass_in_x (gdbarch, regcache, info, type, arg);
      info->ngrn += nregs;
    }
  else
    {
      info->ngrn = 8;
      pass_on_stack (info, type, arg);
    }
}

/* Pass a value in a V register, or on the stack if insufficient are
   available.  */

static void
pass_in_v_or_stack (struct gdbarch *gdbarch,
                    struct regcache *regcache,
                    struct aarch64_call_info *info,
                    struct type *type,
                    struct value *arg)
{
  if (!pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (type),
                  value_contents (arg)))
    pass_on_stack (info, type, arg);
}

/* Implement the "push_dummy_call" gdbarch method.  */

static CORE_ADDR
aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
                         struct regcache *regcache, CORE_ADDR bp_addr,
                         int nargs,
                         struct value **args, CORE_ADDR sp, int struct_return,
                         CORE_ADDR struct_addr)
{
  int argnum;
  struct aarch64_call_info info;
  struct type *func_type;
  struct type *return_type;
  int lang_struct_return;

  memset (&info, 0, sizeof (info));

  /* We need to know what the type of the called function is in order
     to determine the number of named/anonymous arguments for the
     actual argument placement, and the return type in order to handle
     return value correctly.

     The generic code above us views the decision of return in memory
     or return in registers as a two-stage process.  The language
     handler is consulted first and may decide to return in memory (e.g.
     a class with a copy constructor returned by value); this will cause
     the generic code to allocate space AND insert an initial leading
     argument.

     If the language code does not decide to pass in memory then the
     target code is consulted.

     If the language code decides to pass in memory we want to move
     the pointer inserted as the initial argument from the argument
     list and into X8, the conventional AArch64 struct return pointer
     register.

     This is slightly awkward, ideally the flag "lang_struct_return"
     would be passed to the target's implementation of push_dummy_call.
     Rather than change the target interface we call the language code
     directly ourselves.  */

  func_type = check_typedef (value_type (function));

  /* Dereference function pointer types.  */
  if (TYPE_CODE (func_type) == TYPE_CODE_PTR)
    func_type = TYPE_TARGET_TYPE (func_type);

  gdb_assert (TYPE_CODE (func_type) == TYPE_CODE_FUNC
              || TYPE_CODE (func_type) == TYPE_CODE_METHOD);

  /* If language_pass_by_reference () returned true we will have been
     given an additional initial argument, a hidden pointer to the
     return slot in memory.  */
  return_type = TYPE_TARGET_TYPE (func_type);
  lang_struct_return = language_pass_by_reference (return_type);

  /* Set the return address.  For the AArch64, the return breakpoint
     is always at BP_ADDR.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);

  /* If we were given an initial argument for the return slot because
     lang_struct_return was true, lose it.  */
  if (lang_struct_return)
    {
      args++;
      nargs--;
    }

  /* The struct_return pointer occupies X8.  */
  if (struct_return || lang_struct_return)
    {
      if (aarch64_debug)
        {
          debug_printf ("struct return in %s = 0x%s\n",
                        gdbarch_register_name (gdbarch,
                                               AARCH64_STRUCT_RETURN_REGNUM),
                        paddress (gdbarch, struct_addr));
        }
      regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
                                      struct_addr);
    }

  for (argnum = 0; argnum < nargs; argnum++)
    {
      struct value *arg = args[argnum];
      struct type *arg_type;
      int len;

      arg_type = check_typedef (value_type (arg));
      len = TYPE_LENGTH (arg_type);

      switch (TYPE_CODE (arg_type))
        {
        case TYPE_CODE_INT:
        case TYPE_CODE_BOOL:
        case TYPE_CODE_CHAR:
        case TYPE_CODE_RANGE:
        case TYPE_CODE_ENUM:
          if (len < 4)
            {
              /* Promote to 32 bit integer.  */
              if (TYPE_UNSIGNED (arg_type))
                arg_type = builtin_type (gdbarch)->builtin_uint32;
              else
                arg_type = builtin_type (gdbarch)->builtin_int32;
              arg = value_cast (arg_type, arg);
            }
          pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;

        case TYPE_CODE_COMPLEX:
          if (info.nsrn <= 6)
            {
              const bfd_byte *buf = value_contents (arg);
              struct type *target_type =
                check_typedef (TYPE_TARGET_TYPE (arg_type));

              pass_in_v (gdbarch, regcache, &info,
                         TYPE_LENGTH (target_type), buf);
              pass_in_v (gdbarch, regcache, &info,
                         TYPE_LENGTH (target_type),
                         buf + TYPE_LENGTH (target_type));
            }
          else
            {
              info.nsrn = 8;
              pass_on_stack (&info, arg_type, arg);
            }
          break;
        case TYPE_CODE_FLT:
          pass_in_v_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;

        case TYPE_CODE_STRUCT:
        case TYPE_CODE_ARRAY:
        case TYPE_CODE_UNION:
          if (is_hfa_or_hva (arg_type))
            {
              int elements = TYPE_NFIELDS (arg_type);

              /* Homogeneous Aggregates.  */
              if (info.nsrn + elements < 8)
                {
                  int i;

                  for (i = 0; i < elements; i++)
                    {
                      /* We know that we have sufficient registers
                         available therefore this will never fallback
                         to the stack.  */
                      struct value *field =
                        value_primitive_field (arg, 0, i, arg_type);
                      struct type *field_type =
                        check_typedef (value_type (field));

                      pass_in_v_or_stack (gdbarch, regcache, &info,
                                          field_type, field);
                    }
                }
              else
                {
                  info.nsrn = 8;
                  pass_on_stack (&info, arg_type, arg);
                }
            }
          else if (TYPE_CODE (arg_type) == TYPE_CODE_ARRAY
                   && TYPE_VECTOR (arg_type) && (len == 16 || len == 8))
            {
              /* Short vector types are passed in V registers.  */
              pass_in_v_or_stack (gdbarch, regcache, &info, arg_type, arg);
            }
          else if (len > 16)
            {
              /* PCS B.7 Aggregates larger than 16 bytes are passed by
                 invisible reference.  */

              /* Allocate aligned storage.  */
              sp = align_down (sp - len, 16);

              /* Write the real data into the stack.  */
              write_memory (sp, value_contents (arg), len);

              /* Construct the indirection.  */
              arg_type = lookup_pointer_type (arg_type);
              arg = value_from_pointer (arg_type, sp);
              pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
            }
          else
            /* PCS C.15 / C.18 multiple values pass.  */
            pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;

        default:
          pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;
        }
    }

  /* Make sure stack retains 16 byte alignment.  */
  if (info.nsaa & 15)
    sp -= 16 - (info.nsaa & 15);

  while (!VEC_empty (stack_item_t, info.si))
    {
      stack_item_t *si = VEC_last (stack_item_t, info.si);

      sp -= si->len;
      if (si->data != NULL)
        write_memory (sp, si->data, si->len);
      VEC_pop (stack_item_t, info.si);
    }

  VEC_free (stack_item_t, info.si);

  /* Finally, update the SP register.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);

  return sp;
}

/* Implement the "frame_align" gdbarch method.  */

static CORE_ADDR
aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
{
  /* Align the stack to sixteen bytes.  */
  return sp & ~(CORE_ADDR) 15;
}
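
/* For example, aarch64_frame_align rounds 0x7ffffff4 down to
   0x7ffffff0; an already 16-byte-aligned SP is returned unchanged.  */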

/* Return the type for an AdvSIMD Q register.  */

static struct type *
aarch64_vnq_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnq_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint128;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int128;
      append_composite_type_field (t, "s", elem);

      tdep->vnq_type = t;
    }

  return tdep->vnq_type;
}

/* Return the type for an AdvSIMD D register.  */

static struct type *
aarch64_vnd_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnd_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_double;
      append_composite_type_field (t, "f", elem);

      elem = builtin_type (gdbarch)->builtin_uint64;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int64;
      append_composite_type_field (t, "s", elem);

      tdep->vnd_type = t;
    }

  return tdep->vnd_type;
}

/* Return the type for an AdvSIMD S register.  */

static struct type *
aarch64_vns_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vns_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_float;
      append_composite_type_field (t, "f", elem);

      elem = builtin_type (gdbarch)->builtin_uint32;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int32;
      append_composite_type_field (t, "s", elem);

      tdep->vns_type = t;
    }

  return tdep->vns_type;
}

/* Return the type for an AdvSIMD H register.  */

static struct type *
aarch64_vnh_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnh_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint16;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int16;
      append_composite_type_field (t, "s", elem);

      tdep->vnh_type = t;
    }

  return tdep->vnh_type;
}

/* Return the type for an AdvSIMD B register.  */

static struct type *
aarch64_vnb_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnb_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint8;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int8;
      append_composite_type_field (t, "s", elem);

      tdep->vnb_type = t;
    }

  return tdep->vnb_type;
}

/* Return the type for an AdvSIMD V register.  */

static struct type *
aarch64_vnv_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnv_type == NULL)
    {
      struct type *t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnv",
                                            TYPE_CODE_UNION);

      append_composite_type_field (t, "d", aarch64_vnd_type (gdbarch));
      append_composite_type_field (t, "s", aarch64_vns_type (gdbarch));
      append_composite_type_field (t, "h", aarch64_vnh_type (gdbarch));
      append_composite_type_field (t, "b", aarch64_vnb_type (gdbarch));
      append_composite_type_field (t, "q", aarch64_vnq_type (gdbarch));

      tdep->vnv_type = t;
    }

  return tdep->vnv_type;
}
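
/* These union types let a user pick a lane view of a vector register
   from the CLI, e.g. (illustrative session syntax, assuming the usual
   pseudo register names):

       (gdb) print $d0.f      -- D register viewed as a double
       (gdb) print $b0.u      -- B register viewed as an unsigned byte

   Each view reinterprets the same underlying register bytes.  */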

/* Implement the "dwarf2_reg_to_regnum" gdbarch method.  */

static int
aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
{
  if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
    return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;

  if (reg == AARCH64_DWARF_SP)
    return AARCH64_SP_REGNUM;

  if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
    return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;

  return -1;
}
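
/* Under the AArch64 DWARF register numbering (x0-x30 are 0-30, sp is
   31, v0-v31 are 64-95), DWARF register 65, for example, maps to
   GDB's v1; numbers outside those ranges are rejected with -1.  */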

/* Implement the "print_insn" gdbarch method.  */

static int
aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
{
  info->symbols = NULL;
  return default_print_insn (memaddr, info);
}

/* AArch64 BRK software debug mode instruction.
   Note that AArch64 code is always little-endian.
   1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000.  */
constexpr gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};

typedef BP_MANIPULATION (aarch64_default_breakpoint) aarch64_breakpoint;

/* Extract from an array REGS containing the (raw) register state a
   function return value of type TYPE, and copy that, in virtual
   format, into VALBUF.  */

static void
aarch64_extract_return_value (struct type *type, struct regcache *regs,
                              gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = regs->arch ();
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  if (TYPE_CODE (type) == TYPE_CODE_FLT)
    {
      bfd_byte buf[V_REGISTER_SIZE];
      int len = TYPE_LENGTH (type);

      regs->cooked_read (AARCH64_V0_REGNUM, buf);
      memcpy (valbuf, buf, len);
    }
  else if (TYPE_CODE (type) == TYPE_CODE_INT
           || TYPE_CODE (type) == TYPE_CODE_CHAR
           || TYPE_CODE (type) == TYPE_CODE_BOOL
           || TYPE_CODE (type) == TYPE_CODE_PTR
           || TYPE_IS_REFERENCE (type)
           || TYPE_CODE (type) == TYPE_CODE_ENUM)
    {
      /* If the type is a plain integer, then the access is
         straightforward.  Otherwise we have to play around a bit
         more.  */
1857 int len = TYPE_LENGTH (type);
1858 int regno = AARCH64_X0_REGNUM;
1859 ULONGEST tmp;
1860
1861 while (len > 0)
1862 {
1863 /* By using store_unsigned_integer we avoid having to do
1864 anything special for small big-endian values. */
1865 regcache_cooked_read_unsigned (regs, regno++, &tmp);
1866 store_unsigned_integer (valbuf,
1867 (len > X_REGISTER_SIZE
1868 ? X_REGISTER_SIZE : len), byte_order, tmp);
1869 len -= X_REGISTER_SIZE;
1870 valbuf += X_REGISTER_SIZE;
1871 }
1872 }
1873 else if (TYPE_CODE (type) == TYPE_CODE_COMPLEX)
1874 {
1875 int regno = AARCH64_V0_REGNUM;
1876 bfd_byte buf[V_REGISTER_SIZE];
1877 struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
1878 int len = TYPE_LENGTH (target_type);
1879
1880 regs->cooked_read (regno, buf);
1881 memcpy (valbuf, buf, len);
1882 valbuf += len;
1883 regs->cooked_read (regno + 1, buf);
1884 memcpy (valbuf, buf, len);
1885 valbuf += len;
1886 }
1887 else if (is_hfa_or_hva (type))
1888 {
1889 int elements = TYPE_NFIELDS (type);
1890 struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
1891 int len = TYPE_LENGTH (member_type);
1892 int i;
1893
1894 for (i = 0; i < elements; i++)
1895 {
1896 int regno = AARCH64_V0_REGNUM + i;
1897 bfd_byte buf[V_REGISTER_SIZE];
1898
1899 if (aarch64_debug)
1900 {
1901 debug_printf ("read HFA or HVA return value element %d from %s\n",
1902 i + 1,
1903 gdbarch_register_name (gdbarch, regno));
1904 }
1905 regs->cooked_read (regno, buf);
1906
1907 memcpy (valbuf, buf, len);
1908 valbuf += len;
1909 }
1910 }
1911 else if (TYPE_CODE (type) == TYPE_CODE_ARRAY && TYPE_VECTOR (type)
1912 && (TYPE_LENGTH (type) == 16 || TYPE_LENGTH (type) == 8))
1913 {
1914 /* Short vector is returned in V register. */
1915 gdb_byte buf[V_REGISTER_SIZE];
1916
1917 regs->cooked_read (AARCH64_V0_REGNUM, buf);
1918 memcpy (valbuf, buf, TYPE_LENGTH (type));
1919 }
1920 else
1921 {
1922 /* For a structure or union the behaviour is as if the value had
1923 been stored to word-aligned memory and then loaded into
1924 registers with 64-bit load instruction(s). */
1925 int len = TYPE_LENGTH (type);
1926 int regno = AARCH64_X0_REGNUM;
1927 bfd_byte buf[X_REGISTER_SIZE];
1928
1929 while (len > 0)
1930 {
1931 regs->cooked_read (regno++, buf);
1932 memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
1933 len -= X_REGISTER_SIZE;
1934 valbuf += X_REGISTER_SIZE;
1935 }
1936 }
1937 }
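
/* Worked example (illustrative): a call returning
     struct quad { float a, b, c, d; };
   is an HFA, so the loop above reads one member from each of V0-V3
   and packs 4 bytes from each into VALBUF.  A 16-byte plain struct
   would instead be read 8 bytes at a time from X0 and X1 by the
   fallback case.  */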
1938
1939
1940 /* Will a function return an aggregate type in memory or in a
1941 register? Return 0 if an aggregate type can be returned in a
1942 register, 1 if it must be returned in memory. */
1943
1944 static int
1945 aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
1946 {
1947 type = check_typedef (type);
1948
1949 if (is_hfa_or_hva (type))
1950 {
1951 /* V0-V7 are used to return values, one register per member; an
1952 HFA or HVA has at most four members, so it always fits. */
1953 return 0;
1954 }
1955
1956 if (TYPE_LENGTH (type) > 16)
1957 {
1958 /* PCS B.6 Aggregates larger than 16 bytes are passed by
1959 invisible reference. */
1960
1961 return 1;
1962 }
1963
1964 return 0;
1965 }
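
/* Illustrative consequences of the rules above:
     struct { float a, b, c, d; }  -> HFA, returned in V0-V3.
     struct { char c[16]; }        -> 16 bytes, returned in X0-X1.
     struct { char c[17]; }        -> larger than 16 bytes, returned in
                                      memory through a pointer the
                                      caller passes in X8.  */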
1966
1967 /* Write into appropriate registers a function return value of type
1968 TYPE, given in virtual format. */
1969
1970 static void
1971 aarch64_store_return_value (struct type *type, struct regcache *regs,
1972 const gdb_byte *valbuf)
1973 {
1974 struct gdbarch *gdbarch = regs->arch ();
1975 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1976
1977 if (TYPE_CODE (type) == TYPE_CODE_FLT)
1978 {
1979 bfd_byte buf[V_REGISTER_SIZE];
1980 int len = TYPE_LENGTH (type);
1981
1982 memcpy (buf, valbuf, len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
1983 regs->cooked_write (AARCH64_V0_REGNUM, buf);
1984 }
1985 else if (TYPE_CODE (type) == TYPE_CODE_INT
1986 || TYPE_CODE (type) == TYPE_CODE_CHAR
1987 || TYPE_CODE (type) == TYPE_CODE_BOOL
1988 || TYPE_CODE (type) == TYPE_CODE_PTR
1989 || TYPE_IS_REFERENCE (type)
1990 || TYPE_CODE (type) == TYPE_CODE_ENUM)
1991 {
1992 if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
1993 {
1994 /* Values of one word or less are zero/sign-extended and
1995 returned in X0. */
1996 bfd_byte tmpbuf[X_REGISTER_SIZE];
1997 LONGEST val = unpack_long (type, valbuf);
1998
1999 store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
2000 regs->cooked_write (AARCH64_X0_REGNUM, tmpbuf);
2001 }
2002 else
2003 {
2004 /* Integral values greater than one word are stored in
2005 consecutive registers starting with X0. This will always
2006 be a multiple of the register size. */
2007 int len = TYPE_LENGTH (type);
2008 int regno = AARCH64_X0_REGNUM;
2009
2010 while (len > 0)
2011 {
2012 regs->cooked_write (regno++, valbuf);
2013 len -= X_REGISTER_SIZE;
2014 valbuf += X_REGISTER_SIZE;
2015 }
2016 }
2017 }
2018 else if (is_hfa_or_hva (type))
2019 {
2020 int elements = TYPE_NFIELDS (type);
2021 struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
2022 int len = TYPE_LENGTH (member_type);
2023 int i;
2024
2025 for (i = 0; i < elements; i++)
2026 {
2027 int regno = AARCH64_V0_REGNUM + i;
2028 bfd_byte tmpbuf[V_REGISTER_SIZE];
2029
2030 if (aarch64_debug)
2031 {
2032 debug_printf ("write HFA or HVA return value element %d to %s\n",
2033 i + 1,
2034 gdbarch_register_name (gdbarch, regno));
2035 }
2036
2037 memcpy (tmpbuf, valbuf, len);
2038 regs->cooked_write (regno, tmpbuf);
2039 valbuf += len;
2040 }
2041 }
2042 else if (TYPE_CODE (type) == TYPE_CODE_ARRAY && TYPE_VECTOR (type)
2043 && (TYPE_LENGTH (type) == 8 || TYPE_LENGTH (type) == 16))
2044 {
2045 /* Short vector. */
2046 gdb_byte buf[V_REGISTER_SIZE];
2047
2048 memcpy (buf, valbuf, TYPE_LENGTH (type));
2049 regs->cooked_write (AARCH64_V0_REGNUM, buf);
2050 }
2051 else
2052 {
2053 /* For a structure or union the behaviour is as if the value had
2054 been stored to word-aligned memory and then loaded into
2055 registers with 64-bit load instruction(s). */
2056 int len = TYPE_LENGTH (type);
2057 int regno = AARCH64_X0_REGNUM;
2058 bfd_byte tmpbuf[X_REGISTER_SIZE];
2059
2060 while (len > 0)
2061 {
2062 memcpy (tmpbuf, valbuf,
2063 len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2064 regs->cooked_write (regno++, tmpbuf);
2065 len -= X_REGISTER_SIZE;
2066 valbuf += X_REGISTER_SIZE;
2067 }
2068 }
2069 }
2070
2071 /* Implement the "return_value" gdbarch method. */
2072
2073 static enum return_value_convention
2074 aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
2075 struct type *valtype, struct regcache *regcache,
2076 gdb_byte *readbuf, const gdb_byte *writebuf)
2077 {
2078
2079 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
2080 || TYPE_CODE (valtype) == TYPE_CODE_UNION
2081 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
2082 {
2083 if (aarch64_return_in_memory (gdbarch, valtype))
2084 {
2085 if (aarch64_debug)
2086 debug_printf ("return value in memory\n");
2087 return RETURN_VALUE_STRUCT_CONVENTION;
2088 }
2089 }
2090
2091 if (writebuf)
2092 aarch64_store_return_value (valtype, regcache, writebuf);
2093
2094 if (readbuf)
2095 aarch64_extract_return_value (valtype, regcache, readbuf);
2096
2097 if (aarch64_debug)
2098 debug_printf ("return value in registers\n");
2099
2100 return RETURN_VALUE_REGISTER_CONVENTION;
2101 }
2102
2103 /* Implement the "get_longjmp_target" gdbarch method. */
2104
2105 static int
2106 aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2107 {
2108 CORE_ADDR jb_addr;
2109 gdb_byte buf[X_REGISTER_SIZE];
2110 struct gdbarch *gdbarch = get_frame_arch (frame);
2111 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2112 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2113
2114 jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
2115
2116 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
2117 X_REGISTER_SIZE))
2118 return 0;
2119
2120 *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
2121 return 1;
2122 }
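
/* Sketch of how an OS ABI hook would enable the above, assuming a
   jmp_buf whose slot 11 holds the saved PC (the index here is
   hypothetical; the real value depends on the C library's layout):

     tdep->jb_pc = 11;        // slot index of the saved PC
     tdep->jb_elt_size = 8;   // each slot is X-register sized
*/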
2123
2124 /* Implement the "gen_return_address" gdbarch method. */
2125
2126 static void
2127 aarch64_gen_return_address (struct gdbarch *gdbarch,
2128 struct agent_expr *ax, struct axs_value *value,
2129 CORE_ADDR scope)
2130 {
2131 value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
2132 value->kind = axs_lvalue_register;
2133 value->u.reg = AARCH64_LR_REGNUM;
2134 }
2135 \f
2136
2137 /* Return the pseudo register name corresponding to register REGNUM. */
2138
2139 static const char *
2140 aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
2141 {
2142 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2143
2144 static const char *const q_name[] =
2145 {
2146 "q0", "q1", "q2", "q3",
2147 "q4", "q5", "q6", "q7",
2148 "q8", "q9", "q10", "q11",
2149 "q12", "q13", "q14", "q15",
2150 "q16", "q17", "q18", "q19",
2151 "q20", "q21", "q22", "q23",
2152 "q24", "q25", "q26", "q27",
2153 "q28", "q29", "q30", "q31",
2154 };
2155
2156 static const char *const d_name[] =
2157 {
2158 "d0", "d1", "d2", "d3",
2159 "d4", "d5", "d6", "d7",
2160 "d8", "d9", "d10", "d11",
2161 "d12", "d13", "d14", "d15",
2162 "d16", "d17", "d18", "d19",
2163 "d20", "d21", "d22", "d23",
2164 "d24", "d25", "d26", "d27",
2165 "d28", "d29", "d30", "d31",
2166 };
2167
2168 static const char *const s_name[] =
2169 {
2170 "s0", "s1", "s2", "s3",
2171 "s4", "s5", "s6", "s7",
2172 "s8", "s9", "s10", "s11",
2173 "s12", "s13", "s14", "s15",
2174 "s16", "s17", "s18", "s19",
2175 "s20", "s21", "s22", "s23",
2176 "s24", "s25", "s26", "s27",
2177 "s28", "s29", "s30", "s31",
2178 };
2179
2180 static const char *const h_name[] =
2181 {
2182 "h0", "h1", "h2", "h3",
2183 "h4", "h5", "h6", "h7",
2184 "h8", "h9", "h10", "h11",
2185 "h12", "h13", "h14", "h15",
2186 "h16", "h17", "h18", "h19",
2187 "h20", "h21", "h22", "h23",
2188 "h24", "h25", "h26", "h27",
2189 "h28", "h29", "h30", "h31",
2190 };
2191
2192 static const char *const b_name[] =
2193 {
2194 "b0", "b1", "b2", "b3",
2195 "b4", "b5", "b6", "b7",
2196 "b8", "b9", "b10", "b11",
2197 "b12", "b13", "b14", "b15",
2198 "b16", "b17", "b18", "b19",
2199 "b20", "b21", "b22", "b23",
2200 "b24", "b25", "b26", "b27",
2201 "b28", "b29", "b30", "b31",
2202 };
2203
2204 regnum -= gdbarch_num_regs (gdbarch);
2205
2206 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2207 return q_name[regnum - AARCH64_Q0_REGNUM];
2208
2209 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2210 return d_name[regnum - AARCH64_D0_REGNUM];
2211
2212 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2213 return s_name[regnum - AARCH64_S0_REGNUM];
2214
2215 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2216 return h_name[regnum - AARCH64_H0_REGNUM];
2217
2218 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2219 return b_name[regnum - AARCH64_B0_REGNUM];
2220
2221 if (tdep->has_sve ())
2222 {
2223 static const char *const sve_v_name[] =
2224 {
2225 "v0", "v1", "v2", "v3",
2226 "v4", "v5", "v6", "v7",
2227 "v8", "v9", "v10", "v11",
2228 "v12", "v13", "v14", "v15",
2229 "v16", "v17", "v18", "v19",
2230 "v20", "v21", "v22", "v23",
2231 "v24", "v25", "v26", "v27",
2232 "v28", "v29", "v30", "v31",
2233 };
2234
2235 if (regnum >= AARCH64_SVE_V0_REGNUM
2236 && regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2237 return sve_v_name[regnum - AARCH64_SVE_V0_REGNUM];
2238 }
2239
2240 internal_error (__FILE__, __LINE__,
2241 _("aarch64_pseudo_register_name: bad register number %d"),
2242 regnum);
2243 }
2244
2245 /* Implement the "pseudo_register_type" tdesc_arch_data method. */
2246
2247 static struct type *
2248 aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
2249 {
2250 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2251
2252 regnum -= gdbarch_num_regs (gdbarch);
2253
2254 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2255 return aarch64_vnq_type (gdbarch);
2256
2257 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2258 return aarch64_vnd_type (gdbarch);
2259
2260 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2261 return aarch64_vns_type (gdbarch);
2262
2263 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2264 return aarch64_vnh_type (gdbarch);
2265
2266 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2267 return aarch64_vnb_type (gdbarch);
2268
2269 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2270 && regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2271 return aarch64_vnv_type (gdbarch);
2272
2273 internal_error (__FILE__, __LINE__,
2274 _("aarch64_pseudo_register_type: bad register number %d"),
2275 regnum);
2276 }
2277
2278 /* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2279
2280 static int
2281 aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
2282 struct reggroup *group)
2283 {
2284 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2285
2286 regnum -= gdbarch_num_regs (gdbarch);
2287
2288 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2289 return group == all_reggroup || group == vector_reggroup;
2290 else if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2291 return (group == all_reggroup || group == vector_reggroup
2292 || group == float_reggroup);
2293 else if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2294 return (group == all_reggroup || group == vector_reggroup
2295 || group == float_reggroup);
2296 else if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2297 return group == all_reggroup || group == vector_reggroup;
2298 else if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2299 return group == all_reggroup || group == vector_reggroup;
2300 else if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2301 && regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2302 return group == all_reggroup || group == vector_reggroup;
2303
2304 return group == all_reggroup;
2305 }
2306
2307 /* Helper for aarch64_pseudo_read_value. */
2308
2309 static struct value *
2310 aarch64_pseudo_read_value_1 (struct gdbarch *gdbarch,
2311 readable_regcache *regcache, int regnum_offset,
2312 int regsize, struct value *result_value)
2313 {
2314 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
2315
2316 /* Enough space for a full vector register. */
2317 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2318 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2319
2320 if (regcache->raw_read (v_regnum, reg_buf) != REG_VALID)
2321 mark_value_bytes_unavailable (result_value, 0,
2322 TYPE_LENGTH (value_type (result_value)));
2323 else
2324 memcpy (value_contents_raw (result_value), reg_buf, regsize);
2325
2326 return result_value;
2327 }
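
/* For example (illustrative): reading pseudo register "s3" arrives
   here with REGNUM_OFFSET 3 and REGSIZE S_REGISTER_SIZE, so the low
   4 bytes of the raw V3 contents are copied into the result value;
   the remaining bytes of REG_BUF are simply ignored.  */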
2328
2329 /* Implement the "pseudo_register_read_value" gdbarch method. */
2330
2331 static struct value *
2332 aarch64_pseudo_read_value (struct gdbarch *gdbarch, readable_regcache *regcache,
2333 int regnum)
2334 {
2335 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2336 struct value *result_value = allocate_value (register_type (gdbarch, regnum));
2337
2338 VALUE_LVAL (result_value) = lval_register;
2339 VALUE_REGNUM (result_value) = regnum;
2340
2341 regnum -= gdbarch_num_regs (gdbarch);
2342
2343 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2344 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2345 regnum - AARCH64_Q0_REGNUM,
2346 Q_REGISTER_SIZE, result_value);
2347
2348 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2349 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2350 regnum - AARCH64_D0_REGNUM,
2351 D_REGISTER_SIZE, result_value);
2352
2353 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2354 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2355 regnum - AARCH64_S0_REGNUM,
2356 S_REGISTER_SIZE, result_value);
2357
2358 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2359 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2360 regnum - AARCH64_H0_REGNUM,
2361 H_REGISTER_SIZE, result_value);
2362
2363 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2364 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2365 regnum - AARCH64_B0_REGNUM,
2366 B_REGISTER_SIZE, result_value);
2367
2368 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2369 && regnum < AARCH64_SVE_V0_REGNUM + 32)
2370 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2371 regnum - AARCH64_SVE_V0_REGNUM,
2372 V_REGISTER_SIZE, result_value);
2373
2374 gdb_assert_not_reached ("regnum out of bounds");
2375 }
2376
2377 /* Helper for aarch64_pseudo_write. */
2378
2379 static void
2380 aarch64_pseudo_write_1 (struct gdbarch *gdbarch, struct regcache *regcache,
2381 int regnum_offset, int regsize, const gdb_byte *buf)
2382 {
2383 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
2384
2385 /* Enough space for a full vector register. */
2386 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2387 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2388
2389 /* Ensure the register buffer is zero; we want GDB writes of the
2390 various 'scalar' pseudo registers to behave like architectural
2391 writes: register-width bytes are written and the remainder is set
2392 to zero. */
2393 memset (reg_buf, 0, register_size (gdbarch, AARCH64_V0_REGNUM));
2394
2395 memcpy (reg_buf, buf, regsize);
2396 regcache->raw_write (v_regnum, reg_buf);
2397 }
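
/* For example (illustrative): writing to "s0" lands here with REGSIZE
   4, so only the low 4 bytes of V0 take the new value and the upper
   lanes read back as zero afterwards, matching the architectural
   behaviour of a scalar write to an S register.  */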
2398
2399 /* Implement the "pseudo_register_write" gdbarch method. */
2400
2401 static void
2402 aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2403 int regnum, const gdb_byte *buf)
2404 {
2405 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2406 regnum -= gdbarch_num_regs (gdbarch);
2407
2408 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2409 return aarch64_pseudo_write_1 (gdbarch, regcache,
2410 regnum - AARCH64_Q0_REGNUM, Q_REGISTER_SIZE,
2411 buf);
2412
2413 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2414 return aarch64_pseudo_write_1 (gdbarch, regcache,
2415 regnum - AARCH64_D0_REGNUM, D_REGISTER_SIZE,
2416 buf);
2417
2418 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2419 return aarch64_pseudo_write_1 (gdbarch, regcache,
2420 regnum - AARCH64_S0_REGNUM, S_REGISTER_SIZE,
2421 buf);
2422
2423 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2424 return aarch64_pseudo_write_1 (gdbarch, regcache,
2425 regnum - AARCH64_H0_REGNUM, H_REGISTER_SIZE,
2426 buf);
2427
2428 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2429 return aarch64_pseudo_write_1 (gdbarch, regcache,
2430 regnum - AARCH64_B0_REGNUM, B_REGISTER_SIZE,
2431 buf);
2432
2433 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2434 && regnum < AARCH64_SVE_V0_REGNUM + 32)
2435 return aarch64_pseudo_write_1 (gdbarch, regcache,
2436 regnum - AARCH64_SVE_V0_REGNUM,
2437 V_REGISTER_SIZE, buf);
2438
2439 gdb_assert_not_reached ("regnum out of bounds");
2440 }
2441
2442 /* Callback function for user_reg_add. */
2443
2444 static struct value *
2445 value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
2446 {
2447 const int *reg_p = (const int *) baton;
2448
2449 return value_of_register (*reg_p, frame);
2450 }
2451 \f
2452
2453 /* Implement the "software_single_step" gdbarch method, needed to
2454 single step through atomic sequences on AArch64. */
2455
2456 static std::vector<CORE_ADDR>
2457 aarch64_software_single_step (struct regcache *regcache)
2458 {
2459 struct gdbarch *gdbarch = regcache->arch ();
2460 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2461 const int insn_size = 4;
2462 const int atomic_sequence_length = 16; /* Maximum insns to scan. */
2463 CORE_ADDR pc = regcache_read_pc (regcache);
2464 CORE_ADDR breaks[2] = { -1, -1 };
2465 CORE_ADDR loc = pc;
2466 CORE_ADDR closing_insn = 0;
2467 uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
2468 byte_order_for_code);
2469 int index;
2470 int insn_count;
2471 int bc_insn_count = 0; /* Conditional branch instruction count. */
2472 int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed). */
2473 aarch64_inst inst;
2474
2475 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2476 return {};
2477
2478 /* Look for a Load Exclusive instruction which begins the sequence. */
2479 if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
2480 return {};
2481
2482 for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
2483 {
2484 loc += insn_size;
2485 insn = read_memory_unsigned_integer (loc, insn_size,
2486 byte_order_for_code);
2487
2488 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2489 return {};
2490 /* Check if the instruction is a conditional branch. */
2491 if (inst.opcode->iclass == condbranch)
2492 {
2493 gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);
2494
2495 if (bc_insn_count >= 1)
2496 return {};
2497
2498 /* It is, so we'll try to set a breakpoint at the destination. */
2499 breaks[1] = loc + inst.operands[0].imm.value;
2500
2501 bc_insn_count++;
2502 last_breakpoint++;
2503 }
2504
2505 /* Look for the Store Exclusive which closes the atomic sequence. */
2506 if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
2507 {
2508 closing_insn = loc;
2509 break;
2510 }
2511 }
2512
2513 /* We didn't find a closing Store Exclusive instruction, fall back. */
2514 if (!closing_insn)
2515 return {};
2516
2517 /* Insert breakpoint after the end of the atomic sequence. */
2518 breaks[0] = loc + insn_size;
2519
2520 /* Check for duplicated breakpoints, and also check that the second
2521 breakpoint is not within the atomic sequence. */
2522 if (last_breakpoint
2523 && (breaks[1] == breaks[0]
2524 || (breaks[1] >= pc && breaks[1] <= closing_insn)))
2525 last_breakpoint = 0;
2526
2527 std::vector<CORE_ADDR> next_pcs;
2528
2529 /* Insert the breakpoint at the end of the sequence, and one at the
2530 destination of the conditional branch, if it exists. */
2531 for (index = 0; index <= last_breakpoint; index++)
2532 next_pcs.push_back (breaks[index]);
2533
2534 return next_pcs;
2535 }
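
/* An example of the kind of sequence handled above (illustrative):

     retry:
       ldaxr  w0, [x1]      ; load exclusive opens the sequence
       cmp    w0, w2
       b.ne   out           ; conditional branch, target recorded
       stlxr  w3, w4, [x1]  ; store exclusive closes the sequence
       cbnz   w3, retry
     out:

   Breakpoints go just past the STLXR and at the B.NE target rather
   than inside the sequence, since trapping between the load and store
   exclusive would keep making the store exclusive fail.  */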
2536
2537 struct aarch64_displaced_step_closure : public displaced_step_closure
2538 {
2539 /* True when a conditional instruction, such as B.COND, TBZ, etc.,
2540 is being displaced stepped. */
2541 int cond = 0;
2542
2543 /* PC adjustment offset after displaced stepping. */
2544 int32_t pc_adjust = 0;
2545 };
2546
2547 /* Data when visiting instructions for displaced stepping. */
2548
2549 struct aarch64_displaced_step_data
2550 {
2551 struct aarch64_insn_data base;
2552
2553 /* The address at which the instruction will be executed. */
2554 CORE_ADDR new_addr;
2555 /* Buffer of instructions to be copied to NEW_ADDR to execute. */
2556 uint32_t insn_buf[DISPLACED_MODIFIED_INSNS];
2557 /* Number of instructions in INSN_BUF. */
2558 unsigned insn_count;
2559 /* Registers when doing displaced stepping. */
2560 struct regcache *regs;
2561
2562 aarch64_displaced_step_closure *dsc;
2563 };
2564
2565 /* Implementation of aarch64_insn_visitor method "b". */
2566
2567 static void
2568 aarch64_displaced_step_b (const int is_bl, const int32_t offset,
2569 struct aarch64_insn_data *data)
2570 {
2571 struct aarch64_displaced_step_data *dsd
2572 = (struct aarch64_displaced_step_data *) data;
2573 int64_t new_offset = data->insn_addr - dsd->new_addr + offset;
2574
2575 if (can_encode_int32 (new_offset, 28))
2576 {
2577 /* Emit B rather than BL, because executing BL on a new address
2578 will get the wrong address into LR. In order to avoid this,
2579 we emit B, and update LR if the instruction is BL. */
2580 emit_b (dsd->insn_buf, 0, new_offset);
2581 dsd->insn_count++;
2582 }
2583 else
2584 {
2585 /* Write NOP. */
2586 emit_nop (dsd->insn_buf);
2587 dsd->insn_count++;
2588 dsd->dsc->pc_adjust = offset;
2589 }
2590
2591 if (is_bl)
2592 {
2593 /* Update LR. */
2594 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
2595 data->insn_addr + 4);
2596 }
2597 }
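
/* Worked example (illustrative): for a BL at 0x400000 targeting
   0x400100, copied to a scratch pad at 0x500000, NEW_OFFSET is
   0x400000 - 0x500000 + 0x100, which still encodes in 28 bits, so a
   plain B is emitted and LR is set by hand to 0x400004.  Were the
   scratch pad out of range, a NOP is emitted instead and PC_ADJUST
   makes the fixup phase steer the PC to the real target.  */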
2598
2599 /* Implementation of aarch64_insn_visitor method "b_cond". */
2600
2601 static void
2602 aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
2603 struct aarch64_insn_data *data)
2604 {
2605 struct aarch64_displaced_step_data *dsd
2606 = (struct aarch64_displaced_step_data *) data;
2607
2608 /* GDB has to fix up the PC after displaced stepping this instruction
2609 differently according to whether the condition is true or false.
2610 Instead of checking COND against the condition flags, we can emit
2611 the following instructions, and GDB can then tell how to fix up the
2612 PC according to the PC value.
2613
2614 B.COND TAKEN ; If cond is true, then jump to TAKEN.
2615 INSN1 ;
2616 TAKEN:
2617 INSN2
2618 */
2619
2620 emit_bcond (dsd->insn_buf, cond, 8);
2621 dsd->dsc->cond = 1;
2622 dsd->dsc->pc_adjust = offset;
2623 dsd->insn_count = 1;
2624 }
2625
2626 /* Build an aarch64_register operand dynamically. If the register is
2627 known statically, it should be made a global as above instead of
2628 using this helper function. */
2629
2630 static struct aarch64_register
2631 aarch64_register (unsigned num, int is64)
2632 {
2633 return (struct aarch64_register) { num, is64 };
2634 }
2635
2636 /* Implementation of aarch64_insn_visitor method "cb". */
2637
2638 static void
2639 aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
2640 const unsigned rn, int is64,
2641 struct aarch64_insn_data *data)
2642 {
2643 struct aarch64_displaced_step_data *dsd
2644 = (struct aarch64_displaced_step_data *) data;
2645
2646 /* The offset is out of range for a compare and branch
2647 instruction. We can use the following instructions instead:
2648
2649 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
2650 INSN1 ;
2651 TAKEN:
2652 INSN2
2653 */
2654 emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
2655 dsd->insn_count = 1;
2656 dsd->dsc->cond = 1;
2657 dsd->dsc->pc_adjust = offset;
2658 }
2659
2660 /* Implementation of aarch64_insn_visitor method "tb". */
2661
2662 static void
2663 aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
2664 const unsigned rt, unsigned bit,
2665 struct aarch64_insn_data *data)
2666 {
2667 struct aarch64_displaced_step_data *dsd
2668 = (struct aarch64_displaced_step_data *) data;
2669
2670 /* The offset is out of range for a test bit and branch
2671 instruction. We can use the following instructions instead:
2672
2673 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
2674 INSN1 ;
2675 TAKEN:
2676 INSN2
2677
2678 */
2679 emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
2680 dsd->insn_count = 1;
2681 dsd->dsc->cond = 1;
2682 dsd->dsc->pc_adjust = offset;
2683 }
2684
2685 /* Implementation of aarch64_insn_visitor method "adr". */
2686
2687 static void
2688 aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
2689 const int is_adrp, struct aarch64_insn_data *data)
2690 {
2691 struct aarch64_displaced_step_data *dsd
2692 = (struct aarch64_displaced_step_data *) data;
2693 /* We know exactly the address the ADR{P,} instruction will compute.
2694 We can just write it to the destination register. */
2695 CORE_ADDR address = data->insn_addr + offset;
2696
2697 if (is_adrp)
2698 {
2699 /* Clear the lower 12 bits of the offset to get the 4K page. */
2700 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2701 address & ~0xfff);
2702 }
2703 else
2704 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2705 address);
2706
2707 dsd->dsc->pc_adjust = 4;
2708 emit_nop (dsd->insn_buf);
2709 dsd->insn_count = 1;
2710 }
2711
2712 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
2713
2714 static void
2715 aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
2716 const unsigned rt, const int is64,
2717 struct aarch64_insn_data *data)
2718 {
2719 struct aarch64_displaced_step_data *dsd
2720 = (struct aarch64_displaced_step_data *) data;
2721 CORE_ADDR address = data->insn_addr + offset;
2722 struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };
2723
2724 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
2725 address);
2726
2727 if (is_sw)
2728 dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
2729 aarch64_register (rt, 1), zero);
2730 else
2731 dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
2732 aarch64_register (rt, 1), zero);
2733
2734 dsd->dsc->pc_adjust = 4;
2735 }
2736
2737 /* Implementation of aarch64_insn_visitor method "others". */
2738
2739 static void
2740 aarch64_displaced_step_others (const uint32_t insn,
2741 struct aarch64_insn_data *data)
2742 {
2743 struct aarch64_displaced_step_data *dsd
2744 = (struct aarch64_displaced_step_data *) data;
2745
2746 aarch64_emit_insn (dsd->insn_buf, insn);
2747 dsd->insn_count = 1;
2748
2749 if ((insn & 0xfffffc1f) == 0xd65f0000)
2750 {
2751 /* RET */
2752 dsd->dsc->pc_adjust = 0;
2753 }
2754 else
2755 dsd->dsc->pc_adjust = 4;
2756 }
2757
2758 static const struct aarch64_insn_visitor visitor =
2759 {
2760 aarch64_displaced_step_b,
2761 aarch64_displaced_step_b_cond,
2762 aarch64_displaced_step_cb,
2763 aarch64_displaced_step_tb,
2764 aarch64_displaced_step_adr,
2765 aarch64_displaced_step_ldr_literal,
2766 aarch64_displaced_step_others,
2767 };
2768
2769 /* Implement the "displaced_step_copy_insn" gdbarch method. */
2770
2771 struct displaced_step_closure *
2772 aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
2773 CORE_ADDR from, CORE_ADDR to,
2774 struct regcache *regs)
2775 {
2776 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2777 uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
2778 struct aarch64_displaced_step_data dsd;
2779 aarch64_inst inst;
2780
2781 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2782 return NULL;
2783
2784 /* Look for a Load Exclusive instruction which begins the sequence. */
2785 if (inst.opcode->iclass == ldstexcl && bit (insn, 22))
2786 {
2787 /* We can't displaced step atomic sequences. */
2788 return NULL;
2789 }
2790
2791 std::unique_ptr<aarch64_displaced_step_closure> dsc
2792 (new aarch64_displaced_step_closure);
2793 dsd.base.insn_addr = from;
2794 dsd.new_addr = to;
2795 dsd.regs = regs;
2796 dsd.dsc = dsc.get ();
2797 dsd.insn_count = 0;
2798 aarch64_relocate_instruction (insn, &visitor,
2799 (struct aarch64_insn_data *) &dsd);
2800 gdb_assert (dsd.insn_count <= DISPLACED_MODIFIED_INSNS);
2801
2802 if (dsd.insn_count != 0)
2803 {
2804 int i;
2805
2806 /* The instruction can be relocated to the scratch pad. Copy the
2807 relocated instruction(s) there. */
2808 for (i = 0; i < dsd.insn_count; i++)
2809 {
2810 if (debug_displaced)
2811 {
2812 debug_printf ("displaced: writing insn ");
2813 debug_printf ("%.8x", dsd.insn_buf[i]);
2814 debug_printf (" at %s\n", paddress (gdbarch, to + i * 4));
2815 }
2816 write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
2817 (ULONGEST) dsd.insn_buf[i]);
2818 }
2819 }
2820 else
2821 {
2822 dsc = NULL;
2823 }
2824
2825 return dsc.release ();
2826 }
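
/* End-to-end sketch (illustrative): to displaced-step "b.eq target",
   the code above writes a "b.eq #8" stub to the scratch pad; the
   fixup method below then compares the stopped PC against the scratch
   address to learn whether the branch was taken, and moves the real
   PC by PC_ADJUST accordingly.  */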
2827
2828 /* Implement the "displaced_step_fixup" gdbarch method. */
2829
2830 void
2831 aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
2832 struct displaced_step_closure *dsc_,
2833 CORE_ADDR from, CORE_ADDR to,
2834 struct regcache *regs)
2835 {
2836 aarch64_displaced_step_closure *dsc = (aarch64_displaced_step_closure *) dsc_;
2837
2838 if (dsc->cond)
2839 {
2840 ULONGEST pc;
2841
2842 regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc);
2843 if (pc - to == 8)
2844 {
2845 /* Condition is true. */
2846 }
2847 else if (pc - to == 4)
2848 {
2849 /* Condition is false. */
2850 dsc->pc_adjust = 4;
2851 }
2852 else
2853 gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
2854 }
2855
2856 if (dsc->pc_adjust != 0)
2857 {
2858 if (debug_displaced)
2859 {
2860 debug_printf ("displaced: fixup: set PC to %s:%d\n",
2861 paddress (gdbarch, from), dsc->pc_adjust);
2862 }
2863 regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
2864 from + dsc->pc_adjust);
2865 }
2866 }
2867
2868 /* Implement the "displaced_step_hw_singlestep" gdbarch method. */
2869
2870 int
2871 aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch,
2872 struct displaced_step_closure *closure)
2873 {
2874 return 1;
2875 }
2876
2877 /* Get the correct target description for the given VQ value.
2878 If VQ is zero then it is assumed SVE is not supported.
2879 (It is not possible to set VQ to zero on an SVE system). */
2880
2881 const target_desc *
2882 aarch64_read_description (uint64_t vq)
2883 {
2884 if (vq > AARCH64_MAX_SVE_VQ)
2885 error (_("VQ is %" PRIu64 ", maximum supported value is %d"), vq,
2886 AARCH64_MAX_SVE_VQ);
2887
2888 struct target_desc *tdesc = tdesc_aarch64_list[vq];
2889
2890 if (tdesc == NULL)
2891 {
2892 tdesc = aarch64_create_target_description (vq);
2893 tdesc_aarch64_list[vq] = tdesc;
2894 }
2895
2896 return tdesc;
2897 }
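
/* Usage sketch: a caller that has obtained the SVE vector length VL
   in bytes from the target (e.g. via ptrace on GNU/Linux) would do
   something like

     tdesc = aarch64_read_description (sve_vq_from_vl (vl));

   passing 0 when SVE is absent.  */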
2898
2899 /* Return the VQ used when creating the target description TDESC. */
2900
2901 static uint64_t
2902 aarch64_get_tdesc_vq (const struct target_desc *tdesc)
2903 {
2904 const struct tdesc_feature *feature_sve;
2905
2906 if (!tdesc_has_registers (tdesc))
2907 return 0;
2908
2909 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
2910
2911 if (feature_sve == nullptr)
2912 return 0;
2913
2914 uint64_t vl = tdesc_register_size (feature_sve,
2915 aarch64_sve_register_names[0]);
2916 return sve_vq_from_vl (vl);
2917 }
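
/* For example (illustrative): with 256-bit SVE vectors the Z registers
   are 32 bytes long, so tdesc_register_size returns 32 and
   sve_vq_from_vl yields a VQ of 2 (VQ counts 128-bit quadwords).  */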
2918
2919
2920 /* Initialize the current architecture based on INFO. If possible,
2921 re-use an architecture from ARCHES, which is a list of
2922 architectures already created during this debugging session.
2923
2924 Called e.g. at program startup, when reading a core file, and when
2925 reading a binary file. */
2926
2927 static struct gdbarch *
2928 aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
2929 {
2930 struct gdbarch_tdep *tdep;
2931 struct gdbarch *gdbarch;
2932 struct gdbarch_list *best_arch;
2933 struct tdesc_arch_data *tdesc_data = NULL;
2934 const struct target_desc *tdesc = info.target_desc;
2935 int i;
2936 int valid_p = 1;
2937 const struct tdesc_feature *feature_core;
2938 const struct tdesc_feature *feature_fpu;
2939 const struct tdesc_feature *feature_sve;
2940 int num_regs = 0;
2941 int num_pseudo_regs = 0;
2942
2943 /* Ensure we always have a target description. */
2944 if (!tdesc_has_registers (tdesc))
2945 tdesc = aarch64_read_description (0);
2946 gdb_assert (tdesc);
2947
2948 feature_core = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");
2949 feature_fpu = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
2950 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
2951
2952 if (feature_core == NULL)
2953 return NULL;
2954
2955 tdesc_data = tdesc_data_alloc ();
2956
2957 /* Validate the description provides the mandatory core R registers
2958 and allocate their numbers. */
2959 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
2960 valid_p &= tdesc_numbered_register (feature_core, tdesc_data,
2961 AARCH64_X0_REGNUM + i,
2962 aarch64_r_register_names[i]);
2963
2964 num_regs = AARCH64_X0_REGNUM + i;
2965
2966 /* Add the V registers. */
2967 if (feature_fpu != NULL)
2968 {
2969 if (feature_sve != NULL)
2970 error (_("Program contains both fpu and SVE features."));
2971
2972 /* Validate the description provides the mandatory V registers
2973 and allocate their numbers. */
2974 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
2975 valid_p &= tdesc_numbered_register (feature_fpu, tdesc_data,
2976 AARCH64_V0_REGNUM + i,
2977 aarch64_v_register_names[i]);
2978
2979 num_regs = AARCH64_V0_REGNUM + i;
2980 }
2981
2982 /* Add the SVE registers. */
2983 if (feature_sve != NULL)
2984 {
2985 /* Validate the description provides the mandatory SVE registers
2986 and allocate their numbers. */
2987 for (i = 0; i < ARRAY_SIZE (aarch64_sve_register_names); i++)
2988 valid_p &= tdesc_numbered_register (feature_sve, tdesc_data,
2989 AARCH64_SVE_Z0_REGNUM + i,
2990 aarch64_sve_register_names[i]);
2991
2992 num_regs = AARCH64_SVE_Z0_REGNUM + i;
2993 num_pseudo_regs += 32; /* Add the Vn register pseudos. */
2994 }
2995
2996 if (feature_fpu != NULL || feature_sve != NULL)
2997 {
2998 num_pseudo_regs += 32; /* Add the Qn scalar register pseudos. */
2999 num_pseudo_regs += 32; /* Add the Dn scalar register pseudos. */
3000 num_pseudo_regs += 32; /* Add the Sn scalar register pseudos. */
3001 num_pseudo_regs += 32; /* Add the Hn scalar register pseudos. */
3002 num_pseudo_regs += 32; /* Add the Bn scalar register pseudos. */
3003 }
3004
3005 if (!valid_p)
3006 {
3007 tdesc_data_cleanup (tdesc_data);
3008 return NULL;
3009 }
3010
3011 /* AArch64 code is always little-endian. */
3012 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
3013
3014 /* If there is already a candidate, use it. */
3015 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
3016 best_arch != NULL;
3017 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
3018 {
3019 /* Found a match. */
3020 break;
3021 }
3022
3023 if (best_arch != NULL)
3024 {
3025 if (tdesc_data != NULL)
3026 tdesc_data_cleanup (tdesc_data);
3027 return best_arch->gdbarch;
3028 }
3029
3030 tdep = XCNEW (struct gdbarch_tdep);
3031 gdbarch = gdbarch_alloc (&info, tdep);
3032
3033 /* This should be low enough for everything. */
3034 tdep->lowest_pc = 0x20;
3035 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
3036 tdep->jb_elt_size = 8;
3037 tdep->vq = aarch64_get_tdesc_vq (tdesc);
3038
3039 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
3040 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
3041
3042 /* Frame handling. */
3043 set_gdbarch_dummy_id (gdbarch, aarch64_dummy_id);
3044 set_gdbarch_unwind_pc (gdbarch, aarch64_unwind_pc);
3045 set_gdbarch_unwind_sp (gdbarch, aarch64_unwind_sp);
3046
3047 /* Advance PC across function entry code. */
3048 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
3049
3050 /* The stack grows downward. */
3051 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
3052
3053 /* Breakpoint manipulation. */
3054 set_gdbarch_breakpoint_kind_from_pc (gdbarch,
3055 aarch64_breakpoint::kind_from_pc);
3056 set_gdbarch_sw_breakpoint_from_kind (gdbarch,
3057 aarch64_breakpoint::bp_from_kind);
3058 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
3059 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
3060
3061 /* Information about registers, etc. */
3062 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
3063 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
3064 set_gdbarch_num_regs (gdbarch, num_regs);
3065
3066 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
3067 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
3068 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
3069 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
3070 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
3071 set_tdesc_pseudo_register_reggroup_p (gdbarch,
3072 aarch64_pseudo_register_reggroup_p);
3073
3074 /* ABI */
3075 set_gdbarch_short_bit (gdbarch, 16);
3076 set_gdbarch_int_bit (gdbarch, 32);
3077 set_gdbarch_float_bit (gdbarch, 32);
3078 set_gdbarch_double_bit (gdbarch, 64);
3079 set_gdbarch_long_double_bit (gdbarch, 128);
3080 set_gdbarch_long_bit (gdbarch, 64);
3081 set_gdbarch_long_long_bit (gdbarch, 64);
3082 set_gdbarch_ptr_bit (gdbarch, 64);
3083 set_gdbarch_char_signed (gdbarch, 0);
3084 set_gdbarch_wchar_signed (gdbarch, 0);
3085 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
3086 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
3087 set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);
3088
3089 /* Internal <-> external register number maps. */
3090 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
3091
3092 /* Returning results. */
3093 set_gdbarch_return_value (gdbarch, aarch64_return_value);
3094
3095 /* Disassembly. */
3096 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
3097
3098 /* Virtual tables. */
3099 set_gdbarch_vbit_in_delta (gdbarch, 1);
3100
3101 /* Hook in the ABI-specific overrides, if they have been registered. */
3102 info.target_desc = tdesc;
3103 info.tdesc_data = tdesc_data;
3104 gdbarch_init_osabi (info, gdbarch);
3105
3106 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
3107
3108 /* Add some default predicates. */
3109 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
3110 dwarf2_append_unwinders (gdbarch);
3111 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
3112
3113 frame_base_set_default (gdbarch, &aarch64_normal_base);
3114
3115 /* Now we have tuned the configuration, set a few final things,
3116 based on what the OS ABI has told us. */
3117
3118 if (tdep->jb_pc >= 0)
3119 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
3120
3121 set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);
3122
3123 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
3124
3125 /* Add standard register aliases. */
3126 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
3127 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
3128 value_of_aarch64_user_reg,
3129 &aarch64_register_aliases[i].regnum);
3130
3131 return gdbarch;
3132 }
3133
3134 static void
3135 aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
3136 {
3137 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3138
3139 if (tdep == NULL)
3140 return;
3141
3142 fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
3143 paddress (gdbarch, tdep->lowest_pc));
3144 }
3145
3146 #if GDB_SELF_TEST
3147 namespace selftests
3148 {
3149 static void aarch64_process_record_test (void);
3150 }
3151 #endif
3152
3153 void
3154 _initialize_aarch64_tdep (void)
3155 {
3156 gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
3157 aarch64_dump_tdep);
3158
3159 /* Debug this file's internals. */
3160 add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
3161 Set AArch64 debugging."), _("\
3162 Show AArch64 debugging."), _("\
3163 When on, AArch64 specific debugging is enabled."),
3164 NULL,
3165 show_aarch64_debug,
3166 &setdebuglist, &showdebuglist);
3167
3168 #if GDB_SELF_TEST
3169 selftests::register_test ("aarch64-analyze-prologue",
3170 selftests::aarch64_analyze_prologue_test);
3171 selftests::register_test ("aarch64-process-record",
3172 selftests::aarch64_process_record_test);
3173 selftests::record_xml_tdesc ("aarch64.xml",
3174 aarch64_create_target_description (0));
3175 #endif
3176 }
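
/* The debug flag registered above is driven from the CLI, e.g.:

     (gdb) set debug aarch64 on
     (gdb) show debug aarch64

   which makes the aarch64_debug-guarded debug_printf calls throughout
   this file emit their tracing.  */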
3177
3178 /* AArch64 process record-replay related structures, defines etc. */
3179
3180 #define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
3181 do \
3182 { \
3183 unsigned int reg_len = LENGTH; \
3184 if (reg_len) \
3185 { \
3186 REGS = XNEWVEC (uint32_t, reg_len); \
3187 memcpy (&REGS[0], &RECORD_BUF[0], sizeof (uint32_t) * LENGTH); \
3188 } \
3189 } \
3190 while (0)
3191
3192 #define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
3193 do \
3194 { \
3195 unsigned int mem_len = LENGTH; \
3196 if (mem_len) \
3197 { \
3198 MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
3199 memcpy (&MEMS->len, &RECORD_BUF[0], \
3200 sizeof (struct aarch64_mem_r) * LENGTH); \
3201 } \
3202 } \
3203 while (0)
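
/* Usage sketch for the two macros above (illustrative): a record
   handler collects register numbers in a local buffer, then hands a
   heap copy to the decode record:

     uint32_t record_buf[2] = { reg_rd, AARCH64_CPSR_REGNUM };
     aarch64_insn_r->reg_rec_count = 2;
     REG_ALLOC (aarch64_insn_r->aarch64_regs,
                aarch64_insn_r->reg_rec_count, record_buf);
*/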
3204
3205 /* AArch64 record/replay structures and enumerations. */
3206
3207 struct aarch64_mem_r
3208 {
3209 uint64_t len; /* Record length. */
3210 uint64_t addr; /* Memory address. */
3211 };
3212
3213 enum aarch64_record_result
3214 {
3215 AARCH64_RECORD_SUCCESS,
3216 AARCH64_RECORD_UNSUPPORTED,
3217 AARCH64_RECORD_UNKNOWN
3218 };
3219
3220 typedef struct insn_decode_record_t
3221 {
3222 struct gdbarch *gdbarch;
3223 struct regcache *regcache;
3224 CORE_ADDR this_addr; /* Address of insn to be recorded. */
3225 uint32_t aarch64_insn; /* Insn to be recorded. */
3226 uint32_t mem_rec_count; /* Count of memory records. */
3227 uint32_t reg_rec_count; /* Count of register records. */
3228 uint32_t *aarch64_regs; /* Registers to be recorded. */
3229 struct aarch64_mem_r *aarch64_mems; /* Memory locations to be recorded. */
3230 } insn_decode_record;
3231
3232 /* Record handler for data processing - register instructions. */
3233
3234 static unsigned int
3235 aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
3236 {
3237 uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
3238 uint32_t record_buf[4];
3239
3240 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3241 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3242 insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);
3243
3244 if (!bit (aarch64_insn_r->aarch64_insn, 28))
3245 {
3246 uint8_t setflags;
3247
3248 /* Logical (shifted register). */
3249 if (insn_bits24_27 == 0x0a)
3250 setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
3251 /* Add/subtract. */
3252 else if (insn_bits24_27 == 0x0b)
3253 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3254 else
3255 return AARCH64_RECORD_UNKNOWN;
3256
3257 record_buf[0] = reg_rd;
3258 aarch64_insn_r->reg_rec_count = 1;
3259 if (setflags)
3260 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3261 }
3262 else
3263 {
3264 if (insn_bits24_27 == 0x0b)
3265 {
3266 /* Data-processing (3 source). */
3267 record_buf[0] = reg_rd;
3268 aarch64_insn_r->reg_rec_count = 1;
3269 }
3270 else if (insn_bits24_27 == 0x0a)
3271 {
3272 if (insn_bits21_23 == 0x00)
3273 {
3274 /* Add/subtract (with carry). */
3275 record_buf[0] = reg_rd;
3276 aarch64_insn_r->reg_rec_count = 1;
3277 if (bit (aarch64_insn_r->aarch64_insn, 29))
3278 {
3279 record_buf[1] = AARCH64_CPSR_REGNUM;
3280 aarch64_insn_r->reg_rec_count = 2;
3281 }
3282 }
3283 else if (insn_bits21_23 == 0x02)
3284 {
3285 /* Conditional compare (register) and conditional compare
3286 (immediate) instructions. */
3287 record_buf[0] = AARCH64_CPSR_REGNUM;
3288 aarch64_insn_r->reg_rec_count = 1;
3289 }
3290 else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
3291 {
3292 /* Conditional select. */
3293 /* Data-processing (2 source). */
3294 /* Data-processing (1 source). */
3295 record_buf[0] = reg_rd;
3296 aarch64_insn_r->reg_rec_count = 1;
3297 }
3298 else
3299 return AARCH64_RECORD_UNKNOWN;
3300 }
3301 }
3302
3303 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3304 record_buf);
3305 return AARCH64_RECORD_SUCCESS;
3306 }
3307
3308 /* Record handler for data processing - immediate instructions. */
3309
3310 static unsigned int
3311 aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
3312 {
3313 uint8_t reg_rd, insn_bit23, insn_bits24_27, setflags;
3314 uint32_t record_buf[4];
3315
3316 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3317 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3318 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3319
3320 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
3321 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
3322 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
3323 {
3324 record_buf[0] = reg_rd;
3325 aarch64_insn_r->reg_rec_count = 1;
3326 }
3327 else if (insn_bits24_27 == 0x01)
3328 {
3329 /* Add/Subtract (immediate). */
3330 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3331 record_buf[0] = reg_rd;
3332 aarch64_insn_r->reg_rec_count = 1;
3333 if (setflags)
3334 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3335 }
3336 else if (insn_bits24_27 == 0x02 && !insn_bit23)
3337 {
3338 /* Logical (immediate). */
3339 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
3340 record_buf[0] = reg_rd;
3341 aarch64_insn_r->reg_rec_count = 1;
3342 if (setflags)
3343 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3344 }
3345 else
3346 return AARCH64_RECORD_UNKNOWN;
3347
3348 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3349 record_buf);
3350 return AARCH64_RECORD_SUCCESS;
3351 }
3352
3353 /* Record handler for branch, exception generation and system instructions. */
3354
3355 static unsigned int
3356 aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
3357 {
3358 struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
3359 uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
3360 uint32_t record_buf[4];
3361
3362 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3363 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3364 insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3365
3366 if (insn_bits28_31 == 0x0d)
3367 {
3368 /* Exception generation instructions. */
3369 if (insn_bits24_27 == 0x04)
3370 {
3371 if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
3372 && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
3373 && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
3374 {
3375 ULONGEST svc_number;
3376
3377 regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
3378 &svc_number);
3379 return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
3380 svc_number);
3381 }
3382 else
3383 return AARCH64_RECORD_UNSUPPORTED;
3384 }
3385 /* System instructions. */
3386 else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
3387 {
3388 uint32_t reg_rt, reg_crn;
3389
3390 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3391 reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3392
3393 /* Record rt in case of sysl and mrs instructions. */
3394 if (bit (aarch64_insn_r->aarch64_insn, 21))
3395 {
3396 record_buf[0] = reg_rt;
3397 aarch64_insn_r->reg_rec_count = 1;
3398 }
3399 /* Record cpsr for hint and msr (immediate) instructions. */
3400 else if (reg_crn == 0x02 || reg_crn == 0x04)
3401 {
3402 record_buf[0] = AARCH64_CPSR_REGNUM;
3403 aarch64_insn_r->reg_rec_count = 1;
3404 }
3405 }
3406 /* Unconditional branch (register). */
3407 else if ((insn_bits24_27 & 0x0e) == 0x06)
3408 {
3409 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3410 if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
3411 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3412 }
3413 else
3414 return AARCH64_RECORD_UNKNOWN;
3415 }
3416 /* Unconditional branch (immediate). */
3417 else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
3418 {
3419 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3420 if (bit (aarch64_insn_r->aarch64_insn, 31))
3421 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3422 }
3423 else
3424 /* Compare & branch (immediate), Test & branch (immediate) and
3425 Conditional branch (immediate). */
3426 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3427
3428 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3429 record_buf);
3430 return AARCH64_RECORD_SUCCESS;
3431 }
3432
3433 /* Record handler for advanced SIMD load and store instructions. */
3434
3435 static unsigned int
3436 aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
3437 {
3438 CORE_ADDR address;
3439 uint64_t addr_offset = 0;
3440 uint32_t record_buf[24];
3441 uint64_t record_buf_mem[24];
3442 uint32_t reg_rn, reg_rt;
3443 uint32_t reg_index = 0, mem_index = 0;
3444 uint8_t opcode_bits, size_bits;
3445
3446 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3447 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3448 size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3449 opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3450 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);
3451
3452 if (record_debug)
3453 debug_printf ("Process record: Advanced SIMD load/store\n");
3454
3455 /* Load/store single structure. */
3456 if (bit (aarch64_insn_r->aarch64_insn, 24))
3457 {
3458 uint8_t sindex, scale, selem, esize, replicate = 0;
3459 scale = opcode_bits >> 2;
3460 selem = ((opcode_bits & 0x02)
3461 | bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
3462 switch (scale)
3463 {
3464 case 1:
3465 if (size_bits & 0x01)
3466 return AARCH64_RECORD_UNKNOWN;
3467 break;
3468 case 2:
3469 if ((size_bits >> 1) & 0x01)
3470 return AARCH64_RECORD_UNKNOWN;
3471 if (size_bits & 0x01)
3472 {
3473 if (!((opcode_bits >> 1) & 0x01))
3474 scale = 3;
3475 else
3476 return AARCH64_RECORD_UNKNOWN;
3477 }
3478 break;
3479 case 3:
3480 if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
3481 {
3482 scale = size_bits;
3483 replicate = 1;
3484 break;
3485 }
3486 else
3487 return AARCH64_RECORD_UNKNOWN;
3488 default:
3489 break;
3490 }
3491 esize = 8 << scale;
3492 if (replicate)
3493 for (sindex = 0; sindex < selem; sindex++)
3494 {
3495 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3496 reg_rt = (reg_rt + 1) % 32;
3497 }
3498 else
3499 {
3500 for (sindex = 0; sindex < selem; sindex++)
3501 {
3502 if (bit (aarch64_insn_r->aarch64_insn, 22))
3503 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3504 else
3505 {
3506 record_buf_mem[mem_index++] = esize / 8;
3507 record_buf_mem[mem_index++] = address + addr_offset;
3508 }
3509 addr_offset = addr_offset + (esize / 8);
3510 reg_rt = (reg_rt + 1) % 32;
3511 }
3512 }
3513 }
3514 /* Load/store multiple structure. */
3515 else
3516 {
3517 uint8_t selem, esize, rpt, elements;
3518 uint8_t eindex, rindex;
3519
3520 esize = 8 << size_bits;
3521 if (bit (aarch64_insn_r->aarch64_insn, 30))
3522 elements = 128 / esize;
3523 else
3524 elements = 64 / esize;
3525
3526 switch (opcode_bits)
3527 {
3528 /* LD/ST4 (4 Registers). */
3529 case 0:
3530 rpt = 1;
3531 selem = 4;
3532 break;
3533 /* LD/ST1 (4 Registers). */
3534 case 2:
3535 rpt = 4;
3536 selem = 1;
3537 break;
3538 /* LD/ST3 (3 Registers). */
3539 case 4:
3540 rpt = 1;
3541 selem = 3;
3542 break;
3543 /* LD/ST1 (3 Registers). */
3544 case 6:
3545 rpt = 3;
3546 selem = 1;
3547 break;
3548 /* LD/ST1 (1 Register). */
3549 case 7:
3550 rpt = 1;
3551 selem = 1;
3552 break;
3553 /* LD/ST2 (2 Registers). */
3554 case 8:
3555 rpt = 1;
3556 selem = 2;
3557 break;
3558 /* LD/ST1 (2 Registers). */
3559 case 10:
3560 rpt = 2;
3561 selem = 1;
3562 break;
3563 default:
3564 return AARCH64_RECORD_UNSUPPORTED;
3565 break;
3566 }
3567 for (rindex = 0; rindex < rpt; rindex++)
3568 for (eindex = 0; eindex < elements; eindex++)
3569 {
3570 uint8_t reg_tt, sindex;
3571 reg_tt = (reg_rt + rindex) % 32;
3572 for (sindex = 0; sindex < selem; sindex++)
3573 {
3574 if (bit (aarch64_insn_r->aarch64_insn, 22))
3575 record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
3576 else
3577 {
3578 record_buf_mem[mem_index++] = esize / 8;
3579 record_buf_mem[mem_index++] = address + addr_offset;
3580 }
3581 addr_offset = addr_offset + (esize / 8);
3582 reg_tt = (reg_tt + 1) % 32;
3583 }
3584 }
3585 }
3586
3587 if (bit (aarch64_insn_r->aarch64_insn, 23))
3588 record_buf[reg_index++] = reg_rn;
3589
3590 aarch64_insn_r->reg_rec_count = reg_index;
3591 aarch64_insn_r->mem_rec_count = mem_index / 2;
3592 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3593 record_buf_mem);
3594 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3595 record_buf);
3596 return AARCH64_RECORD_SUCCESS;
3597 }
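
/* Worked example (illustrative): for "ld4 {v0.4s-v3.4s}, [x0]", RPT is
   1, SELEM is 4 and ELEMENTS is 4, so the nested loops above record
   V0-V3 (once per element) as modified registers; for the matching
   ST4, sixteen 4-byte element stores are logged in RECORD_BUF_MEM
   instead.  */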
3598
3599 /* Record handler for load and store instructions. */
3600
3601 static unsigned int
3602 aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
3603 {
3604 uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
3605 uint8_t insn_bit23, insn_bit21;
3606 uint8_t opc, size_bits, ld_flag, vector_flag;
3607 uint32_t reg_rn, reg_rt, reg_rt2;
3608 uint64_t datasize, offset;
3609 uint32_t record_buf[8];
3610 uint64_t record_buf_mem[8];
3611 CORE_ADDR address;
3612
3613 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3614 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3615 insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
3616 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3617 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3618 ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
3619 vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
3620 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3621 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3622 reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
3623 size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);
3624
3625 /* Load/store exclusive. */
3626 if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
3627 {
3628 if (record_debug)
3629 debug_printf ("Process record: load/store exclusive\n");
3630
3631 if (ld_flag)
3632 {
3633 record_buf[0] = reg_rt;
3634 aarch64_insn_r->reg_rec_count = 1;
3635 if (insn_bit21)
3636 {
3637 record_buf[1] = reg_rt2;
3638 aarch64_insn_r->reg_rec_count = 2;
3639 }
3640 }
3641 else
3642 {
3643 if (insn_bit21)
3644 datasize = (8 << size_bits) * 2;
3645 else
3646 datasize = (8 << size_bits);
3647 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3648 &address);
3649 record_buf_mem[0] = datasize / 8;
3650 record_buf_mem[1] = address;
3651 aarch64_insn_r->mem_rec_count = 1;
3652 if (!insn_bit23)
3653 {
3654 /* Save register rs. */
3655 record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
3656 aarch64_insn_r->reg_rec_count = 1;
3657 }
3658 }
3659 }
3660 /* Decoding of load register (literal) instructions. */
3661 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
3662 {
3663 if (record_debug)
3664 debug_printf ("Process record: load register (literal)\n");
3665 if (vector_flag)
3666 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3667 else
3668 record_buf[0] = reg_rt;
3669 aarch64_insn_r->reg_rec_count = 1;
3670 }
3671 /* Decoding of all types of load/store pair instructions. */
3672 else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
3673 {
3674 if (record_debug)
3675 debug_printf ("Process record: load/store pair\n");
3676
3677 if (ld_flag)
3678 {
3679 if (vector_flag)
3680 {
3681 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3682 record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
3683 }
3684 else
3685 {
3686 record_buf[0] = reg_rt;
3687 record_buf[1] = reg_rt2;
3688 }
3689 aarch64_insn_r->reg_rec_count = 2;
3690 }
3691 else
3692 {
3693 uint16_t imm7_off;
3694 imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
3695 if (!vector_flag)
3696 size_bits = size_bits >> 1;
3697 datasize = 8 << (2 + size_bits);
3698 offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
3699 offset = offset << (2 + size_bits);
3700 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3701 &address);
3702 if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
3703 {
3704 if (imm7_off & 0x40)
3705 address = address - offset;
3706 else
3707 address = address + offset;
3708 }
3709
3710 record_buf_mem[0] = datasize / 8;
3711 record_buf_mem[1] = address;
3712 record_buf_mem[2] = datasize / 8;
3713 record_buf_mem[3] = address + (datasize / 8);
3714 aarch64_insn_r->mem_rec_count = 2;
3715 }
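/* Bit 23 covers the writeback (pre- and post-index) forms, which
also update the base register Rn. */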
3716 if (bit (aarch64_insn_r->aarch64_insn, 23))
3717 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3718 }
3719 /* Load/store register (unsigned immediate) instructions. */
3720 else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
3721 {
3722 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
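/* opc (bits 22-23) separates plain loads and stores (opc < 2) from
the sign-extending loads and prefetch hints handled below. */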
3723 if (!(opc >> 1))
3724 {
3725 if (opc & 0x01)
3726 ld_flag = 0x01;
3727 else
3728 ld_flag = 0x0;
3729 }
3730 else
3731 {
3732 if (size_bits == 0x3 && vector_flag == 0x0 && opc == 0x2)
3733 {
3734 /* PRFM (immediate) */
3735 return AARCH64_RECORD_SUCCESS;
3736 }
3737 else if (size_bits == 0x2 && vector_flag == 0x0 && opc == 0x2)
3738 {
3739 /* LDRSW (immediate) */
3740 ld_flag = 0x1;
3741 }
3742 else
3743 {
3744 if (opc & 0x01)
3745 ld_flag = 0x01;
3746 else
3747 ld_flag = 0x0;
3748 }
3749 }
3750
3751 if (record_debug)
3752 {
3753 debug_printf ("Process record: load/store (unsigned immediate):"
3754 " size %x V %d opc %x\n", size_bits, vector_flag,
3755 opc);
3756 }
3757
3758 if (!ld_flag)
3759 {
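/* The 12-bit unsigned immediate is scaled by the access size and
added to the base register to form the store address. */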
3760 offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
3761 datasize = 8 << size_bits;
3762 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3763 &address);
3764 offset = offset << size_bits;
3765 address = address + offset;
3766
3767 record_buf_mem[0] = datasize >> 3;
3768 record_buf_mem[1] = address;
3769 aarch64_insn_r->mem_rec_count = 1;
3770 }
3771 else
3772 {
3773 if (vector_flag)
3774 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3775 else
3776 record_buf[0] = reg_rt;
3777 aarch64_insn_r->reg_rec_count = 1;
3778 }
3779 }
3780 /* Load/store register (register offset) instructions. */
3781 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3782 && insn_bits10_11 == 0x02 && insn_bit21)
3783 {
3784 if (record_debug)
3785 debug_printf ("Process record: load/store (register offset)\n");
3786 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3787 if (!(opc >> 1))
3788 if (opc & 0x01)
3789 ld_flag = 0x01;
3790 else
3791 ld_flag = 0x0;
3792 else
3793 if (size_bits != 0x03)
3794 ld_flag = 0x01;
3795 else
3796 return AARCH64_RECORD_UNKNOWN;
3797
3798 if (!ld_flag)
3799 {
3800 ULONGEST reg_rm_val;
3801
3802 regcache_raw_read_unsigned (aarch64_insn_r->regcache,
3803 bits (aarch64_insn_r->aarch64_insn, 16, 20), &reg_rm_val);
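/* Bit 12 is the shift (S) flag: when set, the index register Rm is
scaled by the access size before being added to the base. */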
3804 if (bit (aarch64_insn_r->aarch64_insn, 12))
3805 offset = reg_rm_val << size_bits;
3806 else
3807 offset = reg_rm_val;
3808 datasize = 8 << size_bits;
3809 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3810 &address);
3811 address = address + offset;
3812 record_buf_mem[0] = datasize >> 3;
3813 record_buf_mem[1] = address;
3814 aarch64_insn_r->mem_rec_count = 1;
3815 }
3816 else
3817 {
3818 if (vector_flag)
3819 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3820 else
3821 record_buf[0] = reg_rt;
3822 aarch64_insn_r->reg_rec_count = 1;
3823 }
3824 }
3825 /* Load/store register (immediate and unprivileged) instructions. */
3826 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3827 && !insn_bit21)
3828 {
3829 if (record_debug)
3830 {
3831 debug_printf ("Process record: load/store "
3832 "(immediate and unprivileged)\n");
3833 }
3834 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3835 if (!(opc >> 1))
3836 if (opc & 0x01)
3837 ld_flag = 0x01;
3838 else
3839 ld_flag = 0x0;
3840 else
3841 if (size_bits != 0x03)
3842 ld_flag = 0x01;
3843 else
3844 return AARCH64_RECORD_UNKNOWN;
3845
3846 if (!ld_flag)
3847 {
3848 uint16_t imm9_off;
3849 imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
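/* imm9 is a signed 9-bit offset with the sign in bit 8; convert from
two's complement to a magnitude, as for imm7 above. */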
3850 offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
3851 datasize = 8 << size_bits;
3852 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3853 &address);
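/* bits 10-11 == 0x01 is the post-index form, which accesses memory
at the unmodified base; every other form applies the offset first. */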
3854 if (insn_bits10_11 != 0x01)
3855 {
3856 if (imm9_off & 0x0100)
3857 address = address - offset;
3858 else
3859 address = address + offset;
3860 }
3861 record_buf_mem[0] = datasize >> 3;
3862 record_buf_mem[1] = address;
3863 aarch64_insn_r->mem_rec_count = 1;
3864 }
3865 else
3866 {
3867 if (vector_flag)
3868 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3869 else
3870 record_buf[0] = reg_rt;
3871 aarch64_insn_r->reg_rec_count = 1;
3872 }
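/* The post-index (0x01) and pre-index (0x03) forms also write the
updated address back to the base register Rn. */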
3873 if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
3874 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3875 }
3876 /* Advanced SIMD load/store instructions. */
3877 else
3878 return aarch64_record_asimd_load_store (aarch64_insn_r);
3879
3880 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3881 record_buf_mem);
3882 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3883 record_buf);
3884 return AARCH64_RECORD_SUCCESS;
3885 }
3886
3887 /* Record handler for data processing SIMD and floating point instructions. */
3888
3889 static unsigned int
3890 aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
3891 {
3892 uint8_t insn_bit21, opcode, rmode, reg_rd;
3893 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
3894 uint8_t insn_bits11_14;
3895 uint32_t record_buf[2];
3896
3897 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3898 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3899 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3900 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3901 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
3902 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
3903 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
3904 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3905 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3906
3907 if (record_debug)
3908 debug_printf ("Process record: data processing SIMD/FP: ");
3909
3910 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
3911 {
3912 /* Floating point - fixed point conversion instructions. */
3913 if (!insn_bit21)
3914 {
3915 if (record_debug)
3916 debug_printf ("FP - fixed point conversion");
3917
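/* FCVTZS/FCVTZU (opcode 00x with rmode 11) write a general purpose
register; the SCVTF/UCVTF direction writes a SIMD&FP register. */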
3918 if ((opcode >> 1) == 0x0 && rmode == 0x03)
3919 record_buf[0] = reg_rd;
3920 else
3921 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3922 }
3923 /* Floating point - conditional compare instructions. */
3924 else if (insn_bits10_11 == 0x01)
3925 {
3926 if (record_debug)
3927 debug_printf ("FP - conditional compare");
3928
3929 record_buf[0] = AARCH64_CPSR_REGNUM;
3930 }
3931 /* Floating point - data processing (2-source) and
3932 conditional select instructions. */
3933 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
3934 {
3935 if (record_debug)
3936 debug_printf ("FP - DP (2-source)");
3937
3938 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3939 }
3940 else if (insn_bits10_11 == 0x00)
3941 {
3942 /* Floating point - immediate instructions. */
3943 if ((insn_bits12_15 & 0x01) == 0x01
3944 || (insn_bits12_15 & 0x07) == 0x04)
3945 {
3946 if (record_debug)
3947 debug_printf ("FP - immediate");
3948 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3949 }
3950 /* Floating point - compare instructions. */
3951 else if ((insn_bits12_15 & 0x03) == 0x02)
3952 {
3953 if (record_debug)
3954 debug_printf ("FP - compare");
3955 record_buf[0] = AARCH64_CPSR_REGNUM;
3956 }
3957 /* Floating point - integer conversions instructions. */
3958 else if (insn_bits12_15 == 0x00)
3959 {
3960 /* Convert float to integer instruction. */
3961 if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
3962 {
3963 if (record_debug)
3964 debug_printf ("float to int conversion");
3965
3966 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3967 }
3968 /* Convert integer to float instruction. */
3969 else if ((opcode >> 1) == 0x01 && !rmode)
3970 {
3971 if (record_debug)
3972 debug_printf ("int to float conversion");
3973
3974 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3975 }
3976 /* Move float to integer instruction. */
3977 else if ((opcode >> 1) == 0x03)
3978 {
3979 if (record_debug)
3980 debug_printf ("move float to int");
3981
3982 if (!(opcode & 0x01))
3983 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3984 else
3985 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3986 }
3987 else
3988 return AARCH64_RECORD_UNKNOWN;
3989 }
3990 else
3991 return AARCH64_RECORD_UNKNOWN;
3992 }
3993 else
3994 return AARCH64_RECORD_UNKNOWN;
3995 }
3996 else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
3997 {
3998 if (record_debug)
3999 debug_printf ("SIMD copy");
4000
4001 /* Advanced SIMD copy instructions. */
4002 if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
4003 && !bit (aarch64_insn_r->aarch64_insn, 15)
4004 && bit (aarch64_insn_r->aarch64_insn, 10))
4005 {
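/* SMOV and UMOV (bits 11-14 = 0x05 or 0x07) move a vector element to
a general register; all other copy forms write a vector register. */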
4006 if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
4007 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4008 else
4009 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4010 }
4011 else
4012 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4013 }
4014 /* All remaining floating point or advanced SIMD instructions. */
4015 else
4016 {
4017 if (record_debug)
4018 debug_printf ("all remaining SIMD/FP");
4019
4020 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4021 }
4022
4023 if (record_debug)
4024 debug_printf ("\n");
4025
4026 aarch64_insn_r->reg_rec_count++;
4027 gdb_assert (aarch64_insn_r->reg_rec_count == 1);
4028 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4029 record_buf);
4030 return AARCH64_RECORD_SUCCESS;
4031 }
4032
4033 /* Decode the instruction type and invoke the matching record handler. */
4034
4035 static unsigned int
4036 aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
4037 {
4038 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
4039
4040 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
4041 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
4042 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
4043 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
4044
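/* Dispatch on bits 25-28, which select the major groups of the A64
encoding tree; each group has its own record handler. */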
4045 /* Data processing - immediate instructions. */
4046 if (!ins_bit26 && !ins_bit27 && ins_bit28)
4047 return aarch64_record_data_proc_imm (aarch64_insn_r);
4048
4049 /* Branch, exception generation and system instructions. */
4050 if (ins_bit26 && !ins_bit27 && ins_bit28)
4051 return aarch64_record_branch_except_sys (aarch64_insn_r);
4052
4053 /* Load and store instructions. */
4054 if (!ins_bit25 && ins_bit27)
4055 return aarch64_record_load_store (aarch64_insn_r);
4056
4057 /* Data processing - register instructions. */
4058 if (ins_bit25 && !ins_bit26 && ins_bit27)
4059 return aarch64_record_data_proc_reg (aarch64_insn_r);
4060
4061 /* Data processing - SIMD and floating point instructions. */
4062 if (ins_bit25 && ins_bit26 && ins_bit27)
4063 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
4064
4065 return AARCH64_RECORD_UNSUPPORTED;
4066 }
4067
4068 /* Free the register and memory record buffers allocated for RECORD. */
4069
4070 static void
4071 deallocate_reg_mem (insn_decode_record *record)
4072 {
4073 xfree (record->aarch64_regs);
4074 xfree (record->aarch64_mems);
4075 }
4076
4077 #if GDB_SELF_TEST
4078 namespace selftests {
4079
4080 static void
4081 aarch64_process_record_test (void)
4082 {
4083 struct gdbarch_info info;
4084 uint32_t ret;
4085
4086 gdbarch_info_init (&info);
4087 info.bfd_arch_info = bfd_scan_arch ("aarch64");
4088
4089 struct gdbarch *gdbarch = gdbarch_find_by_info (info);
4090 SELF_CHECK (gdbarch != NULL);
4091
4092 insn_decode_record aarch64_record;
4093
4094 memset (&aarch64_record, 0, sizeof (insn_decode_record));
4095 aarch64_record.regcache = NULL;
4096 aarch64_record.this_addr = 0;
4097 aarch64_record.gdbarch = gdbarch;
4098
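/* A prefetch hint writes neither a register nor memory, so both
record counts must remain zero. */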
4099 /* 20 00 80 f9 prfm pldl1keep, [x1] */
4100 aarch64_record.aarch64_insn = 0xf9800020;
4101 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4102 SELF_CHECK (ret == AARCH64_RECORD_SUCCESS);
4103 SELF_CHECK (aarch64_record.reg_rec_count == 0);
4104 SELF_CHECK (aarch64_record.mem_rec_count == 0);
4105
4106 deallocate_reg_mem (&aarch64_record);
4107 }
4108
4109 } // namespace selftests
4110 #endif /* GDB_SELF_TEST */
4111
4112 /* Parse the current instruction and record the values of the registers
4113 and memory that it will change to record_arch_list. Return -1 if
4114 something goes wrong. */
4115
4116 int
4117 aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
4118 CORE_ADDR insn_addr)
4119 {
4120 uint32_t rec_no = 0;
4121 uint8_t insn_size = 4;
4122 uint32_t ret = 0;
4123 gdb_byte buf[insn_size];
4124 insn_decode_record aarch64_record;
4125
4126 memset (&buf[0], 0, insn_size);
4127 memset (&aarch64_record, 0, sizeof (insn_decode_record));
4128 target_read_memory (insn_addr, &buf[0], insn_size);
4129 aarch64_record.aarch64_insn
4130 = (uint32_t) extract_unsigned_integer (&buf[0],
4131 insn_size,
4132 gdbarch_byte_order (gdbarch));
4133 aarch64_record.regcache = regcache;
4134 aarch64_record.this_addr = insn_addr;
4135 aarch64_record.gdbarch = gdbarch;
4136
4137 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4138 if (ret == AARCH64_RECORD_UNSUPPORTED)
4139 {
4140 printf_unfiltered (_("Process record does not support instruction "
4141 "0x%0x at address %s.\n"),
4142 aarch64_record.aarch64_insn,
4143 paddress (gdbarch, insn_addr));
4144 ret = -1;
4145 }
4146
4147 if (ret == 0)
4148 {
4149 /* Record registers. */
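/* Every instruction advances the PC, so it is recorded unconditionally. */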
4150 record_full_arch_list_add_reg (aarch64_record.regcache,
4151 AARCH64_PC_REGNUM);
4152 /* Always record register CPSR. */
4153 record_full_arch_list_add_reg (aarch64_record.regcache,
4154 AARCH64_CPSR_REGNUM);
4155 if (aarch64_record.aarch64_regs)
4156 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
4157 if (record_full_arch_list_add_reg (aarch64_record.regcache,
4158 aarch64_record.aarch64_regs[rec_no]))
4159 ret = -1;
4160
4161 /* Record memories. */
4162 if (aarch64_record.aarch64_mems)
4163 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
4164 if (record_full_arch_list_add_mem
4165 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
4166 aarch64_record.aarch64_mems[rec_no].len))
4167 ret = -1;
4168
4169 if (record_full_arch_list_add_end ())
4170 ret = -1;
4171 }
4172
4173 deallocate_reg_mem (&aarch64_record);
4174 return ret;
4175 }