AArch64 AAPCS: Empty structs have non-zero size in C++
[deliverable/binutils-gdb.git] / gdb / aarch64-tdep.c
1 /* Common target dependent code for GDB on AArch64 systems.
2
3 Copyright (C) 2009-2019 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21 #include "defs.h"
22
23 #include "frame.h"
24 #include "inferior.h"
25 #include "gdbcmd.h"
26 #include "gdbcore.h"
27 #include "dis-asm.h"
28 #include "regcache.h"
29 #include "reggroups.h"
30 #include "value.h"
31 #include "arch-utils.h"
32 #include "osabi.h"
33 #include "frame-unwind.h"
34 #include "frame-base.h"
35 #include "trad-frame.h"
36 #include "objfiles.h"
37 #include "dwarf2-frame.h"
38 #include "gdbtypes.h"
39 #include "prologue-value.h"
40 #include "target-descriptions.h"
41 #include "user-regs.h"
42 #include "language.h"
43 #include "infcall.h"
44 #include "ax.h"
45 #include "ax-gdb.h"
46 #include "selftest.h"
47
48 #include "aarch64-tdep.h"
49 #include "aarch64-ravenscar-thread.h"
50
51 #include "elf-bfd.h"
52 #include "elf/aarch64.h"
53
54 #include "vec.h"
55
56 #include "record.h"
57 #include "record-full.h"
58 #include "arch/aarch64-insn.h"
59
60 #include "opcode/aarch64.h"
61 #include <algorithm>
62
63 #define submask(x) ((1L << ((x) + 1)) - 1)
64 #define bit(obj,st) (((obj) >> (st)) & 1)
65 #define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
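/* A worked example of these helpers: submask (3) == 0xf, and
   bits (obj, st, fn) extracts the inclusive bit-field st..fn of OBJ,
   e.g. bits (0x3e0, 5, 9) == 0x1f. */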
66
67 /* A Homogeneous Floating-Point or Short-Vector Aggregate may have at most
68 four members. */
69 #define HA_MAX_NUM_FLDS 4
70
71 /* All possible aarch64 target descriptors. */
72 struct target_desc *tdesc_aarch64_list[AARCH64_MAX_SVE_VQ + 1];
73
74 /* The standard register names, and all the valid aliases for them. */
75 static const struct
76 {
77 const char *const name;
78 int regnum;
79 } aarch64_register_aliases[] =
80 {
81 /* 64-bit register names. */
82 {"fp", AARCH64_FP_REGNUM},
83 {"lr", AARCH64_LR_REGNUM},
84 {"sp", AARCH64_SP_REGNUM},
85
86 /* 32-bit register names. */
87 {"w0", AARCH64_X0_REGNUM + 0},
88 {"w1", AARCH64_X0_REGNUM + 1},
89 {"w2", AARCH64_X0_REGNUM + 2},
90 {"w3", AARCH64_X0_REGNUM + 3},
91 {"w4", AARCH64_X0_REGNUM + 4},
92 {"w5", AARCH64_X0_REGNUM + 5},
93 {"w6", AARCH64_X0_REGNUM + 6},
94 {"w7", AARCH64_X0_REGNUM + 7},
95 {"w8", AARCH64_X0_REGNUM + 8},
96 {"w9", AARCH64_X0_REGNUM + 9},
97 {"w10", AARCH64_X0_REGNUM + 10},
98 {"w11", AARCH64_X0_REGNUM + 11},
99 {"w12", AARCH64_X0_REGNUM + 12},
100 {"w13", AARCH64_X0_REGNUM + 13},
101 {"w14", AARCH64_X0_REGNUM + 14},
102 {"w15", AARCH64_X0_REGNUM + 15},
103 {"w16", AARCH64_X0_REGNUM + 16},
104 {"w17", AARCH64_X0_REGNUM + 17},
105 {"w18", AARCH64_X0_REGNUM + 18},
106 {"w19", AARCH64_X0_REGNUM + 19},
107 {"w20", AARCH64_X0_REGNUM + 20},
108 {"w21", AARCH64_X0_REGNUM + 21},
109 {"w22", AARCH64_X0_REGNUM + 22},
110 {"w23", AARCH64_X0_REGNUM + 23},
111 {"w24", AARCH64_X0_REGNUM + 24},
112 {"w25", AARCH64_X0_REGNUM + 25},
113 {"w26", AARCH64_X0_REGNUM + 26},
114 {"w27", AARCH64_X0_REGNUM + 27},
115 {"w28", AARCH64_X0_REGNUM + 28},
116 {"w29", AARCH64_X0_REGNUM + 29},
117 {"w30", AARCH64_X0_REGNUM + 30},
118
119 /* Specials. */
120 {"ip0", AARCH64_X0_REGNUM + 16},
121 {"ip1", AARCH64_X0_REGNUM + 17}
122 };
123
124 /* The required core 'R' registers. */
125 static const char *const aarch64_r_register_names[] =
126 {
127 /* These registers must appear in consecutive RAW register number
128 order and they must begin with AARCH64_X0_REGNUM! */
129 "x0", "x1", "x2", "x3",
130 "x4", "x5", "x6", "x7",
131 "x8", "x9", "x10", "x11",
132 "x12", "x13", "x14", "x15",
133 "x16", "x17", "x18", "x19",
134 "x20", "x21", "x22", "x23",
135 "x24", "x25", "x26", "x27",
136 "x28", "x29", "x30", "sp",
137 "pc", "cpsr"
138 };
139
140 /* The FP/SIMD 'V' registers. */
141 static const char *const aarch64_v_register_names[] =
142 {
143 /* These registers must appear in consecutive RAW register number
144 order and they must begin with AARCH64_V0_REGNUM! */
145 "v0", "v1", "v2", "v3",
146 "v4", "v5", "v6", "v7",
147 "v8", "v9", "v10", "v11",
148 "v12", "v13", "v14", "v15",
149 "v16", "v17", "v18", "v19",
150 "v20", "v21", "v22", "v23",
151 "v24", "v25", "v26", "v27",
152 "v28", "v29", "v30", "v31",
153 "fpsr",
154 "fpcr"
155 };
156
157 /* The SVE 'Z' and 'P' registers. */
158 static const char *const aarch64_sve_register_names[] =
159 {
160 /* These registers must appear in consecutive RAW register number
161 order and they must begin with AARCH64_SVE_Z0_REGNUM! */
162 "z0", "z1", "z2", "z3",
163 "z4", "z5", "z6", "z7",
164 "z8", "z9", "z10", "z11",
165 "z12", "z13", "z14", "z15",
166 "z16", "z17", "z18", "z19",
167 "z20", "z21", "z22", "z23",
168 "z24", "z25", "z26", "z27",
169 "z28", "z29", "z30", "z31",
170 "fpsr", "fpcr",
171 "p0", "p1", "p2", "p3",
172 "p4", "p5", "p6", "p7",
173 "p8", "p9", "p10", "p11",
174 "p12", "p13", "p14", "p15",
175 "ffr", "vg"
176 };
177
178 /* AArch64 prologue cache structure. */
179 struct aarch64_prologue_cache
180 {
181 /* The program counter at the start of the function. It is used to
182 identify this frame as a prologue frame. */
183 CORE_ADDR func;
184
185 /* The program counter at the time this frame was created; i.e. where
186 this function was called from. It is used to identify this frame as a
187 stub frame. */
188 CORE_ADDR prev_pc;
189
190 /* The stack pointer at the time this frame was created; i.e. the
191 caller's stack pointer when this function was called. It is used
192 to identify this frame. */
193 CORE_ADDR prev_sp;
194
195 /* Is the target available to read from? */
196 int available_p;
197
198 /* The frame base for this frame is just prev_sp - frame size.
199 FRAMESIZE is the distance from the frame pointer to the
200 initial stack pointer. */
201 int framesize;
202
203 /* The register used to hold the frame pointer for this frame. */
204 int framereg;
205
206 /* Saved register offsets. */
207 struct trad_frame_saved_reg *saved_regs;
208 };
209
210 static void
211 show_aarch64_debug (struct ui_file *file, int from_tty,
212 struct cmd_list_element *c, const char *value)
213 {
214 fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
215 }
216
217 namespace {
218
219 /* Abstract instruction reader. */
220
221 class abstract_instruction_reader
222 {
223 public:
224 /* Read in one instruction. */
225 virtual ULONGEST read (CORE_ADDR memaddr, int len,
226 enum bfd_endian byte_order) = 0;
227 };
228
229 /* Instruction reader from real target. */
230
231 class instruction_reader : public abstract_instruction_reader
232 {
233 public:
234 ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
235 override
236 {
237 return read_code_unsigned_integer (memaddr, len, byte_order);
238 }
239 };
240
241 } // namespace
242
243 /* Analyze a prologue, looking for a recognizable stack frame
244 and frame pointer. Scan until we encounter a store that could
245 clobber the stack frame unexpectedly, or an unknown instruction. */
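/* For example, given the common frame-setup sequence

     stp x29, x30, [sp, #-272]!
     mov x29, sp

   the loop below records x29/x30 as saved at offsets -272/-264 from
   the incoming SP and leaves CACHE with framereg == x29 (FP) and
   framesize == 272; see the self tests further down for this exact
   case. */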
246
247 static CORE_ADDR
248 aarch64_analyze_prologue (struct gdbarch *gdbarch,
249 CORE_ADDR start, CORE_ADDR limit,
250 struct aarch64_prologue_cache *cache,
251 abstract_instruction_reader& reader)
252 {
253 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
254 int i;
255 /* Track X registers and D registers in prologue. */
256 pv_t regs[AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT];
257
258 for (i = 0; i < AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT; i++)
259 regs[i] = pv_register (i, 0);
260 pv_area stack (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));
261
262 for (; start < limit; start += 4)
263 {
264 uint32_t insn;
265 aarch64_inst inst;
266
267 insn = reader.read (start, 4, byte_order_for_code);
268
269 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
270 break;
271
272 if (inst.opcode->iclass == addsub_imm
273 && (inst.opcode->op == OP_ADD
274 || strcmp ("sub", inst.opcode->name) == 0))
275 {
276 unsigned rd = inst.operands[0].reg.regno;
277 unsigned rn = inst.operands[1].reg.regno;
278
279 gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
280 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
281 gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
282 gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM);
283
284 if (inst.opcode->op == OP_ADD)
285 {
286 regs[rd] = pv_add_constant (regs[rn],
287 inst.operands[2].imm.value);
288 }
289 else
290 {
291 regs[rd] = pv_add_constant (regs[rn],
292 -inst.operands[2].imm.value);
293 }
294 }
295 else if (inst.opcode->iclass == pcreladdr
296 && inst.operands[1].type == AARCH64_OPND_ADDR_ADRP)
297 {
298 gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
299 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
300
301 regs[inst.operands[0].reg.regno] = pv_unknown ();
302 }
303 else if (inst.opcode->iclass == branch_imm)
304 {
305 /* Stop analysis on branch. */
306 break;
307 }
308 else if (inst.opcode->iclass == condbranch)
309 {
310 /* Stop analysis on branch. */
311 break;
312 }
313 else if (inst.opcode->iclass == branch_reg)
314 {
315 /* Stop analysis on branch. */
316 break;
317 }
318 else if (inst.opcode->iclass == compbranch)
319 {
320 /* Stop analysis on branch. */
321 break;
322 }
323 else if (inst.opcode->op == OP_MOVZ)
324 {
325 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
326 regs[inst.operands[0].reg.regno] = pv_unknown ();
327 }
328 else if (inst.opcode->iclass == log_shift
329 && strcmp (inst.opcode->name, "orr") == 0)
330 {
331 unsigned rd = inst.operands[0].reg.regno;
332 unsigned rn = inst.operands[1].reg.regno;
333 unsigned rm = inst.operands[2].reg.regno;
334
335 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
336 gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn);
337 gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT);
338
339 if (inst.operands[2].shifter.amount == 0
340 && rn == AARCH64_SP_REGNUM)
341 regs[rd] = regs[rm];
342 else
343 {
344 if (aarch64_debug)
345 {
346 debug_printf ("aarch64: prologue analysis gave up "
347 "addr=%s opcode=0x%x (orr x register)\n",
348 core_addr_to_string_nz (start), insn);
349 }
350 break;
351 }
352 }
353 else if (inst.opcode->op == OP_STUR)
354 {
355 unsigned rt = inst.operands[0].reg.regno;
356 unsigned rn = inst.operands[1].addr.base_regno;
357 int is64
358 = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);
359
360 gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
361 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
362 gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
363 gdb_assert (!inst.operands[1].addr.offset.is_reg);
364
365 stack.store (pv_add_constant (regs[rn],
366 inst.operands[1].addr.offset.imm),
367 is64 ? 8 : 4, regs[rt]);
368 }
369 else if ((inst.opcode->iclass == ldstpair_off
370 || (inst.opcode->iclass == ldstpair_indexed
371 && inst.operands[2].addr.preind))
372 && strcmp ("stp", inst.opcode->name) == 0)
373 {
374 /* STP with addressing mode Pre-indexed and Base register. */
375 unsigned rt1;
376 unsigned rt2;
377 unsigned rn = inst.operands[2].addr.base_regno;
378 int32_t imm = inst.operands[2].addr.offset.imm;
379
380 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
381 || inst.operands[0].type == AARCH64_OPND_Ft);
382 gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2
383 || inst.operands[1].type == AARCH64_OPND_Ft2);
384 gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7);
385 gdb_assert (!inst.operands[2].addr.offset.is_reg);
386
387 /* If recording this store would invalidate the store area
388 (perhaps because rn is not known) then we should abandon
389 further prologue analysis. */
390 if (stack.store_would_trash (pv_add_constant (regs[rn], imm)))
391 break;
392
393 if (stack.store_would_trash (pv_add_constant (regs[rn], imm + 8)))
394 break;
395
396 rt1 = inst.operands[0].reg.regno;
397 rt2 = inst.operands[1].reg.regno;
398 if (inst.operands[0].type == AARCH64_OPND_Ft)
399 {
400 /* Only the bottom 64 bits of each V register (the D register)
401 need to be preserved. */
402 gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
403 rt1 += AARCH64_X_REGISTER_COUNT;
404 rt2 += AARCH64_X_REGISTER_COUNT;
405 }
406
407 stack.store (pv_add_constant (regs[rn], imm), 8,
408 regs[rt1]);
409 stack.store (pv_add_constant (regs[rn], imm + 8), 8,
410 regs[rt2]);
411
412 if (inst.operands[2].addr.writeback)
413 regs[rn] = pv_add_constant (regs[rn], imm);
414
415 }
416 else if ((inst.opcode->iclass == ldst_imm9 /* Signed immediate. */
417 || (inst.opcode->iclass == ldst_pos /* Unsigned immediate. */
418 && (inst.opcode->op == OP_STR_POS
419 || inst.opcode->op == OP_STRF_POS)))
420 && inst.operands[1].addr.base_regno == AARCH64_SP_REGNUM
421 && strcmp ("str", inst.opcode->name) == 0)
422 {
423 /* STR (immediate) */
424 unsigned int rt = inst.operands[0].reg.regno;
425 int32_t imm = inst.operands[1].addr.offset.imm;
426 unsigned int rn = inst.operands[1].addr.base_regno;
427 bool is64
428 = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);
429 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
430 || inst.operands[0].type == AARCH64_OPND_Ft);
431
432 if (inst.operands[0].type == AARCH64_OPND_Ft)
433 {
434 /* Only the bottom 64 bits of each V register (the D register)
435 need to be preserved. */
436 gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
437 rt += AARCH64_X_REGISTER_COUNT;
438 }
439
440 stack.store (pv_add_constant (regs[rn], imm),
441 is64 ? 8 : 4, regs[rt]);
442 if (inst.operands[1].addr.writeback)
443 regs[rn] = pv_add_constant (regs[rn], imm);
444 }
445 else if (inst.opcode->iclass == testbranch)
446 {
447 /* Stop analysis on branch. */
448 break;
449 }
450 else
451 {
452 if (aarch64_debug)
453 {
454 debug_printf ("aarch64: prologue analysis gave up addr=%s"
455 " opcode=0x%x\n",
456 core_addr_to_string_nz (start), insn);
457 }
458 break;
459 }
460 }
461
462 if (cache == NULL)
463 return start;
464
465 if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
466 {
467 /* Frame pointer is fp. Frame size is constant. */
468 cache->framereg = AARCH64_FP_REGNUM;
469 cache->framesize = -regs[AARCH64_FP_REGNUM].k;
470 }
471 else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
472 {
473 /* Try the stack pointer. */
474 cache->framesize = -regs[AARCH64_SP_REGNUM].k;
475 cache->framereg = AARCH64_SP_REGNUM;
476 }
477 else
478 {
479 /* We're just out of luck. We don't know where the frame is. */
480 cache->framereg = -1;
481 cache->framesize = 0;
482 }
483
484 for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
485 {
486 CORE_ADDR offset;
487
488 if (stack.find_reg (gdbarch, i, &offset))
489 cache->saved_regs[i].addr = offset;
490 }
491
492 for (i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
493 {
494 int regnum = gdbarch_num_regs (gdbarch);
495 CORE_ADDR offset;
496
497 if (stack.find_reg (gdbarch, i + AARCH64_X_REGISTER_COUNT,
498 &offset))
499 cache->saved_regs[i + regnum + AARCH64_D0_REGNUM].addr = offset;
500 }
501
502 return start;
503 }
504
505 static CORE_ADDR
506 aarch64_analyze_prologue (struct gdbarch *gdbarch,
507 CORE_ADDR start, CORE_ADDR limit,
508 struct aarch64_prologue_cache *cache)
509 {
510 instruction_reader reader;
511
512 return aarch64_analyze_prologue (gdbarch, start, limit, cache,
513 reader);
514 }
515
516 #if GDB_SELF_TEST
517
518 namespace selftests {
519
520 /* Instruction reader from manually cooked instruction sequences. */
521
522 class instruction_reader_test : public abstract_instruction_reader
523 {
524 public:
525 template<size_t SIZE>
526 explicit instruction_reader_test (const uint32_t (&insns)[SIZE])
527 : m_insns (insns), m_insns_size (SIZE)
528 {}
529
530 ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
531 override
532 {
533 SELF_CHECK (len == 4);
534 SELF_CHECK (memaddr % 4 == 0);
535 SELF_CHECK (memaddr / 4 < m_insns_size);
536
537 return m_insns[memaddr / 4];
538 }
539
540 private:
541 const uint32_t *m_insns;
542 size_t m_insns_size;
543 };
544
545 static void
546 aarch64_analyze_prologue_test (void)
547 {
548 struct gdbarch_info info;
549
550 gdbarch_info_init (&info);
551 info.bfd_arch_info = bfd_scan_arch ("aarch64");
552
553 struct gdbarch *gdbarch = gdbarch_find_by_info (info);
554 SELF_CHECK (gdbarch != NULL);
555
556 /* Test the simple prologue in which frame pointer is used. */
557 {
558 struct aarch64_prologue_cache cache;
559 cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);
560
561 static const uint32_t insns[] = {
562 0xa9af7bfd, /* stp x29, x30, [sp,#-272]! */
563 0x910003fd, /* mov x29, sp */
564 0x97ffffe6, /* bl 0x400580 */
565 };
566 instruction_reader_test reader (insns);
567
568 CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
569 SELF_CHECK (end == 4 * 2);
570
571 SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
572 SELF_CHECK (cache.framesize == 272);
573
574 for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
575 {
576 if (i == AARCH64_FP_REGNUM)
577 SELF_CHECK (cache.saved_regs[i].addr == -272);
578 else if (i == AARCH64_LR_REGNUM)
579 SELF_CHECK (cache.saved_regs[i].addr == -264);
580 else
581 SELF_CHECK (cache.saved_regs[i].addr == -1);
582 }
583
584 for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
585 {
586 int regnum = gdbarch_num_regs (gdbarch);
587
588 SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
589 == -1);
590 }
591 }
592
593 /* Test a prologue in which STR is used and frame pointer is not
594 used. */
595 {
596 struct aarch64_prologue_cache cache;
597 cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);
598
599 static const uint32_t insns[] = {
600 0xf81d0ff3, /* str x19, [sp, #-48]! */
601 0xb9002fe0, /* str w0, [sp, #44] */
602 0xf90013e1, /* str x1, [sp, #32]*/
603 0xfd000fe0, /* str d0, [sp, #24] */
604 0xaa0203f3, /* mov x19, x2 */
605 0xf94013e0, /* ldr x0, [sp, #32] */
606 };
607 instruction_reader_test reader (insns);
608
609 CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
610
611 SELF_CHECK (end == 4 * 5);
612
613 SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
614 SELF_CHECK (cache.framesize == 48);
615
616 for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
617 {
618 if (i == 1)
619 SELF_CHECK (cache.saved_regs[i].addr == -16);
620 else if (i == 19)
621 SELF_CHECK (cache.saved_regs[i].addr == -48);
622 else
623 SELF_CHECK (cache.saved_regs[i].addr == -1);
624 }
625
626 for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
627 {
628 int regnum = gdbarch_num_regs (gdbarch);
629
630 if (i == 0)
631 SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
632 == -24);
633 else
634 SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
635 == -1);
636 }
637 }
638 }
639 } // namespace selftests
640 #endif /* GDB_SELF_TEST */
641
642 /* Implement the "skip_prologue" gdbarch method. */
643
644 static CORE_ADDR
645 aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
646 {
647 CORE_ADDR func_addr, limit_pc;
648
649 /* See if we can determine the end of the prologue via the symbol
650 table. If so, then return either PC, or the PC after the
651 prologue, whichever is greater. */
652 if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
653 {
654 CORE_ADDR post_prologue_pc
655 = skip_prologue_using_sal (gdbarch, func_addr);
656
657 if (post_prologue_pc != 0)
658 return std::max (pc, post_prologue_pc);
659 }
660
661 /* Can't determine prologue from the symbol table, need to examine
662 instructions. */
663
664 /* Find an upper limit on the function prologue using the debug
665 information. If the debug information could not be used to
666 provide that bound, then use an arbitrary large number as the
667 upper bound. */
668 limit_pc = skip_prologue_using_sal (gdbarch, pc);
669 if (limit_pc == 0)
670 limit_pc = pc + 128; /* Magic. */
671
672 /* Try disassembling prologue. */
673 return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
674 }
675
676 /* Scan the function prologue for THIS_FRAME and populate the prologue
677 cache CACHE. */
678
679 static void
680 aarch64_scan_prologue (struct frame_info *this_frame,
681 struct aarch64_prologue_cache *cache)
682 {
683 CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
684 CORE_ADDR prologue_start;
685 CORE_ADDR prologue_end;
686 CORE_ADDR prev_pc = get_frame_pc (this_frame);
687 struct gdbarch *gdbarch = get_frame_arch (this_frame);
688
689 cache->prev_pc = prev_pc;
690
691 /* Assume we do not find a frame. */
692 cache->framereg = -1;
693 cache->framesize = 0;
694
695 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
696 &prologue_end))
697 {
698 struct symtab_and_line sal = find_pc_line (prologue_start, 0);
699
700 if (sal.line == 0)
701 {
702 /* No line info so use the current PC. */
703 prologue_end = prev_pc;
704 }
705 else if (sal.end < prologue_end)
706 {
707 /* The next line begins after the function end. */
708 prologue_end = sal.end;
709 }
710
711 prologue_end = std::min (prologue_end, prev_pc);
712 aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
713 }
714 else
715 {
716 CORE_ADDR frame_loc;
717
718 frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
719 if (frame_loc == 0)
720 return;
721
722 cache->framereg = AARCH64_FP_REGNUM;
723 cache->framesize = 16;
724 cache->saved_regs[29].addr = 0;
725 cache->saved_regs[30].addr = 8;
726 }
727 }
728
729 /* Fill in *CACHE with information about the prologue of *THIS_FRAME. This
730 function may throw an exception if the inferior's registers or memory is
731 not available. */
732
733 static void
734 aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
735 struct aarch64_prologue_cache *cache)
736 {
737 CORE_ADDR unwound_fp;
738 int reg;
739
740 aarch64_scan_prologue (this_frame, cache);
741
742 if (cache->framereg == -1)
743 return;
744
745 unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
746 if (unwound_fp == 0)
747 return;
748
749 cache->prev_sp = unwound_fp + cache->framesize;
750
751 /* Calculate actual addresses of saved registers using offsets
752 determined by aarch64_analyze_prologue. */
753 for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
754 if (trad_frame_addr_p (cache->saved_regs, reg))
755 cache->saved_regs[reg].addr += cache->prev_sp;
756
757 cache->func = get_frame_func (this_frame);
758
759 cache->available_p = 1;
760 }
761
762 /* Allocate and fill in *THIS_CACHE with information about the prologue of
763 *THIS_FRAME. Do not do this if *THIS_CACHE was already allocated.
764 Return a pointer to the current aarch64_prologue_cache in
765 *THIS_CACHE. */
766
767 static struct aarch64_prologue_cache *
768 aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
769 {
770 struct aarch64_prologue_cache *cache;
771
772 if (*this_cache != NULL)
773 return (struct aarch64_prologue_cache *) *this_cache;
774
775 cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
776 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
777 *this_cache = cache;
778
779 TRY
780 {
781 aarch64_make_prologue_cache_1 (this_frame, cache);
782 }
783 CATCH (ex, RETURN_MASK_ERROR)
784 {
785 if (ex.error != NOT_AVAILABLE_ERROR)
786 throw_exception (ex);
787 }
788 END_CATCH
789
790 return cache;
791 }
792
793 /* Implement the "stop_reason" frame_unwind method. */
794
795 static enum unwind_stop_reason
796 aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
797 void **this_cache)
798 {
799 struct aarch64_prologue_cache *cache
800 = aarch64_make_prologue_cache (this_frame, this_cache);
801
802 if (!cache->available_p)
803 return UNWIND_UNAVAILABLE;
804
805 /* Halt the backtrace at "_start". */
806 if (cache->prev_pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
807 return UNWIND_OUTERMOST;
808
809 /* We've hit a wall, stop. */
810 if (cache->prev_sp == 0)
811 return UNWIND_OUTERMOST;
812
813 return UNWIND_NO_REASON;
814 }
815
816 /* Our frame ID for a normal frame is the current function's starting
817 PC and the caller's SP when we were called. */
818
819 static void
820 aarch64_prologue_this_id (struct frame_info *this_frame,
821 void **this_cache, struct frame_id *this_id)
822 {
823 struct aarch64_prologue_cache *cache
824 = aarch64_make_prologue_cache (this_frame, this_cache);
825
826 if (!cache->available_p)
827 *this_id = frame_id_build_unavailable_stack (cache->func);
828 else
829 *this_id = frame_id_build (cache->prev_sp, cache->func);
830 }
831
832 /* Implement the "prev_register" frame_unwind method. */
833
834 static struct value *
835 aarch64_prologue_prev_register (struct frame_info *this_frame,
836 void **this_cache, int prev_regnum)
837 {
838 struct aarch64_prologue_cache *cache
839 = aarch64_make_prologue_cache (this_frame, this_cache);
840
841 /* If we are asked to unwind the PC, then we need to return the LR
842 instead. The prologue may save PC, but it will point into this
843 frame's prologue, not the next frame's resume location. */
844 if (prev_regnum == AARCH64_PC_REGNUM)
845 {
846 CORE_ADDR lr;
847
848 lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
849 return frame_unwind_got_constant (this_frame, prev_regnum, lr);
850 }
851
852 /* SP is generally not saved to the stack, but this frame is
853 identified by the next frame's stack pointer at the time of the
854 call. The value was already reconstructed into PREV_SP. */
855 /*
856 +----------+ ^
857 | saved lr | |
858 +->| saved fp |--+
859 | | |
860 | | | <- Previous SP
861 | +----------+
862 | | saved lr |
863 +--| saved fp |<- FP
864 | |
865 | |<- SP
866 +----------+ */
867 if (prev_regnum == AARCH64_SP_REGNUM)
868 return frame_unwind_got_constant (this_frame, prev_regnum,
869 cache->prev_sp);
870
871 return trad_frame_get_prev_register (this_frame, cache->saved_regs,
872 prev_regnum);
873 }
874
875 /* AArch64 prologue unwinder. */
876 struct frame_unwind aarch64_prologue_unwind =
877 {
878 NORMAL_FRAME,
879 aarch64_prologue_frame_unwind_stop_reason,
880 aarch64_prologue_this_id,
881 aarch64_prologue_prev_register,
882 NULL,
883 default_frame_sniffer
884 };
885
886 /* Allocate and fill in *THIS_CACHE with information about the prologue of
887 *THIS_FRAME. Do not do this if *THIS_CACHE was already allocated.
888 Return a pointer to the current aarch64_prologue_cache in
889 *THIS_CACHE. */
890
891 static struct aarch64_prologue_cache *
892 aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
893 {
894 struct aarch64_prologue_cache *cache;
895
896 if (*this_cache != NULL)
897 return (struct aarch64_prologue_cache *) *this_cache;
898
899 cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
900 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
901 *this_cache = cache;
902
903 TRY
904 {
905 cache->prev_sp = get_frame_register_unsigned (this_frame,
906 AARCH64_SP_REGNUM);
907 cache->prev_pc = get_frame_pc (this_frame);
908 cache->available_p = 1;
909 }
910 CATCH (ex, RETURN_MASK_ERROR)
911 {
912 if (ex.error != NOT_AVAILABLE_ERROR)
913 throw_exception (ex);
914 }
915 END_CATCH
916
917 return cache;
918 }
919
920 /* Implement the "stop_reason" frame_unwind method. */
921
922 static enum unwind_stop_reason
923 aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame,
924 void **this_cache)
925 {
926 struct aarch64_prologue_cache *cache
927 = aarch64_make_stub_cache (this_frame, this_cache);
928
929 if (!cache->available_p)
930 return UNWIND_UNAVAILABLE;
931
932 return UNWIND_NO_REASON;
933 }
934
935 /* Our frame ID for a stub frame is the current SP and LR. */
936
937 static void
938 aarch64_stub_this_id (struct frame_info *this_frame,
939 void **this_cache, struct frame_id *this_id)
940 {
941 struct aarch64_prologue_cache *cache
942 = aarch64_make_stub_cache (this_frame, this_cache);
943
944 if (cache->available_p)
945 *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
946 else
947 *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
948 }
949
950 /* Implement the "sniffer" frame_unwind method. */
951
952 static int
953 aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
954 struct frame_info *this_frame,
955 void **this_prologue_cache)
956 {
957 CORE_ADDR addr_in_block;
958 gdb_byte dummy[4];
959
960 addr_in_block = get_frame_address_in_block (this_frame);
961 if (in_plt_section (addr_in_block)
962 /* We also use the stub unwinder if the target memory is unreadable
963 to avoid having the prologue unwinder trying to read it. */
964 || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
965 return 1;
966
967 return 0;
968 }
969
970 /* AArch64 stub unwinder. */
971 struct frame_unwind aarch64_stub_unwind =
972 {
973 NORMAL_FRAME,
974 aarch64_stub_frame_unwind_stop_reason,
975 aarch64_stub_this_id,
976 aarch64_prologue_prev_register,
977 NULL,
978 aarch64_stub_unwind_sniffer
979 };
980
981 /* Return the frame base address of *THIS_FRAME. */
982
983 static CORE_ADDR
984 aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
985 {
986 struct aarch64_prologue_cache *cache
987 = aarch64_make_prologue_cache (this_frame, this_cache);
988
989 return cache->prev_sp - cache->framesize;
990 }
991
992 /* AArch64 default frame base information. */
993 struct frame_base aarch64_normal_base =
994 {
995 &aarch64_prologue_unwind,
996 aarch64_normal_frame_base,
997 aarch64_normal_frame_base,
998 aarch64_normal_frame_base
999 };
1000
1001 /* Assuming THIS_FRAME is a dummy, return the frame ID of that
1002 dummy frame. The frame ID's base needs to match the TOS value
1003 saved by save_dummy_frame_tos () and returned from
1004 aarch64_push_dummy_call, and the PC needs to match the dummy
1005 frame's breakpoint. */
1006
1007 static struct frame_id
1008 aarch64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
1009 {
1010 return frame_id_build (get_frame_register_unsigned (this_frame,
1011 AARCH64_SP_REGNUM),
1012 get_frame_pc (this_frame));
1013 }
1014
1015 /* Implement the "unwind_pc" gdbarch method. */
1016
1017 static CORE_ADDR
1018 aarch64_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
1019 {
1020 CORE_ADDR pc
1021 = frame_unwind_register_unsigned (this_frame, AARCH64_PC_REGNUM);
1022
1023 return pc;
1024 }
1025
1026 /* Implement the "unwind_sp" gdbarch method. */
1027
1028 static CORE_ADDR
1029 aarch64_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
1030 {
1031 return frame_unwind_register_unsigned (this_frame, AARCH64_SP_REGNUM);
1032 }
1033
1034 /* Return the value of the REGNUM register in the previous frame of
1035 *THIS_FRAME. */
1036
1037 static struct value *
1038 aarch64_dwarf2_prev_register (struct frame_info *this_frame,
1039 void **this_cache, int regnum)
1040 {
1041 CORE_ADDR lr;
1042
1043 switch (regnum)
1044 {
1045 case AARCH64_PC_REGNUM:
1046 lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
1047 return frame_unwind_got_constant (this_frame, regnum, lr);
1048
1049 default:
1050 internal_error (__FILE__, __LINE__,
1051 _("Unexpected register %d"), regnum);
1052 }
1053 }
1054
1055 /* Implement the "init_reg" dwarf2_frame_ops method. */
1056
1057 static void
1058 aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
1059 struct dwarf2_frame_state_reg *reg,
1060 struct frame_info *this_frame)
1061 {
1062 switch (regnum)
1063 {
1064 case AARCH64_PC_REGNUM:
1065 reg->how = DWARF2_FRAME_REG_FN;
1066 reg->loc.fn = aarch64_dwarf2_prev_register;
1067 break;
1068 case AARCH64_SP_REGNUM:
1069 reg->how = DWARF2_FRAME_REG_CFA;
1070 break;
1071 }
1072 }
1073
1074 /* When arguments must be pushed onto the stack, they go on in reverse
1075 order. The code below implements a FILO (stack) to do this. */
1076
1077 typedef struct
1078 {
1079 /* Value to pass on stack. It can be NULL if this item is for stack
1080 padding. */
1081 const gdb_byte *data;
1082
1083 /* Size in bytes of value to pass on stack. */
1084 int len;
1085 } stack_item_t;
1086
1087 DEF_VEC_O (stack_item_t);
1088
1089 /* Return the alignment (in bytes) of the given type. */
1090
1091 static int
1092 aarch64_type_align (struct type *t)
1093 {
1094 int n;
1095 int align;
1096 int falign;
1097
1098 t = check_typedef (t);
1099 switch (TYPE_CODE (t))
1100 {
1101 default:
1102 /* Should never happen. */
1103 internal_error (__FILE__, __LINE__, _("unknown type alignment"));
1104 return 4;
1105
1106 case TYPE_CODE_PTR:
1107 case TYPE_CODE_ENUM:
1108 case TYPE_CODE_INT:
1109 case TYPE_CODE_FLT:
1110 case TYPE_CODE_SET:
1111 case TYPE_CODE_RANGE:
1112 case TYPE_CODE_BITSTRING:
1113 case TYPE_CODE_REF:
1114 case TYPE_CODE_RVALUE_REF:
1115 case TYPE_CODE_CHAR:
1116 case TYPE_CODE_BOOL:
1117 return TYPE_LENGTH (t);
1118
1119 case TYPE_CODE_ARRAY:
1120 if (TYPE_VECTOR (t))
1121 {
1122 /* Use the natural alignment for vector types (the same as for
1123 scalar types), but cap the alignment at 128 bits. */
1124 if (TYPE_LENGTH (t) > 16)
1125 return 16;
1126 else
1127 return TYPE_LENGTH (t);
1128 }
1129 else
1130 return aarch64_type_align (TYPE_TARGET_TYPE (t));
1131 case TYPE_CODE_COMPLEX:
1132 return aarch64_type_align (TYPE_TARGET_TYPE (t));
1133
1134 case TYPE_CODE_STRUCT:
1135 case TYPE_CODE_UNION:
1136 align = 1;
1137 for (n = 0; n < TYPE_NFIELDS (t); n++)
1138 {
1139 falign = aarch64_type_align (TYPE_FIELD_TYPE (t, n));
1140 if (falign > align)
1141 align = falign;
1142 }
1143 return align;
1144 }
1145 }
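/* For example, by the rules above a struct { char c; double d; }
   aligns to 8 (its largest field alignment), while a 32-byte vector
   type is capped at the 16-byte maximum. */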
1146
1147 /* Worker function for aapcs_is_vfp_call_or_return_candidate.
1148
1149 Return the number of registers required, or -1 on failure.
1150
1151 When encountering a base element, if FUNDAMENTAL_TYPE is not set then set it
1152 to the element, else fail if the type of this element does not match the
1153 existing value. */
1154
1155 static int
1156 aapcs_is_vfp_call_or_return_candidate_1 (struct type *type,
1157 struct type **fundamental_type)
1158 {
1159 if (type == nullptr)
1160 return -1;
1161
1162 switch (TYPE_CODE (type))
1163 {
1164 case TYPE_CODE_FLT:
1165 if (TYPE_LENGTH (type) > 16)
1166 return -1;
1167
1168 if (*fundamental_type == nullptr)
1169 *fundamental_type = type;
1170 else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
1171 || TYPE_CODE (type) != TYPE_CODE (*fundamental_type))
1172 return -1;
1173
1174 return 1;
1175
1176 case TYPE_CODE_COMPLEX:
1177 {
1178 struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
1179 if (TYPE_LENGTH (target_type) > 16)
1180 return -1;
1181
1182 if (*fundamental_type == nullptr)
1183 *fundamental_type = target_type;
1184 else if (TYPE_LENGTH (target_type) != TYPE_LENGTH (*fundamental_type)
1185 || TYPE_CODE (target_type) != TYPE_CODE (*fundamental_type))
1186 return -1;
1187
1188 return 2;
1189 }
1190
1191 case TYPE_CODE_ARRAY:
1192 {
1193 if (TYPE_VECTOR (type))
1194 {
1195 if (TYPE_LENGTH (type) != 8 && TYPE_LENGTH (type) != 16)
1196 return -1;
1197
1198 if (*fundamental_type == nullptr)
1199 *fundamental_type = type;
1200 else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
1201 || TYPE_CODE (type) != TYPE_CODE (*fundamental_type))
1202 return -1;
1203
1204 return 1;
1205 }
1206 else
1207 {
1208 struct type *target_type = TYPE_TARGET_TYPE (type);
1209 int count = aapcs_is_vfp_call_or_return_candidate_1
1210 (target_type, fundamental_type);
1211
1212 if (count == -1)
1213 return count;
1214
1215 count *= (TYPE_LENGTH (type) / TYPE_LENGTH (target_type));
1216 return count;
1217 }
1218 }
1219
1220 case TYPE_CODE_STRUCT:
1221 case TYPE_CODE_UNION:
1222 {
1223 int count = 0;
1224
1225 for (int i = 0; i < TYPE_NFIELDS (type); i++)
1226 {
1227 struct type *member = check_typedef (TYPE_FIELD_TYPE (type, i));
1228
1229 int sub_count = aapcs_is_vfp_call_or_return_candidate_1
1230 (member, fundamental_type);
1231 if (sub_count == -1)
1232 return -1;
1233 count += sub_count;
1234 }
1235
1236 /* Ensure there is no padding between the fields (allowing for empty
1237 zero-length structs). */
1238 int ftype_length = (*fundamental_type == nullptr)
1239 ? 0 : TYPE_LENGTH (*fundamental_type);
1240 if (count * ftype_length != TYPE_LENGTH (type))
1241 return -1;
1242
1243 return count;
1244 }
1245
1246 default:
1247 break;
1248 }
1249
1250 return -1;
1251 }
1252
1253 /* Return true if an argument, whose type is described by TYPE, can be passed or
1254 returned in SIMD/FP registers, provided enough parameter-passing registers
1255 are available. This is as described in the AAPCS64.
1256
1257 Upon successful return, *COUNT returns the number of needed registers,
1258 *FUNDAMENTAL_TYPE contains the type of those registers.
1259
1260 A candidate, as per AAPCS64 5.4.2.C, is one of:
1261 - a float.
1262 - a short vector.
1263 - an HFA (Homogeneous Floating-point Aggregate, 4.3.5.1): a composite
1264 type whose members are all floats, with at most 4 members.
1265 - an HVA (Homogeneous Short-vector Aggregate, 4.3.5.2): a composite
1266 type whose members are all short vectors, with at most 4 members.
1267 - a complex type (7.1.1).
1268
1269 Note that HFAs and HVAs can include nested structures and arrays. */
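/* For example, struct { float x, y, z; } is an HFA: each member
   contributes one register, giving *COUNT == 3 with *FUNDAMENTAL_TYPE
   the 4-byte float (3 * 4 bytes matches the struct length, so there
   is no padding); struct { float f; double d; } is rejected because
   the two base types differ. */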
1270
1271 static bool
1272 aapcs_is_vfp_call_or_return_candidate (struct type *type, int *count,
1273 struct type **fundamental_type)
1274 {
1275 if (type == nullptr)
1276 return false;
1277
1278 *fundamental_type = nullptr;
1279
1280 int ag_count = aapcs_is_vfp_call_or_return_candidate_1 (type,
1281 fundamental_type);
1282
1283 if (ag_count > 0 && ag_count <= HA_MAX_NUM_FLDS)
1284 {
1285 *count = ag_count;
1286 return true;
1287 }
1288 else
1289 return false;
1290 }
1291
1292 /* AArch64 function call information structure. */
1293 struct aarch64_call_info
1294 {
1295 /* The current argument number. */
1296 unsigned argnum;
1297
1298 /* The next general purpose register number, equivalent to NGRN as
1299 described in the AArch64 Procedure Call Standard. */
1300 unsigned ngrn;
1301
1302 /* The next SIMD and floating point register number, equivalent to
1303 NSRN as described in the AArch64 Procedure Call Standard. */
1304 unsigned nsrn;
1305
1306 /* The next stacked argument address, equivalent to NSAA as
1307 described in the AArch64 Procedure Call Standard. */
1308 unsigned nsaa;
1309
1310 /* Stack item vector. */
1311 VEC(stack_item_t) *si;
1312 };
1313
1314 /* Pass a value in a sequence of consecutive X registers. The caller
1315 is responsible for ensuring sufficient registers are available. */
1316
1317 static void
1318 pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
1319 struct aarch64_call_info *info, struct type *type,
1320 struct value *arg)
1321 {
1322 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1323 int len = TYPE_LENGTH (type);
1324 enum type_code typecode = TYPE_CODE (type);
1325 int regnum = AARCH64_X0_REGNUM + info->ngrn;
1326 const bfd_byte *buf = value_contents (arg);
1327
1328 info->argnum++;
1329
1330 while (len > 0)
1331 {
1332 int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
1333 CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
1334 byte_order);
1335
1336
1337 /* Adjust sub-word struct/union args when big-endian. */
1338 if (byte_order == BFD_ENDIAN_BIG
1339 && partial_len < X_REGISTER_SIZE
1340 && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
1341 regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);
1342
1343 if (aarch64_debug)
1344 {
1345 debug_printf ("arg %d in %s = 0x%s\n", info->argnum,
1346 gdbarch_register_name (gdbarch, regnum),
1347 phex (regval, X_REGISTER_SIZE));
1348 }
1349 regcache_cooked_write_unsigned (regcache, regnum, regval);
1350 len -= partial_len;
1351 buf += partial_len;
1352 regnum++;
1353 }
1354 }
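/* For example, a 16-byte struct with NGRN == 0 is split by the loop
   above into two 8-byte chunks written to x0 and x1. On a big-endian
   target a trailing sub-word chunk is first shifted into the most
   significant bytes of its register, as required for small
   struct/union arguments. */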
1355
1356 /* Attempt to marshall a value in a V register. Return 1 if
1357 successful, or 0 if insufficient registers are available. This
1358 function, unlike the equivalent pass_in_x (), does not handle
1359 arguments spread across multiple registers. */
1360
1361 static int
1362 pass_in_v (struct gdbarch *gdbarch,
1363 struct regcache *regcache,
1364 struct aarch64_call_info *info,
1365 int len, const bfd_byte *buf)
1366 {
1367 if (info->nsrn < 8)
1368 {
1369 int regnum = AARCH64_V0_REGNUM + info->nsrn;
1370 /* Enough space for a full vector register. */
1371 gdb_byte reg[register_size (gdbarch, regnum)];
1372 gdb_assert (len <= sizeof (reg));
1373
1374 info->argnum++;
1375 info->nsrn++;
1376
1377 memset (reg, 0, sizeof (reg));
1378 /* PCS C.1, the argument is allocated to the least significant
1379 bits of V register. */
1380 memcpy (reg, buf, len);
1381 regcache->cooked_write (regnum, reg);
1382
1383 if (aarch64_debug)
1384 {
1385 debug_printf ("arg %d in %s\n", info->argnum,
1386 gdbarch_register_name (gdbarch, regnum));
1387 }
1388 return 1;
1389 }
1390 info->nsrn = 8;
1391 return 0;
1392 }
1393
1394 /* Marshall an argument onto the stack. */
1395
1396 static void
1397 pass_on_stack (struct aarch64_call_info *info, struct type *type,
1398 struct value *arg)
1399 {
1400 const bfd_byte *buf = value_contents (arg);
1401 int len = TYPE_LENGTH (type);
1402 int align;
1403 stack_item_t item;
1404
1405 info->argnum++;
1406
1407 align = aarch64_type_align (type);
1408
1409 /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
1410 natural alignment of the argument's type. */
1411 align = align_up (align, 8);
1412
1413 /* The AArch64 PCS requires at most doubleword alignment. */
1414 if (align > 16)
1415 align = 16;
1416
1417 if (aarch64_debug)
1418 {
1419 debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
1420 info->nsaa);
1421 }
1422
1423 item.len = len;
1424 item.data = buf;
1425 VEC_safe_push (stack_item_t, info->si, &item);
1426
1427 info->nsaa += len;
1428 if (info->nsaa & (align - 1))
1429 {
1430 /* Push stack alignment padding. */
1431 int pad = align - (info->nsaa & (align - 1));
1432
1433 item.len = pad;
1434 item.data = NULL;
1435
1436 VEC_safe_push (stack_item_t, info->si, &item);
1437 info->nsaa += pad;
1438 }
1439 }
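/* For example, starting from NSAA == 0, a 12-byte struct of three
   ints has natural alignment 4, rounded up to the 8-byte minimum;
   the item advances NSAA to 12, and a 4-byte padding item is then
   pushed to restore 8-byte alignment (NSAA == 16). */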
1440
1441 /* Marshall an argument into a sequence of one or more consecutive X
1442 registers or, if insufficient X registers are available then onto
1443 the stack. */
1444
1445 static void
1446 pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
1447 struct aarch64_call_info *info, struct type *type,
1448 struct value *arg)
1449 {
1450 int len = TYPE_LENGTH (type);
1451 int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;
1452
1453 /* PCS C.13 - Pass in registers if we have enough spare. */
1454 if (info->ngrn + nregs <= 8)
1455 {
1456 pass_in_x (gdbarch, regcache, info, type, arg);
1457 info->ngrn += nregs;
1458 }
1459 else
1460 {
1461 info->ngrn = 8;
1462 pass_on_stack (info, type, arg);
1463 }
1464 }
1465
1466 /* Pass a value, which is of type ARG_TYPE, in a V register. Assumes the value
1467 is an aapcs_is_vfp_call_or_return_candidate and there are enough spare V
1468 registers. A return value of false is an error state as the value will have
1469 been partially passed to the stack. */
1470 static bool
1471 pass_in_v_vfp_candidate (struct gdbarch *gdbarch, struct regcache *regcache,
1472 struct aarch64_call_info *info, struct type *arg_type,
1473 struct value *arg)
1474 {
1475 switch (TYPE_CODE (arg_type))
1476 {
1477 case TYPE_CODE_FLT:
1478 return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type),
1479 value_contents (arg));
1480 break;
1481
1482 case TYPE_CODE_COMPLEX:
1483 {
1484 const bfd_byte *buf = value_contents (arg);
1485 struct type *target_type = check_typedef (TYPE_TARGET_TYPE (arg_type));
1486
1487 if (!pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type),
1488 buf))
1489 return false;
1490
1491 return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type),
1492 buf + TYPE_LENGTH (target_type));
1493 }
1494
1495 case TYPE_CODE_ARRAY:
1496 if (TYPE_VECTOR (arg_type))
1497 return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type),
1498 value_contents (arg));
1499 /* fall through. */
1500
1501 case TYPE_CODE_STRUCT:
1502 case TYPE_CODE_UNION:
1503 for (int i = 0; i < TYPE_NFIELDS (arg_type); i++)
1504 {
1505 struct value *field = value_primitive_field (arg, 0, i, arg_type);
1506 struct type *field_type = check_typedef (value_type (field));
1507
1508 if (!pass_in_v_vfp_candidate (gdbarch, regcache, info, field_type,
1509 field))
1510 return false;
1511 }
1512 return true;
1513
1514 default:
1515 return false;
1516 }
1517 }
1518
1519 /* Implement the "push_dummy_call" gdbarch method. */
1520
1521 static CORE_ADDR
1522 aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
1523 struct regcache *regcache, CORE_ADDR bp_addr,
1524 int nargs,
1525 struct value **args, CORE_ADDR sp,
1526 function_call_return_method return_method,
1527 CORE_ADDR struct_addr)
1528 {
1529 int argnum;
1530 struct aarch64_call_info info;
1531
1532 memset (&info, 0, sizeof (info));
1533
1534 /* We need to know what the type of the called function is in order
1535 to determine the number of named/anonymous arguments for the
1536 actual argument placement, and the return type in order to handle
1537 return value correctly.
1538
1539 The generic code above us views the decision of return in memory
1540 or return in registers as a two-stage process. The language
1541 handler is consulted first and may decide to return in memory (e.g. a
1542 class with a copy constructor returned by value); this will cause
1543 the generic code to allocate space AND insert an initial leading
1544 argument.
1545
1546 If the language code does not decide to pass in memory then the
1547 target code is consulted.
1548
1549 If the language code decides to pass in memory we want to move
1550 the pointer inserted as the initial argument from the argument
1551 list and into X8, the conventional AArch64 struct return pointer
1552 register. */
1553
1554 /* Set the return address. For the AArch64, the return breakpoint
1555 is always at BP_ADDR. */
1556 regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);
1557
1558 /* If we were given an initial argument for the return slot, lose it. */
1559 if (return_method == return_method_hidden_param)
1560 {
1561 args++;
1562 nargs--;
1563 }
1564
1565 /* The struct_return pointer occupies X8. */
1566 if (return_method != return_method_normal)
1567 {
1568 if (aarch64_debug)
1569 {
1570 debug_printf ("struct return in %s = 0x%s\n",
1571 gdbarch_register_name (gdbarch,
1572 AARCH64_STRUCT_RETURN_REGNUM),
1573 paddress (gdbarch, struct_addr));
1574 }
1575 regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
1576 struct_addr);
1577 }
1578
1579 for (argnum = 0; argnum < nargs; argnum++)
1580 {
1581 struct value *arg = args[argnum];
1582 struct type *arg_type, *fundamental_type;
1583 int len, elements;
1584
1585 arg_type = check_typedef (value_type (arg));
1586 len = TYPE_LENGTH (arg_type);
1587
1588 /* If arg can be passed in V registers as per the AAPCS64, then do so
1589 if there are enough spare registers. */
1590 if (aapcs_is_vfp_call_or_return_candidate (arg_type, &elements,
1591 &fundamental_type))
1592 {
1593 if (info.nsrn + elements <= 8)
1594 {
1595 /* We know that we have sufficient registers available, so this
1596 will never need to fall back to the stack. */
1597 if (!pass_in_v_vfp_candidate (gdbarch, regcache, &info, arg_type,
1598 arg))
1599 gdb_assert_not_reached ("Failed to push args");
1600 }
1601 else
1602 {
1603 info.nsrn = 8;
1604 pass_on_stack (&info, arg_type, arg);
1605 }
1606 continue;
1607 }
1608
1609 switch (TYPE_CODE (arg_type))
1610 {
1611 case TYPE_CODE_INT:
1612 case TYPE_CODE_BOOL:
1613 case TYPE_CODE_CHAR:
1614 case TYPE_CODE_RANGE:
1615 case TYPE_CODE_ENUM:
1616 if (len < 4)
1617 {
1618 /* Promote to 32 bit integer. */
1619 if (TYPE_UNSIGNED (arg_type))
1620 arg_type = builtin_type (gdbarch)->builtin_uint32;
1621 else
1622 arg_type = builtin_type (gdbarch)->builtin_int32;
1623 arg = value_cast (arg_type, arg);
1624 }
1625 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1626 break;
1627
1628 case TYPE_CODE_STRUCT:
1629 case TYPE_CODE_ARRAY:
1630 case TYPE_CODE_UNION:
1631 if (len > 16)
1632 {
1633 /* PCS B.7 Aggregates larger than 16 bytes are passed by
1634 invisible reference. */
1635
1636 /* Allocate aligned storage. */
1637 sp = align_down (sp - len, 16);
1638
1639 /* Write the real data into the stack. */
1640 write_memory (sp, value_contents (arg), len);
1641
1642 /* Construct the indirection. */
1643 arg_type = lookup_pointer_type (arg_type);
1644 arg = value_from_pointer (arg_type, sp);
1645 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1646 }
1647 else
1648 /* PCS C.15 / C.18 multiple values pass. */
1649 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1650 break;
1651
1652 default:
1653 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1654 break;
1655 }
1656 }
1657
1658 /* Make sure stack retains 16 byte alignment. */
1659 if (info.nsaa & 15)
1660 sp -= 16 - (info.nsaa & 15);
1661
1662 while (!VEC_empty (stack_item_t, info.si))
1663 {
1664 stack_item_t *si = VEC_last (stack_item_t, info.si);
1665
1666 sp -= si->len;
1667 if (si->data != NULL)
1668 write_memory (sp, si->data, si->len);
1669 VEC_pop (stack_item_t, info.si);
1670 }
1671
1672 VEC_free (stack_item_t, info.si);
1673
1674 /* Finally, update the SP register. */
1675 regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);
1676
1677 return sp;
1678 }
1679
1680 /* Implement the "frame_align" gdbarch method. */
1681
1682 static CORE_ADDR
1683 aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
1684 {
1685 /* Align the stack to sixteen bytes. */
1686 return sp & ~(CORE_ADDR) 15;
1687 }
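/* For example, frame_align rounds 0x7ffffffffe8f down to
   0x7ffffffffe80, since the AAPCS64 requires SP to be 16-byte
   aligned. */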
1688
1689 /* Return the type for an AdvSIMD Q register. */
1690
1691 static struct type *
1692 aarch64_vnq_type (struct gdbarch *gdbarch)
1693 {
1694 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1695
1696 if (tdep->vnq_type == NULL)
1697 {
1698 struct type *t;
1699 struct type *elem;
1700
1701 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
1702 TYPE_CODE_UNION);
1703
1704 elem = builtin_type (gdbarch)->builtin_uint128;
1705 append_composite_type_field (t, "u", elem);
1706
1707 elem = builtin_type (gdbarch)->builtin_int128;
1708 append_composite_type_field (t, "s", elem);
1709
1710 tdep->vnq_type = t;
1711 }
1712
1713 return tdep->vnq_type;
1714 }
1715
1716 /* Return the type for an AdvSIMD D register. */
1717
1718 static struct type *
1719 aarch64_vnd_type (struct gdbarch *gdbarch)
1720 {
1721 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1722
1723 if (tdep->vnd_type == NULL)
1724 {
1725 struct type *t;
1726 struct type *elem;
1727
1728 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
1729 TYPE_CODE_UNION);
1730
1731 elem = builtin_type (gdbarch)->builtin_double;
1732 append_composite_type_field (t, "f", elem);
1733
1734 elem = builtin_type (gdbarch)->builtin_uint64;
1735 append_composite_type_field (t, "u", elem);
1736
1737 elem = builtin_type (gdbarch)->builtin_int64;
1738 append_composite_type_field (t, "s", elem);
1739
1740 tdep->vnd_type = t;
1741 }
1742
1743 return tdep->vnd_type;
1744 }
1745
1746 /* Return the type for an AdvSIMD S register. */
1747
1748 static struct type *
1749 aarch64_vns_type (struct gdbarch *gdbarch)
1750 {
1751 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1752
1753 if (tdep->vns_type == NULL)
1754 {
1755 struct type *t;
1756 struct type *elem;
1757
1758 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
1759 TYPE_CODE_UNION);
1760
1761 elem = builtin_type (gdbarch)->builtin_float;
1762 append_composite_type_field (t, "f", elem);
1763
1764 elem = builtin_type (gdbarch)->builtin_uint32;
1765 append_composite_type_field (t, "u", elem);
1766
1767 elem = builtin_type (gdbarch)->builtin_int32;
1768 append_composite_type_field (t, "s", elem);
1769
1770 tdep->vns_type = t;
1771 }
1772
1773 return tdep->vns_type;
1774 }
1775
1776 /* Return the type for an AdvSIMD H register. */
1777
1778 static struct type *
1779 aarch64_vnh_type (struct gdbarch *gdbarch)
1780 {
1781 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1782
1783 if (tdep->vnh_type == NULL)
1784 {
1785 struct type *t;
1786 struct type *elem;
1787
1788 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
1789 TYPE_CODE_UNION);
1790
1791 elem = builtin_type (gdbarch)->builtin_uint16;
1792 append_composite_type_field (t, "u", elem);
1793
1794 elem = builtin_type (gdbarch)->builtin_int16;
1795 append_composite_type_field (t, "s", elem);
1796
1797 tdep->vnh_type = t;
1798 }
1799
1800 return tdep->vnh_type;
1801 }
1802
1803 /* Return the type for an AdvSIMD B register. */
1804
1805 static struct type *
1806 aarch64_vnb_type (struct gdbarch *gdbarch)
1807 {
1808 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1809
1810 if (tdep->vnb_type == NULL)
1811 {
1812 struct type *t;
1813 struct type *elem;
1814
1815 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
1816 TYPE_CODE_UNION);
1817
1818 elem = builtin_type (gdbarch)->builtin_uint8;
1819 append_composite_type_field (t, "u", elem);
1820
1821 elem = builtin_type (gdbarch)->builtin_int8;
1822 append_composite_type_field (t, "s", elem);
1823
1824 tdep->vnb_type = t;
1825 }
1826
1827 return tdep->vnb_type;
1828 }
1829
1830 /* Return the type for an AdvSIMD V register. */
1831
1832 static struct type *
1833 aarch64_vnv_type (struct gdbarch *gdbarch)
1834 {
1835 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1836
1837 if (tdep->vnv_type == NULL)
1838 {
1839 struct type *t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnv",
1840 TYPE_CODE_UNION);
1841
1842 append_composite_type_field (t, "d", aarch64_vnd_type (gdbarch));
1843 append_composite_type_field (t, "s", aarch64_vns_type (gdbarch));
1844 append_composite_type_field (t, "h", aarch64_vnh_type (gdbarch));
1845 append_composite_type_field (t, "b", aarch64_vnb_type (gdbarch));
1846 append_composite_type_field (t, "q", aarch64_vnq_type (gdbarch));
1847
1848 tdep->vnv_type = t;
1849 }
1850
1851 return tdep->vnv_type;
1852 }
1853
1854 /* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
1855
1856 static int
1857 aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
1858 {
1859 if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
1860 return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;
1861
1862 if (reg == AARCH64_DWARF_SP)
1863 return AARCH64_SP_REGNUM;
1864
1865 if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
1866 return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;
1867
1868 if (reg == AARCH64_DWARF_SVE_VG)
1869 return AARCH64_SVE_VG_REGNUM;
1870
1871 if (reg == AARCH64_DWARF_SVE_FFR)
1872 return AARCH64_SVE_FFR_REGNUM;
1873
1874 if (reg >= AARCH64_DWARF_SVE_P0 && reg <= AARCH64_DWARF_SVE_P0 + 15)
1875 return AARCH64_SVE_P0_REGNUM + reg - AARCH64_DWARF_SVE_P0;
1876
1877 if (reg >= AARCH64_DWARF_SVE_Z0 && reg <= AARCH64_DWARF_SVE_Z0 + 15)
1878 return AARCH64_SVE_Z0_REGNUM + reg - AARCH64_DWARF_SVE_Z0;
1879
1880 return -1;
1881 }
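/* For example, with the AArch64 DWARF numbering (x0-x30 at 0-30, SP
   at 31, v0-v31 at 64-95), DWARF register 65 maps to
   AARCH64_V0_REGNUM + 1, i.e. v1. */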
1882
1883 /* Implement the "print_insn" gdbarch method. */
1884
1885 static int
1886 aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
1887 {
1888 info->symbols = NULL;
1889 return default_print_insn (memaddr, info);
1890 }
1891
1892 /* AArch64 BRK software debug mode instruction.
1893 Note that AArch64 code is always little-endian.
1894 1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000. */
1895 constexpr gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
1896
1897 typedef BP_MANIPULATION (aarch64_default_breakpoint) aarch64_breakpoint;
1898
1899 /* Extract from an array REGS containing the (raw) register state a
1900 function return value of type TYPE, and copy that, in virtual
1901 format, into VALBUF. */
1902
1903 static void
1904 aarch64_extract_return_value (struct type *type, struct regcache *regs,
1905 gdb_byte *valbuf)
1906 {
1907 struct gdbarch *gdbarch = regs->arch ();
1908 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1909 int elements;
1910 struct type *fundamental_type;
1911
1912 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
1913 &fundamental_type))
1914 {
1915 int len = TYPE_LENGTH (fundamental_type);
1916
1917 for (int i = 0; i < elements; i++)
1918 {
1919 int regno = AARCH64_V0_REGNUM + i;
1920 /* Enough space for a full vector register. */
1921 gdb_byte buf[register_size (gdbarch, regno)];
1922 gdb_assert (len <= sizeof (buf));
1923
1924 if (aarch64_debug)
1925 {
1926 debug_printf ("read HFA or HVA return value element %d from %s\n",
1927 i + 1,
1928 gdbarch_register_name (gdbarch, regno));
1929 }
1930 regs->cooked_read (regno, buf);
1931
1932 memcpy (valbuf, buf, len);
1933 valbuf += len;
1934 }
1935 }
1936 else if (TYPE_CODE (type) == TYPE_CODE_INT
1937 || TYPE_CODE (type) == TYPE_CODE_CHAR
1938 || TYPE_CODE (type) == TYPE_CODE_BOOL
1939 || TYPE_CODE (type) == TYPE_CODE_PTR
1940 || TYPE_IS_REFERENCE (type)
1941 || TYPE_CODE (type) == TYPE_CODE_ENUM)
1942 {
1943 /* If the type is a plain integer, then the access is
1944 straightforward. Otherwise we have to play around a bit
1945 more. */
1946 int len = TYPE_LENGTH (type);
1947 int regno = AARCH64_X0_REGNUM;
1948 ULONGEST tmp;
1949
1950 while (len > 0)
1951 {
1952 /* By using store_unsigned_integer we avoid having to do
1953 anything special for small big-endian values. */
1954 regcache_cooked_read_unsigned (regs, regno++, &tmp);
1955 store_unsigned_integer (valbuf,
1956 (len > X_REGISTER_SIZE
1957 ? X_REGISTER_SIZE : len), byte_order, tmp);
1958 len -= X_REGISTER_SIZE;
1959 valbuf += X_REGISTER_SIZE;
1960 }
1961 }
1962 else
1963 {
1964 /* For a structure or union the behaviour is as if the value had
1965 been stored to word-aligned memory and then loaded into
1966 registers with 64-bit load instruction(s). */
1967 int len = TYPE_LENGTH (type);
1968 int regno = AARCH64_X0_REGNUM;
1969 bfd_byte buf[X_REGISTER_SIZE];
1970
1971 while (len > 0)
1972 {
1973 regs->cooked_read (regno++, buf);
1974 memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
1975 len -= X_REGISTER_SIZE;
1976 valbuf += X_REGISTER_SIZE;
1977 }
1978 }
1979 }
1980
1981
1982 /* Will a function return an aggregate type in memory or in a
1983 register? Return 0 if an aggregate type can be returned in a
1984 register, 1 if it must be returned in memory. */
1985
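/* For instance (an illustrative sketch, not a quote from the PCS):
   struct { float x, y; } is an HFA of two floats and is returned in
   s0/s1, whereas struct { char buf[24]; } exceeds 16 bytes and is
   returned in memory via the indirect result register.  */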
1986 static int
1987 aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
1988 {
1989 type = check_typedef (type);
1990 int elements;
1991 struct type *fundamental_type;
1992
1993 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
1994 &fundamental_type))
1995 {
1996 /* v0-v7 are used to return values, one register per member.
1997 An HFA or HVA has at most four members, so it always fits. */
1998 return 0;
1999 }
2000
2001 if (TYPE_LENGTH (type) > 16)
2002 {
2003 /* PCS B.6 Aggregates larger than 16 bytes are passed by
2004 invisible reference. */
2005
2006 return 1;
2007 }
2008
2009 return 0;
2010 }
2011
2012 /* Write into appropriate registers a function return value of type
2013 TYPE, given in virtual format. */
2014
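/* For instance (illustrative): returning an __int128 writes two
   X-register-sized chunks to x0 and x1, while returning a two-double
   HFA writes one element each to v0 and v1.  */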
2015 static void
2016 aarch64_store_return_value (struct type *type, struct regcache *regs,
2017 const gdb_byte *valbuf)
2018 {
2019 struct gdbarch *gdbarch = regs->arch ();
2020 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2021 int elements;
2022 struct type *fundamental_type;
2023
2024 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2025 &fundamental_type))
2026 {
2027 int len = TYPE_LENGTH (fundamental_type);
2028
2029 for (int i = 0; i < elements; i++)
2030 {
2031 int regno = AARCH64_V0_REGNUM + i;
2032 /* Enough space for a full vector register. */
2033 gdb_byte tmpbuf[register_size (gdbarch, regno)];
2034 gdb_assert (len <= sizeof (tmpbuf));
2035
2036 if (aarch64_debug)
2037 {
2038 debug_printf ("write HFA or HVA return value element %d to %s\n",
2039 i + 1,
2040 gdbarch_register_name (gdbarch, regno));
2041 }
2042
2043 memcpy (tmpbuf, valbuf,
2044 len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
2045 regs->cooked_write (regno, tmpbuf);
2046 valbuf += len;
2047 }
2048 }
2049 else if (TYPE_CODE (type) == TYPE_CODE_INT
2050 || TYPE_CODE (type) == TYPE_CODE_CHAR
2051 || TYPE_CODE (type) == TYPE_CODE_BOOL
2052 || TYPE_CODE (type) == TYPE_CODE_PTR
2053 || TYPE_IS_REFERENCE (type)
2054 || TYPE_CODE (type) == TYPE_CODE_ENUM)
2055 {
2056 if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
2057 {
2058 /* Values of one word or less are zero/sign-extended and
2059 returned in x0. */
2060 bfd_byte tmpbuf[X_REGISTER_SIZE];
2061 LONGEST val = unpack_long (type, valbuf);
2062
2063 store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
2064 regs->cooked_write (AARCH64_X0_REGNUM, tmpbuf);
2065 }
2066 else
2067 {
2068 /* Integral values greater than one word are stored in
2069 consecutive registers starting with x0. This will always
2070 be a multiple of the register size. */
2071 int len = TYPE_LENGTH (type);
2072 int regno = AARCH64_X0_REGNUM;
2073
2074 while (len > 0)
2075 {
2076 regs->cooked_write (regno++, valbuf);
2077 len -= X_REGISTER_SIZE;
2078 valbuf += X_REGISTER_SIZE;
2079 }
2080 }
2081 }
2082 else
2083 {
2084 /* For a structure or union the behaviour is as if the value had
2085 been stored to word-aligned memory and then loaded into
2086 registers with 64-bit load instruction(s). */
2087 int len = TYPE_LENGTH (type);
2088 int regno = AARCH64_X0_REGNUM;
2089 bfd_byte tmpbuf[X_REGISTER_SIZE];
2090
2091 while (len > 0)
2092 {
2093 memcpy (tmpbuf, valbuf,
2094 len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2095 regs->cooked_write (regno++, tmpbuf);
2096 len -= X_REGISTER_SIZE;
2097 valbuf += X_REGISTER_SIZE;
2098 }
2099 }
2100 }
2101
2102 /* Implement the "return_value" gdbarch method. */
2103
2104 static enum return_value_convention
2105 aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
2106 struct type *valtype, struct regcache *regcache,
2107 gdb_byte *readbuf, const gdb_byte *writebuf)
2108 {
2109
2110 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
2111 || TYPE_CODE (valtype) == TYPE_CODE_UNION
2112 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
2113 {
2114 if (aarch64_return_in_memory (gdbarch, valtype))
2115 {
2116 if (aarch64_debug)
2117 debug_printf ("return value in memory\n");
2118 return RETURN_VALUE_STRUCT_CONVENTION;
2119 }
2120 }
2121
2122 if (writebuf)
2123 aarch64_store_return_value (valtype, regcache, writebuf);
2124
2125 if (readbuf)
2126 aarch64_extract_return_value (valtype, regcache, readbuf);
2127
2128 if (aarch64_debug)
2129 debug_printf ("return value in registers\n");
2130
2131 return RETURN_VALUE_REGISTER_CONVENTION;
2132 }
2133
2134 /* Implement the "get_longjmp_target" gdbarch method. */
2135
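/* Sketch of the layout assumed here (jb_pc and jb_elt_size are
   supplied by the OS ABI): x0 holds the jmp_buf pointer on entry to
   longjmp, and the saved PC is read from
   jb_addr + jb_pc * jb_elt_size.  */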
2136 static int
2137 aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2138 {
2139 CORE_ADDR jb_addr;
2140 gdb_byte buf[X_REGISTER_SIZE];
2141 struct gdbarch *gdbarch = get_frame_arch (frame);
2142 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2143 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2144
2145 jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
2146
2147 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
2148 X_REGISTER_SIZE))
2149 return 0;
2150
2151 *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
2152 return 1;
2153 }
2154
2155 /* Implement the "gen_return_address" gdbarch method. */
2156
2157 static void
2158 aarch64_gen_return_address (struct gdbarch *gdbarch,
2159 struct agent_expr *ax, struct axs_value *value,
2160 CORE_ADDR scope)
2161 {
2162 value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
2163 value->kind = axs_lvalue_register;
2164 value->u.reg = AARCH64_LR_REGNUM;
2165 }
2166 \f
2167
2168 /* Return the pseudo register name corresponding to register regnum. */
2169
2170 static const char *
2171 aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
2172 {
2173 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2174
2175 static const char *const q_name[] =
2176 {
2177 "q0", "q1", "q2", "q3",
2178 "q4", "q5", "q6", "q7",
2179 "q8", "q9", "q10", "q11",
2180 "q12", "q13", "q14", "q15",
2181 "q16", "q17", "q18", "q19",
2182 "q20", "q21", "q22", "q23",
2183 "q24", "q25", "q26", "q27",
2184 "q28", "q29", "q30", "q31",
2185 };
2186
2187 static const char *const d_name[] =
2188 {
2189 "d0", "d1", "d2", "d3",
2190 "d4", "d5", "d6", "d7",
2191 "d8", "d9", "d10", "d11",
2192 "d12", "d13", "d14", "d15",
2193 "d16", "d17", "d18", "d19",
2194 "d20", "d21", "d22", "d23",
2195 "d24", "d25", "d26", "d27",
2196 "d28", "d29", "d30", "d31",
2197 };
2198
2199 static const char *const s_name[] =
2200 {
2201 "s0", "s1", "s2", "s3",
2202 "s4", "s5", "s6", "s7",
2203 "s8", "s9", "s10", "s11",
2204 "s12", "s13", "s14", "s15",
2205 "s16", "s17", "s18", "s19",
2206 "s20", "s21", "s22", "s23",
2207 "s24", "s25", "s26", "s27",
2208 "s28", "s29", "s30", "s31",
2209 };
2210
2211 static const char *const h_name[] =
2212 {
2213 "h0", "h1", "h2", "h3",
2214 "h4", "h5", "h6", "h7",
2215 "h8", "h9", "h10", "h11",
2216 "h12", "h13", "h14", "h15",
2217 "h16", "h17", "h18", "h19",
2218 "h20", "h21", "h22", "h23",
2219 "h24", "h25", "h26", "h27",
2220 "h28", "h29", "h30", "h31",
2221 };
2222
2223 static const char *const b_name[] =
2224 {
2225 "b0", "b1", "b2", "b3",
2226 "b4", "b5", "b6", "b7",
2227 "b8", "b9", "b10", "b11",
2228 "b12", "b13", "b14", "b15",
2229 "b16", "b17", "b18", "b19",
2230 "b20", "b21", "b22", "b23",
2231 "b24", "b25", "b26", "b27",
2232 "b28", "b29", "b30", "b31",
2233 };
2234
2235 regnum -= gdbarch_num_regs (gdbarch);
2236
2237 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2238 return q_name[regnum - AARCH64_Q0_REGNUM];
2239
2240 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2241 return d_name[regnum - AARCH64_D0_REGNUM];
2242
2243 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2244 return s_name[regnum - AARCH64_S0_REGNUM];
2245
2246 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2247 return h_name[regnum - AARCH64_H0_REGNUM];
2248
2249 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2250 return b_name[regnum - AARCH64_B0_REGNUM];
2251
2252 if (tdep->has_sve ())
2253 {
2254 static const char *const sve_v_name[] =
2255 {
2256 "v0", "v1", "v2", "v3",
2257 "v4", "v5", "v6", "v7",
2258 "v8", "v9", "v10", "v11",
2259 "v12", "v13", "v14", "v15",
2260 "v16", "v17", "v18", "v19",
2261 "v20", "v21", "v22", "v23",
2262 "v24", "v25", "v26", "v27",
2263 "v28", "v29", "v30", "v31",
2264 };
2265
2266 if (regnum >= AARCH64_SVE_V0_REGNUM
2267 && regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2268 return sve_v_name[regnum - AARCH64_SVE_V0_REGNUM];
2269 }
2270
2271 internal_error (__FILE__, __LINE__,
2272 _("aarch64_pseudo_register_name: bad register number %d"),
2273 regnum);
2274 }
2275
2276 /* Implement the "pseudo_register_type" tdesc_arch_data method. */
2277
2278 static struct type *
2279 aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
2280 {
2281 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2282
2283 regnum -= gdbarch_num_regs (gdbarch);
2284
2285 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2286 return aarch64_vnq_type (gdbarch);
2287
2288 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2289 return aarch64_vnd_type (gdbarch);
2290
2291 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2292 return aarch64_vns_type (gdbarch);
2293
2294 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2295 return aarch64_vnh_type (gdbarch);
2296
2297 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2298 return aarch64_vnb_type (gdbarch);
2299
2300 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2301 && regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2302 return aarch64_vnv_type (gdbarch);
2303
2304 internal_error (__FILE__, __LINE__,
2305 _("aarch64_pseudo_register_type: bad register number %d"),
2306 regnum);
2307 }
2308
2309 /* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2310
2311 static int
2312 aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
2313 struct reggroup *group)
2314 {
2315 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2316
2317 regnum -= gdbarch_num_regs (gdbarch);
2318
2319 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2320 return group == all_reggroup || group == vector_reggroup;
2321 else if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2322 return (group == all_reggroup || group == vector_reggroup
2323 || group == float_reggroup);
2324 else if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2325 return (group == all_reggroup || group == vector_reggroup
2326 || group == float_reggroup);
2327 else if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2328 return group == all_reggroup || group == vector_reggroup;
2329 else if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2330 return group == all_reggroup || group == vector_reggroup;
2331 else if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2332 && regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2333 return group == all_reggroup || group == vector_reggroup;
2334
2335 return group == all_reggroup;
2336 }
2337
2338 /* Helper for aarch64_pseudo_read_value. */
2339
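/* For example (a sketch): reading pseudo register d3 resolves to raw
   register v3 with REGSIZE == 8, so only the low 8 bytes of the full
   vector register contents are copied into the result value.  */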
2340 static struct value *
2341 aarch64_pseudo_read_value_1 (struct gdbarch *gdbarch,
2342 readable_regcache *regcache, int regnum_offset,
2343 int regsize, struct value *result_value)
2344 {
2345 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
2346
2347 /* Enough space for a full vector register. */
2348 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2349 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2350
2351 if (regcache->raw_read (v_regnum, reg_buf) != REG_VALID)
2352 mark_value_bytes_unavailable (result_value, 0,
2353 TYPE_LENGTH (value_type (result_value)));
2354 else
2355 memcpy (value_contents_raw (result_value), reg_buf, regsize);
2356
2357 return result_value;
2358 }
2359
2360 /* Implement the "pseudo_register_read_value" gdbarch method. */
2361
2362 static struct value *
2363 aarch64_pseudo_read_value (struct gdbarch *gdbarch, readable_regcache *regcache,
2364 int regnum)
2365 {
2366 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2367 struct value *result_value = allocate_value (register_type (gdbarch, regnum));
2368
2369 VALUE_LVAL (result_value) = lval_register;
2370 VALUE_REGNUM (result_value) = regnum;
2371
2372 regnum -= gdbarch_num_regs (gdbarch);
2373
2374 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2375 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2376 regnum - AARCH64_Q0_REGNUM,
2377 Q_REGISTER_SIZE, result_value);
2378
2379 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2380 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2381 regnum - AARCH64_D0_REGNUM,
2382 D_REGISTER_SIZE, result_value);
2383
2384 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2385 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2386 regnum - AARCH64_S0_REGNUM,
2387 S_REGISTER_SIZE, result_value);
2388
2389 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2390 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2391 regnum - AARCH64_H0_REGNUM,
2392 H_REGISTER_SIZE, result_value);
2393
2394 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2395 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2396 regnum - AARCH64_B0_REGNUM,
2397 B_REGISTER_SIZE, result_value);
2398
2399 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2400 && regnum < AARCH64_SVE_V0_REGNUM + 32)
2401 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2402 regnum - AARCH64_SVE_V0_REGNUM,
2403 V_REGISTER_SIZE, result_value);
2404
2405 gdb_assert_not_reached ("regnum out of bounds");
2406 }
2407
2408 /* Helper for aarch64_pseudo_write. */
2409
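/* E.g. (a sketch): writing pseudo register s0 stores 4 bytes at the
   bottom of v0 and clears the remaining bytes, mirroring what an
   architectural write to s0 does to the full vector register.  */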
2410 static void
2411 aarch64_pseudo_write_1 (struct gdbarch *gdbarch, struct regcache *regcache,
2412 int regnum_offset, int regsize, const gdb_byte *buf)
2413 {
2414 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
2415
2416 /* Enough space for a full vector register. */
2417 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2418 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2419
2420 /* Ensure the register buffer is zero. We want GDB writes of the
2421 various 'scalar' pseudo registers to behave like architectural
2422 writes: register-width bytes are written and the remainder is
2423 set to zero. */
2424 memset (reg_buf, 0, register_size (gdbarch, AARCH64_V0_REGNUM));
2425
2426 memcpy (reg_buf, buf, regsize);
2427 regcache->raw_write (v_regnum, reg_buf);
2428 }
2429
2430 /* Implement the "pseudo_register_write" gdbarch method. */
2431
2432 static void
2433 aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2434 int regnum, const gdb_byte *buf)
2435 {
2436 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2437 regnum -= gdbarch_num_regs (gdbarch);
2438
2439 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2440 return aarch64_pseudo_write_1 (gdbarch, regcache,
2441 regnum - AARCH64_Q0_REGNUM, Q_REGISTER_SIZE,
2442 buf);
2443
2444 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2445 return aarch64_pseudo_write_1 (gdbarch, regcache,
2446 regnum - AARCH64_D0_REGNUM, D_REGISTER_SIZE,
2447 buf);
2448
2449 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2450 return aarch64_pseudo_write_1 (gdbarch, regcache,
2451 regnum - AARCH64_S0_REGNUM, S_REGISTER_SIZE,
2452 buf);
2453
2454 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2455 return aarch64_pseudo_write_1 (gdbarch, regcache,
2456 regnum - AARCH64_H0_REGNUM, H_REGISTER_SIZE,
2457 buf);
2458
2459 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2460 return aarch64_pseudo_write_1 (gdbarch, regcache,
2461 regnum - AARCH64_B0_REGNUM, B_REGISTER_SIZE,
2462 buf);
2463
2464 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2465 && regnum < AARCH64_SVE_V0_REGNUM + 32)
2466 return aarch64_pseudo_write_1 (gdbarch, regcache,
2467 regnum - AARCH64_SVE_V0_REGNUM,
2468 V_REGISTER_SIZE, buf);
2469
2470 gdb_assert_not_reached ("regnum out of bounds");
2471 }
2472
2473 /* Callback function for user_reg_add. */
2474
2475 static struct value *
2476 value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
2477 {
2478 const int *reg_p = (const int *) baton;
2479
2480 return value_of_register (*reg_p, frame);
2481 }
2482 \f
2483
2484 /* Implement the "software_single_step" gdbarch method, needed to
2485 single step through atomic sequences on AArch64. */
2486
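/* A sketch of the kind of sequence being matched (illustrative only):

     retry:
       ldaxr  w1, [x0]       ; load exclusive opens the sequence
       add    w1, w1, #1
       stlxr  w2, w1, [x0]   ; store exclusive closes it
       cbnz   w2, retry

   A breakpoint inside the sequence would break the reservation, so a
   step breakpoint is placed just past the closing store exclusive,
   plus one at the target of any conditional branch found within the
   sequence.  */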
2487 static std::vector<CORE_ADDR>
2488 aarch64_software_single_step (struct regcache *regcache)
2489 {
2490 struct gdbarch *gdbarch = regcache->arch ();
2491 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2492 const int insn_size = 4;
2493 const int atomic_sequence_length = 16; /* Instruction sequence length. */
2494 CORE_ADDR pc = regcache_read_pc (regcache);
2495 CORE_ADDR breaks[2] = { CORE_ADDR_MAX, CORE_ADDR_MAX };
2496 CORE_ADDR loc = pc;
2497 CORE_ADDR closing_insn = 0;
2498 uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
2499 byte_order_for_code);
2500 int index;
2501 int insn_count;
2502 int bc_insn_count = 0; /* Conditional branch instruction count. */
2503 int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed). */
2504 aarch64_inst inst;
2505
2506 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2507 return {};
2508
2509 /* Look for a Load Exclusive instruction which begins the sequence. */
2510 if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
2511 return {};
2512
2513 for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
2514 {
2515 loc += insn_size;
2516 insn = read_memory_unsigned_integer (loc, insn_size,
2517 byte_order_for_code);
2518
2519 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2520 return {};
2521 /* Check if the instruction is a conditional branch. */
2522 if (inst.opcode->iclass == condbranch)
2523 {
2524 gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);
2525
2526 if (bc_insn_count >= 1)
2527 return {};
2528
2529 /* It is, so we'll try to set a breakpoint at the destination. */
2530 breaks[1] = loc + inst.operands[0].imm.value;
2531
2532 bc_insn_count++;
2533 last_breakpoint++;
2534 }
2535
2536 /* Look for the Store Exclusive which closes the atomic sequence. */
2537 if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
2538 {
2539 closing_insn = loc;
2540 break;
2541 }
2542 }
2543
2544 /* We didn't find a closing Store Exclusive instruction; fall back. */
2545 if (!closing_insn)
2546 return {};
2547
2548 /* Insert breakpoint after the end of the atomic sequence. */
2549 breaks[0] = loc + insn_size;
2550
2551 /* Check for duplicated breakpoints, and also check that the second
2552 breakpoint is not within the atomic sequence. */
2553 if (last_breakpoint
2554 && (breaks[1] == breaks[0]
2555 || (breaks[1] >= pc && breaks[1] <= closing_insn)))
2556 last_breakpoint = 0;
2557
2558 std::vector<CORE_ADDR> next_pcs;
2559
2560 /* Insert the breakpoint at the end of the sequence, and one at the
2561 destination of the conditional branch, if it exists. */
2562 for (index = 0; index <= last_breakpoint; index++)
2563 next_pcs.push_back (breaks[index]);
2564
2565 return next_pcs;
2566 }
2567
2568 struct aarch64_displaced_step_closure : public displaced_step_closure
2569 {
2570 /* True when a conditional instruction, such as B.COND, TBZ or CBZ,
2571 is being displaced stepped. */
2572 int cond = 0;
2573
2574 /* PC adjustment offset after displaced stepping. */
2575 int32_t pc_adjust = 0;
2576 };
2577
2578 /* Data when visiting instructions for displaced stepping. */
2579
2580 struct aarch64_displaced_step_data
2581 {
2582 struct aarch64_insn_data base;
2583
2584 /* The address at which the instruction will be executed. */
2585 CORE_ADDR new_addr;
2586 /* Buffer of instructions to be copied to NEW_ADDR to execute. */
2587 uint32_t insn_buf[DISPLACED_MODIFIED_INSNS];
2588 /* Number of instructions in INSN_BUF. */
2589 unsigned insn_count;
2590 /* Registers when doing displaced stepping. */
2591 struct regcache *regs;
2592
2593 aarch64_displaced_step_closure *dsc;
2594 };
2595
2596 /* Implementation of aarch64_insn_visitor method "b". */
2597
2598 static void
2599 aarch64_displaced_step_b (const int is_bl, const int32_t offset,
2600 struct aarch64_insn_data *data)
2601 {
2602 struct aarch64_displaced_step_data *dsd
2603 = (struct aarch64_displaced_step_data *) data;
2604 int64_t new_offset = data->insn_addr - dsd->new_addr + offset;
2605
2606 if (can_encode_int32 (new_offset, 28))
2607 {
2608 /* Emit B rather than BL, because executing BL on a new address
2609 will get the wrong address into LR. In order to avoid this,
2610 we emit B, and update LR if the instruction is BL. */
2611 emit_b (dsd->insn_buf, 0, new_offset);
2612 dsd->insn_count++;
2613 }
2614 else
2615 {
2616 /* Write NOP. */
2617 emit_nop (dsd->insn_buf);
2618 dsd->insn_count++;
2619 dsd->dsc->pc_adjust = offset;
2620 }
2621
2622 if (is_bl)
2623 {
2624 /* Update LR. */
2625 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
2626 data->insn_addr + 4);
2627 }
2628 }
2629
2630 /* Implementation of aarch64_insn_visitor method "b_cond". */
2631
2632 static void
2633 aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
2634 struct aarch64_insn_data *data)
2635 {
2636 struct aarch64_displaced_step_data *dsd
2637 = (struct aarch64_displaced_step_data *) data;
2638
2639 /* GDB has to fix up the PC after displaced stepping this instruction
2640 differently, according to whether the condition is true or false.
2641 Instead of checking COND against the condition flags, we can emit
2642 the following instructions, and GDB can tell how to fix up the PC
2643 from the resulting PC value.
2644
2645 B.COND TAKEN ; If cond is true, then jump to TAKEN.
2646 INSN1 ;
2647 TAKEN:
2648 INSN2
2649 */
2650
2651 emit_bcond (dsd->insn_buf, cond, 8);
2652 dsd->dsc->cond = 1;
2653 dsd->dsc->pc_adjust = offset;
2654 dsd->insn_count = 1;
2655 }
2656
2657 /* Build an aarch64_register operand dynamically. If the register is
2658 known statically, we should make it a global as above instead of
2659 using this helper function. */
2660
2661 static struct aarch64_register
2662 aarch64_register (unsigned num, int is64)
2663 {
2664 return (struct aarch64_register) { num, is64 };
2665 }
2666
2667 /* Implementation of aarch64_insn_visitor method "cb". */
2668
2669 static void
2670 aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
2671 const unsigned rn, int is64,
2672 struct aarch64_insn_data *data)
2673 {
2674 struct aarch64_displaced_step_data *dsd
2675 = (struct aarch64_displaced_step_data *) data;
2676
2677 /* The offset is out of range for a compare and branch
2678 instruction. We can use the following instructions instead:
2679
2680 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
2681 INSN1 ;
2682 TAKEN:
2683 INSN2
2684 */
2685 emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
2686 dsd->insn_count = 1;
2687 dsd->dsc->cond = 1;
2688 dsd->dsc->pc_adjust = offset;
2689 }
2690
2691 /* Implementation of aarch64_insn_visitor method "tb". */
2692
2693 static void
2694 aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
2695 const unsigned rt, unsigned bit,
2696 struct aarch64_insn_data *data)
2697 {
2698 struct aarch64_displaced_step_data *dsd
2699 = (struct aarch64_displaced_step_data *) data;
2700
2701 /* The offset is out of range for a test bit and branch
2702 instruction. We can use the following instructions instead:
2703
2704 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
2705 INSN1 ;
2706 TAKEN:
2707 INSN2
2708
2709 */
2710 emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
2711 dsd->insn_count = 1;
2712 dsd->dsc->cond = 1;
2713 dsd->dsc->pc_adjust = offset;
2714 }
2715
2716 /* Implementation of aarch64_insn_visitor method "adr". */
2717
2718 static void
2719 aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
2720 const int is_adrp, struct aarch64_insn_data *data)
2721 {
2722 struct aarch64_displaced_step_data *dsd
2723 = (struct aarch64_displaced_step_data *) data;
2724 /* We know exactly the address the ADR{P,} instruction will compute.
2725 We can just write it to the destination register. */
2726 CORE_ADDR address = data->insn_addr + offset;
2727
2728 if (is_adrp)
2729 {
2730 /* Clear the lower 12 bits of the offset to get the 4K page. */
2731 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2732 address & ~0xfff);
2733 }
2734 else
2735 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2736 address);
2737
2738 dsd->dsc->pc_adjust = 4;
2739 emit_nop (dsd->insn_buf);
2740 dsd->insn_count = 1;
2741 }
2742
2743 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
2744
2745 static void
2746 aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
2747 const unsigned rt, const int is64,
2748 struct aarch64_insn_data *data)
2749 {
2750 struct aarch64_displaced_step_data *dsd
2751 = (struct aarch64_displaced_step_data *) data;
2752 CORE_ADDR address = data->insn_addr + offset;
2753 struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };
2754
2755 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
2756 address);
2757
2758 if (is_sw)
2759 dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
2760 aarch64_register (rt, 1), zero);
2761 else
2762 dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
2763 aarch64_register (rt, 1), zero);
2764
2765 dsd->dsc->pc_adjust = 4;
2766 }
2767
2768 /* Implementation of aarch64_insn_visitor method "others". */
2769
2770 static void
2771 aarch64_displaced_step_others (const uint32_t insn,
2772 struct aarch64_insn_data *data)
2773 {
2774 struct aarch64_displaced_step_data *dsd
2775 = (struct aarch64_displaced_step_data *) data;
2776
2777 aarch64_emit_insn (dsd->insn_buf, insn);
2778 dsd->insn_count = 1;
2779
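  /* RET Xn encodes as 0xd65f0000 with Rn in bits 5-9; masking those
     bits out with 0xfffffc1f matches a RET through any register.  */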
2780 if ((insn & 0xfffffc1f) == 0xd65f0000)
2781 {
2782 /* RET */
2783 dsd->dsc->pc_adjust = 0;
2784 }
2785 else
2786 dsd->dsc->pc_adjust = 4;
2787 }
2788
2789 static const struct aarch64_insn_visitor visitor =
2790 {
2791 aarch64_displaced_step_b,
2792 aarch64_displaced_step_b_cond,
2793 aarch64_displaced_step_cb,
2794 aarch64_displaced_step_tb,
2795 aarch64_displaced_step_adr,
2796 aarch64_displaced_step_ldr_literal,
2797 aarch64_displaced_step_others,
2798 };
2799
2800 /* Implement the "displaced_step_copy_insn" gdbarch method. */
2801
2802 struct displaced_step_closure *
2803 aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
2804 CORE_ADDR from, CORE_ADDR to,
2805 struct regcache *regs)
2806 {
2807 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2808 uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
2809 struct aarch64_displaced_step_data dsd;
2810 aarch64_inst inst;
2811
2812 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2813 return NULL;
2814
2815 /* Look for a Load Exclusive instruction which begins the sequence. */
2816 if (inst.opcode->iclass == ldstexcl && bit (insn, 22))
2817 {
2818 /* We can't displaced-step atomic sequences. */
2819 return NULL;
2820 }
2821
2822 std::unique_ptr<aarch64_displaced_step_closure> dsc
2823 (new aarch64_displaced_step_closure);
2824 dsd.base.insn_addr = from;
2825 dsd.new_addr = to;
2826 dsd.regs = regs;
2827 dsd.dsc = dsc.get ();
2828 dsd.insn_count = 0;
2829 aarch64_relocate_instruction (insn, &visitor,
2830 (struct aarch64_insn_data *) &dsd);
2831 gdb_assert (dsd.insn_count <= DISPLACED_MODIFIED_INSNS);
2832
2833 if (dsd.insn_count != 0)
2834 {
2835 int i;
2836
2837 /* The instruction can be relocated to the scratch pad. Copy the
2838 relocated instruction(s) there. */
2839 for (i = 0; i < dsd.insn_count; i++)
2840 {
2841 if (debug_displaced)
2842 {
2843 debug_printf ("displaced: writing insn ");
2844 debug_printf ("%.8x", dsd.insn_buf[i]);
2845 debug_printf (" at %s\n", paddress (gdbarch, to + i * 4));
2846 }
2847 write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
2848 (ULONGEST) dsd.insn_buf[i]);
2849 }
2850 }
2851 else
2852 {
2853 dsc = NULL;
2854 }
2855
2856 return dsc.release ();
2857 }
2858
2859 /* Implement the "displaced_step_fixup" gdbarch method. */
2860
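/* Sketch of the scratch-pad layout this fixup assumes for a
   conditional instruction (see aarch64_displaced_step_b_cond):

     TO + 0: B.COND <TO + 8>   ; taken if the condition holds
     TO + 4:                   ; stop here: condition was false
     TO + 8:                   ; stop here: condition was true

   so PC - TO distinguishes the two outcomes.  */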
2861 void
2862 aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
2863 struct displaced_step_closure *dsc_,
2864 CORE_ADDR from, CORE_ADDR to,
2865 struct regcache *regs)
2866 {
2867 aarch64_displaced_step_closure *dsc = (aarch64_displaced_step_closure *) dsc_;
2868
2869 if (dsc->cond)
2870 {
2871 ULONGEST pc;
2872
2873 regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc);
2874 if (pc - to == 8)
2875 {
2876 /* Condition is true. */
2877 }
2878 else if (pc - to == 4)
2879 {
2880 /* Condition is false. */
2881 dsc->pc_adjust = 4;
2882 }
2883 else
2884 gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
2885 }
2886
2887 if (dsc->pc_adjust != 0)
2888 {
2889 if (debug_displaced)
2890 {
2891 debug_printf ("displaced: fixup: set PC to %s:%d\n",
2892 paddress (gdbarch, from), dsc->pc_adjust);
2893 }
2894 regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
2895 from + dsc->pc_adjust);
2896 }
2897 }
2898
2899 /* Implement the "displaced_step_hw_singlestep" gdbarch method. */
2900
2901 int
2902 aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch,
2903 struct displaced_step_closure *closure)
2904 {
2905 return 1;
2906 }
2907
2908 /* Get the correct target description for the given VQ value.
2909 If VQ is zero then it is assumed SVE is not supported.
2910 (It is not possible to set VQ to zero on an SVE system). */
2911
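/* E.g. (a sketch): aarch64_read_description (2) returns, and caches in
   tdesc_aarch64_list[2], a description whose SVE vector length is
   2 * 16 = 32 bytes; aarch64_read_description (0) returns the plain
   non-SVE description.  */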
2912 const target_desc *
2913 aarch64_read_description (uint64_t vq)
2914 {
2915 if (vq > AARCH64_MAX_SVE_VQ)
2916 error (_("VQ is %" PRIu64 ", maximum supported value is %d"), vq,
2917 AARCH64_MAX_SVE_VQ);
2918
2919 struct target_desc *tdesc = tdesc_aarch64_list[vq];
2920
2921 if (tdesc == NULL)
2922 {
2923 tdesc = aarch64_create_target_description (vq);
2924 tdesc_aarch64_list[vq] = tdesc;
2925 }
2926
2927 return tdesc;
2928 }
2929
2930 /* Return the VQ used when creating the target description TDESC. */
2931
2932 static uint64_t
2933 aarch64_get_tdesc_vq (const struct target_desc *tdesc)
2934 {
2935 const struct tdesc_feature *feature_sve;
2936
2937 if (!tdesc_has_registers (tdesc))
2938 return 0;
2939
2940 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
2941
2942 if (feature_sve == nullptr)
2943 return 0;
2944
2945 uint64_t vl = tdesc_register_bitsize (feature_sve,
2946 aarch64_sve_register_names[0]) / 8;
2947 return sve_vq_from_vl (vl);
2948 }
2949
2950
2951 /* Initialize the current architecture based on INFO. If possible,
2952 re-use an architecture from ARCHES, which is a list of
2953 architectures already created during this debugging session.
2954
2955 Called e.g. at program startup, when reading a core file, and when
2956 reading a binary file. */
2957
2958 static struct gdbarch *
2959 aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
2960 {
2961 struct gdbarch_tdep *tdep;
2962 struct gdbarch *gdbarch;
2963 struct gdbarch_list *best_arch;
2964 struct tdesc_arch_data *tdesc_data = NULL;
2965 const struct target_desc *tdesc = info.target_desc;
2966 int i;
2967 int valid_p = 1;
2968 const struct tdesc_feature *feature_core;
2969 const struct tdesc_feature *feature_fpu;
2970 const struct tdesc_feature *feature_sve;
2971 int num_regs = 0;
2972 int num_pseudo_regs = 0;
2973
2974 /* Ensure we always have a target description. */
2975 if (!tdesc_has_registers (tdesc))
2976 tdesc = aarch64_read_description (0);
2977 gdb_assert (tdesc);
2978
2979 feature_core = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");
2980 feature_fpu = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
2981 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
2982
2983 if (feature_core == NULL)
2984 return NULL;
2985
2986 tdesc_data = tdesc_data_alloc ();
2987
2988 /* Validate the description provides the mandatory core R registers
2989 and allocate their numbers. */
2990 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
2991 valid_p &= tdesc_numbered_register (feature_core, tdesc_data,
2992 AARCH64_X0_REGNUM + i,
2993 aarch64_r_register_names[i]);
2994
2995 num_regs = AARCH64_X0_REGNUM + i;
2996
2997 /* Add the V registers. */
2998 if (feature_fpu != NULL)
2999 {
3000 if (feature_sve != NULL)
3001 error (_("Program contains both fpu and SVE features."));
3002
3003 /* Validate the description provides the mandatory V registers
3004 and allocate their numbers. */
3005 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
3006 valid_p &= tdesc_numbered_register (feature_fpu, tdesc_data,
3007 AARCH64_V0_REGNUM + i,
3008 aarch64_v_register_names[i]);
3009
3010 num_regs = AARCH64_V0_REGNUM + i;
3011 }
3012
3013 /* Add the SVE registers. */
3014 if (feature_sve != NULL)
3015 {
3016 /* Validate the description provides the mandatory SVE registers
3017 and allocate their numbers. */
3018 for (i = 0; i < ARRAY_SIZE (aarch64_sve_register_names); i++)
3019 valid_p &= tdesc_numbered_register (feature_sve, tdesc_data,
3020 AARCH64_SVE_Z0_REGNUM + i,
3021 aarch64_sve_register_names[i]);
3022
3023 num_regs = AARCH64_SVE_Z0_REGNUM + i;
3024 num_pseudo_regs += 32; /* Add the Vn register pseudos. */
3025 }
3026
3027 if (feature_fpu != NULL || feature_sve != NULL)
3028 {
3029 num_pseudo_regs += 32; /* Add the Qn scalar register pseudos. */
3030 num_pseudo_regs += 32; /* Add the Dn scalar register pseudos. */
3031 num_pseudo_regs += 32; /* Add the Sn scalar register pseudos. */
3032 num_pseudo_regs += 32; /* Add the Hn scalar register pseudos. */
3033 num_pseudo_regs += 32; /* Add the Bn scalar register pseudos. */
3034 }
3035
3036 if (!valid_p)
3037 {
3038 tdesc_data_cleanup (tdesc_data);
3039 return NULL;
3040 }
3041
3042 /* AArch64 code is always little-endian. */
3043 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
3044
3045 /* If there is already a candidate, use it. */
3046 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
3047 best_arch != NULL;
3048 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
3049 {
3050 /* Found a match. */
3051 break;
3052 }
3053
3054 if (best_arch != NULL)
3055 {
3056 if (tdesc_data != NULL)
3057 tdesc_data_cleanup (tdesc_data);
3058 return best_arch->gdbarch;
3059 }
3060
3061 tdep = XCNEW (struct gdbarch_tdep);
3062 gdbarch = gdbarch_alloc (&info, tdep);
3063
3064 /* This should be low enough for everything. */
3065 tdep->lowest_pc = 0x20;
3066 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
3067 tdep->jb_elt_size = 8;
3068 tdep->vq = aarch64_get_tdesc_vq (tdesc);
3069
3070 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
3071 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
3072
3073 /* Frame handling. */
3074 set_gdbarch_dummy_id (gdbarch, aarch64_dummy_id);
3075 set_gdbarch_unwind_pc (gdbarch, aarch64_unwind_pc);
3076 set_gdbarch_unwind_sp (gdbarch, aarch64_unwind_sp);
3077
3078 /* Advance PC across function entry code. */
3079 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
3080
3081 /* The stack grows downward. */
3082 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
3083
3084 /* Breakpoint manipulation. */
3085 set_gdbarch_breakpoint_kind_from_pc (gdbarch,
3086 aarch64_breakpoint::kind_from_pc);
3087 set_gdbarch_sw_breakpoint_from_kind (gdbarch,
3088 aarch64_breakpoint::bp_from_kind);
3089 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
3090 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
3091
3092 /* Information about registers, etc. */
3093 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
3094 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
3095 set_gdbarch_num_regs (gdbarch, num_regs);
3096
3097 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
3098 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
3099 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
3100 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
3101 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
3102 set_tdesc_pseudo_register_reggroup_p (gdbarch,
3103 aarch64_pseudo_register_reggroup_p);
3104
3105 /* ABI */
3106 set_gdbarch_short_bit (gdbarch, 16);
3107 set_gdbarch_int_bit (gdbarch, 32);
3108 set_gdbarch_float_bit (gdbarch, 32);
3109 set_gdbarch_double_bit (gdbarch, 64);
3110 set_gdbarch_long_double_bit (gdbarch, 128);
3111 set_gdbarch_long_bit (gdbarch, 64);
3112 set_gdbarch_long_long_bit (gdbarch, 64);
3113 set_gdbarch_ptr_bit (gdbarch, 64);
3114 set_gdbarch_char_signed (gdbarch, 0);
3115 set_gdbarch_wchar_signed (gdbarch, 0);
3116 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
3117 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
3118 set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);
3119
3120 /* Internal <-> external register number maps. */
3121 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
3122
3123 /* Returning results. */
3124 set_gdbarch_return_value (gdbarch, aarch64_return_value);
3125
3126 /* Disassembly. */
3127 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
3128
3129 /* Virtual tables. */
3130 set_gdbarch_vbit_in_delta (gdbarch, 1);
3131
3132 /* Hook in the ABI-specific overrides, if they have been registered. */
3133 info.target_desc = tdesc;
3134 info.tdesc_data = tdesc_data;
3135 gdbarch_init_osabi (info, gdbarch);
3136
3137 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
3138
3139 /* Add the default unwinders. */
3140 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
3141 dwarf2_append_unwinders (gdbarch);
3142 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
3143
3144 frame_base_set_default (gdbarch, &aarch64_normal_base);
3145
3146 /* Now we have tuned the configuration, set a few final things,
3147 based on what the OS ABI has told us. */
3148
3149 if (tdep->jb_pc >= 0)
3150 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
3151
3152 set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);
3153
3154 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
3155
3156 /* Add standard register aliases. */
3157 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
3158 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
3159 value_of_aarch64_user_reg,
3160 &aarch64_register_aliases[i].regnum);
3161
3162 register_aarch64_ravenscar_ops (gdbarch);
3163
3164 return gdbarch;
3165 }
3166
3167 static void
3168 aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
3169 {
3170 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3171
3172 if (tdep == NULL)
3173 return;
3174
3175 fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
3176 paddress (gdbarch, tdep->lowest_pc));
3177 }
3178
3179 #if GDB_SELF_TEST
3180 namespace selftests
3181 {
3182 static void aarch64_process_record_test (void);
3183 }
3184 #endif
3185
3186 void
3187 _initialize_aarch64_tdep (void)
3188 {
3189 gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
3190 aarch64_dump_tdep);
3191
3192 /* Debug this file's internals. */
3193 add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
3194 Set AArch64 debugging."), _("\
3195 Show AArch64 debugging."), _("\
3196 When on, AArch64 specific debugging is enabled."),
3197 NULL,
3198 show_aarch64_debug,
3199 &setdebuglist, &showdebuglist);
3200
3201 #if GDB_SELF_TEST
3202 selftests::register_test ("aarch64-analyze-prologue",
3203 selftests::aarch64_analyze_prologue_test);
3204 selftests::register_test ("aarch64-process-record",
3205 selftests::aarch64_process_record_test);
3206 selftests::record_xml_tdesc ("aarch64.xml",
3207 aarch64_create_target_description (0));
3208 #endif
3209 }
3210
3211 /* AArch64 process record-replay related structures, defines etc. */
3212
3213 #define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
3214 do \
3215 { \
3216 unsigned int reg_len = LENGTH; \
3217 if (reg_len) \
3218 { \
3219 REGS = XNEWVEC (uint32_t, reg_len); \
3220 memcpy (&REGS[0], &RECORD_BUF[0], sizeof (uint32_t) * LENGTH); \
3221 } \
3222 } \
3223 while (0)
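
/* Typical use of REG_ALLOC (a sketch): a record handler collects the
   register numbers an insn writes into a local uint32_t record_buf[],
   sets reg_rec_count, and then calls
   REG_ALLOC (r->aarch64_regs, r->reg_rec_count, record_buf) to make a
   heap copy owned by the decode record.  */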
3224
3225 #define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
3226 do \
3227 { \
3228 unsigned int mem_len = LENGTH; \
3229 if (mem_len) \
3230 { \
3231 MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
3232 memcpy (&MEMS->len, &RECORD_BUF[0], \
3233 sizeof (struct aarch64_mem_r) * LENGTH); \
3234 } \
3235 } \
3236 while (0)
3237
3238 /* AArch64 record/replay structures and enumerations. */
3239
3240 struct aarch64_mem_r
3241 {
3242 uint64_t len; /* Record length. */
3243 uint64_t addr; /* Memory address. */
3244 };
3245
3246 enum aarch64_record_result
3247 {
3248 AARCH64_RECORD_SUCCESS,
3249 AARCH64_RECORD_UNSUPPORTED,
3250 AARCH64_RECORD_UNKNOWN
3251 };
3252
3253 typedef struct insn_decode_record_t
3254 {
3255 struct gdbarch *gdbarch;
3256 struct regcache *regcache;
3257 CORE_ADDR this_addr; /* Address of insn to be recorded. */
3258 uint32_t aarch64_insn; /* Insn to be recorded. */
3259 uint32_t mem_rec_count; /* Count of memory records. */
3260 uint32_t reg_rec_count; /* Count of register records. */
3261 uint32_t *aarch64_regs; /* Registers to be recorded. */
3262 struct aarch64_mem_r *aarch64_mems; /* Memory locations to be recorded. */
3263 } insn_decode_record;
3264
3265 /* Record handler for data processing - register instructions. */
3266
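/* For instance (illustrative): ADDS x1, x2, x3 sets the S bit, so the
   handler records both Rd (x1) and CPSR; a plain ADD records only
   Rd.  */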
3267 static unsigned int
3268 aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
3269 {
3270 uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
3271 uint32_t record_buf[4];
3272
3273 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3274 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3275 insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);
3276
3277 if (!bit (aarch64_insn_r->aarch64_insn, 28))
3278 {
3279 uint8_t setflags;
3280
3281 /* Logical (shifted register). */
3282 if (insn_bits24_27 == 0x0a)
3283 setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
3284 /* Add/subtract. */
3285 else if (insn_bits24_27 == 0x0b)
3286 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3287 else
3288 return AARCH64_RECORD_UNKNOWN;
3289
3290 record_buf[0] = reg_rd;
3291 aarch64_insn_r->reg_rec_count = 1;
3292 if (setflags)
3293 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3294 }
3295 else
3296 {
3297 if (insn_bits24_27 == 0x0b)
3298 {
3299 /* Data-processing (3 source). */
3300 record_buf[0] = reg_rd;
3301 aarch64_insn_r->reg_rec_count = 1;
3302 }
3303 else if (insn_bits24_27 == 0x0a)
3304 {
3305 if (insn_bits21_23 == 0x00)
3306 {
3307 /* Add/subtract (with carry). */
3308 record_buf[0] = reg_rd;
3309 aarch64_insn_r->reg_rec_count = 1;
3310 if (bit (aarch64_insn_r->aarch64_insn, 29))
3311 {
3312 record_buf[1] = AARCH64_CPSR_REGNUM;
3313 aarch64_insn_r->reg_rec_count = 2;
3314 }
3315 }
3316 else if (insn_bits21_23 == 0x02)
3317 {
3318 /* Conditional compare (register) and conditional compare
3319 (immediate) instructions. */
3320 record_buf[0] = AARCH64_CPSR_REGNUM;
3321 aarch64_insn_r->reg_rec_count = 1;
3322 }
3323 else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
3324 {
3325 /* Conditional select. */
3326 /* Data-processing (2 source). */
3327 /* Data-processing (1 source). */
3328 record_buf[0] = reg_rd;
3329 aarch64_insn_r->reg_rec_count = 1;
3330 }
3331 else
3332 return AARCH64_RECORD_UNKNOWN;
3333 }
3334 }
3335
3336 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3337 record_buf);
3338 return AARCH64_RECORD_SUCCESS;
3339 }
3340
3341 /* Record handler for data processing - immediate instructions. */
3342
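/* For instance (illustrative): MOVZ x0, #1 records only Rd (x0),
   while SUBS x0, x1, #4 also records CPSR because it sets the
   flags.  */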
3343 static unsigned int
3344 aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
3345 {
3346 uint8_t reg_rd, insn_bit23, insn_bits24_27, setflags;
3347 uint32_t record_buf[4];
3348
3349 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3350 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3351 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3352
3353 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
3354 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
3355 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
3356 {
3357 record_buf[0] = reg_rd;
3358 aarch64_insn_r->reg_rec_count = 1;
3359 }
3360 else if (insn_bits24_27 == 0x01)
3361 {
3362 /* Add/Subtract (immediate). */
3363 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3364 record_buf[0] = reg_rd;
3365 aarch64_insn_r->reg_rec_count = 1;
3366 if (setflags)
3367 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3368 }
3369 else if (insn_bits24_27 == 0x02 && !insn_bit23)
3370 {
3371 /* Logical (immediate). */
3372 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
3373 record_buf[0] = reg_rd;
3374 aarch64_insn_r->reg_rec_count = 1;
3375 if (setflags)
3376 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3377 }
3378 else
3379 return AARCH64_RECORD_UNKNOWN;
3380
3381 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3382 record_buf);
3383 return AARCH64_RECORD_SUCCESS;
3384 }
3385
3386 /* Record handler for branch, exception generation and system instructions. */
3387
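/* For instance (illustrative): BL records PC and LR, B.EQ records
   only PC, and SVC is delegated to the OS ABI's syscall record hook
   with the syscall number taken from x8.  */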
3388 static unsigned int
3389 aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
3390 {
3391 struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
3392 uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
3393 uint32_t record_buf[4];
3394
3395 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3396 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3397 insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3398
3399 if (insn_bits28_31 == 0x0d)
3400 {
3401 /* Exception generation instructions. */
3402 if (insn_bits24_27 == 0x04)
3403 {
3404 if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
3405 && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
3406 && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
3407 {
3408 ULONGEST svc_number;
3409
3410 regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
3411 &svc_number);
3412 return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
3413 svc_number);
3414 }
3415 else
3416 return AARCH64_RECORD_UNSUPPORTED;
3417 }
3418 /* System instructions. */
3419 else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
3420 {
3421 uint32_t reg_rt, reg_crn;
3422
3423 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3424 reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3425
3426 /* Record rt in case of sysl and mrs instructions. */
3427 if (bit (aarch64_insn_r->aarch64_insn, 21))
3428 {
3429 record_buf[0] = reg_rt;
3430 aarch64_insn_r->reg_rec_count = 1;
3431 }
3432 /* Record cpsr for hint and msr(immediate) instructions. */
3433 else if (reg_crn == 0x02 || reg_crn == 0x04)
3434 {
3435 record_buf[0] = AARCH64_CPSR_REGNUM;
3436 aarch64_insn_r->reg_rec_count = 1;
3437 }
3438 }
3439 /* Unconditional branch (register). */
3440 else if ((insn_bits24_27 & 0x0e) == 0x06)
3441 {
3442 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3443 if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
3444 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3445 }
3446 else
3447 return AARCH64_RECORD_UNKNOWN;
3448 }
3449 /* Unconditional branch (immediate). */
3450 else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
3451 {
3452 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3453 if (bit (aarch64_insn_r->aarch64_insn, 31))
3454 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3455 }
3456 else
3457 /* Compare & branch (immediate), Test & branch (immediate) and
3458 Conditional branch (immediate). */
3459 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3460
3461 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3462 record_buf);
3463 return AARCH64_RECORD_SUCCESS;
3464 }
3465
3466 /* Record handler for advanced SIMD load and store instructions. */
3467
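/* For instance (illustrative): ST1 {v0.4s}, [x0] records 16 bytes of
   memory starting at [x0], LD1 {v0.4s}, [x0] records v0 instead, and a
   post-indexed form additionally records the base register Rn.  */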
3468 static unsigned int
3469 aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
3470 {
3471 CORE_ADDR address;
3472 uint64_t addr_offset = 0;
3473 uint32_t record_buf[24];
3474 uint64_t record_buf_mem[24];
3475 uint32_t reg_rn, reg_rt;
3476 uint32_t reg_index = 0, mem_index = 0;
3477 uint8_t opcode_bits, size_bits;
3478
3479 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3480 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3481 size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3482 opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3483 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);
3484
3485 if (record_debug)
3486 debug_printf ("Process record: Advanced SIMD load/store\n");
3487
3488 /* Load/store single structure. */
3489 if (bit (aarch64_insn_r->aarch64_insn, 24))
3490 {
3491 uint8_t sindex, scale, selem, esize, replicate = 0;
3492 scale = opcode_bits >> 2;
3493 selem = ((opcode_bits & 0x02)
3494 | bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
3495 switch (scale)
3496 {
3497 case 1:
3498 if (size_bits & 0x01)
3499 return AARCH64_RECORD_UNKNOWN;
3500 break;
3501 case 2:
3502 if ((size_bits >> 1) & 0x01)
3503 return AARCH64_RECORD_UNKNOWN;
3504 if (size_bits & 0x01)
3505 {
3506 if (!((opcode_bits >> 1) & 0x01))
3507 scale = 3;
3508 else
3509 return AARCH64_RECORD_UNKNOWN;
3510 }
3511 break;
3512 case 3:
3513 if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
3514 {
3515 scale = size_bits;
3516 replicate = 1;
3517 break;
3518 }
3519 else
3520 return AARCH64_RECORD_UNKNOWN;
3521 default:
3522 break;
3523 }
3524 esize = 8 << scale;
3525 if (replicate)
3526 for (sindex = 0; sindex < selem; sindex++)
3527 {
3528 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3529 reg_rt = (reg_rt + 1) % 32;
3530 }
3531 else
3532 {
3533 for (sindex = 0; sindex < selem; sindex++)
3534 {
3535 if (bit (aarch64_insn_r->aarch64_insn, 22))
3536 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3537 else
3538 {
3539 record_buf_mem[mem_index++] = esize / 8;
3540 record_buf_mem[mem_index++] = address + addr_offset;
3541 }
3542 addr_offset = addr_offset + (esize / 8);
3543 reg_rt = (reg_rt + 1) % 32;
3544 }
3545 }
3546 }
3547 /* Load/store multiple structure. */
3548 else
3549 {
3550 uint8_t selem, esize, rpt, elements;
3551 uint8_t eindex, rindex;
3552
3553 esize = 8 << size_bits;
3554 if (bit (aarch64_insn_r->aarch64_insn, 30))
3555 elements = 128 / esize;
3556 else
3557 elements = 64 / esize;
3558
3559 switch (opcode_bits)
3560 {
3561 /* LD/ST4 (4 Registers). */
3562 case 0:
3563 rpt = 1;
3564 selem = 4;
3565 break;
3566 /* LD/ST1 (4 Registers). */
3567 case 2:
3568 rpt = 4;
3569 selem = 1;
3570 break;
3571 /* LD/ST3 (3 Registers). */
3572 case 4:
3573 rpt = 1;
3574 selem = 3;
3575 break;
3576 /* LD/ST1 (3 Registers). */
3577 case 6:
3578 rpt = 3;
3579 selem = 1;
3580 break;
3581 /* LD/ST1 (1 Register). */
3582 case 7:
3583 rpt = 1;
3584 selem = 1;
3585 break;
3586 /* LD/ST2 (2 Registers). */
3587 case 8:
3588 rpt = 1;
3589 selem = 2;
3590 break;
3591 /* LD/ST1 (2 Registers). */
3592 case 10:
3593 rpt = 2;
3594 selem = 1;
3595 break;
3596 default:
3597 return AARCH64_RECORD_UNSUPPORTED;
3598 break;
3599 }
3600 for (rindex = 0; rindex < rpt; rindex++)
3601 for (eindex = 0; eindex < elements; eindex++)
3602 {
3603 uint8_t reg_tt, sindex;
3604 reg_tt = (reg_rt + rindex) % 32;
3605 for (sindex = 0; sindex < selem; sindex++)
3606 {
3607 if (bit (aarch64_insn_r->aarch64_insn, 22))
3608 record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
3609 else
3610 {
3611 record_buf_mem[mem_index++] = esize / 8;
3612 record_buf_mem[mem_index++] = address + addr_offset;
3613 }
3614 addr_offset = addr_offset + (esize / 8);
3615 reg_tt = (reg_tt + 1) % 32;
3616 }
3617 }
3618 }
3619
3620 if (bit (aarch64_insn_r->aarch64_insn, 23))
3621 record_buf[reg_index++] = reg_rn;
3622
3623 aarch64_insn_r->reg_rec_count = reg_index;
3624 aarch64_insn_r->mem_rec_count = mem_index / 2;
3625 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3626 record_buf_mem);
3627 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3628 record_buf);
3629 return AARCH64_RECORD_SUCCESS;
3630 }
3631
3632 /* Record handler for load and store instructions. */
3633
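/* For instance (illustrative): STP x0, x1, [sp, #-16]! records two
   8-byte memory entries at the pre-decremented address plus the
   updated base register sp, while LDR x0, [x1] records only x0.  */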
3634 static unsigned int
3635 aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
3636 {
3637 uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
3638 uint8_t insn_bit23, insn_bit21;
3639 uint8_t opc, size_bits, ld_flag, vector_flag;
3640 uint32_t reg_rn, reg_rt, reg_rt2;
3641 uint64_t datasize, offset;
3642 uint32_t record_buf[8];
3643 uint64_t record_buf_mem[8];
3644 CORE_ADDR address;
3645
3646 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3647 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3648 insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
3649 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3650 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3651 ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
3652 vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
3653 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3654 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3655 reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
3656 size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);
3657
3658 /* Load/store exclusive. */
3659 if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
3660 {
3661 if (record_debug)
3662 debug_printf ("Process record: load/store exclusive\n");
3663
3664 if (ld_flag)
3665 {
3666 record_buf[0] = reg_rt;
3667 aarch64_insn_r->reg_rec_count = 1;
3668 if (insn_bit21)
3669 {
3670 record_buf[1] = reg_rt2;
3671 aarch64_insn_r->reg_rec_count = 2;
3672 }
3673 }
3674 else
3675 {
3676 if (insn_bit21)
3677 datasize = (8 << size_bits) * 2;
3678 else
3679 datasize = (8 << size_bits);
3680 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3681 &address);
3682 record_buf_mem[0] = datasize / 8;
3683 record_buf_mem[1] = address;
3684 aarch64_insn_r->mem_rec_count = 1;
3685 if (!insn_bit23)
3686 {
3687 /* Save register rs. */
3688 record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
3689 aarch64_insn_r->reg_rec_count = 1;
3690 }
3691 }
3692 }
3693 /* Load register (literal) instructions decoding. */
3694 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
3695 {
3696 if (record_debug)
3697 debug_printf ("Process record: load register (literal)\n");
3698 if (vector_flag)
3699 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3700 else
3701 record_buf[0] = reg_rt;
3702 aarch64_insn_r->reg_rec_count = 1;
3703 }
3704 /* All types of load/store pair instructions decoding. */
3705 else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
3706 {
3707 if (record_debug)
3708 debug_printf ("Process record: load/store pair\n");
3709
3710 if (ld_flag)
3711 {
3712 if (vector_flag)
3713 {
3714 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3715 record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
3716 }
3717 else
3718 {
3719 record_buf[0] = reg_rt;
3720 record_buf[1] = reg_rt2;
3721 }
3722 aarch64_insn_r->reg_rec_count = 2;
3723 }
3724 else
3725 {
3726 uint16_t imm7_off;
3727 imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
3728 if (!vector_flag)
3729 size_bits = size_bits >> 1;
3730 datasize = 8 << (2 + size_bits);
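/* imm7 is a signed 7-bit immediate: when its sign bit (0x40) is set,
   the two's-complement magnitude is (~imm7 & 0x7f) + 1.  E.g. imm7 =
   0x7f encodes -1, giving magnitude 1, which the shift below scales
   by the access size (8 bytes for an X-register pair).  */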
3731 offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
3732 offset = offset << (2 + size_bits);
3733 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3734 &address);
3735 if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
3736 {
3737 if (imm7_off & 0x40)
3738 address = address - offset;
3739 else
3740 address = address + offset;
3741 }
3742
3743 record_buf_mem[0] = datasize / 8;
3744 record_buf_mem[1] = address;
3745 record_buf_mem[2] = datasize / 8;
3746 record_buf_mem[3] = address + (datasize / 8);
3747 aarch64_insn_r->mem_rec_count = 2;
3748 }
3749 if (bit (aarch64_insn_r->aarch64_insn, 23))
3750 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3751 }
3752 /* Load/store register (unsigned immediate) instructions. */
3753 else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
3754 {
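/* Worked example (an illustrative encoding, not from the sources):
   STR x0, [x1] is 0xf9000020; bits 24-27 are 0x9 and bits 28-29 are
   0x3, so it lands here with size_bits 0x3 and opc 0x0 (a store),
   producing one 8-byte memory record at the address held in x1.  */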
3755 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3756 if (!(opc >> 1))
3757 {
3758 if (opc & 0x01)
3759 ld_flag = 0x01;
3760 else
3761 ld_flag = 0x0;
3762 }
3763 else
3764 {
3765 if (size_bits == 0x3 && vector_flag == 0x0 && opc == 0x2)
3766 {
3767 /* PRFM (immediate) */
3768 return AARCH64_RECORD_SUCCESS;
3769 }
3770 else if (size_bits == 0x2 && vector_flag == 0x0 && opc == 0x2)
3771 {
3772 /* LDRSW (immediate) */
3773 ld_flag = 0x1;
3774 }
3775 else
3776 {
3777 if (opc & 0x01)
3778 ld_flag = 0x01;
3779 else
3780 ld_flag = 0x0;
3781 }
3782 }
3783
3784 if (record_debug)
3785 {
3786 debug_printf ("Process record: load/store (unsigned immediate):"
3787 " size %x V %d opc %x\n", size_bits, vector_flag,
3788 opc);
3789 }
3790
3791 if (!ld_flag)
3792 {
3793 offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
3794 datasize = 8 << size_bits;
3795 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3796 &address);
3797 offset = offset << size_bits;
3798 address = address + offset;
3799
3800 record_buf_mem[0] = datasize >> 3;
3801 record_buf_mem[1] = address;
3802 aarch64_insn_r->mem_rec_count = 1;
3803 }
3804 else
3805 {
3806 if (vector_flag)
3807 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3808 else
3809 record_buf[0] = reg_rt;
3810 aarch64_insn_r->reg_rec_count = 1;
3811 }
3812 }
3813 /* Load/store register (register offset) instructions. */
3814 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3815 && insn_bits10_11 == 0x02 && insn_bit21)
3816 {
3817 if (record_debug)
3818 debug_printf ("Process record: load/store (register offset)\n");
3819 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3820 if (!(opc >> 1))
3821 if (opc & 0x01)
3822 ld_flag = 0x01;
3823 else
3824 ld_flag = 0x0;
3825 else
3826 if (size_bits != 0x03)
3827 ld_flag = 0x01;
3828 else
3829 return AARCH64_RECORD_UNKNOWN;
3830
3831 if (!ld_flag)
3832 {
3833 ULONGEST reg_rm_val;
3834
3835 regcache_raw_read_unsigned (aarch64_insn_r->regcache,
3836 bits (aarch64_insn_r->aarch64_insn, 16, 20), &reg_rm_val);
3837 if (bit (aarch64_insn_r->aarch64_insn, 12))
3838 offset = reg_rm_val << size_bits;
3839 else
3840 offset = reg_rm_val;
3841 datasize = 8 << size_bits;
3842 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3843 &address);
3844 address = address + offset;
3845 record_buf_mem[0] = datasize >> 3;
3846 record_buf_mem[1] = address;
3847 aarch64_insn_r->mem_rec_count = 1;
3848 }
3849 else
3850 {
3851 if (vector_flag)
3852 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3853 else
3854 record_buf[0] = reg_rt;
3855 aarch64_insn_r->reg_rec_count = 1;
3856 }
3857 }
3858 /* Load/store register (immediate and unprivileged) instructions. */
3859 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3860 && !insn_bit21)
3861 {
3862 if (record_debug)
3863 {
3864 debug_printf ("Process record: load/store "
3865 "(immediate and unprivileged)\n");
3866 }
3867 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3868 if (!(opc >> 1))
3869 if (opc & 0x01)
3870 ld_flag = 0x01;
3871 else
3872 ld_flag = 0x0;
3873 else
3874 if (size_bits != 0x03)
3875 ld_flag = 0x01;
3876 else
3877 return AARCH64_RECORD_UNKNOWN;
3878
3879 if (!ld_flag)
3880 {
3881 uint16_t imm9_off;
3882 imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
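/* imm9 is a signed 9-bit immediate that, unlike imm7 above, is not
   scaled by the access size; e.g. imm9 = 0x1ff encodes -1, with
   magnitude (~0x1ff & 0x1ff) + 1 = 1 byte.  The offset is skipped
   for post-indexed forms (bits 10-11 == 0x01), which access memory
   at the unmodified base address.  */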
3883 offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
3884 datasize = 8 << size_bits;
3885 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3886 &address);
3887 if (insn_bits10_11 != 0x01)
3888 {
3889 if (imm9_off & 0x0100)
3890 address = address - offset;
3891 else
3892 address = address + offset;
3893 }
3894 record_buf_mem[0] = datasize >> 3;
3895 record_buf_mem[1] = address;
3896 aarch64_insn_r->mem_rec_count = 1;
3897 }
3898 else
3899 {
3900 if (vector_flag)
3901 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3902 else
3903 record_buf[0] = reg_rt;
3904 aarch64_insn_r->reg_rec_count = 1;
3905 }
3906 if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
3907 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3908 }
3909 /* Advanced SIMD load/store instructions. */
3910 else
3911 return aarch64_record_asimd_load_store (aarch64_insn_r);
3912
3913 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3914 record_buf_mem);
3915 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3916 record_buf);
3917 return AARCH64_RECORD_SUCCESS;
3918 }
3919
3920 /* Record handler for data processing SIMD and floating point instructions. */
3921
3922 static unsigned int
3923 aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
3924 {
3925 uint8_t insn_bit21, opcode, rmode, reg_rd;
3926 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
3927 uint8_t insn_bits11_14;
3928 uint32_t record_buf[2];
3929
3930 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3931 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3932 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3933 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3934 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
3935 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
3936 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
3937 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3938 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3939
3940 if (record_debug)
3941 debug_printf ("Process record: data processing SIMD/FP: ");
3942
3943 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
3944 {
3945 /* Floating point - fixed point conversion instructions. */
3946 if (!insn_bit21)
3947 {
3948 if (record_debug)
3949 debug_printf ("FP - fixed point conversion");
3950
3951 if ((opcode >> 1) == 0x0 && rmode == 0x03)
3952 record_buf[0] = reg_rd;
3953 else
3954 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3955 }
3956 /* Floating point - conditional compare instructions. */
3957 else if (insn_bits10_11 == 0x01)
3958 {
3959 if (record_debug)
3960 debug_printf ("FP - conditional compare");
3961
3962 record_buf[0] = AARCH64_CPSR_REGNUM;
3963 }
3964 /* Floating point - data processing (2-source) and
3965 conditional select instructions. */
3966 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
3967 {
3968 if (record_debug)
3969 debug_printf ("FP - DP (2-source)");
3970
3971 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3972 }
3973 else if (insn_bits10_11 == 0x00)
3974 {
3975 /* Floating point - immediate instructions. */
3976 if ((insn_bits12_15 & 0x01) == 0x01
3977 || (insn_bits12_15 & 0x07) == 0x04)
3978 {
3979 if (record_debug)
3980 debug_printf ("FP - immediate");
3981 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3982 }
3983 /* Floating point - compare instructions. */
3984 else if ((insn_bits12_15 & 0x03) == 0x02)
3985 {
3986 if (record_debug)
3987 debug_printf ("FP - compare");
3988 record_buf[0] = AARCH64_CPSR_REGNUM;
3989 }
3990 /* Floating point - integer conversions instructions. */
3991 else if (insn_bits12_15 == 0x00)
3992 {
3993 /* Convert float to integer instruction. */
3994 if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
3995 {
3996 if (record_debug)
3997 debug_printf ("float to int conversion");
3998
3999 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4000 }
4001 /* Convert integer to float instruction. */
4002 else if ((opcode >> 1) == 0x01 && !rmode)
4003 {
4004 if (record_debug)
4005 debug_printf ("int to float conversion");
4006
4007 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4008 }
4009 /* Move float to integer instruction. */
4010 else if ((opcode >> 1) == 0x03)
4011 {
4012 if (record_debug)
4013 debug_printf ("move float to int");
4014
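/* Bit 0 of opcode picks the destination file: e.g. FMOV Xd, Dn
   (opcode 0x6) writes a general register, while FMOV Dd, Xn
   (opcode 0x7) writes a vector register.  */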
4015 if (!(opcode & 0x01))
4016 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4017 else
4018 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4019 }
4020 else
4021 return AARCH64_RECORD_UNKNOWN;
4022 }
4023 else
4024 return AARCH64_RECORD_UNKNOWN;
4025 }
4026 else
4027 return AARCH64_RECORD_UNKNOWN;
4028 }
4029 else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
4030 {
4031 if (record_debug)
4032 debug_printf ("SIMD copy");
4033
4034 /* Advanced SIMD copy instructions. */
4035 if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
4036 && !bit (aarch64_insn_r->aarch64_insn, 15)
4037 && bit (aarch64_insn_r->aarch64_insn, 10))
4038 {
4039 if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
4040 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4041 else
4042 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4043 }
4044 else
4045 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4046 }
4047 /* All remaining floating point or advanced SIMD instructions. */
4048 else
4049 {
4050 if (record_debug)
4051 debug_printf ("all remaining");
4052
4053 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4054 }
4055
4056 if (record_debug)
4057 debug_printf ("\n");
4058
4059 aarch64_insn_r->reg_rec_count++;
4060 gdb_assert (aarch64_insn_r->reg_rec_count == 1);
4061 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4062 record_buf);
4063 return AARCH64_RECORD_SUCCESS;
4064 }
4065
4066 /* Decodes the instruction type and invokes its record handler. */
4067
4068 static unsigned int
4069 aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
4070 {
4071 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
4072
4073 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
4074 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
4075 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
4076 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
4077
4078 /* Data processing - immediate instructions. */
4079 if (!ins_bit26 && !ins_bit27 && ins_bit28)
4080 return aarch64_record_data_proc_imm (aarch64_insn_r);
4081
4082 /* Branch, exception generation and system instructions. */
4083 if (ins_bit26 && !ins_bit27 && ins_bit28)
4084 return aarch64_record_branch_except_sys (aarch64_insn_r);
4085
4086 /* Load and store instructions. */
4087 if (!ins_bit25 && ins_bit27)
4088 return aarch64_record_load_store (aarch64_insn_r);
4089
4090 /* Data processing - register instructions. */
4091 if (ins_bit25 && !ins_bit26 && ins_bit27)
4092 return aarch64_record_data_proc_reg (aarch64_insn_r);
4093
4094 /* Data processing - SIMD and floating point instructions. */
4095 if (ins_bit25 && ins_bit26 && ins_bit27)
4096 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
4097
4098 return AARCH64_RECORD_UNSUPPORTED;
4099 }
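/* For instance, the PRFM encoding 0xf9800020 used in the self-test
   below has bit 25 clear and bit 27 set, so it is dispatched to
   aarch64_record_load_store.  */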
4100
4101 /* Cleans up local record registers and memory allocations. */
4102
4103 static void
4104 deallocate_reg_mem (insn_decode_record *record)
4105 {
4106 xfree (record->aarch64_regs);
4107 xfree (record->aarch64_mems);
4108 }
4109
4110 #if GDB_SELF_TEST
4111 namespace selftests {
4112
4113 static void
4114 aarch64_process_record_test (void)
4115 {
4116 struct gdbarch_info info;
4117 uint32_t ret;
4118
4119 gdbarch_info_init (&info);
4120 info.bfd_arch_info = bfd_scan_arch ("aarch64");
4121
4122 struct gdbarch *gdbarch = gdbarch_find_by_info (info);
4123 SELF_CHECK (gdbarch != NULL);
4124
4125 insn_decode_record aarch64_record;
4126
4127 memset (&aarch64_record, 0, sizeof (insn_decode_record));
4128 aarch64_record.regcache = NULL;
4129 aarch64_record.this_addr = 0;
4130 aarch64_record.gdbarch = gdbarch;
4131
4132 /* 20 00 80 f9 prfm pldl1keep, [x1] */
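/* A prefetch hint writes neither a register nor memory, so the
   PRFM case in aarch64_record_load_store returns success with
   zero reg/mem counts, as checked below.  */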
4133 aarch64_record.aarch64_insn = 0xf9800020;
4134 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4135 SELF_CHECK (ret == AARCH64_RECORD_SUCCESS);
4136 SELF_CHECK (aarch64_record.reg_rec_count == 0);
4137 SELF_CHECK (aarch64_record.mem_rec_count == 0);
4138
4139 deallocate_reg_mem (&aarch64_record);
4140 }
4141
4142 } // namespace selftests
4143 #endif /* GDB_SELF_TEST */
4144
4145 /* Parse the current instruction and record the values of the registers
4146    and memory that will be changed by it to record_arch_list.  Return -1
4147    if something goes wrong. */
4148
4149 int
4150 aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
4151 CORE_ADDR insn_addr)
4152 {
4153 uint32_t rec_no = 0;
4154 uint8_t insn_size = 4;
4155 uint32_t ret = 0;
4156 gdb_byte buf[insn_size];
4157 insn_decode_record aarch64_record;
4158
4159 memset (&buf[0], 0, insn_size);
4160 memset (&aarch64_record, 0, sizeof (insn_decode_record));
4161 target_read_memory (insn_addr, &buf[0], insn_size);
4162 aarch64_record.aarch64_insn
4163 = (uint32_t) extract_unsigned_integer (&buf[0],
4164 insn_size,
4165 gdbarch_byte_order (gdbarch));
4166 aarch64_record.regcache = regcache;
4167 aarch64_record.this_addr = insn_addr;
4168 aarch64_record.gdbarch = gdbarch;
4169
4170 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4171 if (ret == AARCH64_RECORD_UNSUPPORTED)
4172 {
4173 printf_unfiltered (_("Process record does not support instruction "
4174 "0x%0x at address %s.\n"),
4175 aarch64_record.aarch64_insn,
4176 paddress (gdbarch, insn_addr));
4177 ret = -1;
4178 }
4179
4180 if (0 == ret)
4181 {
4182 /* Record registers. */
4183 record_full_arch_list_add_reg (aarch64_record.regcache,
4184 AARCH64_PC_REGNUM);
4185 /* Always record register CPSR. */
4186 record_full_arch_list_add_reg (aarch64_record.regcache,
4187 AARCH64_CPSR_REGNUM);
4188 if (aarch64_record.aarch64_regs)
4189 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
4190 if (record_full_arch_list_add_reg (aarch64_record.regcache,
4191 aarch64_record.aarch64_regs[rec_no]))
4192 ret = -1;
4193
4194 /* Record memories. */
4195 if (aarch64_record.aarch64_mems)
4196 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
4197 if (record_full_arch_list_add_mem
4198 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
4199 aarch64_record.aarch64_mems[rec_no].len))
4200 ret = -1;
4201
4202 if (record_full_arch_list_add_end ())
4203 ret = -1;
4204 }
4205
4206 deallocate_reg_mem (&aarch64_record);
4207 return ret;
4208 }
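/* A sketch of how this entry point is wired up: the architecture
   initialization elsewhere in this file (outside this excerpt) is
   expected to register it as the process-record hook, roughly:

     set_gdbarch_process_record (gdbarch, aarch64_process_record);

   after which "record full" sessions use it to snapshot each
   instruction's effects before execution.  */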