gdb/aarch64-tdep.c
1 /* Common target dependent code for GDB on AArch64 systems.
2
3 Copyright (C) 2009-2019 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21 #include "defs.h"
22
23 #include "frame.h"
24 #include "inferior.h"
25 #include "gdbcmd.h"
26 #include "gdbcore.h"
27 #include "dis-asm.h"
28 #include "regcache.h"
29 #include "reggroups.h"
30 #include "value.h"
31 #include "arch-utils.h"
32 #include "osabi.h"
33 #include "frame-unwind.h"
34 #include "frame-base.h"
35 #include "trad-frame.h"
36 #include "objfiles.h"
37 #include "dwarf2.h"
38 #include "dwarf2-frame.h"
39 #include "gdbtypes.h"
40 #include "prologue-value.h"
41 #include "target-descriptions.h"
42 #include "user-regs.h"
43 #include "language.h"
44 #include "infcall.h"
45 #include "ax.h"
46 #include "ax-gdb.h"
47 #include "gdbsupport/selftest.h"
48
49 #include "aarch64-tdep.h"
50 #include "aarch64-ravenscar-thread.h"
51
52 #include "elf-bfd.h"
53 #include "elf/aarch64.h"
54
55 #include "gdbsupport/vec.h"
56
57 #include "record.h"
58 #include "record-full.h"
59 #include "arch/aarch64-insn.h"
60 #include "gdbarch.h"
61
62 #include "opcode/aarch64.h"
63 #include <algorithm>
64
65 #define submask(x) ((1L << ((x) + 1)) - 1)
66 #define bit(obj,st) (((obj) >> (st)) & 1)
67 #define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
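/* Editor's illustration (the values are arbitrary, not a real encoding):
   for the word 0xd2800540, bits (0xd2800540, 5, 9) extracts bits 5..9
   inclusive, i.e. (0xd2800540 >> 5) & submask (4) == 0x0a, while
   bit (0xd2800540, 31) yields the top bit, 1, and submask (3)
   evaluates to the 4-bit mask 0xf. */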
68
69 /* A Homogeneous Floating-Point or Short-Vector Aggregate may have at most
70 four members. */
71 #define HA_MAX_NUM_FLDS 4
72
73 /* All possible aarch64 target descriptors. */
74 struct target_desc *tdesc_aarch64_list[AARCH64_MAX_SVE_VQ + 1][2/*pauth*/];
75
76 /* The standard register names, and all the valid aliases for them. */
77 static const struct
78 {
79 const char *const name;
80 int regnum;
81 } aarch64_register_aliases[] =
82 {
83 /* 64-bit register names. */
84 {"fp", AARCH64_FP_REGNUM},
85 {"lr", AARCH64_LR_REGNUM},
86 {"sp", AARCH64_SP_REGNUM},
87
88 /* 32-bit register names. */
89 {"w0", AARCH64_X0_REGNUM + 0},
90 {"w1", AARCH64_X0_REGNUM + 1},
91 {"w2", AARCH64_X0_REGNUM + 2},
92 {"w3", AARCH64_X0_REGNUM + 3},
93 {"w4", AARCH64_X0_REGNUM + 4},
94 {"w5", AARCH64_X0_REGNUM + 5},
95 {"w6", AARCH64_X0_REGNUM + 6},
96 {"w7", AARCH64_X0_REGNUM + 7},
97 {"w8", AARCH64_X0_REGNUM + 8},
98 {"w9", AARCH64_X0_REGNUM + 9},
99 {"w10", AARCH64_X0_REGNUM + 10},
100 {"w11", AARCH64_X0_REGNUM + 11},
101 {"w12", AARCH64_X0_REGNUM + 12},
102 {"w13", AARCH64_X0_REGNUM + 13},
103 {"w14", AARCH64_X0_REGNUM + 14},
104 {"w15", AARCH64_X0_REGNUM + 15},
105 {"w16", AARCH64_X0_REGNUM + 16},
106 {"w17", AARCH64_X0_REGNUM + 17},
107 {"w18", AARCH64_X0_REGNUM + 18},
108 {"w19", AARCH64_X0_REGNUM + 19},
109 {"w20", AARCH64_X0_REGNUM + 20},
110 {"w21", AARCH64_X0_REGNUM + 21},
111 {"w22", AARCH64_X0_REGNUM + 22},
112 {"w23", AARCH64_X0_REGNUM + 23},
113 {"w24", AARCH64_X0_REGNUM + 24},
114 {"w25", AARCH64_X0_REGNUM + 25},
115 {"w26", AARCH64_X0_REGNUM + 26},
116 {"w27", AARCH64_X0_REGNUM + 27},
117 {"w28", AARCH64_X0_REGNUM + 28},
118 {"w29", AARCH64_X0_REGNUM + 29},
119 {"w30", AARCH64_X0_REGNUM + 30},
120
121 /* Aliases for the intra-procedure-call scratch registers. */
122 {"ip0", AARCH64_X0_REGNUM + 16},
123 {"ip1", AARCH64_X0_REGNUM + 17}
124 };
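/* Editor's note: these aliases simply map user-visible names onto the
   core register numbers above, so for example "print $ip0" and
   "print $x16" refer to the same underlying register, as do $fp and
   $x29. */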
125
126 /* The required core 'R' registers. */
127 static const char *const aarch64_r_register_names[] =
128 {
129 /* These registers must appear in consecutive RAW register number
130 order and they must begin with AARCH64_X0_REGNUM! */
131 "x0", "x1", "x2", "x3",
132 "x4", "x5", "x6", "x7",
133 "x8", "x9", "x10", "x11",
134 "x12", "x13", "x14", "x15",
135 "x16", "x17", "x18", "x19",
136 "x20", "x21", "x22", "x23",
137 "x24", "x25", "x26", "x27",
138 "x28", "x29", "x30", "sp",
139 "pc", "cpsr"
140 };
141
142 /* The FP/SIMD 'V' registers. */
143 static const char *const aarch64_v_register_names[] =
144 {
145 /* These registers must appear in consecutive RAW register number
146 order and they must begin with AARCH64_V0_REGNUM! */
147 "v0", "v1", "v2", "v3",
148 "v4", "v5", "v6", "v7",
149 "v8", "v9", "v10", "v11",
150 "v12", "v13", "v14", "v15",
151 "v16", "v17", "v18", "v19",
152 "v20", "v21", "v22", "v23",
153 "v24", "v25", "v26", "v27",
154 "v28", "v29", "v30", "v31",
155 "fpsr",
156 "fpcr"
157 };
158
159 /* The SVE 'Z' and 'P' registers. */
160 static const char *const aarch64_sve_register_names[] =
161 {
162 /* These registers must appear in consecutive RAW register number
163 order and they must begin with AARCH64_SVE_Z0_REGNUM! */
164 "z0", "z1", "z2", "z3",
165 "z4", "z5", "z6", "z7",
166 "z8", "z9", "z10", "z11",
167 "z12", "z13", "z14", "z15",
168 "z16", "z17", "z18", "z19",
169 "z20", "z21", "z22", "z23",
170 "z24", "z25", "z26", "z27",
171 "z28", "z29", "z30", "z31",
172 "fpsr", "fpcr",
173 "p0", "p1", "p2", "p3",
174 "p4", "p5", "p6", "p7",
175 "p8", "p9", "p10", "p11",
176 "p12", "p13", "p14", "p15",
177 "ffr", "vg"
178 };
179
180 static const char *const aarch64_pauth_register_names[] =
181 {
182 /* Authentication mask for data pointer. */
183 "pauth_dmask",
184 /* Authentication mask for code pointer. */
185 "pauth_cmask"
186 };
187
188 /* AArch64 prologue cache structure. */
189 struct aarch64_prologue_cache
190 {
191 /* The program counter at the start of the function. It is used to
192 identify this frame as a prologue frame. */
193 CORE_ADDR func;
194
195 /* The program counter at the time this frame was created; i.e. where
196 this function was called from. It is used to identify this frame as a
197 stub frame. */
198 CORE_ADDR prev_pc;
199
200 /* The stack pointer at the time this frame was created; i.e. the
201 caller's stack pointer when this function was called. It is used
202 to identify this frame. */
203 CORE_ADDR prev_sp;
204
205 /* Is the target available to read from? */
206 int available_p;
207
208 /* The frame base for this frame is just prev_sp - frame size.
209 FRAMESIZE is the distance from the frame pointer to the
210 initial stack pointer. */
211 int framesize;
212
213 /* The register used to hold the frame pointer for this frame. */
214 int framereg;
215
216 /* Saved register offsets. */
217 struct trad_frame_saved_reg *saved_regs;
218 };
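/* Worked example (editor's sketch): after a prologue such as
     stp x29, x30, [sp, #-32]!
     mov x29, sp
   FRAMEREG is AARCH64_FP_REGNUM and FRAMESIZE is 32, so the frame base
   computed later as prev_sp - framesize coincides with the value held
   in x29. */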
219
220 static void
221 show_aarch64_debug (struct ui_file *file, int from_tty,
222 struct cmd_list_element *c, const char *value)
223 {
224 fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
225 }
226
227 namespace {
228
229 /* Abstract instruction reader. */
230
231 class abstract_instruction_reader
232 {
233 public:
234 /* Read in one instruction. */
235 virtual ULONGEST read (CORE_ADDR memaddr, int len,
236 enum bfd_endian byte_order) = 0;
237 };
238
239 /* Instruction reader from real target. */
240
241 class instruction_reader : public abstract_instruction_reader
242 {
243 public:
244 ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
245 override
246 {
247 return read_code_unsigned_integer (memaddr, len, byte_order);
248 }
249 };
250
251 } // namespace
252
253 /* If address signing is enabled, mask off the signature bits from the link
254 register, which is passed by value in ADDR, using the register values in
255 THIS_FRAME. */
256
257 static CORE_ADDR
258 aarch64_frame_unmask_lr (struct gdbarch_tdep *tdep,
259 struct frame_info *this_frame, CORE_ADDR addr)
260 {
261 if (tdep->has_pauth ()
262 && frame_unwind_register_unsigned (this_frame,
263 tdep->pauth_ra_state_regnum))
264 {
265 int cmask_num = AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base);
266 CORE_ADDR cmask = frame_unwind_register_unsigned (this_frame, cmask_num);
267 addr = addr & ~cmask;
268
269 /* Record in the frame that the link register required unmasking. */
270 set_frame_previous_pc_masked (this_frame);
271 }
272
273 return addr;
274 }
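/* Worked example (editor's sketch; the mask value is hypothetical):
   with cmask == 0x007f000000000000, a signed link register value of
   0x0023000000400568 unmasks to 0x0000000000400568 -- the PAC bits in
   the top of the address are cleared before the value is used as a
   code address. */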
275
276 /* Analyze a prologue, looking for a recognizable stack frame
277 and frame pointer. Scan until we encounter a store that could
278 clobber the stack frame unexpectedly, or an unknown instruction. */
279
280 static CORE_ADDR
281 aarch64_analyze_prologue (struct gdbarch *gdbarch,
282 CORE_ADDR start, CORE_ADDR limit,
283 struct aarch64_prologue_cache *cache,
284 abstract_instruction_reader& reader)
285 {
286 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
287 int i;
288 /* Track X registers and D registers in prologue. */
289 pv_t regs[AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT];
290
291 for (i = 0; i < AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT; i++)
292 regs[i] = pv_register (i, 0);
293 pv_area stack (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));
294
295 for (; start < limit; start += 4)
296 {
297 uint32_t insn;
298 aarch64_inst inst;
299
300 insn = reader.read (start, 4, byte_order_for_code);
301
302 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
303 break;
304
305 if (inst.opcode->iclass == addsub_imm
306 && (inst.opcode->op == OP_ADD
307 || strcmp ("sub", inst.opcode->name) == 0))
308 {
309 unsigned rd = inst.operands[0].reg.regno;
310 unsigned rn = inst.operands[1].reg.regno;
311
312 gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
313 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
314 gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
315 gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM);
316
317 if (inst.opcode->op == OP_ADD)
318 {
319 regs[rd] = pv_add_constant (regs[rn],
320 inst.operands[2].imm.value);
321 }
322 else
323 {
324 regs[rd] = pv_add_constant (regs[rn],
325 -inst.operands[2].imm.value);
326 }
327 }
328 else if (inst.opcode->iclass == pcreladdr
329 && inst.operands[1].type == AARCH64_OPND_ADDR_ADRP)
330 {
331 gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
332 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
333
334 regs[inst.operands[0].reg.regno] = pv_unknown ();
335 }
336 else if (inst.opcode->iclass == branch_imm)
337 {
338 /* Stop analysis on branch. */
339 break;
340 }
341 else if (inst.opcode->iclass == condbranch)
342 {
343 /* Stop analysis on branch. */
344 break;
345 }
346 else if (inst.opcode->iclass == branch_reg)
347 {
348 /* Stop analysis on branch. */
349 break;
350 }
351 else if (inst.opcode->iclass == compbranch)
352 {
353 /* Stop analysis on branch. */
354 break;
355 }
356 else if (inst.opcode->op == OP_MOVZ)
357 {
358 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
359 regs[inst.operands[0].reg.regno] = pv_unknown ();
360 }
361 else if (inst.opcode->iclass == log_shift
362 && strcmp (inst.opcode->name, "orr") == 0)
363 {
364 unsigned rd = inst.operands[0].reg.regno;
365 unsigned rn = inst.operands[1].reg.regno;
366 unsigned rm = inst.operands[2].reg.regno;
367
368 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
369 gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn);
370 gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT);
371
372 if (inst.operands[2].shifter.amount == 0
373 && rn == AARCH64_SP_REGNUM)
374 regs[rd] = regs[rm];
375 else
376 {
377 if (aarch64_debug)
378 {
379 debug_printf ("aarch64: prologue analysis gave up "
380 "addr=%s opcode=0x%x (orr x register)\n",
381 core_addr_to_string_nz (start), insn);
382 }
383 break;
384 }
385 }
386 else if (inst.opcode->op == OP_STUR)
387 {
388 unsigned rt = inst.operands[0].reg.regno;
389 unsigned rn = inst.operands[1].addr.base_regno;
390 int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);
391
392 gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
393 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
394 gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
395 gdb_assert (!inst.operands[1].addr.offset.is_reg);
396
397 stack.store
398 (pv_add_constant (regs[rn], inst.operands[1].addr.offset.imm),
399 size, regs[rt]);
400 }
401 else if ((inst.opcode->iclass == ldstpair_off
402 || (inst.opcode->iclass == ldstpair_indexed
403 && inst.operands[2].addr.preind))
404 && strcmp ("stp", inst.opcode->name) == 0)
405 {
406 /* STP with addressing mode Pre-indexed and Base register. */
407 unsigned rt1;
408 unsigned rt2;
409 unsigned rn = inst.operands[2].addr.base_regno;
410 int32_t imm = inst.operands[2].addr.offset.imm;
411 int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);
412
413 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
414 || inst.operands[0].type == AARCH64_OPND_Ft);
415 gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2
416 || inst.operands[1].type == AARCH64_OPND_Ft2);
417 gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7);
418 gdb_assert (!inst.operands[2].addr.offset.is_reg);
419
420 /* If recording this store would invalidate the store area
421 (perhaps because rn is not known) then we should abandon
422 further prologue analysis. */
423 if (stack.store_would_trash (pv_add_constant (regs[rn], imm)))
424 break;
425
426 if (stack.store_would_trash (pv_add_constant (regs[rn], imm + 8)))
427 break;
428
429 rt1 = inst.operands[0].reg.regno;
430 rt2 = inst.operands[1].reg.regno;
431 if (inst.operands[0].type == AARCH64_OPND_Ft)
432 {
433 rt1 += AARCH64_X_REGISTER_COUNT;
434 rt2 += AARCH64_X_REGISTER_COUNT;
435 }
436
437 stack.store (pv_add_constant (regs[rn], imm), size, regs[rt1]);
438 stack.store (pv_add_constant (regs[rn], imm + size), size, regs[rt2]);
439
440 if (inst.operands[2].addr.writeback)
441 regs[rn] = pv_add_constant (regs[rn], imm);
442
443 }
444 else if ((inst.opcode->iclass == ldst_imm9 /* Signed immediate. */
445 || (inst.opcode->iclass == ldst_pos /* Unsigned immediate. */
446 && (inst.opcode->op == OP_STR_POS
447 || inst.opcode->op == OP_STRF_POS)))
448 && inst.operands[1].addr.base_regno == AARCH64_SP_REGNUM
449 && strcmp ("str", inst.opcode->name) == 0)
450 {
451 /* STR (immediate) */
452 unsigned int rt = inst.operands[0].reg.regno;
453 int32_t imm = inst.operands[1].addr.offset.imm;
454 unsigned int rn = inst.operands[1].addr.base_regno;
455 int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);
456 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
457 || inst.operands[0].type == AARCH64_OPND_Ft);
458
459 if (inst.operands[0].type == AARCH64_OPND_Ft)
460 rt += AARCH64_X_REGISTER_COUNT;
461
462 stack.store (pv_add_constant (regs[rn], imm), size, regs[rt]);
463 if (inst.operands[1].addr.writeback)
464 regs[rn] = pv_add_constant (regs[rn], imm);
465 }
466 else if (inst.opcode->iclass == testbranch)
467 {
468 /* Stop analysis on branch. */
469 break;
470 }
471 else if (inst.opcode->iclass == ic_system)
472 {
473 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
474 int ra_state_val = 0;
475
476 if (insn == 0xd503233f /* paciasp. */
477 || insn == 0xd503237f /* pacibsp. */)
478 {
479 /* Return addresses are mangled. */
480 ra_state_val = 1;
481 }
482 else if (insn == 0xd50323bf /* autiasp. */
483 || insn == 0xd50323ff /* autibsp. */)
484 {
485 /* Return addresses are not mangled. */
486 ra_state_val = 0;
487 }
488 else
489 {
490 if (aarch64_debug)
491 debug_printf ("aarch64: prologue analysis gave up addr=%s"
492 " opcode=0x%x (iclass)\n",
493 core_addr_to_string_nz (start), insn);
494 break;
495 }
496
497 if (tdep->has_pauth () && cache != nullptr)
498 trad_frame_set_value (cache->saved_regs,
499 tdep->pauth_ra_state_regnum,
500 ra_state_val);
501 }
502 else
503 {
504 if (aarch64_debug)
505 {
506 debug_printf ("aarch64: prologue analysis gave up addr=%s"
507 " opcode=0x%x\n",
508 core_addr_to_string_nz (start), insn);
509 }
510 break;
511 }
512 }
513
514 if (cache == NULL)
515 return start;
516
517 if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
518 {
519 /* Frame pointer is fp. Frame size is constant. */
520 cache->framereg = AARCH64_FP_REGNUM;
521 cache->framesize = -regs[AARCH64_FP_REGNUM].k;
522 }
523 else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
524 {
525 /* Try the stack pointer. */
526 cache->framesize = -regs[AARCH64_SP_REGNUM].k;
527 cache->framereg = AARCH64_SP_REGNUM;
528 }
529 else
530 {
531 /* We're just out of luck. We don't know where the frame is. */
532 cache->framereg = -1;
533 cache->framesize = 0;
534 }
535
536 for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
537 {
538 CORE_ADDR offset;
539
540 if (stack.find_reg (gdbarch, i, &offset))
541 cache->saved_regs[i].addr = offset;
542 }
543
544 for (i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
545 {
546 int regnum = gdbarch_num_regs (gdbarch);
547 CORE_ADDR offset;
548
549 if (stack.find_reg (gdbarch, i + AARCH64_X_REGISTER_COUNT,
550 &offset))
551 cache->saved_regs[i + regnum + AARCH64_D0_REGNUM].addr = offset;
552 }
553
554 return start;
555 }
556
557 static CORE_ADDR
558 aarch64_analyze_prologue (struct gdbarch *gdbarch,
559 CORE_ADDR start, CORE_ADDR limit,
560 struct aarch64_prologue_cache *cache)
561 {
562 instruction_reader reader;
563
564 return aarch64_analyze_prologue (gdbarch, start, limit, cache,
565 reader);
566 }
567
568 #if GDB_SELF_TEST
569
570 namespace selftests {
571
572 /* Instruction reader from manually cooked instruction sequences. */
573
574 class instruction_reader_test : public abstract_instruction_reader
575 {
576 public:
577 template<size_t SIZE>
578 explicit instruction_reader_test (const uint32_t (&insns)[SIZE])
579 : m_insns (insns), m_insns_size (SIZE)
580 {}
581
582 ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
583 override
584 {
585 SELF_CHECK (len == 4);
586 SELF_CHECK (memaddr % 4 == 0);
587 SELF_CHECK (memaddr / 4 < m_insns_size);
588
589 return m_insns[memaddr / 4];
590 }
591
592 private:
593 const uint32_t *m_insns;
594 size_t m_insns_size;
595 };
596
597 static void
598 aarch64_analyze_prologue_test (void)
599 {
600 struct gdbarch_info info;
601
602 gdbarch_info_init (&info);
603 info.bfd_arch_info = bfd_scan_arch ("aarch64");
604
605 struct gdbarch *gdbarch = gdbarch_find_by_info (info);
606 SELF_CHECK (gdbarch != NULL);
607
608 struct aarch64_prologue_cache cache;
609 cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);
610
611 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
612
613 /* Test the simple prologue in which frame pointer is used. */
614 {
615 static const uint32_t insns[] = {
616 0xa9af7bfd, /* stp x29, x30, [sp,#-272]! */
617 0x910003fd, /* mov x29, sp */
618 0x97ffffe6, /* bl 0x400580 */
619 };
620 instruction_reader_test reader (insns);
621
622 CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
623 SELF_CHECK (end == 4 * 2);
624
625 SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
626 SELF_CHECK (cache.framesize == 272);
627
628 for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
629 {
630 if (i == AARCH64_FP_REGNUM)
631 SELF_CHECK (cache.saved_regs[i].addr == -272);
632 else if (i == AARCH64_LR_REGNUM)
633 SELF_CHECK (cache.saved_regs[i].addr == -264);
634 else
635 SELF_CHECK (cache.saved_regs[i].addr == -1);
636 }
637
638 for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
639 {
640 int regnum = gdbarch_num_regs (gdbarch);
641
642 SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
643 == -1);
644 }
645 }
646
647 /* Test a prologue in which STR is used and frame pointer is not
648 used. */
649 {
650 static const uint32_t insns[] = {
651 0xf81d0ff3, /* str x19, [sp, #-48]! */
652 0xb9002fe0, /* str w0, [sp, #44] */
653 0xf90013e1, /* str x1, [sp, #32]*/
654 0xfd000fe0, /* str d0, [sp, #24] */
655 0xaa0203f3, /* mov x19, x2 */
656 0xf94013e0, /* ldr x0, [sp, #32] */
657 };
658 instruction_reader_test reader (insns);
659
660 trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
661 CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
662
663 SELF_CHECK (end == 4 * 5);
664
665 SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
666 SELF_CHECK (cache.framesize == 48);
667
668 for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
669 {
670 if (i == 1)
671 SELF_CHECK (cache.saved_regs[i].addr == -16);
672 else if (i == 19)
673 SELF_CHECK (cache.saved_regs[i].addr == -48);
674 else
675 SELF_CHECK (cache.saved_regs[i].addr == -1);
676 }
677
678 for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
679 {
680 int regnum = gdbarch_num_regs (gdbarch);
681
682 if (i == 0)
683 SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
684 == -24);
685 else
686 SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
687 == -1);
688 }
689 }
690
691 /* Test a prologue in which there is a return address signing instruction. */
692 if (tdep->has_pauth ())
693 {
694 static const uint32_t insns[] = {
695 0xd503233f, /* paciasp */
696 0xa9bd7bfd, /* stp x29, x30, [sp, #-48]! */
697 0x910003fd, /* mov x29, sp */
698 0xf801c3f3, /* str x19, [sp, #28] */
699 0xb9401fa0, /* ldr x19, [x29, #28] */
700 };
701 instruction_reader_test reader (insns);
702
703 trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
704 CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache,
705 reader);
706
707 SELF_CHECK (end == 4 * 4);
708 SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
709 SELF_CHECK (cache.framesize == 48);
710
711 for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
712 {
713 if (i == 19)
714 SELF_CHECK (cache.saved_regs[i].addr == -20);
715 else if (i == AARCH64_FP_REGNUM)
716 SELF_CHECK (cache.saved_regs[i].addr == -48);
717 else if (i == AARCH64_LR_REGNUM)
718 SELF_CHECK (cache.saved_regs[i].addr == -40);
719 else
720 SELF_CHECK (cache.saved_regs[i].addr == -1);
721 }
722
723 if (tdep->has_pauth ())
724 {
725 SELF_CHECK (trad_frame_value_p (cache.saved_regs,
726 tdep->pauth_ra_state_regnum));
727 SELF_CHECK (cache.saved_regs[tdep->pauth_ra_state_regnum].addr == 1);
728 }
729 }
730 }
731 } // namespace selftests
732 #endif /* GDB_SELF_TEST */
733
734 /* Implement the "skip_prologue" gdbarch method. */
735
736 static CORE_ADDR
737 aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
738 {
739 CORE_ADDR func_addr, limit_pc;
740
741 /* See if we can determine the end of the prologue via the symbol
742 table. If so, then return either PC, or the PC after the
743 prologue, whichever is greater. */
744 if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
745 {
746 CORE_ADDR post_prologue_pc
747 = skip_prologue_using_sal (gdbarch, func_addr);
748
749 if (post_prologue_pc != 0)
750 return std::max (pc, post_prologue_pc);
751 }
752
753 /* Can't determine prologue from the symbol table, need to examine
754 instructions. */
755
756 /* Find an upper limit on the function prologue using the debug
757 information. If the debug information could not be used to
758 provide that bound, then use an arbitrary large number as the
759 upper bound. */
760 limit_pc = skip_prologue_using_sal (gdbarch, pc);
761 if (limit_pc == 0)
762 limit_pc = pc + 128; /* Magic. */
763
764 /* Try disassembling prologue. */
765 return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
766 }
767
768 /* Scan the function prologue for THIS_FRAME and populate the prologue
769 cache CACHE. */
770
771 static void
772 aarch64_scan_prologue (struct frame_info *this_frame,
773 struct aarch64_prologue_cache *cache)
774 {
775 CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
776 CORE_ADDR prologue_start;
777 CORE_ADDR prologue_end;
778 CORE_ADDR prev_pc = get_frame_pc (this_frame);
779 struct gdbarch *gdbarch = get_frame_arch (this_frame);
780
781 cache->prev_pc = prev_pc;
782
783 /* Assume we do not find a frame. */
784 cache->framereg = -1;
785 cache->framesize = 0;
786
787 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
788 &prologue_end))
789 {
790 struct symtab_and_line sal = find_pc_line (prologue_start, 0);
791
792 if (sal.line == 0)
793 {
794 /* No line info so use the current PC. */
795 prologue_end = prev_pc;
796 }
797 else if (sal.end < prologue_end)
798 {
799 /* The next line begins after the function end. */
800 prologue_end = sal.end;
801 }
802
803 prologue_end = std::min (prologue_end, prev_pc);
804 aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
805 }
806 else
807 {
808 CORE_ADDR frame_loc;
809
810 frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
811 if (frame_loc == 0)
812 return;
813
814 cache->framereg = AARCH64_FP_REGNUM;
815 cache->framesize = 16;
816 cache->saved_regs[29].addr = 0;
817 cache->saved_regs[30].addr = 8;
818 }
819 }
820
821 /* Fill in *CACHE with information about the prologue of *THIS_FRAME. This
822 function may throw an exception if the inferior's registers or memory is
823 not available. */
824
825 static void
826 aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
827 struct aarch64_prologue_cache *cache)
828 {
829 CORE_ADDR unwound_fp;
830 int reg;
831
832 aarch64_scan_prologue (this_frame, cache);
833
834 if (cache->framereg == -1)
835 return;
836
837 unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
838 if (unwound_fp == 0)
839 return;
840
841 cache->prev_sp = unwound_fp + cache->framesize;
842
843 /* Calculate actual addresses of saved registers using offsets
844 determined by aarch64_analyze_prologue. */
845 for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
846 if (trad_frame_addr_p (cache->saved_regs, reg))
847 cache->saved_regs[reg].addr += cache->prev_sp;
848
849 cache->func = get_frame_func (this_frame);
850
851 cache->available_p = 1;
852 }
853
854 /* Allocate and fill in *THIS_CACHE with information about the prologue of
855 *THIS_FRAME. Do not do this if *THIS_CACHE was already allocated.
856 Return a pointer to the current aarch64_prologue_cache in
857 *THIS_CACHE. */
858
859 static struct aarch64_prologue_cache *
860 aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
861 {
862 struct aarch64_prologue_cache *cache;
863
864 if (*this_cache != NULL)
865 return (struct aarch64_prologue_cache *) *this_cache;
866
867 cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
868 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
869 *this_cache = cache;
870
871 try
872 {
873 aarch64_make_prologue_cache_1 (this_frame, cache);
874 }
875 catch (const gdb_exception_error &ex)
876 {
877 if (ex.error != NOT_AVAILABLE_ERROR)
878 throw;
879 }
880
881 return cache;
882 }
883
884 /* Implement the "stop_reason" frame_unwind method. */
885
886 static enum unwind_stop_reason
887 aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
888 void **this_cache)
889 {
890 struct aarch64_prologue_cache *cache
891 = aarch64_make_prologue_cache (this_frame, this_cache);
892
893 if (!cache->available_p)
894 return UNWIND_UNAVAILABLE;
895
896 /* Halt the backtrace at "_start". */
897 if (cache->prev_pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
898 return UNWIND_OUTERMOST;
899
900 /* We've hit a wall, stop. */
901 if (cache->prev_sp == 0)
902 return UNWIND_OUTERMOST;
903
904 return UNWIND_NO_REASON;
905 }
906
907 /* Our frame ID for a normal frame is the current function's starting
908 PC and the caller's SP when we were called. */
909
910 static void
911 aarch64_prologue_this_id (struct frame_info *this_frame,
912 void **this_cache, struct frame_id *this_id)
913 {
914 struct aarch64_prologue_cache *cache
915 = aarch64_make_prologue_cache (this_frame, this_cache);
916
917 if (!cache->available_p)
918 *this_id = frame_id_build_unavailable_stack (cache->func);
919 else
920 *this_id = frame_id_build (cache->prev_sp, cache->func);
921 }
922
923 /* Implement the "prev_register" frame_unwind method. */
924
925 static struct value *
926 aarch64_prologue_prev_register (struct frame_info *this_frame,
927 void **this_cache, int prev_regnum)
928 {
929 struct aarch64_prologue_cache *cache
930 = aarch64_make_prologue_cache (this_frame, this_cache);
931
932 /* If we are asked to unwind the PC, then we need to return the LR
933 instead. The prologue may save PC, but it will point into this
934 frame's prologue, not the next frame's resume location. */
935 if (prev_regnum == AARCH64_PC_REGNUM)
936 {
937 CORE_ADDR lr;
938 struct gdbarch *gdbarch = get_frame_arch (this_frame);
939 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
940
941 lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
942
943 if (tdep->has_pauth ()
944 && trad_frame_value_p (cache->saved_regs,
945 tdep->pauth_ra_state_regnum))
946 lr = aarch64_frame_unmask_lr (tdep, this_frame, lr);
947
948 return frame_unwind_got_constant (this_frame, prev_regnum, lr);
949 }
950
951 /* SP is generally not saved to the stack, but this frame is
952 identified by the next frame's stack pointer at the time of the
953 call. The value was already reconstructed into PREV_SP. */
954 /*
955 +----------+ ^
956 | saved lr | |
957 +->| saved fp |--+
958 | | |
959 | | | <- Previous SP
960 | +----------+
961 | | saved lr |
962 +--| saved fp |<- FP
963 | |
964 | |<- SP
965 +----------+ */
966 if (prev_regnum == AARCH64_SP_REGNUM)
967 return frame_unwind_got_constant (this_frame, prev_regnum,
968 cache->prev_sp);
969
970 return trad_frame_get_prev_register (this_frame, cache->saved_regs,
971 prev_regnum);
972 }
973
974 /* AArch64 prologue unwinder. */
975 struct frame_unwind aarch64_prologue_unwind =
976 {
977 NORMAL_FRAME,
978 aarch64_prologue_frame_unwind_stop_reason,
979 aarch64_prologue_this_id,
980 aarch64_prologue_prev_register,
981 NULL,
982 default_frame_sniffer
983 };
984
985 /* Allocate and fill in *THIS_CACHE with information about the prologue of
986 *THIS_FRAME. Do not do this if *THIS_CACHE was already allocated.
987 Return a pointer to the current aarch64_prologue_cache in
988 *THIS_CACHE. */
989
990 static struct aarch64_prologue_cache *
991 aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
992 {
993 struct aarch64_prologue_cache *cache;
994
995 if (*this_cache != NULL)
996 return (struct aarch64_prologue_cache *) *this_cache;
997
998 cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
999 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
1000 *this_cache = cache;
1001
1002 try
1003 {
1004 cache->prev_sp = get_frame_register_unsigned (this_frame,
1005 AARCH64_SP_REGNUM);
1006 cache->prev_pc = get_frame_pc (this_frame);
1007 cache->available_p = 1;
1008 }
1009 catch (const gdb_exception_error &ex)
1010 {
1011 if (ex.error != NOT_AVAILABLE_ERROR)
1012 throw;
1013 }
1014
1015 return cache;
1016 }
1017
1018 /* Implement the "stop_reason" frame_unwind method. */
1019
1020 static enum unwind_stop_reason
1021 aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame,
1022 void **this_cache)
1023 {
1024 struct aarch64_prologue_cache *cache
1025 = aarch64_make_stub_cache (this_frame, this_cache);
1026
1027 if (!cache->available_p)
1028 return UNWIND_UNAVAILABLE;
1029
1030 return UNWIND_NO_REASON;
1031 }
1032
1033 /* Our frame ID for a stub frame is the current SP and LR. */
1034
1035 static void
1036 aarch64_stub_this_id (struct frame_info *this_frame,
1037 void **this_cache, struct frame_id *this_id)
1038 {
1039 struct aarch64_prologue_cache *cache
1040 = aarch64_make_stub_cache (this_frame, this_cache);
1041
1042 if (cache->available_p)
1043 *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
1044 else
1045 *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
1046 }
1047
1048 /* Implement the "sniffer" frame_unwind method. */
1049
1050 static int
1051 aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
1052 struct frame_info *this_frame,
1053 void **this_prologue_cache)
1054 {
1055 CORE_ADDR addr_in_block;
1056 gdb_byte dummy[4];
1057
1058 addr_in_block = get_frame_address_in_block (this_frame);
1059 if (in_plt_section (addr_in_block)
1060 /* We also use the stub unwinder if the target memory is unreadable
1061 to avoid having the prologue unwinder trying to read it. */
1062 || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
1063 return 1;
1064
1065 return 0;
1066 }
1067
1068 /* AArch64 stub unwinder. */
1069 struct frame_unwind aarch64_stub_unwind =
1070 {
1071 NORMAL_FRAME,
1072 aarch64_stub_frame_unwind_stop_reason,
1073 aarch64_stub_this_id,
1074 aarch64_prologue_prev_register,
1075 NULL,
1076 aarch64_stub_unwind_sniffer
1077 };
1078
1079 /* Return the frame base address of *THIS_FRAME. */
1080
1081 static CORE_ADDR
1082 aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
1083 {
1084 struct aarch64_prologue_cache *cache
1085 = aarch64_make_prologue_cache (this_frame, this_cache);
1086
1087 return cache->prev_sp - cache->framesize;
1088 }
1089
1090 /* AArch64 default frame base information. */
1091 struct frame_base aarch64_normal_base =
1092 {
1093 &aarch64_prologue_unwind,
1094 aarch64_normal_frame_base,
1095 aarch64_normal_frame_base,
1096 aarch64_normal_frame_base
1097 };
1098
1099 /* Return the value of the REGNUM register in the previous frame of
1100 *THIS_FRAME. */
1101
1102 static struct value *
1103 aarch64_dwarf2_prev_register (struct frame_info *this_frame,
1104 void **this_cache, int regnum)
1105 {
1106 struct gdbarch_tdep *tdep = gdbarch_tdep (get_frame_arch (this_frame));
1107 CORE_ADDR lr;
1108
1109 switch (regnum)
1110 {
1111 case AARCH64_PC_REGNUM:
1112 lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
1113 lr = aarch64_frame_unmask_lr (tdep, this_frame, lr);
1114 return frame_unwind_got_constant (this_frame, regnum, lr);
1115
1116 default:
1117 internal_error (__FILE__, __LINE__,
1118 _("Unexpected register %d"), regnum);
1119 }
1120 }
1121
1122 static const unsigned char op_lit0 = DW_OP_lit0;
1123 static const unsigned char op_lit1 = DW_OP_lit1;
1124
1125 /* Implement the "init_reg" dwarf2_frame_ops method. */
1126
1127 static void
1128 aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
1129 struct dwarf2_frame_state_reg *reg,
1130 struct frame_info *this_frame)
1131 {
1132 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1133
1134 switch (regnum)
1135 {
1136 case AARCH64_PC_REGNUM:
1137 reg->how = DWARF2_FRAME_REG_FN;
1138 reg->loc.fn = aarch64_dwarf2_prev_register;
1139 return;
1140
1141 case AARCH64_SP_REGNUM:
1142 reg->how = DWARF2_FRAME_REG_CFA;
1143 return;
1144 }
1145
1146 /* Init pauth registers. */
1147 if (tdep->has_pauth ())
1148 {
1149 if (regnum == tdep->pauth_ra_state_regnum)
1150 {
1151 /* Initialize RA_STATE to zero. */
1152 reg->how = DWARF2_FRAME_REG_SAVED_VAL_EXP;
1153 reg->loc.exp.start = &op_lit0;
1154 reg->loc.exp.len = 1;
1155 return;
1156 }
1157 else if (regnum == AARCH64_PAUTH_DMASK_REGNUM (tdep->pauth_reg_base)
1158 || regnum == AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base))
1159 {
1160 reg->how = DWARF2_FRAME_REG_SAME_VALUE;
1161 return;
1162 }
1163 }
1164 }
1165
1166 /* Implement the execute_dwarf_cfa_vendor_op method. */
1167
1168 static bool
1169 aarch64_execute_dwarf_cfa_vendor_op (struct gdbarch *gdbarch, gdb_byte op,
1170 struct dwarf2_frame_state *fs)
1171 {
1172 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1173 struct dwarf2_frame_state_reg *ra_state;
1174
1175 if (op == DW_CFA_AARCH64_negate_ra_state)
1176 {
1177 /* On systems without pauth, treat as a nop. */
1178 if (!tdep->has_pauth ())
1179 return true;
1180
1181 /* Allocate RA_STATE column if it's not allocated yet. */
1182 fs->regs.alloc_regs (AARCH64_DWARF_PAUTH_RA_STATE + 1);
1183
1184 /* Toggle the status of RA_STATE between 0 and 1. */
1185 ra_state = &(fs->regs.reg[AARCH64_DWARF_PAUTH_RA_STATE]);
1186 ra_state->how = DWARF2_FRAME_REG_SAVED_VAL_EXP;
1187
1188 if (ra_state->loc.exp.start == nullptr
1189 || ra_state->loc.exp.start == &op_lit0)
1190 ra_state->loc.exp.start = &op_lit1;
1191 else
1192 ra_state->loc.exp.start = &op_lit0;
1193
1194 ra_state->loc.exp.len = 1;
1195
1196 return true;
1197 }
1198
1199 return false;
1200 }
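/* Editor's note: toolchains emit this vendor opcode via the
   ".cfi_negate_ra_state" assembler directive, typically paired with the
   pointer-authentication instructions, e.g. (sketch only; exact output
   depends on the compiler):

     paciasp
     .cfi_negate_ra_state    // RA_STATE 0 -> 1: LR is now signed
     ...
     autiasp
     .cfi_negate_ra_state    // RA_STATE 1 -> 0: LR is plain again
     ret
 */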
1201
1202 /* When arguments must be pushed onto the stack, they go on in reverse
1203 order. The code below implements a FILO (stack) to do this. */
1204
1205 struct stack_item_t
1206 {
1207 /* Value to pass on stack. It can be NULL if this item is for stack
1208 padding. */
1209 const gdb_byte *data;
1210
1211 /* Size in bytes of value to pass on stack. */
1212 int len;
1213 };
1214
1215 /* Implement the gdbarch type alignment method; it overrides the generic
1216 alignment algorithm for anything that is aarch64 specific. */
1217
1218 static ULONGEST
1219 aarch64_type_align (gdbarch *gdbarch, struct type *t)
1220 {
1221 t = check_typedef (t);
1222 if (TYPE_CODE (t) == TYPE_CODE_ARRAY && TYPE_VECTOR (t))
1223 {
1224 /* Use the natural alignment for vector types (the same as for
1225 scalar types), but cap the alignment at 128 bits. */
1226 if (TYPE_LENGTH (t) > 16)
1227 return 16;
1228 else
1229 return TYPE_LENGTH (t);
1230 }
1231
1232 /* Allow the common code to calculate the alignment. */
1233 return 0;
1234 }
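/* Editor's illustration: a vector of four doubles is 32 bytes long but
   is capped at 16-byte alignment by the code above, while an 8-byte
   vector of two floats keeps its natural 8-byte alignment. Non-vector
   types fall through to the generic algorithm. */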
1235
1236 /* Worker function for aapcs_is_vfp_call_or_return_candidate.
1237
1238 Return the number of registers required, or -1 on failure.
1239
1240 When encountering a base element, if FUNDAMENTAL_TYPE is not yet set, set it
1241 to that element's type; otherwise fail if this element's type does not match
1242 the existing value.
1243
1244 static int
1245 aapcs_is_vfp_call_or_return_candidate_1 (struct type *type,
1246 struct type **fundamental_type)
1247 {
1248 if (type == nullptr)
1249 return -1;
1250
1251 switch (TYPE_CODE (type))
1252 {
1253 case TYPE_CODE_FLT:
1254 if (TYPE_LENGTH (type) > 16)
1255 return -1;
1256
1257 if (*fundamental_type == nullptr)
1258 *fundamental_type = type;
1259 else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
1260 || TYPE_CODE (type) != TYPE_CODE (*fundamental_type))
1261 return -1;
1262
1263 return 1;
1264
1265 case TYPE_CODE_COMPLEX:
1266 {
1267 struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
1268 if (TYPE_LENGTH (target_type) > 16)
1269 return -1;
1270
1271 if (*fundamental_type == nullptr)
1272 *fundamental_type = target_type;
1273 else if (TYPE_LENGTH (target_type) != TYPE_LENGTH (*fundamental_type)
1274 || TYPE_CODE (target_type) != TYPE_CODE (*fundamental_type))
1275 return -1;
1276
1277 return 2;
1278 }
1279
1280 case TYPE_CODE_ARRAY:
1281 {
1282 if (TYPE_VECTOR (type))
1283 {
1284 if (TYPE_LENGTH (type) != 8 && TYPE_LENGTH (type) != 16)
1285 return -1;
1286
1287 if (*fundamental_type == nullptr)
1288 *fundamental_type = type;
1289 else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
1290 || TYPE_CODE (type) != TYPE_CODE (*fundamental_type))
1291 return -1;
1292
1293 return 1;
1294 }
1295 else
1296 {
1297 struct type *target_type = TYPE_TARGET_TYPE (type);
1298 int count = aapcs_is_vfp_call_or_return_candidate_1
1299 (target_type, fundamental_type);
1300
1301 if (count == -1)
1302 return count;
1303
1304 count *= (TYPE_LENGTH (type) / TYPE_LENGTH (target_type));
1305 return count;
1306 }
1307 }
1308
1309 case TYPE_CODE_STRUCT:
1310 case TYPE_CODE_UNION:
1311 {
1312 int count = 0;
1313
1314 for (int i = 0; i < TYPE_NFIELDS (type); i++)
1315 {
1316 /* Ignore any static fields. */
1317 if (field_is_static (&TYPE_FIELD (type, i)))
1318 continue;
1319
1320 struct type *member = check_typedef (TYPE_FIELD_TYPE (type, i));
1321
1322 int sub_count = aapcs_is_vfp_call_or_return_candidate_1
1323 (member, fundamental_type);
1324 if (sub_count == -1)
1325 return -1;
1326 count += sub_count;
1327 }
1328
1329 /* Ensure there is no padding between the fields (allowing for empty
1330 zero-length structs). */
1331 int ftype_length = (*fundamental_type == nullptr)
1332 ? 0 : TYPE_LENGTH (*fundamental_type);
1333 if (count * ftype_length != TYPE_LENGTH (type))
1334 return -1;
1335
1336 return count;
1337 }
1338
1339 default:
1340 break;
1341 }
1342
1343 return -1;
1344 }
1345
1346 /* Return true if an argument, whose type is described by TYPE, can be passed or
1347 returned in SIMD/FP registers, provided enough parameter-passing registers
1348 are available. This is as described in the AAPCS64.
1349
1350 Upon successful return, *COUNT returns the number of needed registers, and
1351 *FUNDAMENTAL_TYPE contains the type of those registers.
1352
1353 A candidate, as per AAPCS64 5.4.2.C, is one of:
1354 - a float;
1355 - a short vector;
1356 - an HFA (Homogeneous Floating-point Aggregate, 4.3.5.1): a composite type
1357 whose members are all floats, with at most four members;
1358 - an HVA (Homogeneous Short-vector Aggregate, 4.3.5.2): a composite type
1359 whose members are all short vectors, with at most four members;
1360 - a complex type (7.1.1).
1361
1362 Note that HFAs and HVAs can include nested structures and arrays. */
1363
1364 static bool
1365 aapcs_is_vfp_call_or_return_candidate (struct type *type, int *count,
1366 struct type **fundamental_type)
1367 {
1368 if (type == nullptr)
1369 return false;
1370
1371 *fundamental_type = nullptr;
1372
1373 int ag_count = aapcs_is_vfp_call_or_return_candidate_1 (type,
1374 fundamental_type);
1375
1376 if (ag_count > 0 && ag_count <= HA_MAX_NUM_FLDS)
1377 {
1378 *count = ag_count;
1379 return true;
1380 }
1381 else
1382 return false;
1383 }
1384
1385 /* AArch64 function call information structure. */
1386 struct aarch64_call_info
1387 {
1388 /* The current argument number. */
1389 unsigned argnum = 0;
1390
1391 /* The next general purpose register number, equivalent to NGRN as
1392 described in the AArch64 Procedure Call Standard. */
1393 unsigned ngrn = 0;
1394
1395 /* The next SIMD and floating point register number, equivalent to
1396 NSRN as described in the AArch64 Procedure Call Standard. */
1397 unsigned nsrn = 0;
1398
1399 /* The next stacked argument address, equivalent to NSAA as
1400 described in the AArch64 Procedure Call Standard. */
1401 unsigned nsaa = 0;
1402
1403 /* Stack item vector. */
1404 std::vector<stack_item_t> si;
1405 };
1406
1407 /* Pass a value in a sequence of consecutive X registers. The caller
1408 is responsible for ensuring sufficient registers are available. */
1409
1410 static void
1411 pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
1412 struct aarch64_call_info *info, struct type *type,
1413 struct value *arg)
1414 {
1415 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1416 int len = TYPE_LENGTH (type);
1417 enum type_code typecode = TYPE_CODE (type);
1418 int regnum = AARCH64_X0_REGNUM + info->ngrn;
1419 const bfd_byte *buf = value_contents (arg);
1420
1421 info->argnum++;
1422
1423 while (len > 0)
1424 {
1425 int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
1426 CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
1427 byte_order);
1428
1429
1430 /* Adjust sub-word struct/union args when big-endian. */
1431 if (byte_order == BFD_ENDIAN_BIG
1432 && partial_len < X_REGISTER_SIZE
1433 && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
1434 regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);
1435
1436 if (aarch64_debug)
1437 {
1438 debug_printf ("arg %d in %s = 0x%s\n", info->argnum,
1439 gdbarch_register_name (gdbarch, regnum),
1440 phex (regval, X_REGISTER_SIZE));
1441 }
1442 regcache_cooked_write_unsigned (regcache, regnum, regval);
1443 len -= partial_len;
1444 buf += partial_len;
1445 regnum++;
1446 }
1447 }
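/* For instance (editor's note), a 16-byte struct passed with NGRN == 0
   is written piecewise into x0 and x1: the first eight bytes into x0
   and the remainder into x1. The caller (pass_in_x_or_stack) then
   advances info->ngrn past the consumed registers. */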
1448
1449 /* Attempt to marshall a value in a V register. Return 1 if
1450 successful, or 0 if insufficient registers are available. This
1451 function, unlike the equivalent pass_in_x () function, does not
1452 handle arguments spread across multiple registers. */
1453
1454 static int
1455 pass_in_v (struct gdbarch *gdbarch,
1456 struct regcache *regcache,
1457 struct aarch64_call_info *info,
1458 int len, const bfd_byte *buf)
1459 {
1460 if (info->nsrn < 8)
1461 {
1462 int regnum = AARCH64_V0_REGNUM + info->nsrn;
1463 /* Enough space for a full vector register. */
1464 gdb_byte reg[register_size (gdbarch, regnum)];
1465 gdb_assert (len <= sizeof (reg));
1466
1467 info->argnum++;
1468 info->nsrn++;
1469
1470 memset (reg, 0, sizeof (reg));
1471 /* PCS C.1, the argument is allocated to the least significant
1472 bits of V register. */
1473 memcpy (reg, buf, len);
1474 regcache->cooked_write (regnum, reg);
1475
1476 if (aarch64_debug)
1477 {
1478 debug_printf ("arg %d in %s\n", info->argnum,
1479 gdbarch_register_name (gdbarch, regnum));
1480 }
1481 return 1;
1482 }
1483 info->nsrn = 8;
1484 return 0;
1485 }
1486
1487 /* Marshall an argument onto the stack. */
1488
1489 static void
1490 pass_on_stack (struct aarch64_call_info *info, struct type *type,
1491 struct value *arg)
1492 {
1493 const bfd_byte *buf = value_contents (arg);
1494 int len = TYPE_LENGTH (type);
1495 int align;
1496 stack_item_t item;
1497
1498 info->argnum++;
1499
1500 align = type_align (type);
1501
1502 /* PCS C.17: the stack should be aligned to the larger of 8 bytes or the
1503 natural alignment of the argument's type. */
1504 align = align_up (align, 8);
1505
1506 /* The AArch64 PCS requires at most doubleword alignment. */
1507 if (align > 16)
1508 align = 16;
1509
1510 if (aarch64_debug)
1511 {
1512 debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
1513 info->nsaa);
1514 }
1515
1516 item.len = len;
1517 item.data = buf;
1518 info->si.push_back (item);
1519
1520 info->nsaa += len;
1521 if (info->nsaa & (align - 1))
1522 {
1523 /* Push stack alignment padding. */
1524 int pad = align - (info->nsaa & (align - 1));
1525
1526 item.len = pad;
1527 item.data = NULL;
1528
1529 info->si.push_back (item);
1530 info->nsaa += pad;
1531 }
1532 }
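/* Worked example (editor's sketch): pushing a 12-byte struct with
   natural 4-byte alignment when info->nsaa == 0 rounds the alignment
   up to 8, appends a 12-byte data item, then a 4-byte NULL padding
   item, leaving nsaa == 16. */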
1533
1534 /* Marshall an argument into a sequence of one or more consecutive X
1535 registers or, if insufficient X registers are available, onto
1536 the stack. */
1537
1538 static void
1539 pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
1540 struct aarch64_call_info *info, struct type *type,
1541 struct value *arg)
1542 {
1543 int len = TYPE_LENGTH (type);
1544 int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;
1545
1546 /* PCS C.13 - Pass in registers if we have enough spare. */
1547 if (info->ngrn + nregs <= 8)
1548 {
1549 pass_in_x (gdbarch, regcache, info, type, arg);
1550 info->ngrn += nregs;
1551 }
1552 else
1553 {
1554 info->ngrn = 8;
1555 pass_on_stack (info, type, arg);
1556 }
1557 }
1558
1559 /* Pass a value, which is of type arg_type, in a V register. Assumes the value
1560 is an aapcs_is_vfp_call_or_return_candidate and that there are enough spare V
1561 registers. A return value of false is an error state, as the value will have
1562 been partially passed to the stack. */
1563 static bool
1564 pass_in_v_vfp_candidate (struct gdbarch *gdbarch, struct regcache *regcache,
1565 struct aarch64_call_info *info, struct type *arg_type,
1566 struct value *arg)
1567 {
1568 switch (TYPE_CODE (arg_type))
1569 {
1570 case TYPE_CODE_FLT:
1571 return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type),
1572 value_contents (arg));
1573 break;
1574
1575 case TYPE_CODE_COMPLEX:
1576 {
1577 const bfd_byte *buf = value_contents (arg);
1578 struct type *target_type = check_typedef (TYPE_TARGET_TYPE (arg_type));
1579
1580 if (!pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type),
1581 buf))
1582 return false;
1583
1584 return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type),
1585 buf + TYPE_LENGTH (target_type));
1586 }
1587
1588 case TYPE_CODE_ARRAY:
1589 if (TYPE_VECTOR (arg_type))
1590 return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type),
1591 value_contents (arg));
1592 /* fall through. */
1593
1594 case TYPE_CODE_STRUCT:
1595 case TYPE_CODE_UNION:
1596 for (int i = 0; i < TYPE_NFIELDS (arg_type); i++)
1597 {
1598 /* Don't include static fields. */
1599 if (field_is_static (&TYPE_FIELD (arg_type, i)))
1600 continue;
1601
1602 struct value *field = value_primitive_field (arg, 0, i, arg_type);
1603 struct type *field_type = check_typedef (value_type (field));
1604
1605 if (!pass_in_v_vfp_candidate (gdbarch, regcache, info, field_type,
1606 field))
1607 return false;
1608 }
1609 return true;
1610
1611 default:
1612 return false;
1613 }
1614 }
1615
1616 /* Implement the "push_dummy_call" gdbarch method. */
1617
1618 static CORE_ADDR
1619 aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
1620 struct regcache *regcache, CORE_ADDR bp_addr,
1621 int nargs,
1622 struct value **args, CORE_ADDR sp,
1623 function_call_return_method return_method,
1624 CORE_ADDR struct_addr)
1625 {
1626 int argnum;
1627 struct aarch64_call_info info;
1628
1629 /* We need to know what the type of the called function is in order
1630 to determine the number of named/anonymous arguments for the
1631 actual argument placement, and the return type in order to handle
1632 return value correctly.
1633
1634 The generic code above us views the decision of return in memory
1635 or return in registers as a two-stage process. The language
1636 handler is consulted first and may decide to return in memory (e.g. a
1637 class with a copy constructor returned by value); this will cause
1638 the generic code to allocate space AND insert an initial leading
1639 argument.
1640
1641 If the language code does not decide to pass in memory then the
1642 target code is consulted.
1643
1644 If the language code decides to pass in memory we want to move
1645 the pointer inserted as the initial argument from the argument
1646 list and into X8, the conventional AArch64 struct return pointer
1647 register. */
1648
1649 /* Set the return address. For the AArch64, the return breakpoint
1650 is always at BP_ADDR. */
1651 regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);
1652
1653 /* If we were given an initial argument for the return slot, lose it. */
1654 if (return_method == return_method_hidden_param)
1655 {
1656 args++;
1657 nargs--;
1658 }
1659
1660 /* The struct_return pointer occupies X8. */
1661 if (return_method != return_method_normal)
1662 {
1663 if (aarch64_debug)
1664 {
1665 debug_printf ("struct return in %s = 0x%s\n",
1666 gdbarch_register_name (gdbarch,
1667 AARCH64_STRUCT_RETURN_REGNUM),
1668 paddress (gdbarch, struct_addr));
1669 }
1670 regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
1671 struct_addr);
1672 }
1673
1674 for (argnum = 0; argnum < nargs; argnum++)
1675 {
1676 struct value *arg = args[argnum];
1677 struct type *arg_type, *fundamental_type;
1678 int len, elements;
1679
1680 arg_type = check_typedef (value_type (arg));
1681 len = TYPE_LENGTH (arg_type);
1682
1683 /* If arg can be passed in V registers as per the AAPCS64, then do so,
1684 provided there are enough spare registers. */
1685 if (aapcs_is_vfp_call_or_return_candidate (arg_type, &elements,
1686 &fundamental_type))
1687 {
1688 if (info.nsrn + elements <= 8)
1689 {
1690 /* We know that we have sufficient registers available; therefore
1691 this will never need to fall back to the stack. */
1692 if (!pass_in_v_vfp_candidate (gdbarch, regcache, &info, arg_type,
1693 arg))
1694 gdb_assert_not_reached ("Failed to push args");
1695 }
1696 else
1697 {
1698 info.nsrn = 8;
1699 pass_on_stack (&info, arg_type, arg);
1700 }
1701 continue;
1702 }
1703
1704 switch (TYPE_CODE (arg_type))
1705 {
1706 case TYPE_CODE_INT:
1707 case TYPE_CODE_BOOL:
1708 case TYPE_CODE_CHAR:
1709 case TYPE_CODE_RANGE:
1710 case TYPE_CODE_ENUM:
1711 if (len < 4)
1712 {
1713 /* Promote to 32 bit integer. */
1714 if (TYPE_UNSIGNED (arg_type))
1715 arg_type = builtin_type (gdbarch)->builtin_uint32;
1716 else
1717 arg_type = builtin_type (gdbarch)->builtin_int32;
1718 arg = value_cast (arg_type, arg);
1719 }
1720 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1721 break;
1722
1723 case TYPE_CODE_STRUCT:
1724 case TYPE_CODE_ARRAY:
1725 case TYPE_CODE_UNION:
1726 if (len > 16)
1727 {
1728 /* PCS B.7 Aggregates larger than 16 bytes are passed by
1729 invisible reference. */
1730
1731 /* Allocate aligned storage. */
1732 sp = align_down (sp - len, 16);
1733
1734 /* Write the real data into the stack. */
1735 write_memory (sp, value_contents (arg), len);
1736
1737 /* Construct the indirection. */
1738 arg_type = lookup_pointer_type (arg_type);
1739 arg = value_from_pointer (arg_type, sp);
1740 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1741 }
1742 else
1743 /* PCS C.15 / C.18 multiple values pass. */
1744 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1745 break;
1746
1747 default:
1748 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1749 break;
1750 }
1751 }
1752
1753 /* Make sure stack retains 16 byte alignment. */
1754 if (info.nsaa & 15)
1755 sp -= 16 - (info.nsaa & 15);
1756
1757 while (!info.si.empty ())
1758 {
1759 const stack_item_t &si = info.si.back ();
1760
1761 sp -= si.len;
1762 if (si.data != NULL)
1763 write_memory (sp, si.data, si.len);
1764 info.si.pop_back ();
1765 }
1766
1767 /* Finally, update the SP register. */
1768 regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);
1769
1770 return sp;
1771 }
1772
1773 /* Implement the "frame_align" gdbarch method. */
1774
1775 static CORE_ADDR
1776 aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
1777 {
1778 /* Align the stack to sixteen bytes. */
1779 return sp & ~(CORE_ADDR) 15;
1780 }
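/* Editor's illustration: aarch64_frame_align (gdbarch, 0x7ffffffff8e9)
   returns 0x7ffffffff8e0 -- the low four bits are simply cleared, so
   the stack pointer is always rounded down to a 16-byte boundary. */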
1781
1782 /* Return the type for an AdvSIMD Q register. */
1783
1784 static struct type *
1785 aarch64_vnq_type (struct gdbarch *gdbarch)
1786 {
1787 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1788
1789 if (tdep->vnq_type == NULL)
1790 {
1791 struct type *t;
1792 struct type *elem;
1793
1794 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
1795 TYPE_CODE_UNION);
1796
1797 elem = builtin_type (gdbarch)->builtin_uint128;
1798 append_composite_type_field (t, "u", elem);
1799
1800 elem = builtin_type (gdbarch)->builtin_int128;
1801 append_composite_type_field (t, "s", elem);
1802
1803 tdep->vnq_type = t;
1804 }
1805
1806 return tdep->vnq_type;
1807 }
1808
1809 /* Return the type for an AdvSIMD D register. */
1810
1811 static struct type *
1812 aarch64_vnd_type (struct gdbarch *gdbarch)
1813 {
1814 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1815
1816 if (tdep->vnd_type == NULL)
1817 {
1818 struct type *t;
1819 struct type *elem;
1820
1821 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
1822 TYPE_CODE_UNION);
1823
1824 elem = builtin_type (gdbarch)->builtin_double;
1825 append_composite_type_field (t, "f", elem);
1826
1827 elem = builtin_type (gdbarch)->builtin_uint64;
1828 append_composite_type_field (t, "u", elem);
1829
1830 elem = builtin_type (gdbarch)->builtin_int64;
1831 append_composite_type_field (t, "s", elem);
1832
1833 tdep->vnd_type = t;
1834 }
1835
1836 return tdep->vnd_type;
1837 }
1838
1839 /* Return the type for an AdvSIMD S register. */
1840
1841 static struct type *
1842 aarch64_vns_type (struct gdbarch *gdbarch)
1843 {
1844 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1845
1846 if (tdep->vns_type == NULL)
1847 {
1848 struct type *t;
1849 struct type *elem;
1850
1851 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
1852 TYPE_CODE_UNION);
1853
1854 elem = builtin_type (gdbarch)->builtin_float;
1855 append_composite_type_field (t, "f", elem);
1856
1857 elem = builtin_type (gdbarch)->builtin_uint32;
1858 append_composite_type_field (t, "u", elem);
1859
1860 elem = builtin_type (gdbarch)->builtin_int32;
1861 append_composite_type_field (t, "s", elem);
1862
1863 tdep->vns_type = t;
1864 }
1865
1866 return tdep->vns_type;
1867 }
1868
1869 /* Return the type for an AdvSIMD H register. */
1870
1871 static struct type *
1872 aarch64_vnh_type (struct gdbarch *gdbarch)
1873 {
1874 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1875
1876 if (tdep->vnh_type == NULL)
1877 {
1878 struct type *t;
1879 struct type *elem;
1880
1881 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
1882 TYPE_CODE_UNION);
1883
1884 elem = builtin_type (gdbarch)->builtin_half;
1885 append_composite_type_field (t, "f", elem);
1886
1887 elem = builtin_type (gdbarch)->builtin_uint16;
1888 append_composite_type_field (t, "u", elem);
1889
1890 elem = builtin_type (gdbarch)->builtin_int16;
1891 append_composite_type_field (t, "s", elem);
1892
1893 tdep->vnh_type = t;
1894 }
1895
1896 return tdep->vnh_type;
1897 }
1898
1899 /* Return the type for an AdvSIMD B register. */
1900
1901 static struct type *
1902 aarch64_vnb_type (struct gdbarch *gdbarch)
1903 {
1904 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1905
1906 if (tdep->vnb_type == NULL)
1907 {
1908 struct type *t;
1909 struct type *elem;
1910
1911 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
1912 TYPE_CODE_UNION);
1913
1914 elem = builtin_type (gdbarch)->builtin_uint8;
1915 append_composite_type_field (t, "u", elem);
1916
1917 elem = builtin_type (gdbarch)->builtin_int8;
1918 append_composite_type_field (t, "s", elem);
1919
1920 tdep->vnb_type = t;
1921 }
1922
1923 return tdep->vnb_type;
1924 }
1925
1926 /* Return the type for an AdvSISD V register. */
1927
1928 static struct type *
1929 aarch64_vnv_type (struct gdbarch *gdbarch)
1930 {
1931 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1932
1933 if (tdep->vnv_type == NULL)
1934 {
1935 /* The other AArch64 pseudo registers (Q,D,H,S,B) refer to a single value
1936 slice from the non-pseudo vector registers.  However, NEON V registers
1937 are always vector registers, and need constructing as such.  */
1938 const struct builtin_type *bt = builtin_type (gdbarch);
1939
1940 struct type *t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnv",
1941 TYPE_CODE_UNION);
1942
1943 struct type *sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
1944 TYPE_CODE_UNION);
1945 append_composite_type_field (sub, "f",
1946 init_vector_type (bt->builtin_double, 2));
1947 append_composite_type_field (sub, "u",
1948 init_vector_type (bt->builtin_uint64, 2));
1949 append_composite_type_field (sub, "s",
1950 init_vector_type (bt->builtin_int64, 2));
1951 append_composite_type_field (t, "d", sub);
1952
1953 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
1954 TYPE_CODE_UNION);
1955 append_composite_type_field (sub, "f",
1956 init_vector_type (bt->builtin_float, 4));
1957 append_composite_type_field (sub, "u",
1958 init_vector_type (bt->builtin_uint32, 4));
1959 append_composite_type_field (sub, "s",
1960 init_vector_type (bt->builtin_int32, 4));
1961 append_composite_type_field (t, "s", sub);
1962
1963 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
1964 TYPE_CODE_UNION);
1965 append_composite_type_field (sub, "f",
1966 init_vector_type (bt->builtin_half, 8));
1967 append_composite_type_field (sub, "u",
1968 init_vector_type (bt->builtin_uint16, 8));
1969 append_composite_type_field (sub, "s",
1970 init_vector_type (bt->builtin_int16, 8));
1971 append_composite_type_field (t, "h", sub);
1972
1973 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
1974 TYPE_CODE_UNION);
1975 append_composite_type_field (sub, "u",
1976 init_vector_type (bt->builtin_uint8, 16));
1977 append_composite_type_field (sub, "s",
1978 init_vector_type (bt->builtin_int8, 16));
1979 append_composite_type_field (t, "b", sub);
1980
1981 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
1982 TYPE_CODE_UNION);
1983 append_composite_type_field (sub, "u",
1984 init_vector_type (bt->builtin_uint128, 1));
1985 append_composite_type_field (sub, "s",
1986 init_vector_type (bt->builtin_int128, 1));
1987 append_composite_type_field (t, "q", sub);
1988
1989 tdep->vnv_type = t;
1990 }
1991
1992 return tdep->vnv_type;
1993 }
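/* These union views let the user choose an interpretation explicitly,
   e.g. "print $d0.f" for the double view of d0 or, on SVE targets,
   "print $v0.s.u" for v0 as four unsigned 32-bit lanes.  */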
1994
1995 /* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
1996
1997 static int
1998 aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
1999 {
2000 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2001
2002 if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
2003 return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;
2004
2005 if (reg == AARCH64_DWARF_SP)
2006 return AARCH64_SP_REGNUM;
2007
2008 if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
2009 return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;
2010
2011 if (reg == AARCH64_DWARF_SVE_VG)
2012 return AARCH64_SVE_VG_REGNUM;
2013
2014 if (reg == AARCH64_DWARF_SVE_FFR)
2015 return AARCH64_SVE_FFR_REGNUM;
2016
2017 if (reg >= AARCH64_DWARF_SVE_P0 && reg <= AARCH64_DWARF_SVE_P0 + 15)
2018 return AARCH64_SVE_P0_REGNUM + reg - AARCH64_DWARF_SVE_P0;
2019
2020 if (reg >= AARCH64_DWARF_SVE_Z0 && reg <= AARCH64_DWARF_SVE_Z0 + 31)
2021 return AARCH64_SVE_Z0_REGNUM + reg - AARCH64_DWARF_SVE_Z0;
2022
2023 if (tdep->has_pauth ())
2024 {
2025 if (reg >= AARCH64_DWARF_PAUTH_DMASK && reg <= AARCH64_DWARF_PAUTH_CMASK)
2026 return tdep->pauth_reg_base + reg - AARCH64_DWARF_PAUTH_DMASK;
2027
2028 if (reg == AARCH64_DWARF_PAUTH_RA_STATE)
2029 return tdep->pauth_ra_state_regnum;
2030 }
2031
2032 return -1;
2033 }
2034
2035 /* Implement the "print_insn" gdbarch method. */
2036
2037 static int
2038 aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
2039 {
2040 info->symbols = NULL;
2041 return default_print_insn (memaddr, info);
2042 }
2043
2044 /* AArch64 BRK software debug mode instruction.
2045 Note that AArch64 code is always little-endian.
2046 1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000. */
2047 constexpr gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
2048
2049 typedef BP_MANIPULATION (aarch64_default_breakpoint) aarch64_breakpoint;
2050
2051 /* Extract from an array REGS containing the (raw) register state a
2052 function return value of type TYPE, and copy that, in virtual
2053 format, into VALBUF. */
2054
2055 static void
2056 aarch64_extract_return_value (struct type *type, struct regcache *regs,
2057 gdb_byte *valbuf)
2058 {
2059 struct gdbarch *gdbarch = regs->arch ();
2060 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2061 int elements;
2062 struct type *fundamental_type;
2063
2064 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2065 &fundamental_type))
2066 {
2067 int len = TYPE_LENGTH (fundamental_type);
2068
2069 for (int i = 0; i < elements; i++)
2070 {
2071 int regno = AARCH64_V0_REGNUM + i;
2072 /* Enough space for a full vector register. */
2073 gdb_byte buf[register_size (gdbarch, regno)];
2074 gdb_assert (len <= sizeof (buf));
2075
2076 if (aarch64_debug)
2077 {
2078 debug_printf ("read HFA or HVA return value element %d from %s\n",
2079 i + 1,
2080 gdbarch_register_name (gdbarch, regno));
2081 }
2082 regs->cooked_read (regno, buf);
2083
2084 memcpy (valbuf, buf, len);
2085 valbuf += len;
2086 }
2087 }
2088 else if (TYPE_CODE (type) == TYPE_CODE_INT
2089 || TYPE_CODE (type) == TYPE_CODE_CHAR
2090 || TYPE_CODE (type) == TYPE_CODE_BOOL
2091 || TYPE_CODE (type) == TYPE_CODE_PTR
2092 || TYPE_IS_REFERENCE (type)
2093 || TYPE_CODE (type) == TYPE_CODE_ENUM)
2094 {
2095 /* If the type is a plain integer, then the access is
2096 straightforward.  Otherwise we have to play around a bit
2097 more. */
2098 int len = TYPE_LENGTH (type);
2099 int regno = AARCH64_X0_REGNUM;
2100 ULONGEST tmp;
2101
2102 while (len > 0)
2103 {
2104 /* By using store_unsigned_integer we avoid having to do
2105 anything special for small big-endian values. */
2106 regcache_cooked_read_unsigned (regs, regno++, &tmp);
2107 store_unsigned_integer (valbuf,
2108 (len > X_REGISTER_SIZE
2109 ? X_REGISTER_SIZE : len), byte_order, tmp);
2110 len -= X_REGISTER_SIZE;
2111 valbuf += X_REGISTER_SIZE;
2112 }
2113 }
2114 else
2115 {
2116 /* For a structure or union the behaviour is as if the value had
2117 been stored to word-aligned memory and then loaded into
2118 registers with 64-bit load instruction(s). */
2119 int len = TYPE_LENGTH (type);
2120 int regno = AARCH64_X0_REGNUM;
2121 bfd_byte buf[X_REGISTER_SIZE];
2122
2123 while (len > 0)
2124 {
2125 regs->cooked_read (regno++, buf);
2126 memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2127 len -= X_REGISTER_SIZE;
2128 valbuf += X_REGISTER_SIZE;
2129 }
2130 }
2131 }
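/* For example, a 12-byte struct is extracted by reading x0 in full and
   then copying the first four bytes of x1, mirroring a sequence of
   64-bit loads from word-aligned memory.  */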
2132
2133
2134 /* Will a function return an aggregate type in memory or in a
2135 register? Return 0 if an aggregate type can be returned in a
2136 register, 1 if it must be returned in memory. */
2137
2138 static int
2139 aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
2140 {
2141 type = check_typedef (type);
2142 int elements;
2143 struct type *fundamental_type;
2144
2145 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2146 &fundamental_type))
2147 {
2148 /* v0-v7 are used to return values and one register is allocated
2149 for one member.  However, an HFA or HVA has at most four members.  */
2150 return 0;
2151 }
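/* For instance, struct { float x, y, z; } is an HFA of three floats
   and is returned in s0-s2, one V register per member, rather than
   in memory.  */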
2152
2153 if (TYPE_LENGTH (type) > 16)
2154 {
2155 /* PCS B.6 Aggregates larger than 16 bytes are passed by
2156 invisible reference. */
2157
2158 return 1;
2159 }
2160
2161 return 0;
2162 }
2163
2164 /* Write into appropriate registers a function return value of type
2165 TYPE, given in virtual format. */
2166
2167 static void
2168 aarch64_store_return_value (struct type *type, struct regcache *regs,
2169 const gdb_byte *valbuf)
2170 {
2171 struct gdbarch *gdbarch = regs->arch ();
2172 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2173 int elements;
2174 struct type *fundamental_type;
2175
2176 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2177 &fundamental_type))
2178 {
2179 int len = TYPE_LENGTH (fundamental_type);
2180
2181 for (int i = 0; i < elements; i++)
2182 {
2183 int regno = AARCH64_V0_REGNUM + i;
2184 /* Enough space for a full vector register. */
2185 gdb_byte tmpbuf[register_size (gdbarch, regno)];
2186 gdb_assert (len <= sizeof (tmpbuf));
2187
2188 if (aarch64_debug)
2189 {
2190 debug_printf ("write HFA or HVA return value element %d to %s\n",
2191 i + 1,
2192 gdbarch_register_name (gdbarch, regno));
2193 }
2194
2195 memcpy (tmpbuf, valbuf,
2196 len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
2197 regs->cooked_write (regno, tmpbuf);
2198 valbuf += len;
2199 }
2200 }
2201 else if (TYPE_CODE (type) == TYPE_CODE_INT
2202 || TYPE_CODE (type) == TYPE_CODE_CHAR
2203 || TYPE_CODE (type) == TYPE_CODE_BOOL
2204 || TYPE_CODE (type) == TYPE_CODE_PTR
2205 || TYPE_IS_REFERENCE (type)
2206 || TYPE_CODE (type) == TYPE_CODE_ENUM)
2207 {
2208 if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
2209 {
2210 /* Values of one word or less are zero/sign-extended and
2211 returned in x0.  */
2212 bfd_byte tmpbuf[X_REGISTER_SIZE];
2213 LONGEST val = unpack_long (type, valbuf);
2214
2215 store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
2216 regs->cooked_write (AARCH64_X0_REGNUM, tmpbuf);
2217 }
2218 else
2219 {
2220 /* Integral values greater than one word are stored in
2221 consecutive registers starting with x0.  This will always
2222 be a multiple of the register size.  */
2223 int len = TYPE_LENGTH (type);
2224 int regno = AARCH64_X0_REGNUM;
2225
2226 while (len > 0)
2227 {
2228 regs->cooked_write (regno++, valbuf);
2229 len -= X_REGISTER_SIZE;
2230 valbuf += X_REGISTER_SIZE;
2231 }
2232 }
2233 }
2234 else
2235 {
2236 /* For a structure or union the behaviour is as if the value had
2237 been stored to word-aligned memory and then loaded into
2238 registers with 64-bit load instruction(s). */
2239 int len = TYPE_LENGTH (type);
2240 int regno = AARCH64_X0_REGNUM;
2241 bfd_byte tmpbuf[X_REGISTER_SIZE];
2242
2243 while (len > 0)
2244 {
2245 memcpy (tmpbuf, valbuf,
2246 len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2247 regs->cooked_write (regno++, tmpbuf);
2248 len -= X_REGISTER_SIZE;
2249 valbuf += X_REGISTER_SIZE;
2250 }
2251 }
2252 }
2253
2254 /* Implement the "return_value" gdbarch method. */
2255
2256 static enum return_value_convention
2257 aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
2258 struct type *valtype, struct regcache *regcache,
2259 gdb_byte *readbuf, const gdb_byte *writebuf)
2260 {
2261
2262 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
2263 || TYPE_CODE (valtype) == TYPE_CODE_UNION
2264 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
2265 {
2266 if (aarch64_return_in_memory (gdbarch, valtype))
2267 {
2268 if (aarch64_debug)
2269 debug_printf ("return value in memory\n");
2270 return RETURN_VALUE_STRUCT_CONVENTION;
2271 }
2272 }
2273
2274 if (writebuf)
2275 aarch64_store_return_value (valtype, regcache, writebuf);
2276
2277 if (readbuf)
2278 aarch64_extract_return_value (valtype, regcache, readbuf);
2279
2280 if (aarch64_debug)
2281 debug_printf ("return value in registers\n");
2282
2283 return RETURN_VALUE_REGISTER_CONVENTION;
2284 }
2285
2286 /* Implement the "get_longjmp_target" gdbarch method. */
2287
2288 static int
2289 aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2290 {
2291 CORE_ADDR jb_addr;
2292 gdb_byte buf[X_REGISTER_SIZE];
2293 struct gdbarch *gdbarch = get_frame_arch (frame);
2294 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2295 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2296
2297 jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
2298
2299 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
2300 X_REGISTER_SIZE))
2301 return 0;
2302
2303 *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
2304 return 1;
2305 }
2306
2307 /* Implement the "gen_return_address" gdbarch method. */
2308
2309 static void
2310 aarch64_gen_return_address (struct gdbarch *gdbarch,
2311 struct agent_expr *ax, struct axs_value *value,
2312 CORE_ADDR scope)
2313 {
2314 value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
2315 value->kind = axs_lvalue_register;
2316 value->u.reg = AARCH64_LR_REGNUM;
2317 }
2318 \f
2319
2320 /* Return the pseudo register name corresponding to register regnum. */
2321
2322 static const char *
2323 aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
2324 {
2325 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2326
2327 static const char *const q_name[] =
2328 {
2329 "q0", "q1", "q2", "q3",
2330 "q4", "q5", "q6", "q7",
2331 "q8", "q9", "q10", "q11",
2332 "q12", "q13", "q14", "q15",
2333 "q16", "q17", "q18", "q19",
2334 "q20", "q21", "q22", "q23",
2335 "q24", "q25", "q26", "q27",
2336 "q28", "q29", "q30", "q31",
2337 };
2338
2339 static const char *const d_name[] =
2340 {
2341 "d0", "d1", "d2", "d3",
2342 "d4", "d5", "d6", "d7",
2343 "d8", "d9", "d10", "d11",
2344 "d12", "d13", "d14", "d15",
2345 "d16", "d17", "d18", "d19",
2346 "d20", "d21", "d22", "d23",
2347 "d24", "d25", "d26", "d27",
2348 "d28", "d29", "d30", "d31",
2349 };
2350
2351 static const char *const s_name[] =
2352 {
2353 "s0", "s1", "s2", "s3",
2354 "s4", "s5", "s6", "s7",
2355 "s8", "s9", "s10", "s11",
2356 "s12", "s13", "s14", "s15",
2357 "s16", "s17", "s18", "s19",
2358 "s20", "s21", "s22", "s23",
2359 "s24", "s25", "s26", "s27",
2360 "s28", "s29", "s30", "s31",
2361 };
2362
2363 static const char *const h_name[] =
2364 {
2365 "h0", "h1", "h2", "h3",
2366 "h4", "h5", "h6", "h7",
2367 "h8", "h9", "h10", "h11",
2368 "h12", "h13", "h14", "h15",
2369 "h16", "h17", "h18", "h19",
2370 "h20", "h21", "h22", "h23",
2371 "h24", "h25", "h26", "h27",
2372 "h28", "h29", "h30", "h31",
2373 };
2374
2375 static const char *const b_name[] =
2376 {
2377 "b0", "b1", "b2", "b3",
2378 "b4", "b5", "b6", "b7",
2379 "b8", "b9", "b10", "b11",
2380 "b12", "b13", "b14", "b15",
2381 "b16", "b17", "b18", "b19",
2382 "b20", "b21", "b22", "b23",
2383 "b24", "b25", "b26", "b27",
2384 "b28", "b29", "b30", "b31",
2385 };
2386
2387 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
2388
2389 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
2390 return q_name[p_regnum - AARCH64_Q0_REGNUM];
2391
2392 if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
2393 return d_name[p_regnum - AARCH64_D0_REGNUM];
2394
2395 if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
2396 return s_name[p_regnum - AARCH64_S0_REGNUM];
2397
2398 if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
2399 return h_name[p_regnum - AARCH64_H0_REGNUM];
2400
2401 if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
2402 return b_name[p_regnum - AARCH64_B0_REGNUM];
2403
2404 if (tdep->has_sve ())
2405 {
2406 static const char *const sve_v_name[] =
2407 {
2408 "v0", "v1", "v2", "v3",
2409 "v4", "v5", "v6", "v7",
2410 "v8", "v9", "v10", "v11",
2411 "v12", "v13", "v14", "v15",
2412 "v16", "v17", "v18", "v19",
2413 "v20", "v21", "v22", "v23",
2414 "v24", "v25", "v26", "v27",
2415 "v28", "v29", "v30", "v31",
2416 };
2417
2418 if (p_regnum >= AARCH64_SVE_V0_REGNUM
2419 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2420 return sve_v_name[p_regnum - AARCH64_SVE_V0_REGNUM];
2421 }
2422
2423 /* RA_STATE is used for unwinding only. Do not assign it a name - this
2424 prevents it from being read by methods such as
2425 mi_cmd_trace_frame_collected. */
2426 if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
2427 return "";
2428
2429 internal_error (__FILE__, __LINE__,
2430 _("aarch64_pseudo_register_name: bad register number %d"),
2431 p_regnum);
2432 }
2433
2434 /* Implement the "pseudo_register_type" tdesc_arch_data method. */
2435
2436 static struct type *
2437 aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
2438 {
2439 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2440
2441 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
2442
2443 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
2444 return aarch64_vnq_type (gdbarch);
2445
2446 if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
2447 return aarch64_vnd_type (gdbarch);
2448
2449 if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
2450 return aarch64_vns_type (gdbarch);
2451
2452 if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
2453 return aarch64_vnh_type (gdbarch);
2454
2455 if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
2456 return aarch64_vnb_type (gdbarch);
2457
2458 if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
2459 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2460 return aarch64_vnv_type (gdbarch);
2461
2462 if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
2463 return builtin_type (gdbarch)->builtin_uint64;
2464
2465 internal_error (__FILE__, __LINE__,
2466 _("aarch64_pseudo_register_type: bad register number %d"),
2467 p_regnum);
2468 }
2469
2470 /* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2471
2472 static int
2473 aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
2474 struct reggroup *group)
2475 {
2476 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2477
2478 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
2479
2480 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
2481 return group == all_reggroup || group == vector_reggroup;
2482 else if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
2483 return (group == all_reggroup || group == vector_reggroup
2484 || group == float_reggroup);
2485 else if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
2486 return (group == all_reggroup || group == vector_reggroup
2487 || group == float_reggroup);
2488 else if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
2489 return group == all_reggroup || group == vector_reggroup;
2490 else if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
2491 return group == all_reggroup || group == vector_reggroup;
2492 else if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
2493 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2494 return group == all_reggroup || group == vector_reggroup;
2495 /* RA_STATE is used for unwinding only. Do not assign it to any groups. */
2496 if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
2497 return 0;
2498
2499 return group == all_reggroup;
2500 }
2501
2502 /* Helper for aarch64_pseudo_read_value. */
2503
2504 static struct value *
2505 aarch64_pseudo_read_value_1 (struct gdbarch *gdbarch,
2506 readable_regcache *regcache, int regnum_offset,
2507 int regsize, struct value *result_value)
2508 {
2509 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
2510
2511 /* Enough space for a full vector register. */
2512 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2513 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2514
2515 if (regcache->raw_read (v_regnum, reg_buf) != REG_VALID)
2516 mark_value_bytes_unavailable (result_value, 0,
2517 TYPE_LENGTH (value_type (result_value)));
2518 else
2519 memcpy (value_contents_raw (result_value), reg_buf, regsize);
2520
2521 return result_value;
2522 }
2523
2524 /* Implement the "pseudo_register_read_value" gdbarch method. */
2525
2526 static struct value *
2527 aarch64_pseudo_read_value (struct gdbarch *gdbarch, readable_regcache *regcache,
2528 int regnum)
2529 {
2530 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2531 struct value *result_value = allocate_value (register_type (gdbarch, regnum));
2532
2533 VALUE_LVAL (result_value) = lval_register;
2534 VALUE_REGNUM (result_value) = regnum;
2535
2536 regnum -= gdbarch_num_regs (gdbarch);
2537
2538 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2539 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2540 regnum - AARCH64_Q0_REGNUM,
2541 Q_REGISTER_SIZE, result_value);
2542
2543 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2544 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2545 regnum - AARCH64_D0_REGNUM,
2546 D_REGISTER_SIZE, result_value);
2547
2548 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2549 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2550 regnum - AARCH64_S0_REGNUM,
2551 S_REGISTER_SIZE, result_value);
2552
2553 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2554 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2555 regnum - AARCH64_H0_REGNUM,
2556 H_REGISTER_SIZE, result_value);
2557
2558 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2559 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2560 regnum - AARCH64_B0_REGNUM,
2561 B_REGISTER_SIZE, result_value);
2562
2563 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2564 && regnum < AARCH64_SVE_V0_REGNUM + 32)
2565 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2566 regnum - AARCH64_SVE_V0_REGNUM,
2567 V_REGISTER_SIZE, result_value);
2568
2569 gdb_assert_not_reached ("regnum out of bounds");
2570 }
2571
2572 /* Helper for aarch64_pseudo_write. */
2573
2574 static void
2575 aarch64_pseudo_write_1 (struct gdbarch *gdbarch, struct regcache *regcache,
2576 int regnum_offset, int regsize, const gdb_byte *buf)
2577 {
2578 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
2579
2580 /* Enough space for a full vector register. */
2581 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2582 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2583
2584 /* Ensure the register buffer is zero.  We want GDB writes of the
2585 various 'scalar' pseudo registers to behave like architectural
2586 writes: register-width bytes are written and the remainder is set
2587 to zero.  */
2588 memset (reg_buf, 0, register_size (gdbarch, AARCH64_V0_REGNUM));
2589
2590 memcpy (reg_buf, buf, regsize);
2591 regcache->raw_write (v_regnum, reg_buf);
2592 }
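/* For example, "set $s0 = 1.0f" writes four bytes and clears the
   remaining twelve bytes of v0, mirroring the architectural zeroing
   of the upper lanes on a scalar write.  */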
2593
2594 /* Implement the "pseudo_register_write" gdbarch method. */
2595
2596 static void
2597 aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2598 int regnum, const gdb_byte *buf)
2599 {
2600 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2601 regnum -= gdbarch_num_regs (gdbarch);
2602
2603 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2604 return aarch64_pseudo_write_1 (gdbarch, regcache,
2605 regnum - AARCH64_Q0_REGNUM, Q_REGISTER_SIZE,
2606 buf);
2607
2608 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2609 return aarch64_pseudo_write_1 (gdbarch, regcache,
2610 regnum - AARCH64_D0_REGNUM, D_REGISTER_SIZE,
2611 buf);
2612
2613 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2614 return aarch64_pseudo_write_1 (gdbarch, regcache,
2615 regnum - AARCH64_S0_REGNUM, S_REGISTER_SIZE,
2616 buf);
2617
2618 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2619 return aarch64_pseudo_write_1 (gdbarch, regcache,
2620 regnum - AARCH64_H0_REGNUM, H_REGISTER_SIZE,
2621 buf);
2622
2623 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2624 return aarch64_pseudo_write_1 (gdbarch, regcache,
2625 regnum - AARCH64_B0_REGNUM, B_REGISTER_SIZE,
2626 buf);
2627
2628 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2629 && regnum < AARCH64_SVE_V0_REGNUM + 32)
2630 return aarch64_pseudo_write_1 (gdbarch, regcache,
2631 regnum - AARCH64_SVE_V0_REGNUM,
2632 V_REGISTER_SIZE, buf);
2633
2634 gdb_assert_not_reached ("regnum out of bounds");
2635 }
2636
2637 /* Callback function for user_reg_add. */
2638
2639 static struct value *
2640 value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
2641 {
2642 const int *reg_p = (const int *) baton;
2643
2644 return value_of_register (*reg_p, frame);
2645 }
2646 \f
2647
2648 /* Implement the "software_single_step" gdbarch method, needed to
2649 single step through atomic sequences on AArch64. */
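/* A typical sequence looks like this (illustrative):

     retry:
       ldaxr  w1, [x0]      ; load exclusive opens the sequence
       add    w1, w1, #1
       stlxr  w2, w1, [x0]  ; store exclusive closes it
       cbnz   w2, retry

   Taking a debug trap inside the sequence can clear the exclusive
   monitor, making the store exclusive fail and the loop restart
   indefinitely, so instead of stepping each instruction we continue
   to a breakpoint placed just past the store exclusive.  */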
2650
2651 static std::vector<CORE_ADDR>
2652 aarch64_software_single_step (struct regcache *regcache)
2653 {
2654 struct gdbarch *gdbarch = regcache->arch ();
2655 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2656 const int insn_size = 4;
2657 const int atomic_sequence_length = 16; /* Instruction sequence length. */
2658 CORE_ADDR pc = regcache_read_pc (regcache);
2659 CORE_ADDR breaks[2] = { CORE_ADDR_MAX, CORE_ADDR_MAX };
2660 CORE_ADDR loc = pc;
2661 CORE_ADDR closing_insn = 0;
2662 uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
2663 byte_order_for_code);
2664 int index;
2665 int insn_count;
2666 int bc_insn_count = 0; /* Conditional branch instruction count. */
2667 int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed). */
2668 aarch64_inst inst;
2669
2670 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2671 return {};
2672
2673 /* Look for a Load Exclusive instruction which begins the sequence. */
2674 if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
2675 return {};
2676
2677 for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
2678 {
2679 loc += insn_size;
2680 insn = read_memory_unsigned_integer (loc, insn_size,
2681 byte_order_for_code);
2682
2683 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2684 return {};
2685 /* Check if the instruction is a conditional branch. */
2686 if (inst.opcode->iclass == condbranch)
2687 {
2688 gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);
2689
2690 if (bc_insn_count >= 1)
2691 return {};
2692
2693 /* It is, so we'll try to set a breakpoint at the destination. */
2694 breaks[1] = loc + inst.operands[0].imm.value;
2695
2696 bc_insn_count++;
2697 last_breakpoint++;
2698 }
2699
2700 /* Look for the Store Exclusive which closes the atomic sequence. */
2701 if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
2702 {
2703 closing_insn = loc;
2704 break;
2705 }
2706 }
2707
2708 /* We didn't find a closing Store Exclusive instruction, fall back. */
2709 if (!closing_insn)
2710 return {};
2711
2712 /* Insert breakpoint after the end of the atomic sequence. */
2713 breaks[0] = loc + insn_size;
2714
2715 /* Check for duplicated breakpoints, and also check that the second
2716 breakpoint is not within the atomic sequence. */
2717 if (last_breakpoint
2718 && (breaks[1] == breaks[0]
2719 || (breaks[1] >= pc && breaks[1] <= closing_insn)))
2720 last_breakpoint = 0;
2721
2722 std::vector<CORE_ADDR> next_pcs;
2723
2724 /* Insert the breakpoint at the end of the sequence, and one at the
2725 destination of the conditional branch, if it exists. */
2726 for (index = 0; index <= last_breakpoint; index++)
2727 next_pcs.push_back (breaks[index]);
2728
2729 return next_pcs;
2730 }
2731
2732 struct aarch64_displaced_step_closure : public displaced_step_closure
2733 {
2734 /* True when a conditional instruction, such as B.COND or TBZ,
2735 is being displaced stepped.  */
2736 int cond = 0;
2737
2738 /* PC adjustment offset after displaced stepping. */
2739 int32_t pc_adjust = 0;
2740 };
2741
2742 /* Data when visiting instructions for displaced stepping. */
2743
2744 struct aarch64_displaced_step_data
2745 {
2746 struct aarch64_insn_data base;
2747
2748 /* The address at which the instruction will be executed.  */
2749 CORE_ADDR new_addr;
2750 /* Buffer of instructions to be copied to NEW_ADDR to execute. */
2751 uint32_t insn_buf[AARCH64_DISPLACED_MODIFIED_INSNS];
2752 /* Number of instructions in INSN_BUF. */
2753 unsigned insn_count;
2754 /* Registers when doing displaced stepping. */
2755 struct regcache *regs;
2756
2757 aarch64_displaced_step_closure *dsc;
2758 };
2759
2760 /* Implementation of aarch64_insn_visitor method "b". */
2761
2762 static void
2763 aarch64_displaced_step_b (const int is_bl, const int32_t offset,
2764 struct aarch64_insn_data *data)
2765 {
2766 struct aarch64_displaced_step_data *dsd
2767 = (struct aarch64_displaced_step_data *) data;
2768 int64_t new_offset = data->insn_addr - dsd->new_addr + offset;
2769
2770 if (can_encode_int32 (new_offset, 28))
2771 {
2772 /* Emit B rather than BL, because executing BL on a new address
2773 will get the wrong address into LR. In order to avoid this,
2774 we emit B, and update LR if the instruction is BL. */
2775 emit_b (dsd->insn_buf, 0, new_offset);
2776 dsd->insn_count++;
2777 }
2778 else
2779 {
2780 /* Write NOP. */
2781 emit_nop (dsd->insn_buf);
2782 dsd->insn_count++;
2783 dsd->dsc->pc_adjust = offset;
2784 }
2785
2786 if (is_bl)
2787 {
2788 /* Update LR. */
2789 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
2790 data->insn_addr + 4);
2791 }
2792 }
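/* Worked example: a BL at 0x400000 with offset 0x1000 copied to a
   scratch pad at 0x500000 is re-emitted with
   new_offset = 0x400000 - 0x500000 + 0x1000 = -0xff000, which still
   reaches the original target 0x401000.  */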
2793
2794 /* Implementation of aarch64_insn_visitor method "b_cond". */
2795
2796 static void
2797 aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
2798 struct aarch64_insn_data *data)
2799 {
2800 struct aarch64_displaced_step_data *dsd
2801 = (struct aarch64_displaced_step_data *) data;
2802
2803 /* GDB has to fix up the PC after displaced stepping this instruction
2804 differently according to whether the condition is true or false.
2805 Instead of checking COND against the condition flags, we can emit
2806 the following instructions, and GDB can then tell how to fix up the
2807 PC from the resulting PC value.
2808
2809 B.COND TAKEN ; If cond is true, then jump to TAKEN.
2810 INSN1 ;
2811 TAKEN:
2812 INSN2
2813 */
2814
2815 emit_bcond (dsd->insn_buf, cond, 8);
2816 dsd->dsc->cond = 1;
2817 dsd->dsc->pc_adjust = offset;
2818 dsd->insn_count = 1;
2819 }
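/* Later, aarch64_displaced_step_fixup sees PC == TO + 8 when the
   B.COND above was taken (condition true) and PC == TO + 4 when it
   fell through, and adjusts the real PC accordingly.  */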
2820
2821 /* Build an aarch64_register operand by value.  If we know the register
2822 statically, we should make it a global as above instead of using this
2823 helper function.  */
2824
2825 static struct aarch64_register
2826 aarch64_register (unsigned num, int is64)
2827 {
2828 return (struct aarch64_register) { num, is64 };
2829 }
2830
2831 /* Implementation of aarch64_insn_visitor method "cb". */
2832
2833 static void
2834 aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
2835 const unsigned rn, int is64,
2836 struct aarch64_insn_data *data)
2837 {
2838 struct aarch64_displaced_step_data *dsd
2839 = (struct aarch64_displaced_step_data *) data;
2840
2841 /* The offset is out of range for a compare and branch
2842 instruction. We can use the following instructions instead:
2843
2844 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
2845 INSN1 ;
2846 TAKEN:
2847 INSN2
2848 */
2849 emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
2850 dsd->insn_count = 1;
2851 dsd->dsc->cond = 1;
2852 dsd->dsc->pc_adjust = offset;
2853 }
2854
2855 /* Implementation of aarch64_insn_visitor method "tb". */
2856
2857 static void
2858 aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
2859 const unsigned rt, unsigned bit,
2860 struct aarch64_insn_data *data)
2861 {
2862 struct aarch64_displaced_step_data *dsd
2863 = (struct aarch64_displaced_step_data *) data;
2864
2865 /* The offset is out of range for a test bit and branch
2866 instruction.  We can use the following instructions instead:
2867
2868 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
2869 INSN1 ;
2870 TAKEN:
2871 INSN2
2872
2873 */
2874 emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
2875 dsd->insn_count = 1;
2876 dsd->dsc->cond = 1;
2877 dsd->dsc->pc_adjust = offset;
2878 }
2879
2880 /* Implementation of aarch64_insn_visitor method "adr". */
2881
2882 static void
2883 aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
2884 const int is_adrp, struct aarch64_insn_data *data)
2885 {
2886 struct aarch64_displaced_step_data *dsd
2887 = (struct aarch64_displaced_step_data *) data;
2888 /* We know exactly the address the ADR{P,} instruction will compute.
2889 We can just write it to the destination register. */
2890 CORE_ADDR address = data->insn_addr + offset;
2891
2892 if (is_adrp)
2893 {
2894 /* Clear the lower 12 bits of the address to get its 4K page base.  */
2895 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2896 address & ~0xfff);
2897 }
2898 else
2899 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2900 address);
2901
2902 dsd->dsc->pc_adjust = 4;
2903 emit_nop (dsd->insn_buf);
2904 dsd->insn_count = 1;
2905 }
2906
2907 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
2908
2909 static void
2910 aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
2911 const unsigned rt, const int is64,
2912 struct aarch64_insn_data *data)
2913 {
2914 struct aarch64_displaced_step_data *dsd
2915 = (struct aarch64_displaced_step_data *) data;
2916 CORE_ADDR address = data->insn_addr + offset;
2917 struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };
2918
2919 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
2920 address);
2921
2922 if (is_sw)
2923 dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
2924 aarch64_register (rt, 1), zero);
2925 else
2926 dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
2927 aarch64_register (rt, 1), zero);
2928
2929 dsd->dsc->pc_adjust = 4;
2930 }
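/* The trick above: GDB computes the literal's absolute address and
   places it in RT, then executes an ordinary LDR RT, [RT, #0] in the
   scratch pad, so the PC-relative load needs no reachable literal.  */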
2931
2932 /* Implementation of aarch64_insn_visitor method "others". */
2933
2934 static void
2935 aarch64_displaced_step_others (const uint32_t insn,
2936 struct aarch64_insn_data *data)
2937 {
2938 struct aarch64_displaced_step_data *dsd
2939 = (struct aarch64_displaced_step_data *) data;
2940
2941 aarch64_emit_insn (dsd->insn_buf, insn);
2942 dsd->insn_count = 1;
2943
2944 if ((insn & 0xfffffc1f) == 0xd65f0000)
2945 {
2946 /* RET */
2947 dsd->dsc->pc_adjust = 0;
2948 }
2949 else
2950 dsd->dsc->pc_adjust = 4;
2951 }
2952
2953 static const struct aarch64_insn_visitor visitor =
2954 {
2955 aarch64_displaced_step_b,
2956 aarch64_displaced_step_b_cond,
2957 aarch64_displaced_step_cb,
2958 aarch64_displaced_step_tb,
2959 aarch64_displaced_step_adr,
2960 aarch64_displaced_step_ldr_literal,
2961 aarch64_displaced_step_others,
2962 };
2963
2964 /* Implement the "displaced_step_copy_insn" gdbarch method. */
2965
2966 struct displaced_step_closure *
2967 aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
2968 CORE_ADDR from, CORE_ADDR to,
2969 struct regcache *regs)
2970 {
2971 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2972 uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
2973 struct aarch64_displaced_step_data dsd;
2974 aarch64_inst inst;
2975
2976 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2977 return NULL;
2978
2979 /* Look for a Load Exclusive instruction which begins the sequence. */
2980 if (inst.opcode->iclass == ldstexcl && bit (insn, 22))
2981 {
2982 /* We can't displaced step atomic sequences. */
2983 return NULL;
2984 }
2985
2986 std::unique_ptr<aarch64_displaced_step_closure> dsc
2987 (new aarch64_displaced_step_closure);
2988 dsd.base.insn_addr = from;
2989 dsd.new_addr = to;
2990 dsd.regs = regs;
2991 dsd.dsc = dsc.get ();
2992 dsd.insn_count = 0;
2993 aarch64_relocate_instruction (insn, &visitor,
2994 (struct aarch64_insn_data *) &dsd);
2995 gdb_assert (dsd.insn_count <= AARCH64_DISPLACED_MODIFIED_INSNS);
2996
2997 if (dsd.insn_count != 0)
2998 {
2999 int i;
3000
3001 /* Instruction can be relocated to scratch pad. Copy
3002 relocated instruction(s) there. */
3003 for (i = 0; i < dsd.insn_count; i++)
3004 {
3005 if (debug_displaced)
3006 {
3007 debug_printf ("displaced: writing insn ");
3008 debug_printf ("%.8x", dsd.insn_buf[i]);
3009 debug_printf (" at %s\n", paddress (gdbarch, to + i * 4));
3010 }
3011 write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
3012 (ULONGEST) dsd.insn_buf[i]);
3013 }
3014 }
3015 else
3016 {
3017 dsc = NULL;
3018 }
3019
3020 return dsc.release ();
3021 }
3022
3023 /* Implement the "displaced_step_fixup" gdbarch method. */
3024
3025 void
3026 aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
3027 struct displaced_step_closure *dsc_,
3028 CORE_ADDR from, CORE_ADDR to,
3029 struct regcache *regs)
3030 {
3031 aarch64_displaced_step_closure *dsc = (aarch64_displaced_step_closure *) dsc_;
3032
3033 if (dsc->cond)
3034 {
3035 ULONGEST pc;
3036
3037 regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc);
3038 if (pc - to == 8)
3039 {
3040 /* Condition is true. */
3041 }
3042 else if (pc - to == 4)
3043 {
3044 /* Condition is false. */
3045 dsc->pc_adjust = 4;
3046 }
3047 else
3048 gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
3049 }
3050
3051 if (dsc->pc_adjust != 0)
3052 {
3053 if (debug_displaced)
3054 {
3055 debug_printf ("displaced: fixup: set PC to %s:%d\n",
3056 paddress (gdbarch, from), dsc->pc_adjust);
3057 }
3058 regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
3059 from + dsc->pc_adjust);
3060 }
3061 }
3062
3063 /* Implement the "displaced_step_hw_singlestep" gdbarch method. */
3064
3065 int
3066 aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch,
3067 struct displaced_step_closure *closure)
3068 {
3069 return 1;
3070 }
3071
3072 /* Get the correct target description for the given VQ value.
3073 If VQ is zero then it is assumed SVE is not supported.
3074 (It is not possible to set VQ to zero on an SVE system). */
3075
3076 const target_desc *
3077 aarch64_read_description (uint64_t vq, bool pauth_p)
3078 {
3079 if (vq > AARCH64_MAX_SVE_VQ)
3080 error (_("VQ is %" PRIu64 ", maximum supported value is %d"), vq,
3081 AARCH64_MAX_SVE_VQ);
3082
3083 struct target_desc *tdesc = tdesc_aarch64_list[vq][pauth_p];
3084
3085 if (tdesc == NULL)
3086 {
3087 tdesc = aarch64_create_target_description (vq, pauth_p);
3088 tdesc_aarch64_list[vq][pauth_p] = tdesc;
3089 }
3090
3091 return tdesc;
3092 }
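/* For example, aarch64_read_description (2, false) yields (and
   caches) the description for a 256-bit SVE target without pointer
   authentication.  */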
3093
3094 /* Return the VQ used when creating the target description TDESC. */
3095
3096 static uint64_t
3097 aarch64_get_tdesc_vq (const struct target_desc *tdesc)
3098 {
3099 const struct tdesc_feature *feature_sve;
3100
3101 if (!tdesc_has_registers (tdesc))
3102 return 0;
3103
3104 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
3105
3106 if (feature_sve == nullptr)
3107 return 0;
3108
3109 uint64_t vl = tdesc_register_bitsize (feature_sve,
3110 aarch64_sve_register_names[0]) / 8;
3111 return sve_vq_from_vl (vl);
3112 }
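/* VQ counts 128-bit quadwords, so a Z register advertised as 512 bits
   gives vl = 64 bytes and hence vq = 4.  */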
3113
3114 /* Add all the expected register sets into GDBARCH. */
3115
3116 static void
3117 aarch64_add_reggroups (struct gdbarch *gdbarch)
3118 {
3119 reggroup_add (gdbarch, general_reggroup);
3120 reggroup_add (gdbarch, float_reggroup);
3121 reggroup_add (gdbarch, system_reggroup);
3122 reggroup_add (gdbarch, vector_reggroup);
3123 reggroup_add (gdbarch, all_reggroup);
3124 reggroup_add (gdbarch, save_reggroup);
3125 reggroup_add (gdbarch, restore_reggroup);
3126 }
3127
3128 /* Implement the "cannot_store_register" gdbarch method. */
3129
3130 static int
3131 aarch64_cannot_store_register (struct gdbarch *gdbarch, int regnum)
3132 {
3133 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3134
3135 if (!tdep->has_pauth ())
3136 return 0;
3137
3138 /* Pointer authentication registers are read-only. */
3139 return (regnum == AARCH64_PAUTH_DMASK_REGNUM (tdep->pauth_reg_base)
3140 || regnum == AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base));
3141 }
3142
3143 /* Initialize the current architecture based on INFO. If possible,
3144 re-use an architecture from ARCHES, which is a list of
3145 architectures already created during this debugging session.
3146
3147 Called e.g. at program startup, when reading a core file, and when
3148 reading a binary file. */
3149
3150 static struct gdbarch *
3151 aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
3152 {
3153 const struct tdesc_feature *feature_core, *feature_fpu, *feature_sve;
3154 const struct tdesc_feature *feature_pauth;
3155 bool valid_p = true;
3156 int i, num_regs = 0, num_pseudo_regs = 0;
3157 int first_pauth_regnum = -1, pauth_ra_state_offset = -1;
3158
3159 /* Use the vector length passed via the target info.  Here -1 means SVE
3160 is not supported, and 0 means unset.  If unset then use the vector
3161 length from the existing tdesc.  */
3162 uint64_t vq = 0;
3163 if (info.id == (int *) -1)
3164 vq = 0;
3165 else if (info.id != 0)
3166 vq = (uint64_t) info.id;
3167 else
3168 vq = aarch64_get_tdesc_vq (info.target_desc);
3169
3170 if (vq > AARCH64_MAX_SVE_VQ)
3171 internal_error (__FILE__, __LINE__, _("VQ out of bounds: %s (max %d)"),
3172 pulongest (vq), AARCH64_MAX_SVE_VQ);
3173
3174 /* If there is already a candidate, use it. */
3175 for (gdbarch_list *best_arch = gdbarch_list_lookup_by_info (arches, &info);
3176 best_arch != nullptr;
3177 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
3178 {
3179 struct gdbarch_tdep *tdep = gdbarch_tdep (best_arch->gdbarch);
3180 if (tdep && tdep->vq == vq)
3181 return best_arch->gdbarch;
3182 }
3183
3184 /* Ensure we always have a target descriptor, and that it is for the given VQ
3185 value. */
3186 const struct target_desc *tdesc = info.target_desc;
3187 if (!tdesc_has_registers (tdesc) || vq != aarch64_get_tdesc_vq (tdesc))
3188 tdesc = aarch64_read_description (vq, false);
3189 gdb_assert (tdesc);
3190
3191 feature_core = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");
3192 feature_fpu = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
3193 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
3194 feature_pauth = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.pauth");
3195
3196 if (feature_core == nullptr)
3197 return nullptr;
3198
3199 struct tdesc_arch_data *tdesc_data = tdesc_data_alloc ();
3200
3201 /* Validate the description provides the mandatory core R registers
3202 and allocate their numbers. */
3203 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
3204 valid_p &= tdesc_numbered_register (feature_core, tdesc_data,
3205 AARCH64_X0_REGNUM + i,
3206 aarch64_r_register_names[i]);
3207
3208 num_regs = AARCH64_X0_REGNUM + i;
3209
3210 /* Add the V registers. */
3211 if (feature_fpu != nullptr)
3212 {
3213 if (feature_sve != nullptr)
3214 error (_("Program contains both fpu and SVE features."));
3215
3216 /* Validate the description provides the mandatory V registers
3217 and allocate their numbers. */
3218 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
3219 valid_p &= tdesc_numbered_register (feature_fpu, tdesc_data,
3220 AARCH64_V0_REGNUM + i,
3221 aarch64_v_register_names[i]);
3222
3223 num_regs = AARCH64_V0_REGNUM + i;
3224 }
3225
3226 /* Add the SVE registers. */
3227 if (feature_sve != nullptr)
3228 {
3229 /* Validate the description provides the mandatory SVE registers
3230 and allocate their numbers. */
3231 for (i = 0; i < ARRAY_SIZE (aarch64_sve_register_names); i++)
3232 valid_p &= tdesc_numbered_register (feature_sve, tdesc_data,
3233 AARCH64_SVE_Z0_REGNUM + i,
3234 aarch64_sve_register_names[i]);
3235
3236 num_regs = AARCH64_SVE_Z0_REGNUM + i;
3237 num_pseudo_regs += 32; /* add the Vn register pseudos. */
3238 }
3239
3240 if (feature_fpu != nullptr || feature_sve != nullptr)
3241 {
3242 num_pseudo_regs += 32; /* add the Qn scalar register pseudos.  */
3243 num_pseudo_regs += 32; /* add the Dn scalar register pseudos.  */
3244 num_pseudo_regs += 32; /* add the Sn scalar register pseudos.  */
3245 num_pseudo_regs += 32; /* add the Hn scalar register pseudos.  */
3246 num_pseudo_regs += 32; /* add the Bn scalar register pseudos.  */
3247 }
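/* A target with the fpu feature therefore gets 5 * 32 = 160 pseudo
   registers; an SVE target gets 192 including the Vn views, plus one
   more for RA_STATE when pauth is present.  */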
3248
3249 /* Add the pauth registers. */
3250 if (feature_pauth != NULL)
3251 {
3252 first_pauth_regnum = num_regs;
3253 pauth_ra_state_offset = num_pseudo_regs;
3254 /* Validate the description provides the mandatory PAUTH registers and
3255 allocate their numbers. */
3256 for (i = 0; i < ARRAY_SIZE (aarch64_pauth_register_names); i++)
3257 valid_p &= tdesc_numbered_register (feature_pauth, tdesc_data,
3258 first_pauth_regnum + i,
3259 aarch64_pauth_register_names[i]);
3260
3261 num_regs += i;
3262 num_pseudo_regs += 1; /* Count RA_STATE pseudo register. */
3263 }
3264
3265 if (!valid_p)
3266 {
3267 tdesc_data_cleanup (tdesc_data);
3268 return nullptr;
3269 }
3270
3271 /* AArch64 code is always little-endian. */
3272 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
3273
3274 struct gdbarch_tdep *tdep = XCNEW (struct gdbarch_tdep);
3275 struct gdbarch *gdbarch = gdbarch_alloc (&info, tdep);
3276
3277 /* This should be low enough for everything. */
3278 tdep->lowest_pc = 0x20;
3279 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
3280 tdep->jb_elt_size = 8;
3281 tdep->vq = vq;
3282 tdep->pauth_reg_base = first_pauth_regnum;
3283 tdep->pauth_ra_state_regnum = (feature_pauth == NULL) ? -1
3284 : pauth_ra_state_offset + num_regs;
3285
3286 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
3287 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
3288
3289 /* Advance PC across function entry code. */
3290 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
3291
3292 /* The stack grows downward. */
3293 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
3294
3295 /* Breakpoint manipulation. */
3296 set_gdbarch_breakpoint_kind_from_pc (gdbarch,
3297 aarch64_breakpoint::kind_from_pc);
3298 set_gdbarch_sw_breakpoint_from_kind (gdbarch,
3299 aarch64_breakpoint::bp_from_kind);
3300 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
3301 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
3302
3303 /* Information about registers, etc. */
3304 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
3305 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
3306 set_gdbarch_num_regs (gdbarch, num_regs);
3307
3308 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
3309 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
3310 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
3311 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
3312 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
3313 set_tdesc_pseudo_register_reggroup_p (gdbarch,
3314 aarch64_pseudo_register_reggroup_p);
3315 set_gdbarch_cannot_store_register (gdbarch, aarch64_cannot_store_register);
3316
3317 /* ABI */
3318 set_gdbarch_short_bit (gdbarch, 16);
3319 set_gdbarch_int_bit (gdbarch, 32);
3320 set_gdbarch_float_bit (gdbarch, 32);
3321 set_gdbarch_double_bit (gdbarch, 64);
3322 set_gdbarch_long_double_bit (gdbarch, 128);
3323 set_gdbarch_long_bit (gdbarch, 64);
3324 set_gdbarch_long_long_bit (gdbarch, 64);
3325 set_gdbarch_ptr_bit (gdbarch, 64);
3326 set_gdbarch_char_signed (gdbarch, 0);
3327 set_gdbarch_wchar_signed (gdbarch, 0);
3328 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
3329 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
3330 set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);
3331 set_gdbarch_type_align (gdbarch, aarch64_type_align);
3332
3333 /* Internal <-> external register number maps. */
3334 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
3335
3336 /* Returning results. */
3337 set_gdbarch_return_value (gdbarch, aarch64_return_value);
3338
3339 /* Disassembly. */
3340 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
3341
3342 /* Virtual tables. */
3343 set_gdbarch_vbit_in_delta (gdbarch, 1);
3344
3345 /* Register architecture. */
3346 aarch64_add_reggroups (gdbarch);
3347
3348 /* Hook in the ABI-specific overrides, if they have been registered. */
3349 info.target_desc = tdesc;
3350 info.tdesc_data = tdesc_data;
3351 gdbarch_init_osabi (info, gdbarch);
3352
3353 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
3354 /* Register DWARF CFA vendor handler. */
3355 set_gdbarch_execute_dwarf_cfa_vendor_op (gdbarch,
3356 aarch64_execute_dwarf_cfa_vendor_op);
3357
3358 /* Add some default predicates. */
3359 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
3360 dwarf2_append_unwinders (gdbarch);
3361 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
3362
3363 frame_base_set_default (gdbarch, &aarch64_normal_base);
3364
3365 /* Now we have tuned the configuration, set a few final things,
3366 based on what the OS ABI has told us. */
3367
3368 if (tdep->jb_pc >= 0)
3369 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
3370
3371 set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);
3372
3373 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
3374
3375 /* Add standard register aliases. */
3376 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
3377 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
3378 value_of_aarch64_user_reg,
3379 &aarch64_register_aliases[i].regnum);
3380
3381 register_aarch64_ravenscar_ops (gdbarch);
3382
3383 return gdbarch;
3384 }
3385
3386 static void
3387 aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
3388 {
3389 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3390
3391 if (tdep == NULL)
3392 return;
3393
3394 fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
3395 paddress (gdbarch, tdep->lowest_pc));
3396 }
3397
3398 #if GDB_SELF_TEST
3399 namespace selftests
3400 {
3401 static void aarch64_process_record_test (void);
3402 }
3403 #endif
3404
3405 void
3406 _initialize_aarch64_tdep (void)
3407 {
3408 gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
3409 aarch64_dump_tdep);
3410
3411 /* Debug this file's internals. */
3412 add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
3413 Set AArch64 debugging."), _("\
3414 Show AArch64 debugging."), _("\
3415 When on, AArch64 specific debugging is enabled."),
3416 NULL,
3417 show_aarch64_debug,
3418 &setdebuglist, &showdebuglist);
3419
3420 #if GDB_SELF_TEST
3421 selftests::register_test ("aarch64-analyze-prologue",
3422 selftests::aarch64_analyze_prologue_test);
3423 selftests::register_test ("aarch64-process-record",
3424 selftests::aarch64_process_record_test);
3425 #endif
3426 }
3427
3428 /* AArch64 process record-replay related structures, defines etc. */
3429
3430 #define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
3431 do \
3432 { \
3433 unsigned int reg_len = LENGTH; \
3434 if (reg_len) \
3435 { \
3436 REGS = XNEWVEC (uint32_t, reg_len); \
3437 memcpy (&REGS[0], &RECORD_BUF[0], sizeof (uint32_t) * LENGTH); \
3438 } \
3439 } \
3440 while (0)
3441
3442 #define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
3443 do \
3444 { \
3445 unsigned int mem_len = LENGTH; \
3446 if (mem_len) \
3447 { \
3448 MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
3449 memcpy (&MEMS->len, &RECORD_BUF[0], \
3450 sizeof (struct aarch64_mem_r) * LENGTH); \
3451 } \
3452 } \
3453 while (0)
3454
3455 /* AArch64 record/replay structures and enumerations. */
3456
3457 struct aarch64_mem_r
3458 {
3459 uint64_t len; /* Record length. */
3460 uint64_t addr; /* Memory address. */
3461 };
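/* Note that MEM_ALLOC copies through &MEMS->len; this is safe only
   because LEN is the first member, so its address is also the address
   of the first array element.  */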
3462
3463 enum aarch64_record_result
3464 {
3465 AARCH64_RECORD_SUCCESS,
3466 AARCH64_RECORD_UNSUPPORTED,
3467 AARCH64_RECORD_UNKNOWN
3468 };
3469
3470 typedef struct insn_decode_record_t
3471 {
3472 struct gdbarch *gdbarch;
3473 struct regcache *regcache;
3474 CORE_ADDR this_addr; /* Address of insn to be recorded. */
3475 uint32_t aarch64_insn; /* Insn to be recorded. */
3476 uint32_t mem_rec_count; /* Count of memory records. */
3477 uint32_t reg_rec_count; /* Count of register records. */
3478 uint32_t *aarch64_regs; /* Registers to be recorded. */
3479 struct aarch64_mem_r *aarch64_mems; /* Memory locations to be recorded. */
3480 } insn_decode_record;
3481
3482 /* Record handler for data processing - register instructions. */
3483
3484 static unsigned int
3485 aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
3486 {
3487 uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
3488 uint32_t record_buf[4];
3489
3490 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3491 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3492 insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);
3493
3494 if (!bit (aarch64_insn_r->aarch64_insn, 28))
3495 {
3496 uint8_t setflags;
3497
3498 /* Logical (shifted register). */
3499 if (insn_bits24_27 == 0x0a)
3500 setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
3501 /* Add/subtract. */
3502 else if (insn_bits24_27 == 0x0b)
3503 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3504 else
3505 return AARCH64_RECORD_UNKNOWN;
3506
3507 record_buf[0] = reg_rd;
3508 aarch64_insn_r->reg_rec_count = 1;
3509 if (setflags)
3510 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3511 }
3512 else
3513 {
3514 if (insn_bits24_27 == 0x0b)
3515 {
3516 /* Data-processing (3 source). */
3517 record_buf[0] = reg_rd;
3518 aarch64_insn_r->reg_rec_count = 1;
3519 }
3520 else if (insn_bits24_27 == 0x0a)
3521 {
3522 if (insn_bits21_23 == 0x00)
3523 {
3524 /* Add/subtract (with carry). */
3525 record_buf[0] = reg_rd;
3526 aarch64_insn_r->reg_rec_count = 1;
3527 if (bit (aarch64_insn_r->aarch64_insn, 29))
3528 {
3529 record_buf[1] = AARCH64_CPSR_REGNUM;
3530 aarch64_insn_r->reg_rec_count = 2;
3531 }
3532 }
3533 else if (insn_bits21_23 == 0x02)
3534 {
3535 /* Conditional compare (register) and conditional compare
3536 (immediate) instructions. */
3537 record_buf[0] = AARCH64_CPSR_REGNUM;
3538 aarch64_insn_r->reg_rec_count = 1;
3539 }
3540 else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
3541 {
3542 /* Conditional select.  */
3543 /* Data-processing (2 source). */
3544 /* Data-processing (1 source). */
3545 record_buf[0] = reg_rd;
3546 aarch64_insn_r->reg_rec_count = 1;
3547 }
3548 else
3549 return AARCH64_RECORD_UNKNOWN;
3550 }
3551 }
3552
3553 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3554 record_buf);
3555 return AARCH64_RECORD_SUCCESS;
3556 }
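/* For example, "adds x0, x1, x2" sets the flags and so records two
   registers, x0 and CPSR, whereas a plain "add x0, x1, x2" records
   x0 alone.  */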
3557
3558 /* Record handler for data processing - immediate instructions. */
3559
3560 static unsigned int
3561 aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
3562 {
3563 uint8_t reg_rd, insn_bit23, insn_bits24_27, setflags;
3564 uint32_t record_buf[4];
3565
3566 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3567 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3568 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3569
3570 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
3571 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
3572 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
3573 {
3574 record_buf[0] = reg_rd;
3575 aarch64_insn_r->reg_rec_count = 1;
3576 }
3577 else if (insn_bits24_27 == 0x01)
3578 {
3579 /* Add/Subtract (immediate). */
3580 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3581 record_buf[0] = reg_rd;
3582 aarch64_insn_r->reg_rec_count = 1;
3583 if (setflags)
3584 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3585 }
3586 else if (insn_bits24_27 == 0x02 && !insn_bit23)
3587 {
3588 /* Logical (immediate). */
3589 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
3590 record_buf[0] = reg_rd;
3591 aarch64_insn_r->reg_rec_count = 1;
3592 if (setflags)
3593 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3594 }
3595 else
3596 return AARCH64_RECORD_UNKNOWN;
3597
3598 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3599 record_buf);
3600 return AARCH64_RECORD_SUCCESS;
3601 }
3602
3603 /* Record handler for branch, exception generation and system instructions. */
3604
3605 static unsigned int
3606 aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
3607 {
3608 struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
3609 uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
3610 uint32_t record_buf[4];
3611
3612 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3613 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3614 insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3615
3616 if (insn_bits28_31 == 0x0d)
3617 {
3618 /* Exception generation instructions. */
3619 if (insn_bits24_27 == 0x04)
3620 {
3621 if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
3622 && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
3623 && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
3624 {
3625 ULONGEST svc_number;
3626
3627 regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
3628 &svc_number);
3629 return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
3630 svc_number);
3631 }
3632 else
3633 return AARCH64_RECORD_UNSUPPORTED;
3634 }
3635 /* System instructions. */
3636 else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
3637 {
3638 uint32_t reg_rt, reg_crn;
3639
3640 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3641 reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3642
3643 /* Record Rt for SYSL and MRS instructions. */
3644 if (bit (aarch64_insn_r->aarch64_insn, 21))
3645 {
3646 record_buf[0] = reg_rt;
3647 aarch64_insn_r->reg_rec_count = 1;
3648 }
3649 /* Record CPSR for HINT and MSR (immediate) instructions. */
3650 else if (reg_crn == 0x02 || reg_crn == 0x04)
3651 {
3652 record_buf[0] = AARCH64_CPSR_REGNUM;
3653 aarch64_insn_r->reg_rec_count = 1;
3654 }
3655 }
3656 /* Unconditional branch (register). */
3657 else if ((insn_bits24_27 & 0x0e) == 0x06)
3658 {
3659 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3660 if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
3661 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3662 }
3663 else
3664 return AARCH64_RECORD_UNKNOWN;
3665 }
3666 /* Unconditional branch (immediate). */
3667 else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
3668 {
3669 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3670 if (bit (aarch64_insn_r->aarch64_insn, 31))
3671 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3672 }
3673 else
3674 /* Compare & branch (immediate), Test & branch (immediate) and
3675 Conditional branch (immediate). */
3676 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3677
3678 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3679 record_buf);
3680 return AARCH64_RECORD_SUCCESS;
3681 }
3682
3683 /* Record handler for advanced SIMD load and store instructions. */
3684
3685 static unsigned int
3686 aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
3687 {
3688 CORE_ADDR address;
3689 uint64_t addr_offset = 0;
3690 uint32_t record_buf[24];
3691 uint64_t record_buf_mem[24];
3692 uint32_t reg_rn, reg_rt;
3693 uint32_t reg_index = 0, mem_index = 0;
3694 uint8_t opcode_bits, size_bits;
3695
3696 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3697 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3698 size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3699 opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3700 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);
3701
3702 if (record_debug)
3703 debug_printf ("Process record: Advanced SIMD load/store\n");
3704
3705 /* Load/store single structure. */
3706 if (bit (aarch64_insn_r->aarch64_insn, 24))
3707 {
3708 uint8_t sindex, scale, selem, esize, replicate = 0;
3709 scale = opcode_bits >> 2;
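/* Per the ARM ARM pseudocode for load/store single structure,
   scale (opcode<2:1>) encodes the element size and
   selem = UInt(opcode<0>:R) + 1 is the register count per element
   (LD1/ST1 .. LD4/ST4).  */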
3710 selem = ((opcode_bits & 0x02)
3711 | bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
3712 switch (scale)
3713 {
3714 case 1:
3715 if (size_bits & 0x01)
3716 return AARCH64_RECORD_UNKNOWN;
3717 break;
3718 case 2:
3719 if ((size_bits >> 1) & 0x01)
3720 return AARCH64_RECORD_UNKNOWN;
3721 if (size_bits & 0x01)
3722 {
3723 if (!((opcode_bits >> 1) & 0x01))
3724 scale = 3;
3725 else
3726 return AARCH64_RECORD_UNKNOWN;
3727 }
3728 break;
3729 case 3:
3730 if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
3731 {
3732 scale = size_bits;
3733 replicate = 1;
3734 break;
3735 }
3736 else
3737 return AARCH64_RECORD_UNKNOWN;
3738 default:
3739 break;
3740 }
3741 esize = 8 << scale;
3742 if (replicate)
3743 for (sindex = 0; sindex < selem; sindex++)
3744 {
3745 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3746 reg_rt = (reg_rt + 1) % 32;
3747 }
3748 else
3749 {
3750 for (sindex = 0; sindex < selem; sindex++)
3751 {
3752 if (bit (aarch64_insn_r->aarch64_insn, 22))
3753 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3754 else
3755 {
3756 record_buf_mem[mem_index++] = esize / 8;
3757 record_buf_mem[mem_index++] = address + addr_offset;
3758 }
3759 addr_offset = addr_offset + (esize / 8);
3760 reg_rt = (reg_rt + 1) % 32;
3761 }
3762 }
3763 }
3764 /* Load/store multiple structure. */
3765 else
3766 {
3767 uint8_t selem, esize, rpt, elements;
3768 uint8_t eindex, rindex;
3769
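/* The Q bit (bit 30) selects a 128-bit vector, holding 128 / esize
   elements of 8 << size bits each; a 64-bit vector holds half as
   many.  */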
3770 esize = 8 << size_bits;
3771 if (bit (aarch64_insn_r->aarch64_insn, 30))
3772 elements = 128 / esize;
3773 else
3774 elements = 64 / esize;
3775
3776 switch (opcode_bits)
3777 {
3778 /* LD/ST4 (4 registers). */
3779 case 0:
3780 rpt = 1;
3781 selem = 4;
3782 break;
3783 /* LD/ST1 (4 registers). */
3784 case 2:
3785 rpt = 4;
3786 selem = 1;
3787 break;
3788 /* LD/ST3 (3 registers). */
3789 case 4:
3790 rpt = 1;
3791 selem = 3;
3792 break;
3793 /* LD/ST1 (3 registers). */
3794 case 6:
3795 rpt = 3;
3796 selem = 1;
3797 break;
3798 /* LD/ST1 (1 register). */
3799 case 7:
3800 rpt = 1;
3801 selem = 1;
3802 break;
3803 /* LD/ST2 (2 registers). */
3804 case 8:
3805 rpt = 1;
3806 selem = 2;
3807 break;
3808 /* LD/ST1 (2 registers). */
3809 case 10:
3810 rpt = 2;
3811 selem = 1;
3812 break;
3813 default:
3814 return AARCH64_RECORD_UNSUPPORTED;
3815 break;
3816 }
3817 for (rindex = 0; rindex < rpt; rindex++)
3818 for (eindex = 0; eindex < elements; eindex++)
3819 {
3820 uint8_t reg_tt, sindex;
3821 reg_tt = (reg_rt + rindex) % 32;
3822 for (sindex = 0; sindex < selem; sindex++)
3823 {
3824 if (bit (aarch64_insn_r->aarch64_insn, 22))
3825 record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
3826 else
3827 {
3828 record_buf_mem[mem_index++] = esize / 8;
3829 record_buf_mem[mem_index++] = address + addr_offset;
3830 }
3831 addr_offset = addr_offset + (esize / 8);
3832 reg_tt = (reg_tt + 1) % 32;
3833 }
3834 }
3835 }
3836
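/* Bit 23 set means a post-indexed form, which also writes the
   updated address back into the base register Rn.  */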
3837 if (bit (aarch64_insn_r->aarch64_insn, 23))
3838 record_buf[reg_index++] = reg_rn;
3839
3840 aarch64_insn_r->reg_rec_count = reg_index;
3841 aarch64_insn_r->mem_rec_count = mem_index / 2;
3842 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3843 record_buf_mem);
3844 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3845 record_buf);
3846 return AARCH64_RECORD_SUCCESS;
3847 }
3848
3849 /* Record handler for load and store instructions. */
3850
3851 static unsigned int
3852 aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
3853 {
3854 uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
3855 uint8_t insn_bit23, insn_bit21;
3856 uint8_t opc, size_bits, ld_flag, vector_flag;
3857 uint32_t reg_rn, reg_rt, reg_rt2;
3858 uint64_t datasize, offset;
3859 uint32_t record_buf[8];
3860 uint64_t record_buf_mem[8];
3861 CORE_ADDR address;
3862
3863 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3864 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3865 insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
3866 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3867 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3868 ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
3869 vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
3870 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3871 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3872 reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
3873 size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);
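/* In the A64 load/store encodings bit 26 is the V flag (SIMD&FP
   register file) and bit 22 acts as the L (load) flag for most of
   the classes below; the opc-based cases adjust ld_flag where that
   does not hold.  */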
3874
3875 /* Load/store exclusive. */
3876 if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
3877 {
3878 if (record_debug)
3879 debug_printf ("Process record: load/store exclusive\n");
3880
3881 if (ld_flag)
3882 {
3883 record_buf[0] = reg_rt;
3884 aarch64_insn_r->reg_rec_count = 1;
3885 if (insn_bit21)
3886 {
3887 record_buf[1] = reg_rt2;
3888 aarch64_insn_r->reg_rec_count = 2;
3889 }
3890 }
3891 else
3892 {
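/* Store exclusive: bit 21 selects the pair variants (STXP/STLXP),
   which store two registers' worth of data.  */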
3893 if (insn_bit21)
3894 datasize = (8 << size_bits) * 2;
3895 else
3896 datasize = (8 << size_bits);
3897 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3898 &address);
3899 record_buf_mem[0] = datasize / 8;
3900 record_buf_mem[1] = address;
3901 aarch64_insn_r->mem_rec_count = 1;
3902 if (!insn_bit23)
3903 {
3904 /* Record register Rs, which receives the store-exclusive status result. */
3905 record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
3906 aarch64_insn_r->reg_rec_count = 1;
3907 }
3908 }
3909 }
3910 /* Load register (literal) instructions. */
3911 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
3912 {
3913 if (record_debug)
3914 debug_printf ("Process record: load register (literal)\n");
3915 if (vector_flag)
3916 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3917 else
3918 record_buf[0] = reg_rt;
3919 aarch64_insn_r->reg_rec_count = 1;
3920 }
3921 /* Load/store pair instructions (all addressing modes). */
3922 else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
3923 {
3924 if (record_debug)
3925 debug_printf ("Process record: load/store pair\n");
3926
3927 if (ld_flag)
3928 {
3929 if (vector_flag)
3930 {
3931 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3932 record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
3933 }
3934 else
3935 {
3936 record_buf[0] = reg_rt;
3937 record_buf[1] = reg_rt2;
3938 }
3939 aarch64_insn_r->reg_rec_count = 2;
3940 }
3941 else
3942 {
3943 uint16_t imm7_off;
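/* The pair offset is a signed imm7, scaled by the access size:
   sign-extend it by hand, then shift left by 2 + size.  */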
3944 imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
3945 if (!vector_flag)
3946 size_bits = size_bits >> 1;
3947 datasize = 8 << (2 + size_bits);
3948 offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
3949 offset = offset << (2 + size_bits);
3950 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3951 &address);
3952 if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
3953 {
3954 if (imm7_off & 0x40)
3955 address = address - offset;
3956 else
3957 address = address + offset;
3958 }
3959
3960 record_buf_mem[0] = datasize / 8;
3961 record_buf_mem[1] = address;
3962 record_buf_mem[2] = datasize / 8;
3963 record_buf_mem[3] = address + (datasize / 8);
3964 aarch64_insn_r->mem_rec_count = 2;
3965 }
3966 if (bit (aarch64_insn_r->aarch64_insn, 23))
3967 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3968 }
3969 /* Load/store register (unsigned immediate) instructions. */
3970 else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
3971 {
3972 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3973 if (!(opc >> 1))
3974 {
3975 if (opc & 0x01)
3976 ld_flag = 0x01;
3977 else
3978 ld_flag = 0x0;
3979 }
3980 else
3981 {
3982 if (size_bits == 0x3 && vector_flag == 0x0 && opc == 0x2)
3983 {
3984 /* PRFM (immediate) */
3985 return AARCH64_RECORD_SUCCESS;
3986 }
3987 else if (size_bits == 0x2 && vector_flag == 0x0 && opc == 0x2)
3988 {
3989 /* LDRSW (immediate) */
3990 ld_flag = 0x1;
3991 }
3992 else
3993 {
3994 if (opc & 0x01)
3995 ld_flag = 0x01;
3996 else
3997 ld_flag = 0x0;
3998 }
3999 }
4000
4001 if (record_debug)
4002 {
4003 debug_printf ("Process record: load/store (unsigned immediate):"
4004 " size %x V %d opc %x\n", size_bits, vector_flag,
4005 opc);
4006 }
4007
4008 if (!ld_flag)
4009 {
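/* Store: the unsigned imm12 offset is scaled by the access size
   before being added to the base address.  */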
4010 offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
4011 datasize = 8 << size_bits;
4012 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4013 &address);
4014 offset = offset << size_bits;
4015 address = address + offset;
4016
4017 record_buf_mem[0] = datasize >> 3;
4018 record_buf_mem[1] = address;
4019 aarch64_insn_r->mem_rec_count = 1;
4020 }
4021 else
4022 {
4023 if (vector_flag)
4024 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4025 else
4026 record_buf[0] = reg_rt;
4027 aarch64_insn_r->reg_rec_count = 1;
4028 }
4029 }
4030 /* Load/store register (register offset) instructions. */
4031 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
4032 && insn_bits10_11 == 0x02 && insn_bit21)
4033 {
4034 if (record_debug)
4035 debug_printf ("Process record: load/store (register offset)\n");
4036 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
4037 if (!(opc >> 1))
4038 if (opc & 0x01)
4039 ld_flag = 0x01;
4040 else
4041 ld_flag = 0x0;
4042 else
4043 if (size_bits != 0x03)
4044 ld_flag = 0x01;
4045 else
4046 return AARCH64_RECORD_UNKNOWN;
4047
4048 if (!ld_flag)
4049 {
4050 ULONGEST reg_rm_val;
4051
4052 regcache_raw_read_unsigned (aarch64_insn_r->regcache,
4053 bits (aarch64_insn_r->aarch64_insn, 16, 20), &reg_rm_val);
4054 if (bit (aarch64_insn_r->aarch64_insn, 12))
4055 offset = reg_rm_val << size_bits;
4056 else
4057 offset = reg_rm_val;
4058 datasize = 8 << size_bits;
4059 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4060 &address);
4061 address = address + offset;
4062 record_buf_mem[0] = datasize >> 3;
4063 record_buf_mem[1] = address;
4064 aarch64_insn_r->mem_rec_count = 1;
4065 }
4066 else
4067 {
4068 if (vector_flag)
4069 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4070 else
4071 record_buf[0] = reg_rt;
4072 aarch64_insn_r->reg_rec_count = 1;
4073 }
4074 }
4075 /* Load/store register (immediate and unprivileged) instructions. */
4076 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
4077 && !insn_bit21)
4078 {
4079 if (record_debug)
4080 {
4081 debug_printf ("Process record: load/store "
4082 "(immediate and unprivileged)\n");
4083 }
4084 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
4085 if (!(opc >> 1))
4086 if (opc & 0x01)
4087 ld_flag = 0x01;
4088 else
4089 ld_flag = 0x0;
4090 else
4091 if (size_bits != 0x03)
4092 ld_flag = 0x01;
4093 else
4094 return AARCH64_RECORD_UNKNOWN;
4095
4096 if (!ld_flag)
4097 {
4098 uint16_t imm9_off;
4099 imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
4100 offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
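/* imm9 is signed, with bit 8 as its sign bit.  For the post-indexed
   form (bits 10-11 == 01) the access uses the unmodified base
   address, so the offset is applied only for the other forms.  */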
4101 datasize = 8 << size_bits;
4102 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4103 &address);
4104 if (insn_bits10_11 != 0x01)
4105 {
4106 if (imm9_off & 0x0100)
4107 address = address - offset;
4108 else
4109 address = address + offset;
4110 }
4111 record_buf_mem[0] = datasize >> 3;
4112 record_buf_mem[1] = address;
4113 aarch64_insn_r->mem_rec_count = 1;
4114 }
4115 else
4116 {
4117 if (vector_flag)
4118 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4119 else
4120 record_buf[0] = reg_rt;
4121 aarch64_insn_r->reg_rec_count = 1;
4122 }
4123 if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
4124 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
4125 }
4126 /* Advanced SIMD load/store instructions. */
4127 else
4128 return aarch64_record_asimd_load_store (aarch64_insn_r);
4129
4130 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
4131 record_buf_mem);
4132 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4133 record_buf);
4134 return AARCH64_RECORD_SUCCESS;
4135 }
4136
4137 /* Record handler for data processing SIMD and floating point instructions. */
4138
4139 static unsigned int
4140 aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
4141 {
4142 uint8_t insn_bit21, opcode, rmode, reg_rd;
4143 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
4144 uint8_t insn_bits11_14;
4145 uint32_t record_buf[2];
4146
4147 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
4148 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
4149 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
4150 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
4151 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
4152 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
4153 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
4154 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
4155 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
4156
4157 if (record_debug)
4158 debug_printf ("Process record: data processing SIMD/FP: ");
4159
4160 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
4161 {
4162 /* Floating point - fixed point conversion instructions. */
4163 if (!insn_bit21)
4164 {
4165 if (record_debug)
4166 debug_printf ("FP - fixed point conversion");
4167
4168 if ((opcode >> 1) == 0x0 && rmode == 0x03)
4169 record_buf[0] = reg_rd;
4170 else
4171 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4172 }
4173 /* Floating point - conditional compare instructions. */
4174 else if (insn_bits10_11 == 0x01)
4175 {
4176 if (record_debug)
4177 debug_printf ("FP - conditional compare");
4178
4179 record_buf[0] = AARCH64_CPSR_REGNUM;
4180 }
4181 /* Floating point - data processing (2-source) and
4182 conditional select instructions. */
4183 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
4184 {
4185 if (record_debug)
4186 debug_printf ("FP - DP (2-source) / conditional select");
4187
4188 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4189 }
4190 else if (insn_bits10_11 == 0x00)
4191 {
4192 /* Floating point - immediate instructions. */
4193 if ((insn_bits12_15 & 0x01) == 0x01
4194 || (insn_bits12_15 & 0x07) == 0x04)
4195 {
4196 if (record_debug)
4197 debug_printf ("FP - immediate");
4198 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4199 }
4200 /* Floating point - compare instructions. */
4201 else if ((insn_bits12_15 & 0x03) == 0x02)
4202 {
4203 if (record_debug)
4204 debug_printf ("FP - compare");
4205 record_buf[0] = AARCH64_CPSR_REGNUM;
4206 }
4207 /* Floating point - integer conversions instructions. */
4208 else if (insn_bits12_15 == 0x00)
4209 {
4210 /* Convert float to integer instruction. */
4211 if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
4212 {
4213 if (record_debug)
4214 debug_printf ("float to int conversion");
4215
4216 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4217 }
4218 /* Convert integer to float instruction. */
4219 else if ((opcode >> 1) == 0x01 && !rmode)
4220 {
4221 if (record_debug)
4222 debug_printf ("int to float conversion");
4223
4224 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4225 }
4226 /* Move float to integer instruction. */
4227 else if ((opcode >> 1) == 0x03)
4228 {
4229 if (record_debug)
4230 debug_printf ("move float to int");
4231
4232 if (!(opcode & 0x01))
4233 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4234 else
4235 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4236 }
4237 else
4238 return AARCH64_RECORD_UNKNOWN;
4239 }
4240 else
4241 return AARCH64_RECORD_UNKNOWN;
4242 }
4243 else
4244 return AARCH64_RECORD_UNKNOWN;
4245 }
4246 else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
4247 {
4248 if (record_debug)
4249 debug_printf ("SIMD copy");
4250
4251 /* Advanced SIMD copy instructions. */
4252 if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
4253 && !bit (aarch64_insn_r->aarch64_insn, 15)
4254 && bit (aarch64_insn_r->aarch64_insn, 10))
4255 {
4256 if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
4257 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4258 else
4259 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4260 }
4261 else
4262 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4263 }
4264 /* All remaining floating point or advanced SIMD instructions. */
4265 else
4266 {
4267 if (record_debug)
4268 debug_printf ("all remain");
4269
4270 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4271 }
4272
4273 if (record_debug)
4274 debug_printf ("\n");
4275
4276 aarch64_insn_r->reg_rec_count++;
4277 gdb_assert (aarch64_insn_r->reg_rec_count == 1);
4278 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4279 record_buf);
4280 return AARCH64_RECORD_SUCCESS;
4281 }
4282
4283 /* Decode the instruction type and invoke its record handler. */
4284
4285 static unsigned int
4286 aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
4287 {
4288 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
4289
4290 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
4291 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
4292 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
4293 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
4294
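/* The tests below follow the top-level A64 encoding table on
   op0 = bits 28:25: 100x is data processing (immediate), 101x is
   branches/exception generation/system, x1x0 is loads and stores,
   x101 is data processing (register) and x111 is data processing
   (SIMD and floating point).  */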
4295 /* Data processing - immediate instructions. */
4296 if (!ins_bit26 && !ins_bit27 && ins_bit28)
4297 return aarch64_record_data_proc_imm (aarch64_insn_r);
4298
4299 /* Branch, exception generation and system instructions. */
4300 if (ins_bit26 && !ins_bit27 && ins_bit28)
4301 return aarch64_record_branch_except_sys (aarch64_insn_r);
4302
4303 /* Load and store instructions. */
4304 if (!ins_bit25 && ins_bit27)
4305 return aarch64_record_load_store (aarch64_insn_r);
4306
4307 /* Data processing - register instructions. */
4308 if (ins_bit25 && !ins_bit26 && ins_bit27)
4309 return aarch64_record_data_proc_reg (aarch64_insn_r);
4310
4311 /* Data processing - SIMD and floating point instructions. */
4312 if (ins_bit25 && ins_bit26 && ins_bit27)
4313 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
4314
4315 return AARCH64_RECORD_UNSUPPORTED;
4316 }
4317
4318 /* Free the register and memory record lists allocated for RECORD. */
4319
4320 static void
4321 deallocate_reg_mem (insn_decode_record *record)
4322 {
4323 xfree (record->aarch64_regs);
4324 xfree (record->aarch64_mems);
4325 }
4326
4327 #if GDB_SELF_TEST
4328 namespace selftests {
4329
4330 static void
4331 aarch64_process_record_test (void)
4332 {
4333 struct gdbarch_info info;
4334 uint32_t ret;
4335
4336 gdbarch_info_init (&info);
4337 info.bfd_arch_info = bfd_scan_arch ("aarch64");
4338
4339 struct gdbarch *gdbarch = gdbarch_find_by_info (info);
4340 SELF_CHECK (gdbarch != NULL);
4341
4342 insn_decode_record aarch64_record;
4343
4344 memset (&aarch64_record, 0, sizeof (insn_decode_record));
4345 aarch64_record.regcache = NULL;
4346 aarch64_record.this_addr = 0;
4347 aarch64_record.gdbarch = gdbarch;
4348
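/* PRFM is only a prefetch hint, so recording it should touch
   neither registers nor memory; both counts are checked below.  */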
4349 /* 20 00 80 f9 prfm pldl1keep, [x1] */
4350 aarch64_record.aarch64_insn = 0xf9800020;
4351 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4352 SELF_CHECK (ret == AARCH64_RECORD_SUCCESS);
4353 SELF_CHECK (aarch64_record.reg_rec_count == 0);
4354 SELF_CHECK (aarch64_record.mem_rec_count == 0);
4355
4356 deallocate_reg_mem (&aarch64_record);
4357 }
4358
4359 } // namespace selftests
4360 #endif /* GDB_SELF_TEST */
4361
4362 /* Parse the current instruction, and record the values of the registers and
4363 memory that it will change into record_arch_list.  Return -1 if
4364 something is wrong. */
4365
4366 int
4367 aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
4368 CORE_ADDR insn_addr)
4369 {
4370 uint32_t rec_no = 0;
4371 uint8_t insn_size = 4;
4372 uint32_t ret = 0;
4373 gdb_byte buf[insn_size];
4374 insn_decode_record aarch64_record;
4375
4376 memset (&buf[0], 0, insn_size);
4377 memset (&aarch64_record, 0, sizeof (insn_decode_record));
4378 target_read_memory (insn_addr, &buf[0], insn_size);
4379 aarch64_record.aarch64_insn
4380 = (uint32_t) extract_unsigned_integer (&buf[0],
4381 insn_size,
4382 gdbarch_byte_order (gdbarch));
4383 aarch64_record.regcache = regcache;
4384 aarch64_record.this_addr = insn_addr;
4385 aarch64_record.gdbarch = gdbarch;
4386
4387 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4388 if (ret == AARCH64_RECORD_UNSUPPORTED)
4389 {
4390 printf_unfiltered (_("Process record does not support instruction "
4391 "0x%0x at address %s.\n"),
4392 aarch64_record.aarch64_insn,
4393 paddress (gdbarch, insn_addr));
4394 ret = -1;
4395 }
4396
4397 if (0 == ret)
4398 {
4399 /* Record registers. */
4400 record_full_arch_list_add_reg (aarch64_record.regcache,
4401 AARCH64_PC_REGNUM);
4402 /* Always record register CPSR. */
4403 record_full_arch_list_add_reg (aarch64_record.regcache,
4404 AARCH64_CPSR_REGNUM);
4405 if (aarch64_record.aarch64_regs)
4406 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
4407 if (record_full_arch_list_add_reg (aarch64_record.regcache,
4408 aarch64_record.aarch64_regs[rec_no]))
4409 ret = -1;
4410
4411 /* Record memories. */
4412 if (aarch64_record.aarch64_mems)
4413 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
4414 if (record_full_arch_list_add_mem
4415 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
4416 aarch64_record.aarch64_mems[rec_no].len))
4417 ret = -1;
4418
4419 if (record_full_arch_list_add_end ())
4420 ret = -1;
4421 }
4422
4423 deallocate_reg_mem (&aarch64_record);
4424 return ret;
4425 }