gdb/aarch64-tdep.c
/* Common target dependent code for GDB on AArch64 systems.

   Copyright (C) 2009-2019 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"

#include "frame.h"
#include "inferior.h"
#include "gdbcmd.h"
#include "gdbcore.h"
#include "dis-asm.h"
#include "regcache.h"
#include "reggroups.h"
#include "value.h"
#include "arch-utils.h"
#include "osabi.h"
#include "frame-unwind.h"
#include "frame-base.h"
#include "trad-frame.h"
#include "objfiles.h"
#include "dwarf2.h"
#include "dwarf2-frame.h"
#include "gdbtypes.h"
#include "prologue-value.h"
#include "target-descriptions.h"
#include "user-regs.h"
#include "language.h"
#include "infcall.h"
#include "ax.h"
#include "ax-gdb.h"
#include "gdbsupport/selftest.h"

#include "aarch64-tdep.h"
#include "aarch64-ravenscar-thread.h"

#include "elf-bfd.h"
#include "elf/aarch64.h"

#include "gdbsupport/vec.h"

#include "record.h"
#include "record-full.h"
#include "arch/aarch64-insn.h"
#include "gdbarch.h"

#include "opcode/aarch64.h"
#include <algorithm>

#define submask(x) ((1L << ((x) + 1)) - 1)
#define bit(obj,st) (((obj) >> (st)) & 1)
#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
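
/* Worked example (illustrative, not part of the original sources):
   submask (3) expands to 0xf, bit (insn, 31) tests the top bit of a
   32-bit instruction word, and bits (insn, 5, 9) extracts the five-bit
   field insn[9:5].  For instance, bits (0xd503233f, 8, 11) yields 0x3.  */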

/* A Homogeneous Floating-Point or Short-Vector Aggregate may have at most
   four members.  */
#define HA_MAX_NUM_FLDS 4

/* All possible aarch64 target descriptors.  */
struct target_desc *tdesc_aarch64_list[AARCH64_MAX_SVE_VQ + 1][2/*pauth*/];

/* The standard register names, and all the valid aliases for them.  */
static const struct
{
  const char *const name;
  int regnum;
} aarch64_register_aliases[] =
{
  /* 64-bit register names.  */
  {"fp", AARCH64_FP_REGNUM},
  {"lr", AARCH64_LR_REGNUM},
  {"sp", AARCH64_SP_REGNUM},

  /* 32-bit register names.  */
  {"w0", AARCH64_X0_REGNUM + 0},
  {"w1", AARCH64_X0_REGNUM + 1},
  {"w2", AARCH64_X0_REGNUM + 2},
  {"w3", AARCH64_X0_REGNUM + 3},
  {"w4", AARCH64_X0_REGNUM + 4},
  {"w5", AARCH64_X0_REGNUM + 5},
  {"w6", AARCH64_X0_REGNUM + 6},
  {"w7", AARCH64_X0_REGNUM + 7},
  {"w8", AARCH64_X0_REGNUM + 8},
  {"w9", AARCH64_X0_REGNUM + 9},
  {"w10", AARCH64_X0_REGNUM + 10},
  {"w11", AARCH64_X0_REGNUM + 11},
  {"w12", AARCH64_X0_REGNUM + 12},
  {"w13", AARCH64_X0_REGNUM + 13},
  {"w14", AARCH64_X0_REGNUM + 14},
  {"w15", AARCH64_X0_REGNUM + 15},
  {"w16", AARCH64_X0_REGNUM + 16},
  {"w17", AARCH64_X0_REGNUM + 17},
  {"w18", AARCH64_X0_REGNUM + 18},
  {"w19", AARCH64_X0_REGNUM + 19},
  {"w20", AARCH64_X0_REGNUM + 20},
  {"w21", AARCH64_X0_REGNUM + 21},
  {"w22", AARCH64_X0_REGNUM + 22},
  {"w23", AARCH64_X0_REGNUM + 23},
  {"w24", AARCH64_X0_REGNUM + 24},
  {"w25", AARCH64_X0_REGNUM + 25},
  {"w26", AARCH64_X0_REGNUM + 26},
  {"w27", AARCH64_X0_REGNUM + 27},
  {"w28", AARCH64_X0_REGNUM + 28},
  {"w29", AARCH64_X0_REGNUM + 29},
  {"w30", AARCH64_X0_REGNUM + 30},

  /* specials */
  {"ip0", AARCH64_X0_REGNUM + 16},
  {"ip1", AARCH64_X0_REGNUM + 17}
};

/* The required core 'R' registers.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM!  */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  "pc", "cpsr"
};

/* The FP/SIMD 'V' registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM!  */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  "fpsr",
  "fpcr"
};

/* The SVE 'Z' and 'P' registers.  */
static const char *const aarch64_sve_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_SVE_Z0_REGNUM!  */
  "z0", "z1", "z2", "z3",
  "z4", "z5", "z6", "z7",
  "z8", "z9", "z10", "z11",
  "z12", "z13", "z14", "z15",
  "z16", "z17", "z18", "z19",
  "z20", "z21", "z22", "z23",
  "z24", "z25", "z26", "z27",
  "z28", "z29", "z30", "z31",
  "fpsr", "fpcr",
  "p0", "p1", "p2", "p3",
  "p4", "p5", "p6", "p7",
  "p8", "p9", "p10", "p11",
  "p12", "p13", "p14", "p15",
  "ffr", "vg"
};

static const char *const aarch64_pauth_register_names[] =
{
  /* Authentication mask for data pointer.  */
  "pauth_dmask",
  /* Authentication mask for code pointer.  */
  "pauth_cmask"
};

/* AArch64 prologue cache structure.  */
struct aarch64_prologue_cache
{
  /* The program counter at the start of the function.  It is used to
     identify this frame as a prologue frame.  */
  CORE_ADDR func;

  /* The program counter at the time this frame was created; i.e. where
     this function was called from.  It is used to identify this frame as a
     stub frame.  */
  CORE_ADDR prev_pc;

  /* The stack pointer at the time this frame was created; i.e. the
     caller's stack pointer when this function was called.  It is used
     to identify this frame.  */
  CORE_ADDR prev_sp;

  /* Is the target available to read from?  */
  int available_p;

  /* The frame base for this frame is just prev_sp - frame size.
     FRAMESIZE is the distance from the frame pointer to the
     initial stack pointer.  */
  int framesize;

  /* The register used to hold the frame pointer for this frame.  */
  int framereg;

  /* Saved register offsets.  */
  struct trad_frame_saved_reg *saved_regs;
};

static void
show_aarch64_debug (struct ui_file *file, int from_tty,
                    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
}

namespace {

/* Abstract instruction reader.  */

class abstract_instruction_reader
{
public:
  /* Read in one instruction.  */
  virtual ULONGEST read (CORE_ADDR memaddr, int len,
                         enum bfd_endian byte_order) = 0;
};

/* Instruction reader from real target.  */

class instruction_reader : public abstract_instruction_reader
{
public:
  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    override
  {
    return read_code_unsigned_integer (memaddr, len, byte_order);
  }
};

} // namespace

/* If address signing is enabled, mask off the signature bits from ADDR, using
   the register values in THIS_FRAME.  */

static CORE_ADDR
aarch64_frame_unmask_address (struct gdbarch_tdep *tdep,
                              struct frame_info *this_frame,
                              CORE_ADDR addr)
{
  if (tdep->has_pauth ()
      && frame_unwind_register_unsigned (this_frame,
                                         tdep->pauth_ra_state_regnum))
    {
      int cmask_num = AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base);
      CORE_ADDR cmask = frame_unwind_register_unsigned (this_frame, cmask_num);
      addr = addr & ~cmask;
    }

  return addr;
}
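
/* For illustration (hypothetical values, not from the original sources):
   if the code mask register reports a cmask of 0xff00000000000000, a
   pauth-signed LR value of 0x2a0000000040051c unmasks to
   0x000000000040051c, the raw return address that the unwinder can then
   resolve against symbols.  */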

/* Analyze a prologue, looking for a recognizable stack frame
   and frame pointer.  Scan until we encounter a store that could
   clobber the stack frame unexpectedly, or an unknown instruction.  */

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
                          CORE_ADDR start, CORE_ADDR limit,
                          struct aarch64_prologue_cache *cache,
                          abstract_instruction_reader& reader)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  int i;
  /* Track X registers and D registers in prologue.  */
  pv_t regs[AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT];

  for (i = 0; i < AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT; i++)
    regs[i] = pv_register (i, 0);
  pv_area stack (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));

  for (; start < limit; start += 4)
    {
      uint32_t insn;
      aarch64_inst inst;

      insn = reader.read (start, 4, byte_order_for_code);

      if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
        break;

      if (inst.opcode->iclass == addsub_imm
          && (inst.opcode->op == OP_ADD
              || strcmp ("sub", inst.opcode->name) == 0))
        {
          unsigned rd = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].reg.regno;

          gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM);

          if (inst.opcode->op == OP_ADD)
            {
              regs[rd] = pv_add_constant (regs[rn],
                                          inst.operands[2].imm.value);
            }
          else
            {
              regs[rd] = pv_add_constant (regs[rn],
                                          -inst.operands[2].imm.value);
            }
        }
      else if (inst.opcode->iclass == pcreladdr
               && inst.operands[1].type == AARCH64_OPND_ADDR_ADRP)
        {
          gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);

          regs[inst.operands[0].reg.regno] = pv_unknown ();
        }
      else if (inst.opcode->iclass == branch_imm)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == condbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == branch_reg)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == compbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->op == OP_MOVZ)
        {
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
          regs[inst.operands[0].reg.regno] = pv_unknown ();
        }
      else if (inst.opcode->iclass == log_shift
               && strcmp (inst.opcode->name, "orr") == 0)
        {
          unsigned rd = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].reg.regno;
          unsigned rm = inst.operands[2].reg.regno;

          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT);

          if (inst.operands[2].shifter.amount == 0
              && rn == AARCH64_SP_REGNUM)
            regs[rd] = regs[rm];
          else
            {
              if (aarch64_debug)
                {
                  debug_printf ("aarch64: prologue analysis gave up "
                                "addr=%s opcode=0x%x (orr x register)\n",
                                core_addr_to_string_nz (start), insn);
                }
              break;
            }
        }
      else if (inst.opcode->op == OP_STUR)
        {
          unsigned rt = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].addr.base_regno;
          int is64
            = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);

          gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
          gdb_assert (!inst.operands[1].addr.offset.is_reg);

          stack.store (pv_add_constant (regs[rn],
                                        inst.operands[1].addr.offset.imm),
                       is64 ? 8 : 4, regs[rt]);
        }
      else if ((inst.opcode->iclass == ldstpair_off
                || (inst.opcode->iclass == ldstpair_indexed
                    && inst.operands[2].addr.preind))
               && strcmp ("stp", inst.opcode->name) == 0)
        {
          /* STP with addressing mode Pre-indexed and Base register.  */
          unsigned rt1;
          unsigned rt2;
          unsigned rn = inst.operands[2].addr.base_regno;
          int32_t imm = inst.operands[2].addr.offset.imm;

          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
                      || inst.operands[0].type == AARCH64_OPND_Ft);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2
                      || inst.operands[1].type == AARCH64_OPND_Ft2);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7);
          gdb_assert (!inst.operands[2].addr.offset.is_reg);

          /* If recording this store would invalidate the store area
             (perhaps because rn is not known) then we should abandon
             further prologue analysis.  */
          if (stack.store_would_trash (pv_add_constant (regs[rn], imm)))
            break;

          if (stack.store_would_trash (pv_add_constant (regs[rn], imm + 8)))
            break;

          rt1 = inst.operands[0].reg.regno;
          rt2 = inst.operands[1].reg.regno;
          if (inst.operands[0].type == AARCH64_OPND_Ft)
            {
              /* Only the bottom 64 bits of each V register (the D register)
                 need to be preserved.  */
              gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
              rt1 += AARCH64_X_REGISTER_COUNT;
              rt2 += AARCH64_X_REGISTER_COUNT;
            }

          stack.store (pv_add_constant (regs[rn], imm), 8,
                       regs[rt1]);
          stack.store (pv_add_constant (regs[rn], imm + 8), 8,
                       regs[rt2]);

          if (inst.operands[2].addr.writeback)
            regs[rn] = pv_add_constant (regs[rn], imm);

        }
      else if ((inst.opcode->iclass == ldst_imm9 /* Signed immediate.  */
                || (inst.opcode->iclass == ldst_pos /* Unsigned immediate.  */
                    && (inst.opcode->op == OP_STR_POS
                        || inst.opcode->op == OP_STRF_POS)))
               && inst.operands[1].addr.base_regno == AARCH64_SP_REGNUM
               && strcmp ("str", inst.opcode->name) == 0)
        {
          /* STR (immediate).  */
          unsigned int rt = inst.operands[0].reg.regno;
          int32_t imm = inst.operands[1].addr.offset.imm;
          unsigned int rn = inst.operands[1].addr.base_regno;
          bool is64
            = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
                      || inst.operands[0].type == AARCH64_OPND_Ft);

          if (inst.operands[0].type == AARCH64_OPND_Ft)
            {
              /* Only the bottom 64 bits of each V register (the D register)
                 need to be preserved.  */
              gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
              rt += AARCH64_X_REGISTER_COUNT;
            }

          stack.store (pv_add_constant (regs[rn], imm),
                       is64 ? 8 : 4, regs[rt]);
          if (inst.operands[1].addr.writeback)
            regs[rn] = pv_add_constant (regs[rn], imm);
        }
      else if (inst.opcode->iclass == testbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == ic_system)
        {
          struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
          int ra_state_val = 0;

          if (insn == 0xd503233f /* paciasp.  */
              || insn == 0xd503237f /* pacibsp.  */)
            {
              /* Return addresses are mangled.  */
              ra_state_val = 1;
            }
          else if (insn == 0xd50323bf /* autiasp.  */
                   || insn == 0xd50323ff /* autibsp.  */)
            {
              /* Return addresses are not mangled.  */
              ra_state_val = 0;
            }
          else
            {
              if (aarch64_debug)
                debug_printf ("aarch64: prologue analysis gave up addr=%s"
                              " opcode=0x%x (iclass)\n",
                              core_addr_to_string_nz (start), insn);
              break;
            }

          if (tdep->has_pauth () && cache != nullptr)
            trad_frame_set_value (cache->saved_regs,
                                  tdep->pauth_ra_state_regnum,
                                  ra_state_val);
        }
      else
        {
          if (aarch64_debug)
            {
              debug_printf ("aarch64: prologue analysis gave up addr=%s"
                            " opcode=0x%x\n",
                            core_addr_to_string_nz (start), insn);
            }
          break;
        }
    }

  if (cache == NULL)
    return start;

  if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Frame pointer is fp.  Frame size is constant.  */
      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = -regs[AARCH64_FP_REGNUM].k;
    }
  else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Try the stack pointer.  */
      cache->framesize = -regs[AARCH64_SP_REGNUM].k;
      cache->framereg = AARCH64_SP_REGNUM;
    }
  else
    {
      /* We're just out of luck.  We don't know where the frame is.  */
      cache->framereg = -1;
      cache->framesize = 0;
    }

  for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
    {
      CORE_ADDR offset;

      if (stack.find_reg (gdbarch, i, &offset))
        cache->saved_regs[i].addr = offset;
    }

  for (i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
    {
      int regnum = gdbarch_num_regs (gdbarch);
      CORE_ADDR offset;

      if (stack.find_reg (gdbarch, i + AARCH64_X_REGISTER_COUNT,
                          &offset))
        cache->saved_regs[i + regnum + AARCH64_D0_REGNUM].addr = offset;
    }

  return start;
}

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
                          CORE_ADDR start, CORE_ADDR limit,
                          struct aarch64_prologue_cache *cache)
{
  instruction_reader reader;

  return aarch64_analyze_prologue (gdbarch, start, limit, cache,
                                   reader);
}

#if GDB_SELF_TEST

namespace selftests {

/* Instruction reader from manually cooked instruction sequences.  */

class instruction_reader_test : public abstract_instruction_reader
{
public:
  template<size_t SIZE>
  explicit instruction_reader_test (const uint32_t (&insns)[SIZE])
    : m_insns (insns), m_insns_size (SIZE)
  {}

  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    override
  {
    SELF_CHECK (len == 4);
    SELF_CHECK (memaddr % 4 == 0);
    SELF_CHECK (memaddr / 4 < m_insns_size);

    return m_insns[memaddr / 4];
  }

private:
  const uint32_t *m_insns;
  size_t m_insns_size;
};

static void
aarch64_analyze_prologue_test (void)
{
  struct gdbarch_info info;

  gdbarch_info_init (&info);
  info.bfd_arch_info = bfd_scan_arch ("aarch64");

  struct gdbarch *gdbarch = gdbarch_find_by_info (info);
  SELF_CHECK (gdbarch != NULL);

  struct aarch64_prologue_cache cache;
  cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);

  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  /* Test the simple prologue in which frame pointer is used.  */
  {
    static const uint32_t insns[] = {
      0xa9af7bfd, /* stp     x29, x30, [sp,#-272]! */
      0x910003fd, /* mov     x29, sp */
      0x97ffffe6, /* bl      0x400580 */
    };
    instruction_reader_test reader (insns);

    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
    SELF_CHECK (end == 4 * 2);

    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 272);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
        if (i == AARCH64_FP_REGNUM)
          SELF_CHECK (cache.saved_regs[i].addr == -272);
        else if (i == AARCH64_LR_REGNUM)
          SELF_CHECK (cache.saved_regs[i].addr == -264);
        else
          SELF_CHECK (cache.saved_regs[i].addr == -1);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
        int regnum = gdbarch_num_regs (gdbarch);

        SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
                    == -1);
      }
  }

  /* Test a prologue in which STR is used and frame pointer is not
     used.  */
  {
    static const uint32_t insns[] = {
      0xf81d0ff3, /* str     x19, [sp, #-48]! */
      0xb9002fe0, /* str     w0, [sp, #44] */
      0xf90013e1, /* str     x1, [sp, #32] */
      0xfd000fe0, /* str     d0, [sp, #24] */
      0xaa0203f3, /* mov     x19, x2 */
      0xf94013e0, /* ldr     x0, [sp, #32] */
    };
    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    SELF_CHECK (end == 4 * 5);

    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 48);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
        if (i == 1)
          SELF_CHECK (cache.saved_regs[i].addr == -16);
        else if (i == 19)
          SELF_CHECK (cache.saved_regs[i].addr == -48);
        else
          SELF_CHECK (cache.saved_regs[i].addr == -1);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
        int regnum = gdbarch_num_regs (gdbarch);

        if (i == 0)
          SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
                      == -24);
        else
          SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
                      == -1);
      }
  }

  /* Test a prologue in which there is a return address signing
     instruction.  */
  if (tdep->has_pauth ())
    {
      static const uint32_t insns[] = {
        0xd503233f, /* paciasp */
        0xa9bd7bfd, /* stp     x29, x30, [sp, #-48]! */
        0x910003fd, /* mov     x29, sp */
        0xf801c3f3, /* str     x19, [sp, #28] */
        0xb9401fa0, /* ldr     x19, [x29, #28] */
      };
      instruction_reader_test reader (insns);

      trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
      CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache,
                                                reader);

      SELF_CHECK (end == 4 * 4);
      SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
      SELF_CHECK (cache.framesize == 48);

      for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
        {
          if (i == 19)
            SELF_CHECK (cache.saved_regs[i].addr == -20);
          else if (i == AARCH64_FP_REGNUM)
            SELF_CHECK (cache.saved_regs[i].addr == -48);
          else if (i == AARCH64_LR_REGNUM)
            SELF_CHECK (cache.saved_regs[i].addr == -40);
          else
            SELF_CHECK (cache.saved_regs[i].addr == -1);
        }

      if (tdep->has_pauth ())
        {
          SELF_CHECK (trad_frame_value_p (cache.saved_regs,
                                          tdep->pauth_ra_state_regnum));
          SELF_CHECK (cache.saved_regs[tdep->pauth_ra_state_regnum].addr == 1);
        }
    }
}
} // namespace selftests
#endif /* GDB_SELF_TEST */

/* Implement the "skip_prologue" gdbarch method.  */

static CORE_ADDR
aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  CORE_ADDR func_addr, limit_pc;

  /* See if we can determine the end of the prologue via the symbol
     table.  If so, then return either PC, or the PC after the
     prologue, whichever is greater.  */
  if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
    {
      CORE_ADDR post_prologue_pc
        = skip_prologue_using_sal (gdbarch, func_addr);

      if (post_prologue_pc != 0)
        return std::max (pc, post_prologue_pc);
    }

  /* Can't determine prologue from the symbol table, need to examine
     instructions.  */

  /* Find an upper limit on the function prologue using the debug
     information.  If the debug information could not be used to
     provide that bound, then use an arbitrary large number as the
     upper bound.  */
  limit_pc = skip_prologue_using_sal (gdbarch, pc);
  if (limit_pc == 0)
    limit_pc = pc + 128;  /* Magic.  */

  /* Try disassembling prologue.  */
  return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
}

/* Scan the function prologue for THIS_FRAME and populate the prologue
   cache CACHE.  */

static void
aarch64_scan_prologue (struct frame_info *this_frame,
                       struct aarch64_prologue_cache *cache)
{
  CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
  CORE_ADDR prologue_start;
  CORE_ADDR prologue_end;
  CORE_ADDR prev_pc = get_frame_pc (this_frame);
  struct gdbarch *gdbarch = get_frame_arch (this_frame);

  cache->prev_pc = prev_pc;

  /* Assume we do not find a frame.  */
  cache->framereg = -1;
  cache->framesize = 0;

  if (find_pc_partial_function (block_addr, NULL, &prologue_start,
                                &prologue_end))
    {
      struct symtab_and_line sal = find_pc_line (prologue_start, 0);

      if (sal.line == 0)
        {
          /* No line info so use the current PC.  */
          prologue_end = prev_pc;
        }
      else if (sal.end < prologue_end)
        {
          /* The next line begins after the function end.  */
          prologue_end = sal.end;
        }

      prologue_end = std::min (prologue_end, prev_pc);
      aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
    }
  else
    {
      CORE_ADDR frame_loc;

      frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
      if (frame_loc == 0)
        return;

      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = 16;
      cache->saved_regs[29].addr = 0;
      cache->saved_regs[30].addr = 8;
    }
}

/* Fill in *CACHE with information about the prologue of *THIS_FRAME.  This
   function may throw an exception if the inferior's registers or memory is
   not available.  */

static void
aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
                               struct aarch64_prologue_cache *cache)
{
  CORE_ADDR unwound_fp;
  int reg;

  aarch64_scan_prologue (this_frame, cache);

  if (cache->framereg == -1)
    return;

  unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
  if (unwound_fp == 0)
    return;

  cache->prev_sp = unwound_fp + cache->framesize;

  /* Calculate actual addresses of saved registers using offsets
     determined by aarch64_analyze_prologue.  */
  for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
    if (trad_frame_addr_p (cache->saved_regs, reg))
      cache->saved_regs[reg].addr += cache->prev_sp;

  cache->func = get_frame_func (this_frame);

  cache->available_p = 1;
}

/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  try
    {
      aarch64_make_prologue_cache_1 (this_frame, cache);
    }
  catch (const gdb_exception_error &ex)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
        throw;
    }

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
                                           void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  /* Halt the backtrace at "_start".  */
  if (cache->prev_pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
    return UNWIND_OUTERMOST;

  /* We've hit a wall, stop.  */
  if (cache->prev_sp == 0)
    return UNWIND_OUTERMOST;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a normal frame is the current function's starting
   PC and the caller's SP when we were called.  */

static void
aarch64_prologue_this_id (struct frame_info *this_frame,
                          void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    *this_id = frame_id_build_unavailable_stack (cache->func);
  else
    *this_id = frame_id_build (cache->prev_sp, cache->func);
}

/* Implement the "prev_register" frame_unwind method.  */

static struct value *
aarch64_prologue_prev_register (struct frame_info *this_frame,
                                void **this_cache, int prev_regnum)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  /* If we are asked to unwind the PC, then we need to return the LR
     instead.  The prologue may save PC, but it will point into this
     frame's prologue, not the next frame's resume location.  */
  if (prev_regnum == AARCH64_PC_REGNUM)
    {
      CORE_ADDR lr;
      struct gdbarch *gdbarch = get_frame_arch (this_frame);
      struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);

      if (tdep->has_pauth ()
          && trad_frame_value_p (cache->saved_regs,
                                 tdep->pauth_ra_state_regnum))
        lr = aarch64_frame_unmask_address (tdep, this_frame, lr);

      return frame_unwind_got_constant (this_frame, prev_regnum, lr);
    }

  /* SP is generally not saved to the stack, but this frame is
     identified by the next frame's stack pointer at the time of the
     call.  The value was already reconstructed into PREV_SP.  */
  /*
         +----------+  ^
         | saved lr |  |
      +->| saved fp |--+
      |  |          |
      |  |          |     <- Previous SP
      |  +----------+
      |  | saved lr |
      +--| saved fp |<- FP
         |          |
         |          |<- SP
         +----------+  */
  if (prev_regnum == AARCH64_SP_REGNUM)
    return frame_unwind_got_constant (this_frame, prev_regnum,
                                      cache->prev_sp);

  return trad_frame_get_prev_register (this_frame, cache->saved_regs,
                                       prev_regnum);
}

/* AArch64 prologue unwinder.  */
struct frame_unwind aarch64_prologue_unwind =
{
  NORMAL_FRAME,
  aarch64_prologue_frame_unwind_stop_reason,
  aarch64_prologue_this_id,
  aarch64_prologue_prev_register,
  NULL,
  default_frame_sniffer
};

/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  try
    {
      cache->prev_sp = get_frame_register_unsigned (this_frame,
                                                    AARCH64_SP_REGNUM);
      cache->prev_pc = get_frame_pc (this_frame);
      cache->available_p = 1;
    }
  catch (const gdb_exception_error &ex)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
        throw;
    }

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame,
                                       void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a stub frame is the current SP and LR.  */

static void
aarch64_stub_this_id (struct frame_info *this_frame,
                      void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (cache->available_p)
    *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
  else
    *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
}

/* Implement the "sniffer" frame_unwind method.  */

static int
aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
                             struct frame_info *this_frame,
                             void **this_prologue_cache)
{
  CORE_ADDR addr_in_block;
  gdb_byte dummy[4];

  addr_in_block = get_frame_address_in_block (this_frame);
  if (in_plt_section (addr_in_block)
      /* We also use the stub unwinder if the target memory is unreadable
         to avoid having the prologue unwinder trying to read it.  */
      || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
    return 1;

  return 0;
}

/* AArch64 stub unwinder.  */
struct frame_unwind aarch64_stub_unwind =
{
  NORMAL_FRAME,
  aarch64_stub_frame_unwind_stop_reason,
  aarch64_stub_this_id,
  aarch64_prologue_prev_register,
  NULL,
  aarch64_stub_unwind_sniffer
};

/* Return the frame base address of *THIS_FRAME.  */

static CORE_ADDR
aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  return cache->prev_sp - cache->framesize;
}

/* AArch64 default frame base information.  */
struct frame_base aarch64_normal_base =
{
  &aarch64_prologue_unwind,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base
};

/* Return the value of the REGNUM register in the previous frame of
   *THIS_FRAME.  */

static struct value *
aarch64_dwarf2_prev_register (struct frame_info *this_frame,
                              void **this_cache, int regnum)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (get_frame_arch (this_frame));
  CORE_ADDR lr;

  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      lr = aarch64_frame_unmask_address (tdep, this_frame, lr);
      return frame_unwind_got_constant (this_frame, regnum, lr);

    default:
      internal_error (__FILE__, __LINE__,
                      _("Unexpected register %d"), regnum);
    }
}

static const unsigned char op_lit0 = DW_OP_lit0;
static const unsigned char op_lit1 = DW_OP_lit1;

/* Implement the "init_reg" dwarf2_frame_ops method.  */

static void
aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
                               struct dwarf2_frame_state_reg *reg,
                               struct frame_info *this_frame)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      reg->how = DWARF2_FRAME_REG_FN;
      reg->loc.fn = aarch64_dwarf2_prev_register;
      return;

    case AARCH64_SP_REGNUM:
      reg->how = DWARF2_FRAME_REG_CFA;
      return;
    }

  /* Init pauth registers.  */
  if (tdep->has_pauth ())
    {
      if (regnum == tdep->pauth_ra_state_regnum)
        {
          /* Initialize RA_STATE to zero.  */
          reg->how = DWARF2_FRAME_REG_SAVED_VAL_EXP;
          reg->loc.exp.start = &op_lit0;
          reg->loc.exp.len = 1;
          return;
        }
      else if (regnum == AARCH64_PAUTH_DMASK_REGNUM (tdep->pauth_reg_base)
               || regnum == AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base))
        {
          reg->how = DWARF2_FRAME_REG_SAME_VALUE;
          return;
        }
    }
}

/* Implement the execute_dwarf_cfa_vendor_op method.  */

static bool
aarch64_execute_dwarf_cfa_vendor_op (struct gdbarch *gdbarch, gdb_byte op,
                                     struct dwarf2_frame_state *fs)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  struct dwarf2_frame_state_reg *ra_state;

  if (op == DW_CFA_AARCH64_negate_ra_state)
    {
      /* On systems without pauth, treat as a nop.  */
      if (!tdep->has_pauth ())
        return true;

      /* Allocate RA_STATE column if it's not allocated yet.  */
      fs->regs.alloc_regs (AARCH64_DWARF_PAUTH_RA_STATE + 1);

      /* Toggle the status of RA_STATE between 0 and 1.  */
      ra_state = &(fs->regs.reg[AARCH64_DWARF_PAUTH_RA_STATE]);
      ra_state->how = DWARF2_FRAME_REG_SAVED_VAL_EXP;

      if (ra_state->loc.exp.start == nullptr
          || ra_state->loc.exp.start == &op_lit0)
        ra_state->loc.exp.start = &op_lit1;
      else
        ra_state->loc.exp.start = &op_lit0;

      ra_state->loc.exp.len = 1;

      return true;
    }

  return false;
}
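
/* Illustrative note (not in the original sources): since the op only flips
   the saved-value expression between DW_OP_lit0 and DW_OP_lit1, the
   DW_CFA_AARCH64_negate_ra_state emitted for a paciasp marks RA_STATE as 1
   (return address mangled), and the matching op for the autiasp flips it
   back to 0, at which point aarch64_frame_unmask_address stops stripping
   signature bits from unwound return addresses.  */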

/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

struct stack_item_t
{
  /* Value to pass on stack.  It can be NULL if this item is for stack
     padding.  */
  const gdb_byte *data;

  /* Size in bytes of value to pass on stack.  */
  int len;
};

/* Implement the gdbarch type alignment method.  Overrides the generic
   alignment algorithm for anything that is aarch64 specific.  */

static ULONGEST
aarch64_type_align (gdbarch *gdbarch, struct type *t)
{
  t = check_typedef (t);
  if (TYPE_CODE (t) == TYPE_CODE_ARRAY && TYPE_VECTOR (t))
    {
      /* Use the natural alignment for vector types (the same as for
         scalar types), but the maximum alignment is 128-bit.  */
      if (TYPE_LENGTH (t) > 16)
        return 16;
      else
        return TYPE_LENGTH (t);
    }

  /* Allow the common code to calculate the alignment.  */
  return 0;
}
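
/* Illustrative examples: an 8-byte AdvSIMD vector reports an alignment of
   8 here, a 32-byte vector is capped at 16, and a plain int falls through
   to return 0 so the common code's default alignment rules apply.  */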

/* Worker function for aapcs_is_vfp_call_or_return_candidate.

   Return the number of registers required, or -1 on failure.

   When encountering a base element, if FUNDAMENTAL_TYPE is not set then set it
   to the element, else fail if the type of this element does not match the
   existing value.  */

static int
aapcs_is_vfp_call_or_return_candidate_1 (struct type *type,
                                         struct type **fundamental_type)
{
  if (type == nullptr)
    return -1;

  switch (TYPE_CODE (type))
    {
    case TYPE_CODE_FLT:
      if (TYPE_LENGTH (type) > 16)
        return -1;

      if (*fundamental_type == nullptr)
        *fundamental_type = type;
      else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
               || TYPE_CODE (type) != TYPE_CODE (*fundamental_type))
        return -1;

      return 1;

    case TYPE_CODE_COMPLEX:
      {
        struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
        if (TYPE_LENGTH (target_type) > 16)
          return -1;

        if (*fundamental_type == nullptr)
          *fundamental_type = target_type;
        else if (TYPE_LENGTH (target_type) != TYPE_LENGTH (*fundamental_type)
                 || TYPE_CODE (target_type) != TYPE_CODE (*fundamental_type))
          return -1;

        return 2;
      }

    case TYPE_CODE_ARRAY:
      {
        if (TYPE_VECTOR (type))
          {
            if (TYPE_LENGTH (type) != 8 && TYPE_LENGTH (type) != 16)
              return -1;

            if (*fundamental_type == nullptr)
              *fundamental_type = type;
            else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
                     || TYPE_CODE (type) != TYPE_CODE (*fundamental_type))
              return -1;

            return 1;
          }
        else
          {
            struct type *target_type = TYPE_TARGET_TYPE (type);
            int count = aapcs_is_vfp_call_or_return_candidate_1
                          (target_type, fundamental_type);

            if (count == -1)
              return count;

            count *= (TYPE_LENGTH (type) / TYPE_LENGTH (target_type));
            return count;
          }
      }

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      {
        int count = 0;

        for (int i = 0; i < TYPE_NFIELDS (type); i++)
          {
            /* Ignore any static fields.  */
            if (field_is_static (&TYPE_FIELD (type, i)))
              continue;

            struct type *member = check_typedef (TYPE_FIELD_TYPE (type, i));

            int sub_count = aapcs_is_vfp_call_or_return_candidate_1
                              (member, fundamental_type);
            if (sub_count == -1)
              return -1;
            count += sub_count;
          }

        /* Ensure there is no padding between the fields (allowing for empty
           zero-length structs).  */
        int ftype_length = (*fundamental_type == nullptr)
                           ? 0 : TYPE_LENGTH (*fundamental_type);
        if (count * ftype_length != TYPE_LENGTH (type))
          return -1;

        return count;
      }

    default:
      break;
    }

  return -1;
}

/* Return true if an argument, whose type is described by TYPE, can be passed
   or returned in simd/fp registers, providing enough parameter passing
   registers are available.  This is as described in the AAPCS64.

   Upon successful return, *COUNT returns the number of needed registers,
   *FUNDAMENTAL_TYPE contains the type of those registers.

   A candidate as per AAPCS64 5.4.2.C is one of:
   - a float.
   - a short-vector.
   - an HFA (Homogeneous Floating-point Aggregate, 4.3.5.1): a composite type
     where all the members are floats and which has at most 4 members.
   - an HVA (Homogeneous Short-vector Aggregate, 4.3.5.2): a composite type
     where all the members are short vectors and which has at most 4 members.
   - a complex type (7.1.1).

   Note that HFAs and HVAs can include nested structures and arrays.  */

static bool
aapcs_is_vfp_call_or_return_candidate (struct type *type, int *count,
                                       struct type **fundamental_type)
{
  if (type == nullptr)
    return false;

  *fundamental_type = nullptr;

  int ag_count = aapcs_is_vfp_call_or_return_candidate_1 (type,
                                                          fundamental_type);

  if (ag_count > 0 && ag_count <= HA_MAX_NUM_FLDS)
    {
      *count = ag_count;
      return true;
    }
  else
    return false;
}
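
/* Worked example (hypothetical type, not from the original sources): for
     struct vec2 { float x; float y; };
   the worker returns 2 with *FUNDAMENTAL_TYPE set to float, and since
   2 <= HA_MAX_NUM_FLDS the struct qualifies as an HFA passed in two
   consecutive SIMD/FP registers.  Mixing in a double member would fail
   the fundamental-type match and fall back to the integer/stack rules.  */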

/* AArch64 function call information structure.  */
struct aarch64_call_info
{
  /* The current argument number.  */
  unsigned argnum = 0;

  /* The next general purpose register number, equivalent to NGRN as
     described in the AArch64 Procedure Call Standard.  */
  unsigned ngrn = 0;

  /* The next SIMD and floating point register number, equivalent to
     NSRN as described in the AArch64 Procedure Call Standard.  */
  unsigned nsrn = 0;

  /* The next stacked argument address, equivalent to NSAA as
     described in the AArch64 Procedure Call Standard.  */
  unsigned nsaa = 0;

  /* Stack item vector.  */
  std::vector<stack_item_t> si;
};

/* Pass a value in a sequence of consecutive X registers.  The caller
   is responsible for ensuring sufficient registers are available.  */

static void
pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
           struct aarch64_call_info *info, struct type *type,
           struct value *arg)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int len = TYPE_LENGTH (type);
  enum type_code typecode = TYPE_CODE (type);
  int regnum = AARCH64_X0_REGNUM + info->ngrn;
  const bfd_byte *buf = value_contents (arg);

  info->argnum++;

  while (len > 0)
    {
      int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
      CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
                                                   byte_order);

      /* Adjust sub-word struct/union args when big-endian.  */
      if (byte_order == BFD_ENDIAN_BIG
          && partial_len < X_REGISTER_SIZE
          && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
        regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);

      if (aarch64_debug)
        {
          debug_printf ("arg %d in %s = 0x%s\n", info->argnum,
                        gdbarch_register_name (gdbarch, regnum),
                        phex (regval, X_REGISTER_SIZE));
        }
      regcache_cooked_write_unsigned (regcache, regnum, regval);
      len -= partial_len;
      buf += partial_len;
      regnum++;
    }
}
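
/* Worked example (hypothetical argument): a 12-byte struct with
   info->ngrn == 0 is written as an 8-byte chunk to x0 followed by a
   4-byte chunk to x1; on a big-endian target that 4-byte tail is first
   shifted into the most significant bytes of x1.  */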

/* Attempt to marshall a value in a V register.  Return 1 if
   successful, or 0 if insufficient registers are available.  This
   function, unlike the equivalent pass_in_x() function, does not
   handle arguments spread across multiple registers.  */

static int
pass_in_v (struct gdbarch *gdbarch,
           struct regcache *regcache,
           struct aarch64_call_info *info,
           int len, const bfd_byte *buf)
{
  if (info->nsrn < 8)
    {
      int regnum = AARCH64_V0_REGNUM + info->nsrn;
      /* Enough space for a full vector register.  */
      gdb_byte reg[register_size (gdbarch, regnum)];
      gdb_assert (len <= sizeof (reg));

      info->argnum++;
      info->nsrn++;

      memset (reg, 0, sizeof (reg));
      /* PCS C.1, the argument is allocated to the least significant
         bits of the V register.  */
      memcpy (reg, buf, len);
      regcache->cooked_write (regnum, reg);

      if (aarch64_debug)
        {
          debug_printf ("arg %d in %s\n", info->argnum,
                        gdbarch_register_name (gdbarch, regnum));
        }
      return 1;
    }
  info->nsrn = 8;
  return 0;
}

/* Marshall an argument onto the stack.  */

static void
pass_on_stack (struct aarch64_call_info *info, struct type *type,
               struct value *arg)
{
  const bfd_byte *buf = value_contents (arg);
  int len = TYPE_LENGTH (type);
  int align;
  stack_item_t item;

  info->argnum++;

  align = type_align (type);

  /* PCS C.17: the stack should be aligned to the larger of 8 bytes or
     the natural alignment of the argument's type.  */
  align = align_up (align, 8);

  /* The AArch64 PCS requires at most doubleword alignment.  */
  if (align > 16)
    align = 16;

  if (aarch64_debug)
    {
      debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
                    info->nsaa);
    }

  item.len = len;
  item.data = buf;
  info->si.push_back (item);

  info->nsaa += len;
  if (info->nsaa & (align - 1))
    {
      /* Push stack alignment padding.  */
      int pad = align - (info->nsaa & (align - 1));

      item.len = pad;
      item.data = NULL;

      info->si.push_back (item);
      info->nsaa += pad;
    }
}
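
/* Worked example (illustrative): pushing a 20-byte argument with 8-byte
   alignment onto an empty stack leaves info->nsaa == 20; since
   20 & 7 == 4, a 4-byte padding item (data == NULL) is pushed and nsaa
   rounds up to 24.  */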

/* Marshall an argument into a sequence of one or more consecutive X
   registers or, if insufficient X registers are available, then onto
   the stack.  */

static void
pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
                    struct aarch64_call_info *info, struct type *type,
                    struct value *arg)
{
  int len = TYPE_LENGTH (type);
  int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;

  /* PCS C.13 - Pass in registers if we have enough spare.  */
  if (info->ngrn + nregs <= 8)
    {
      pass_in_x (gdbarch, regcache, info, type, arg);
      info->ngrn += nregs;
    }
  else
    {
      info->ngrn = 8;
      pass_on_stack (info, type, arg);
    }
}

/* Pass a value, which is of type arg_type, in a V register.  Assumes the
   value is an aapcs_is_vfp_call_or_return_candidate and there are enough
   spare V registers.  A return value of false is an error state, as the
   value will have been partially passed to the stack.  */
static bool
pass_in_v_vfp_candidate (struct gdbarch *gdbarch, struct regcache *regcache,
                         struct aarch64_call_info *info, struct type *arg_type,
                         struct value *arg)
{
  switch (TYPE_CODE (arg_type))
    {
    case TYPE_CODE_FLT:
      return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type),
                        value_contents (arg));
      break;

    case TYPE_CODE_COMPLEX:
      {
        const bfd_byte *buf = value_contents (arg);
        struct type *target_type = check_typedef (TYPE_TARGET_TYPE (arg_type));

        if (!pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type),
                        buf))
          return false;

        return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type),
                          buf + TYPE_LENGTH (target_type));
      }

    case TYPE_CODE_ARRAY:
      if (TYPE_VECTOR (arg_type))
        return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type),
                          value_contents (arg));
      /* fall through.  */

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      for (int i = 0; i < TYPE_NFIELDS (arg_type); i++)
        {
          /* Don't include static fields.  */
          if (field_is_static (&TYPE_FIELD (arg_type, i)))
            continue;

          struct value *field = value_primitive_field (arg, 0, i, arg_type);
          struct type *field_type = check_typedef (value_type (field));

          if (!pass_in_v_vfp_candidate (gdbarch, regcache, info, field_type,
                                        field))
            return false;
        }
      return true;

    default:
      return false;
    }
}

/* Implement the "push_dummy_call" gdbarch method.  */

static CORE_ADDR
aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
                         struct regcache *regcache, CORE_ADDR bp_addr,
                         int nargs,
                         struct value **args, CORE_ADDR sp,
                         function_call_return_method return_method,
                         CORE_ADDR struct_addr)
{
  int argnum;
  struct aarch64_call_info info;

  /* We need to know what the type of the called function is in order
     to determine the number of named/anonymous arguments for the
     actual argument placement, and the return type in order to handle
     return value correctly.

     The generic code above us views the decision of return in memory
     or return in registers as a two stage process.  The language
     handler is consulted first and may decide to return in memory (e.g.
     a class with copy constructor returned by value); this will cause
     the generic code to allocate space AND insert an initial leading
     argument.

     If the language code does not decide to pass in memory then the
     target code is consulted.

     If the language code decides to pass in memory we want to move
     the pointer inserted as the initial argument from the argument
     list and into X8, the conventional AArch64 struct return pointer
     register.  */

  /* Set the return address.  For the AArch64, the return breakpoint
     is always at BP_ADDR.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);

  /* If we were given an initial argument for the return slot, lose it.  */
  if (return_method == return_method_hidden_param)
    {
      args++;
      nargs--;
    }

  /* The struct_return pointer occupies X8.  */
  if (return_method != return_method_normal)
    {
      if (aarch64_debug)
        {
          debug_printf ("struct return in %s = 0x%s\n",
                        gdbarch_register_name (gdbarch,
                                               AARCH64_STRUCT_RETURN_REGNUM),
                        paddress (gdbarch, struct_addr));
        }
      regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
                                      struct_addr);
    }

  for (argnum = 0; argnum < nargs; argnum++)
    {
      struct value *arg = args[argnum];
      struct type *arg_type, *fundamental_type;
      int len, elements;

      arg_type = check_typedef (value_type (arg));
      len = TYPE_LENGTH (arg_type);

      /* If arg can be passed in v registers as per the AAPCS64, then do so
         if there are enough spare registers.  */
      if (aapcs_is_vfp_call_or_return_candidate (arg_type, &elements,
                                                 &fundamental_type))
        {
          if (info.nsrn + elements <= 8)
            {
              /* We know that we have sufficient registers available therefore
                 this will never need to fall back to the stack.  */
              if (!pass_in_v_vfp_candidate (gdbarch, regcache, &info, arg_type,
                                            arg))
                gdb_assert_not_reached ("Failed to push args");
            }
          else
            {
              info.nsrn = 8;
              pass_on_stack (&info, arg_type, arg);
            }
          continue;
        }

      switch (TYPE_CODE (arg_type))
        {
        case TYPE_CODE_INT:
        case TYPE_CODE_BOOL:
        case TYPE_CODE_CHAR:
        case TYPE_CODE_RANGE:
        case TYPE_CODE_ENUM:
          if (len < 4)
            {
              /* Promote to 32 bit integer.  */
              if (TYPE_UNSIGNED (arg_type))
                arg_type = builtin_type (gdbarch)->builtin_uint32;
              else
                arg_type = builtin_type (gdbarch)->builtin_int32;
              arg = value_cast (arg_type, arg);
            }
          pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;

        case TYPE_CODE_STRUCT:
        case TYPE_CODE_ARRAY:
        case TYPE_CODE_UNION:
          if (len > 16)
            {
              /* PCS B.7 Aggregates larger than 16 bytes are passed by
                 invisible reference.  */

              /* Allocate aligned storage.  */
              sp = align_down (sp - len, 16);

              /* Write the real data into the stack.  */
              write_memory (sp, value_contents (arg), len);

              /* Construct the indirection.  */
              arg_type = lookup_pointer_type (arg_type);
              arg = value_from_pointer (arg_type, sp);
              pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
            }
          else
            /* PCS C.15 / C.18 multiple values pass.  */
            pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;

        default:
          pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;
        }
    }

  /* Make sure stack retains 16 byte alignment.  */
  if (info.nsaa & 15)
    sp -= 16 - (info.nsaa & 15);

  while (!info.si.empty ())
    {
      const stack_item_t &si = info.si.back ();

      sp -= si.len;
      if (si.data != NULL)
        write_memory (sp, si.data, si.len);
      info.si.pop_back ();
    }

  /* Finally, update the SP register.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);

  return sp;
}

/* Implement the "frame_align" gdbarch method.  */

static CORE_ADDR
aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
{
  /* Align the stack to sixteen bytes.  */
  return sp & ~(CORE_ADDR) 15;
}
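
/* For example (illustrative), aarch64_frame_align (gdbarch, 0x7ffffffe8)
   returns 0x7ffffffe0, always rounding the proposed stack pointer down
   to a 16-byte boundary as the AAPCS64 requires.  */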
1790
1791 /* Return the type for an AdvSISD Q register. */
1792
1793 static struct type *
1794 aarch64_vnq_type (struct gdbarch *gdbarch)
1795 {
1796 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1797
1798 if (tdep->vnq_type == NULL)
1799 {
1800 struct type *t;
1801 struct type *elem;
1802
1803 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
1804 TYPE_CODE_UNION);
1805
1806 elem = builtin_type (gdbarch)->builtin_uint128;
1807 append_composite_type_field (t, "u", elem);
1808
1809 elem = builtin_type (gdbarch)->builtin_int128;
1810 append_composite_type_field (t, "s", elem);
1811
1812 tdep->vnq_type = t;
1813 }
1814
1815 return tdep->vnq_type;
1816 }
1817
1818 /* Return the type for an AdvSISD D register. */
1819
1820 static struct type *
1821 aarch64_vnd_type (struct gdbarch *gdbarch)
1822 {
1823 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1824
1825 if (tdep->vnd_type == NULL)
1826 {
1827 struct type *t;
1828 struct type *elem;
1829
1830 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
1831 TYPE_CODE_UNION);
1832
1833 elem = builtin_type (gdbarch)->builtin_double;
1834 append_composite_type_field (t, "f", elem);
1835
1836 elem = builtin_type (gdbarch)->builtin_uint64;
1837 append_composite_type_field (t, "u", elem);
1838
1839 elem = builtin_type (gdbarch)->builtin_int64;
1840 append_composite_type_field (t, "s", elem);
1841
1842 tdep->vnd_type = t;
1843 }
1844
1845 return tdep->vnd_type;
1846 }
1847
1848 /* Return the type for an AdvSISD S register. */
1849
1850 static struct type *
1851 aarch64_vns_type (struct gdbarch *gdbarch)
1852 {
1853 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1854
1855 if (tdep->vns_type == NULL)
1856 {
1857 struct type *t;
1858 struct type *elem;
1859
1860 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
1861 TYPE_CODE_UNION);
1862
1863 elem = builtin_type (gdbarch)->builtin_float;
1864 append_composite_type_field (t, "f", elem);
1865
1866 elem = builtin_type (gdbarch)->builtin_uint32;
1867 append_composite_type_field (t, "u", elem);
1868
1869 elem = builtin_type (gdbarch)->builtin_int32;
1870 append_composite_type_field (t, "s", elem);
1871
1872 tdep->vns_type = t;
1873 }
1874
1875 return tdep->vns_type;
1876 }
1877
1878 /* Return the type for an AdvSISD H register. */
1879
1880 static struct type *
1881 aarch64_vnh_type (struct gdbarch *gdbarch)
1882 {
1883 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1884
1885 if (tdep->vnh_type == NULL)
1886 {
1887 struct type *t;
1888 struct type *elem;
1889
1890 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
1891 TYPE_CODE_UNION);
1892
1893 elem = builtin_type (gdbarch)->builtin_half;
1894 append_composite_type_field (t, "f", elem);
1895
1896 elem = builtin_type (gdbarch)->builtin_uint16;
1897 append_composite_type_field (t, "u", elem);
1898
1899 elem = builtin_type (gdbarch)->builtin_int16;
1900 append_composite_type_field (t, "s", elem);
1901
1902 tdep->vnh_type = t;
1903 }
1904
1905 return tdep->vnh_type;
1906 }
1907
1908 /* Return the type for an AdvSISD B register. */
1909
1910 static struct type *
1911 aarch64_vnb_type (struct gdbarch *gdbarch)
1912 {
1913 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1914
1915 if (tdep->vnb_type == NULL)
1916 {
1917 struct type *t;
1918 struct type *elem;
1919
1920 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
1921 TYPE_CODE_UNION);
1922
1923 elem = builtin_type (gdbarch)->builtin_uint8;
1924 append_composite_type_field (t, "u", elem);
1925
1926 elem = builtin_type (gdbarch)->builtin_int8;
1927 append_composite_type_field (t, "s", elem);
1928
1929 tdep->vnb_type = t;
1930 }
1931
1932 return tdep->vnb_type;
1933 }
1934
1935 /* Return the type for an AdvSISD V register. */
1936
1937 static struct type *
1938 aarch64_vnv_type (struct gdbarch *gdbarch)
1939 {
1940 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1941
1942 if (tdep->vnv_type == NULL)
1943 {
1944       /* The other AArch64 pseudo registers (Q,D,H,S,B) refer to a single value
1945 	 slice from the non-pseudo vector registers.  However, NEON V registers
1946 	 are always vector registers, and need constructing as such.  */
1947 const struct builtin_type *bt = builtin_type (gdbarch);
1948
1949 struct type *t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnv",
1950 TYPE_CODE_UNION);
1951
1952 struct type *sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
1953 TYPE_CODE_UNION);
1954 append_composite_type_field (sub, "f",
1955 init_vector_type (bt->builtin_double, 2));
1956 append_composite_type_field (sub, "u",
1957 init_vector_type (bt->builtin_uint64, 2));
1958 append_composite_type_field (sub, "s",
1959 init_vector_type (bt->builtin_int64, 2));
1960 append_composite_type_field (t, "d", sub);
1961
1962 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
1963 TYPE_CODE_UNION);
1964 append_composite_type_field (sub, "f",
1965 init_vector_type (bt->builtin_float, 4));
1966 append_composite_type_field (sub, "u",
1967 init_vector_type (bt->builtin_uint32, 4));
1968 append_composite_type_field (sub, "s",
1969 init_vector_type (bt->builtin_int32, 4));
1970 append_composite_type_field (t, "s", sub);
1971
1972 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
1973 TYPE_CODE_UNION);
1974 append_composite_type_field (sub, "f",
1975 init_vector_type (bt->builtin_half, 8));
1976 append_composite_type_field (sub, "u",
1977 init_vector_type (bt->builtin_uint16, 8));
1978 append_composite_type_field (sub, "s",
1979 init_vector_type (bt->builtin_int16, 8));
1980 append_composite_type_field (t, "h", sub);
1981
1982 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
1983 TYPE_CODE_UNION);
1984 append_composite_type_field (sub, "u",
1985 init_vector_type (bt->builtin_uint8, 16));
1986 append_composite_type_field (sub, "s",
1987 init_vector_type (bt->builtin_int8, 16));
1988 append_composite_type_field (t, "b", sub);
1989
1990 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
1991 TYPE_CODE_UNION);
1992 append_composite_type_field (sub, "u",
1993 init_vector_type (bt->builtin_uint128, 1));
1994 append_composite_type_field (sub, "s",
1995 init_vector_type (bt->builtin_int128, 1));
1996 append_composite_type_field (t, "q", sub);
1997
1998 tdep->vnv_type = t;
1999 }
2000
2001 return tdep->vnv_type;
2002 }
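
/* Illustrative example of the composite types built above, as seen
   from the CLI (output shape only; the values are hypothetical):

     (gdb) print $d0
     $1 = {f = 1.5, u = 4609434218613702656, s = 4609434218613702656}
     (gdb) print $v0.s.u
     $2 = {1, 2, 3, 4}

   Each union overlays signed, unsigned and, where applicable, float
   views of the same register bytes.  */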
2003
2004 /* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
2005
2006 static int
2007 aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
2008 {
2009 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2010
2011 if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
2012 return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;
2013
2014 if (reg == AARCH64_DWARF_SP)
2015 return AARCH64_SP_REGNUM;
2016
2017 if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
2018 return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;
2019
2020 if (reg == AARCH64_DWARF_SVE_VG)
2021 return AARCH64_SVE_VG_REGNUM;
2022
2023 if (reg == AARCH64_DWARF_SVE_FFR)
2024 return AARCH64_SVE_FFR_REGNUM;
2025
2026 if (reg >= AARCH64_DWARF_SVE_P0 && reg <= AARCH64_DWARF_SVE_P0 + 15)
2027 return AARCH64_SVE_P0_REGNUM + reg - AARCH64_DWARF_SVE_P0;
2028
2029 if (reg >= AARCH64_DWARF_SVE_Z0 && reg <= AARCH64_DWARF_SVE_Z0 + 31)
2030 return AARCH64_SVE_Z0_REGNUM + reg - AARCH64_DWARF_SVE_Z0;
2031
2032 if (tdep->has_pauth ())
2033 {
2034 if (reg >= AARCH64_DWARF_PAUTH_DMASK && reg <= AARCH64_DWARF_PAUTH_CMASK)
2035 return tdep->pauth_reg_base + reg - AARCH64_DWARF_PAUTH_DMASK;
2036
2037 if (reg == AARCH64_DWARF_PAUTH_RA_STATE)
2038 return tdep->pauth_ra_state_regnum;
2039 }
2040
2041 return -1;
2042 }
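
/* Sketch of the mapping implemented above (DWARF number -> GDB
   regnum), following the AArch64 DWARF ABI:

     0..30    -> x0..x30
     31       -> sp
     46, 47   -> vg, ffr    (SVE)
     48..63   -> p0..p15    (SVE)
     64..95   -> v0..v31
     96..127  -> z0..z31    (SVE)

   plus the pauth masks when the target has them.  Any other DWARF
   number maps to -1, which GDB treats as "no such register".  */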
2043
2044 /* Implement the "print_insn" gdbarch method. */
2045
2046 static int
2047 aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
2048 {
2049 info->symbols = NULL;
2050 return default_print_insn (memaddr, info);
2051 }
2052
2053 /* AArch64 BRK software debug mode instruction.
2054 Note that AArch64 code is always little-endian.
2055 1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000. */
2056 constexpr gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
2057
2058 typedef BP_MANIPULATION (aarch64_default_breakpoint) aarch64_breakpoint;
2059
2060 /* Extract from REGS, a regcache holding the (raw) register state, a
2061 function return value of type TYPE, and copy that, in virtual
2062 format, into VALBUF.  */
2063
2064 static void
2065 aarch64_extract_return_value (struct type *type, struct regcache *regs,
2066 gdb_byte *valbuf)
2067 {
2068 struct gdbarch *gdbarch = regs->arch ();
2069 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2070 int elements;
2071 struct type *fundamental_type;
2072
2073 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2074 &fundamental_type))
2075 {
2076 int len = TYPE_LENGTH (fundamental_type);
2077
2078 for (int i = 0; i < elements; i++)
2079 {
2080 int regno = AARCH64_V0_REGNUM + i;
2081 /* Enough space for a full vector register. */
2082 gdb_byte buf[register_size (gdbarch, regno)];
2083 gdb_assert (len <= sizeof (buf));
2084
2085 if (aarch64_debug)
2086 {
2087 debug_printf ("read HFA or HVA return value element %d from %s\n",
2088 i + 1,
2089 gdbarch_register_name (gdbarch, regno));
2090 }
2091 regs->cooked_read (regno, buf);
2092
2093 memcpy (valbuf, buf, len);
2094 valbuf += len;
2095 }
2096 }
2097 else if (TYPE_CODE (type) == TYPE_CODE_INT
2098 || TYPE_CODE (type) == TYPE_CODE_CHAR
2099 || TYPE_CODE (type) == TYPE_CODE_BOOL
2100 || TYPE_CODE (type) == TYPE_CODE_PTR
2101 || TYPE_IS_REFERENCE (type)
2102 || TYPE_CODE (type) == TYPE_CODE_ENUM)
2103 {
2104 /* If the type is a plain integer, then the access is
2105 straightforward.  Otherwise we have to play around a bit
2106 more. */
2107 int len = TYPE_LENGTH (type);
2108 int regno = AARCH64_X0_REGNUM;
2109 ULONGEST tmp;
2110
2111 while (len > 0)
2112 {
2113 /* By using store_unsigned_integer we avoid having to do
2114 anything special for small big-endian values. */
2115 regcache_cooked_read_unsigned (regs, regno++, &tmp);
2116 store_unsigned_integer (valbuf,
2117 (len > X_REGISTER_SIZE
2118 ? X_REGISTER_SIZE : len), byte_order, tmp);
2119 len -= X_REGISTER_SIZE;
2120 valbuf += X_REGISTER_SIZE;
2121 }
2122 }
2123 else
2124 {
2125 /* For a structure or union the behaviour is as if the value had
2126 been stored to word-aligned memory and then loaded into
2127 registers with 64-bit load instruction(s). */
2128 int len = TYPE_LENGTH (type);
2129 int regno = AARCH64_X0_REGNUM;
2130 bfd_byte buf[X_REGISTER_SIZE];
2131
2132 while (len > 0)
2133 {
2134 regs->cooked_read (regno++, buf);
2135 memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2136 len -= X_REGISTER_SIZE;
2137 valbuf += X_REGISTER_SIZE;
2138 }
2139 }
2140 }
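
/* Worked example for the aggregate case above (illustrative): a
   12-byte struct such as

     struct s { uint32_t a, b, c; };

   has TYPE_LENGTH 12, so the loop copies 8 bytes from X0 (len drops
   from 12 to 4) and then the low 4 bytes of X1, after which len goes
   negative and the loop terminates.  */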
2141
2142
2143 /* Will a function return an aggregate type in memory or in a
2144 register? Return 0 if an aggregate type can be returned in a
2145 register, 1 if it must be returned in memory. */
2146
2147 static int
2148 aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
2149 {
2150 type = check_typedef (type);
2151 int elements;
2152 struct type *fundamental_type;
2153
2154 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2155 &fundamental_type))
2156 {
2157 /* v0-v7 are used to return values and one register is allocated
2158 for one member.  However, an HFA or HVA has at most four members.  */
2159 return 0;
2160 }
2161
2162 if (TYPE_LENGTH (type) > 16)
2163 {
2164 /* PCS B.6 Aggregates larger than 16 bytes are passed by
2165 invisible reference. */
2166
2167 return 1;
2168 }
2169
2170 return 0;
2171 }
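
/* Illustrative applications of the rule above:

     struct { double d[4]; }       -- HFA of four doubles, returned in
                                      V0-V3, so 0 (registers).
     struct { uint64_t a, b; }     -- 16 bytes, returned in X0/X1,
                                      so 0 (registers).
     struct { uint64_t a, b, c; }  -- 24 bytes, so 1: the caller passes
                                      a buffer address in the indirect
                                      result register X8 (AAPCS64).  */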
2172
2173 /* Write into appropriate registers a function return value of type
2174 TYPE, given in virtual format. */
2175
2176 static void
2177 aarch64_store_return_value (struct type *type, struct regcache *regs,
2178 const gdb_byte *valbuf)
2179 {
2180 struct gdbarch *gdbarch = regs->arch ();
2181 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2182 int elements;
2183 struct type *fundamental_type;
2184
2185 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2186 &fundamental_type))
2187 {
2188 int len = TYPE_LENGTH (fundamental_type);
2189
2190 for (int i = 0; i < elements; i++)
2191 {
2192 int regno = AARCH64_V0_REGNUM + i;
2193 /* Enough space for a full vector register. */
2194 gdb_byte tmpbuf[register_size (gdbarch, regno)];
2195 gdb_assert (len <= sizeof (tmpbuf));
2196
2197 if (aarch64_debug)
2198 {
2199 debug_printf ("write HFA or HVA return value element %d to %s\n",
2200 i + 1,
2201 gdbarch_register_name (gdbarch, regno));
2202 }
2203
2204 memcpy (tmpbuf, valbuf,
2205 len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
2206 regs->cooked_write (regno, tmpbuf);
2207 valbuf += len;
2208 }
2209 }
2210 else if (TYPE_CODE (type) == TYPE_CODE_INT
2211 || TYPE_CODE (type) == TYPE_CODE_CHAR
2212 || TYPE_CODE (type) == TYPE_CODE_BOOL
2213 || TYPE_CODE (type) == TYPE_CODE_PTR
2214 || TYPE_IS_REFERENCE (type)
2215 || TYPE_CODE (type) == TYPE_CODE_ENUM)
2216 {
2217 if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
2218 {
2219 /* Values of one word or less are zero/sign-extended and
2220 returned in X0.  */
2221 bfd_byte tmpbuf[X_REGISTER_SIZE];
2222 LONGEST val = unpack_long (type, valbuf);
2223
2224 store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
2225 regs->cooked_write (AARCH64_X0_REGNUM, tmpbuf);
2226 }
2227 else
2228 {
2229 /* Integral values greater than one word are stored in
2230 consecutive registers starting with X0.  This will always
2231 be a multiple of the register size.  */
2232 int len = TYPE_LENGTH (type);
2233 int regno = AARCH64_X0_REGNUM;
2234
2235 while (len > 0)
2236 {
2237 regs->cooked_write (regno++, valbuf);
2238 len -= X_REGISTER_SIZE;
2239 valbuf += X_REGISTER_SIZE;
2240 }
2241 }
2242 }
2243 else
2244 {
2245 /* For a structure or union the behaviour is as if the value had
2246 been stored to word-aligned memory and then loaded into
2247 registers with 64-bit load instruction(s). */
2248 int len = TYPE_LENGTH (type);
2249 int regno = AARCH64_X0_REGNUM;
2250 bfd_byte tmpbuf[X_REGISTER_SIZE];
2251
2252 while (len > 0)
2253 {
2254 memcpy (tmpbuf, valbuf,
2255 len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2256 regs->cooked_write (regno++, tmpbuf);
2257 len -= X_REGISTER_SIZE;
2258 valbuf += X_REGISTER_SIZE;
2259 }
2260 }
2261 }
2262
2263 /* Implement the "return_value" gdbarch method. */
2264
2265 static enum return_value_convention
2266 aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
2267 struct type *valtype, struct regcache *regcache,
2268 gdb_byte *readbuf, const gdb_byte *writebuf)
2269 {
2271 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
2272 || TYPE_CODE (valtype) == TYPE_CODE_UNION
2273 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
2274 {
2275 if (aarch64_return_in_memory (gdbarch, valtype))
2276 {
2277 if (aarch64_debug)
2278 debug_printf ("return value in memory\n");
2279 return RETURN_VALUE_STRUCT_CONVENTION;
2280 }
2281 }
2282
2283 if (writebuf)
2284 aarch64_store_return_value (valtype, regcache, writebuf);
2285
2286 if (readbuf)
2287 aarch64_extract_return_value (valtype, regcache, readbuf);
2288
2289 if (aarch64_debug)
2290 debug_printf ("return value in registers\n");
2291
2292 return RETURN_VALUE_REGISTER_CONVENTION;
2293 }
2294
2295 /* Implement the "get_longjmp_target" gdbarch method. */
2296
2297 static int
2298 aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2299 {
2300 CORE_ADDR jb_addr;
2301 gdb_byte buf[X_REGISTER_SIZE];
2302 struct gdbarch *gdbarch = get_frame_arch (frame);
2303 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2304 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2305
2306 jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
2307
2308 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
2309 X_REGISTER_SIZE))
2310 return 0;
2311
2312 *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
2313 return 1;
2314 }
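
/* Sketch of the lookup above: for a longjmp-capable OS ABI,
   tdep->jb_pc is set to the jmp_buf slot index holding the saved PC
   and tdep->jb_elt_size to 8, so the target PC is read from
   jb_addr + jb_pc * 8 given the jmp_buf pointer in X0.  The index is
   ABI-specific; it stays at -1 (disabled) unless an OS ABI layer
   enables it.  */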
2315
2316 /* Implement the "gen_return_address" gdbarch method. */
2317
2318 static void
2319 aarch64_gen_return_address (struct gdbarch *gdbarch,
2320 struct agent_expr *ax, struct axs_value *value,
2321 CORE_ADDR scope)
2322 {
2323 value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
2324 value->kind = axs_lvalue_register;
2325 value->u.reg = AARCH64_LR_REGNUM;
2326 }
2327 \f
2328
2329 /* Return the pseudo register name corresponding to register regnum. */
2330
2331 static const char *
2332 aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
2333 {
2334 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2335
2336 static const char *const q_name[] =
2337 {
2338 "q0", "q1", "q2", "q3",
2339 "q4", "q5", "q6", "q7",
2340 "q8", "q9", "q10", "q11",
2341 "q12", "q13", "q14", "q15",
2342 "q16", "q17", "q18", "q19",
2343 "q20", "q21", "q22", "q23",
2344 "q24", "q25", "q26", "q27",
2345 "q28", "q29", "q30", "q31",
2346 };
2347
2348 static const char *const d_name[] =
2349 {
2350 "d0", "d1", "d2", "d3",
2351 "d4", "d5", "d6", "d7",
2352 "d8", "d9", "d10", "d11",
2353 "d12", "d13", "d14", "d15",
2354 "d16", "d17", "d18", "d19",
2355 "d20", "d21", "d22", "d23",
2356 "d24", "d25", "d26", "d27",
2357 "d28", "d29", "d30", "d31",
2358 };
2359
2360 static const char *const s_name[] =
2361 {
2362 "s0", "s1", "s2", "s3",
2363 "s4", "s5", "s6", "s7",
2364 "s8", "s9", "s10", "s11",
2365 "s12", "s13", "s14", "s15",
2366 "s16", "s17", "s18", "s19",
2367 "s20", "s21", "s22", "s23",
2368 "s24", "s25", "s26", "s27",
2369 "s28", "s29", "s30", "s31",
2370 };
2371
2372 static const char *const h_name[] =
2373 {
2374 "h0", "h1", "h2", "h3",
2375 "h4", "h5", "h6", "h7",
2376 "h8", "h9", "h10", "h11",
2377 "h12", "h13", "h14", "h15",
2378 "h16", "h17", "h18", "h19",
2379 "h20", "h21", "h22", "h23",
2380 "h24", "h25", "h26", "h27",
2381 "h28", "h29", "h30", "h31",
2382 };
2383
2384 static const char *const b_name[] =
2385 {
2386 "b0", "b1", "b2", "b3",
2387 "b4", "b5", "b6", "b7",
2388 "b8", "b9", "b10", "b11",
2389 "b12", "b13", "b14", "b15",
2390 "b16", "b17", "b18", "b19",
2391 "b20", "b21", "b22", "b23",
2392 "b24", "b25", "b26", "b27",
2393 "b28", "b29", "b30", "b31",
2394 };
2395
2396 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
2397
2398 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
2399 return q_name[p_regnum - AARCH64_Q0_REGNUM];
2400
2401 if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
2402 return d_name[p_regnum - AARCH64_D0_REGNUM];
2403
2404 if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
2405 return s_name[p_regnum - AARCH64_S0_REGNUM];
2406
2407 if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
2408 return h_name[p_regnum - AARCH64_H0_REGNUM];
2409
2410 if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
2411 return b_name[p_regnum - AARCH64_B0_REGNUM];
2412
2413 if (tdep->has_sve ())
2414 {
2415 static const char *const sve_v_name[] =
2416 {
2417 "v0", "v1", "v2", "v3",
2418 "v4", "v5", "v6", "v7",
2419 "v8", "v9", "v10", "v11",
2420 "v12", "v13", "v14", "v15",
2421 "v16", "v17", "v18", "v19",
2422 "v20", "v21", "v22", "v23",
2423 "v24", "v25", "v26", "v27",
2424 "v28", "v29", "v30", "v31",
2425 };
2426
2427 if (p_regnum >= AARCH64_SVE_V0_REGNUM
2428 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2429 return sve_v_name[p_regnum - AARCH64_SVE_V0_REGNUM];
2430 }
2431
2432 /* RA_STATE is used for unwinding only. Do not assign it a name - this
2433 prevents it from being read by methods such as
2434 mi_cmd_trace_frame_collected. */
2435 if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
2436 return "";
2437
2438 internal_error (__FILE__, __LINE__,
2439 _("aarch64_pseudo_register_name: bad register number %d"),
2440 p_regnum);
2441 }
2442
2443 /* Implement the "pseudo_register_type" tdesc_arch_data method. */
2444
2445 static struct type *
2446 aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
2447 {
2448 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2449
2450 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
2451
2452 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
2453 return aarch64_vnq_type (gdbarch);
2454
2455 if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
2456 return aarch64_vnd_type (gdbarch);
2457
2458 if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
2459 return aarch64_vns_type (gdbarch);
2460
2461 if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
2462 return aarch64_vnh_type (gdbarch);
2463
2464 if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
2465 return aarch64_vnb_type (gdbarch);
2466
2467 if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
2468 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2469 return aarch64_vnv_type (gdbarch);
2470
2471 if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
2472 return builtin_type (gdbarch)->builtin_uint64;
2473
2474 internal_error (__FILE__, __LINE__,
2475 _("aarch64_pseudo_register_type: bad register number %d"),
2476 p_regnum);
2477 }
2478
2479 /* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2480
2481 static int
2482 aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
2483 struct reggroup *group)
2484 {
2485 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2486
2487 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
2488
2489 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
2490 return group == all_reggroup || group == vector_reggroup;
2491 else if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
2492 return (group == all_reggroup || group == vector_reggroup
2493 || group == float_reggroup);
2494 else if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
2495 return (group == all_reggroup || group == vector_reggroup
2496 || group == float_reggroup);
2497 else if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
2498 return group == all_reggroup || group == vector_reggroup;
2499 else if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
2500 return group == all_reggroup || group == vector_reggroup;
2501 else if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
2502 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2503 return group == all_reggroup || group == vector_reggroup;
2504 /* RA_STATE is used for unwinding only. Do not assign it to any groups. */
2505 if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
2506 return 0;
2507
2508 return group == all_reggroup;
2509 }
2510
2511 /* Helper for aarch64_pseudo_read_value. */
2512
2513 static struct value *
2514 aarch64_pseudo_read_value_1 (struct gdbarch *gdbarch,
2515 readable_regcache *regcache, int regnum_offset,
2516 int regsize, struct value *result_value)
2517 {
2518 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
2519
2520 /* Enough space for a full vector register. */
2521 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2522 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2523
2524 if (regcache->raw_read (v_regnum, reg_buf) != REG_VALID)
2525 mark_value_bytes_unavailable (result_value, 0,
2526 TYPE_LENGTH (value_type (result_value)));
2527 else
2528 memcpy (value_contents_raw (result_value), reg_buf, regsize);
2529
2530 return result_value;
2531 }
2532
2533 /* Implement the "pseudo_register_read_value" gdbarch method. */
2534
2535 static struct value *
2536 aarch64_pseudo_read_value (struct gdbarch *gdbarch, readable_regcache *regcache,
2537 int regnum)
2538 {
2539 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2540 struct value *result_value = allocate_value (register_type (gdbarch, regnum));
2541
2542 VALUE_LVAL (result_value) = lval_register;
2543 VALUE_REGNUM (result_value) = regnum;
2544
2545 regnum -= gdbarch_num_regs (gdbarch);
2546
2547 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2548 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2549 regnum - AARCH64_Q0_REGNUM,
2550 Q_REGISTER_SIZE, result_value);
2551
2552 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2553 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2554 regnum - AARCH64_D0_REGNUM,
2555 D_REGISTER_SIZE, result_value);
2556
2557 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2558 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2559 regnum - AARCH64_S0_REGNUM,
2560 S_REGISTER_SIZE, result_value);
2561
2562 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2563 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2564 regnum - AARCH64_H0_REGNUM,
2565 H_REGISTER_SIZE, result_value);
2566
2567 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2568 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2569 regnum - AARCH64_B0_REGNUM,
2570 B_REGISTER_SIZE, result_value);
2571
2572 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2573 && regnum < AARCH64_SVE_V0_REGNUM + 32)
2574 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2575 regnum - AARCH64_SVE_V0_REGNUM,
2576 V_REGISTER_SIZE, result_value);
2577
2578 gdb_assert_not_reached ("regnum out of bounds");
2579 }
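
/* Illustrative layout for the reads above: each Q/D/S/H/B pseudo is
   the low slice of the corresponding V register, so with v0 holding
   bytes b15..b0 (AArch64 vector registers are little-endian):

     $q0 = b15..b0 (16 bytes), $d0 = b7..b0, $s0 = b3..b0,
     $h0 = b1..b0, $b0 = b0.

   Reading, say, $s3 therefore raw-reads all of v3 and copies out the
   low S_REGISTER_SIZE (4) bytes.  */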
2580
2581 /* Helper for aarch64_pseudo_write. */
2582
2583 static void
2584 aarch64_pseudo_write_1 (struct gdbarch *gdbarch, struct regcache *regcache,
2585 int regnum_offset, int regsize, const gdb_byte *buf)
2586 {
2587 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
2588
2589 /* Enough space for a full vector register. */
2590 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2591 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2592
2593 /* Ensure the register buffer is zero; we want GDB writes of the
2594 various 'scalar' pseudo registers to behave like architectural
2595 writes: register-width bytes are written and the remainder is set
2596 to zero.  */
2597 memset (reg_buf, 0, register_size (gdbarch, AARCH64_V0_REGNUM));
2598
2599 memcpy (reg_buf, buf, regsize);
2600 regcache->raw_write (v_regnum, reg_buf);
2601 }
2602
2603 /* Implement the "pseudo_register_write" gdbarch method. */
2604
2605 static void
2606 aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2607 int regnum, const gdb_byte *buf)
2608 {
2609 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2610 regnum -= gdbarch_num_regs (gdbarch);
2611
2612 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2613 return aarch64_pseudo_write_1 (gdbarch, regcache,
2614 regnum - AARCH64_Q0_REGNUM, Q_REGISTER_SIZE,
2615 buf);
2616
2617 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2618 return aarch64_pseudo_write_1 (gdbarch, regcache,
2619 regnum - AARCH64_D0_REGNUM, D_REGISTER_SIZE,
2620 buf);
2621
2622 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2623 return aarch64_pseudo_write_1 (gdbarch, regcache,
2624 regnum - AARCH64_S0_REGNUM, S_REGISTER_SIZE,
2625 buf);
2626
2627 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2628 return aarch64_pseudo_write_1 (gdbarch, regcache,
2629 regnum - AARCH64_H0_REGNUM, H_REGISTER_SIZE,
2630 buf);
2631
2632 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2633 return aarch64_pseudo_write_1 (gdbarch, regcache,
2634 regnum - AARCH64_B0_REGNUM, B_REGISTER_SIZE,
2635 buf);
2636
2637 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2638 && regnum < AARCH64_SVE_V0_REGNUM + 32)
2639 return aarch64_pseudo_write_1 (gdbarch, regcache,
2640 regnum - AARCH64_SVE_V0_REGNUM,
2641 V_REGISTER_SIZE, buf);
2642
2643 gdb_assert_not_reached ("regnum out of bounds");
2644 }
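
/* For example (illustrative): after

     (gdb) set $s0.f = 1.5

   the low 4 bytes of v0 hold 1.5f and the remaining 12 bytes read as
   zero, mirroring how an architectural write to S0 zeroes the upper
   bits of V0.  */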
2645
2646 /* Callback function for user_reg_add. */
2647
2648 static struct value *
2649 value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
2650 {
2651 const int *reg_p = (const int *) baton;
2652
2653 return value_of_register (*reg_p, frame);
2654 }
2655 \f
2656
2657 /* Implement the "software_single_step" gdbarch method, needed to
2658 single step through atomic sequences on AArch64. */
2659
2660 static std::vector<CORE_ADDR>
2661 aarch64_software_single_step (struct regcache *regcache)
2662 {
2663 struct gdbarch *gdbarch = regcache->arch ();
2664 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2665 const int insn_size = 4;
2666 const int atomic_sequence_length = 16; /* Instruction sequence length. */
2667 CORE_ADDR pc = regcache_read_pc (regcache);
2668 CORE_ADDR breaks[2] = { CORE_ADDR_MAX, CORE_ADDR_MAX };
2669 CORE_ADDR loc = pc;
2670 CORE_ADDR closing_insn = 0;
2671 uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
2672 byte_order_for_code);
2673 int index;
2674 int insn_count;
2675 int bc_insn_count = 0; /* Conditional branch instruction count. */
2676 int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed). */
2677 aarch64_inst inst;
2678
2679 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2680 return {};
2681
2682 /* Look for a Load Exclusive instruction which begins the sequence. */
2683 if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
2684 return {};
2685
2686 for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
2687 {
2688 loc += insn_size;
2689 insn = read_memory_unsigned_integer (loc, insn_size,
2690 byte_order_for_code);
2691
2692 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2693 return {};
2694 /* Check if the instruction is a conditional branch. */
2695 if (inst.opcode->iclass == condbranch)
2696 {
2697 gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);
2698
2699 if (bc_insn_count >= 1)
2700 return {};
2701
2702 /* It is, so we'll try to set a breakpoint at the destination. */
2703 breaks[1] = loc + inst.operands[0].imm.value;
2704
2705 bc_insn_count++;
2706 last_breakpoint++;
2707 }
2708
2709 /* Look for the Store Exclusive which closes the atomic sequence. */
2710 if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
2711 {
2712 closing_insn = loc;
2713 break;
2714 }
2715 }
2716
2717 /* We didn't find a closing Store Exclusive instruction, fall back. */
2718 if (!closing_insn)
2719 return {};
2720
2721 /* Insert breakpoint after the end of the atomic sequence. */
2722 breaks[0] = loc + insn_size;
2723
2724 /* Check for duplicated breakpoints, and also check that the second
2725 breakpoint is not within the atomic sequence. */
2726 if (last_breakpoint
2727 && (breaks[1] == breaks[0]
2728 || (breaks[1] >= pc && breaks[1] <= closing_insn)))
2729 last_breakpoint = 0;
2730
2731 std::vector<CORE_ADDR> next_pcs;
2732
2733 /* Insert the breakpoint at the end of the sequence, and one at the
2734 destination of the conditional branch, if it exists. */
2735 for (index = 0; index <= last_breakpoint; index++)
2736 next_pcs.push_back (breaks[index]);
2737
2738 return next_pcs;
2739 }
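
/* Illustrative shape of an atomic sequence the function above steps
   over (a compare-and-swap loop; hypothetical code):

     retry:
       ldaxr   w1, [x0]       -- load-exclusive opens the sequence
       cmp     w1, w2
       b.ne    out            -- conditional branch inside the sequence
       stlxr   w3, w4, [x0]   -- store-exclusive closes the sequence
       cbnz    w3, retry
     out:

   A breakpoint between the ldaxr and the stlxr would clear the
   exclusive monitor and could make the sequence retry indefinitely,
   so breaks[0] goes after the stlxr and breaks[1], if distinct and
   outside the sequence, on the b.ne destination.  */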
2740
2741 struct aarch64_displaced_step_closure : public displaced_step_closure
2742 {
2743 /* True when a conditional instruction, such as B.COND, TBZ or CBZ,
2744 is being displaced stepped.  */
2745 int cond = 0;
2746
2747 /* PC adjustment offset after displaced stepping. */
2748 int32_t pc_adjust = 0;
2749 };
2750
2751 /* Data when visiting instructions for displaced stepping. */
2752
2753 struct aarch64_displaced_step_data
2754 {
2755 struct aarch64_insn_data base;
2756
2757 /* The address at which the instruction will be executed.  */
2758 CORE_ADDR new_addr;
2759 /* Buffer of instructions to be copied to NEW_ADDR to execute. */
2760 uint32_t insn_buf[AARCH64_DISPLACED_MODIFIED_INSNS];
2761 /* Number of instructions in INSN_BUF. */
2762 unsigned insn_count;
2763 /* Registers when doing displaced stepping. */
2764 struct regcache *regs;
2765
2766 aarch64_displaced_step_closure *dsc;
2767 };
2768
2769 /* Implementation of aarch64_insn_visitor method "b". */
2770
2771 static void
2772 aarch64_displaced_step_b (const int is_bl, const int32_t offset,
2773 struct aarch64_insn_data *data)
2774 {
2775 struct aarch64_displaced_step_data *dsd
2776 = (struct aarch64_displaced_step_data *) data;
2777 int64_t new_offset = data->insn_addr - dsd->new_addr + offset;
2778
2779 if (can_encode_int32 (new_offset, 28))
2780 {
2781 /* Emit B rather than BL, because executing BL on a new address
2782 will get the wrong address into LR. In order to avoid this,
2783 we emit B, and update LR if the instruction is BL. */
2784 emit_b (dsd->insn_buf, 0, new_offset);
2785 dsd->insn_count++;
2786 }
2787 else
2788 {
2789 /* Write NOP. */
2790 emit_nop (dsd->insn_buf);
2791 dsd->insn_count++;
2792 dsd->dsc->pc_adjust = offset;
2793 }
2794
2795 if (is_bl)
2796 {
2797 /* Update LR. */
2798 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
2799 data->insn_addr + 4);
2800 }
2801 }
2802
2803 /* Implementation of aarch64_insn_visitor method "b_cond". */
2804
2805 static void
2806 aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
2807 struct aarch64_insn_data *data)
2808 {
2809 struct aarch64_displaced_step_data *dsd
2810 = (struct aarch64_displaced_step_data *) data;
2811
2812 /* GDB has to fix up the PC after displaced stepping this instruction
2813 differently according to whether the condition is true or false.
2814 Instead of checking COND against the condition flags, we can use
2815 the following instruction sequence, and GDB can tell how to fix up
2816 the PC from the resulting PC value.
2817
2818 B.COND TAKEN ; If cond is true, then jump to TAKEN.
2819 INSN1 ;
2820 TAKEN:
2821 INSN2
2822 */
2823
2824 emit_bcond (dsd->insn_buf, cond, 8);
2825 dsd->dsc->cond = 1;
2826 dsd->dsc->pc_adjust = offset;
2827 dsd->insn_count = 1;
2828 }
2829
2830 /* Build an aarch64_register for register number NUM, 64-bit wide if
2831 IS64 is non-zero.  If the register is known statically, prefer a
2832 global definition to this helper function.  */
2833
2834 static struct aarch64_register
2835 aarch64_register (unsigned num, int is64)
2836 {
2837 return (struct aarch64_register) { num, is64 };
2838 }
2839
2840 /* Implementation of aarch64_insn_visitor method "cb". */
2841
2842 static void
2843 aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
2844 const unsigned rn, int is64,
2845 struct aarch64_insn_data *data)
2846 {
2847 struct aarch64_displaced_step_data *dsd
2848 = (struct aarch64_displaced_step_data *) data;
2849
2850 /* The offset is out of range for a compare and branch
2851 instruction. We can use the following instructions instead:
2852
2853 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
2854 INSN1 ;
2855 TAKEN:
2856 INSN2
2857 */
2858 emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
2859 dsd->insn_count = 1;
2860 dsd->dsc->cond = 1;
2861 dsd->dsc->pc_adjust = offset;
2862 }
2863
2864 /* Implementation of aarch64_insn_visitor method "tb". */
2865
2866 static void
2867 aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
2868 const unsigned rt, unsigned bit,
2869 struct aarch64_insn_data *data)
2870 {
2871 struct aarch64_displaced_step_data *dsd
2872 = (struct aarch64_displaced_step_data *) data;
2873
2874 /* The offset is out of range for a test bit and branch
2875 instruction.  We can use the following instructions instead:
2876
2877 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
2878 INSN1 ;
2879 TAKEN:
2880 INSN2
2881
2882 */
2883 emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
2884 dsd->insn_count = 1;
2885 dsd->dsc->cond = 1;
2886 dsd->dsc->pc_adjust = offset;
2887 }
2888
2889 /* Implementation of aarch64_insn_visitor method "adr". */
2890
2891 static void
2892 aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
2893 const int is_adrp, struct aarch64_insn_data *data)
2894 {
2895 struct aarch64_displaced_step_data *dsd
2896 = (struct aarch64_displaced_step_data *) data;
2897 /* We know exactly the address the ADR{P,} instruction will compute.
2898 We can just write it to the destination register. */
2899 CORE_ADDR address = data->insn_addr + offset;
2900
2901 if (is_adrp)
2902 {
2903 /* Clear the lower 12 bits of the offset to get the 4K page. */
2904 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2905 address & ~0xfff);
2906 }
2907 else
2908 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2909 address);
2910
2911 dsd->dsc->pc_adjust = 4;
2912 emit_nop (dsd->insn_buf);
2913 dsd->insn_count = 1;
2914 }
2915
2916 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
2917
2918 static void
2919 aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
2920 const unsigned rt, const int is64,
2921 struct aarch64_insn_data *data)
2922 {
2923 struct aarch64_displaced_step_data *dsd
2924 = (struct aarch64_displaced_step_data *) data;
2925 CORE_ADDR address = data->insn_addr + offset;
2926 struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };
2927
2928 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
2929 address);
2930
2931 if (is_sw)
2932 dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
2933 aarch64_register (rt, 1), zero);
2934 else
2935 dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
2936 aarch64_register (rt, 1), zero);
2937
2938 dsd->dsc->pc_adjust = 4;
2939 }
2940
2941 /* Implementation of aarch64_insn_visitor method "others". */
2942
2943 static void
2944 aarch64_displaced_step_others (const uint32_t insn,
2945 struct aarch64_insn_data *data)
2946 {
2947 struct aarch64_displaced_step_data *dsd
2948 = (struct aarch64_displaced_step_data *) data;
2949
2950 aarch64_emit_insn (dsd->insn_buf, insn);
2951 dsd->insn_count = 1;
2952
2953 if ((insn & 0xfffffc1f) == 0xd65f0000)
2954 {
2955 /* RET */
2956 dsd->dsc->pc_adjust = 0;
2957 }
2958 else
2959 dsd->dsc->pc_adjust = 4;
2960 }
2961
2962 static const struct aarch64_insn_visitor visitor =
2963 {
2964 aarch64_displaced_step_b,
2965 aarch64_displaced_step_b_cond,
2966 aarch64_displaced_step_cb,
2967 aarch64_displaced_step_tb,
2968 aarch64_displaced_step_adr,
2969 aarch64_displaced_step_ldr_literal,
2970 aarch64_displaced_step_others,
2971 };
2972
2973 /* Implement the "displaced_step_copy_insn" gdbarch method. */
2974
2975 struct displaced_step_closure *
2976 aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
2977 CORE_ADDR from, CORE_ADDR to,
2978 struct regcache *regs)
2979 {
2980 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2981 uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
2982 struct aarch64_displaced_step_data dsd;
2983 aarch64_inst inst;
2984
2985 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2986 return NULL;
2987
2988 /* Look for a Load Exclusive instruction which begins the sequence. */
2989 if (inst.opcode->iclass == ldstexcl && bit (insn, 22))
2990 {
2991 /* We can't displaced step atomic sequences. */
2992 return NULL;
2993 }
2994
2995 std::unique_ptr<aarch64_displaced_step_closure> dsc
2996 (new aarch64_displaced_step_closure);
2997 dsd.base.insn_addr = from;
2998 dsd.new_addr = to;
2999 dsd.regs = regs;
3000 dsd.dsc = dsc.get ();
3001 dsd.insn_count = 0;
3002 aarch64_relocate_instruction (insn, &visitor,
3003 (struct aarch64_insn_data *) &dsd);
3004 gdb_assert (dsd.insn_count <= AARCH64_DISPLACED_MODIFIED_INSNS);
3005
3006 if (dsd.insn_count != 0)
3007 {
3008 int i;
3009
3010 /* Instruction can be relocated to scratch pad. Copy
3011 relocated instruction(s) there. */
3012 for (i = 0; i < dsd.insn_count; i++)
3013 {
3014 if (debug_displaced)
3015 {
3016 debug_printf ("displaced: writing insn ");
3017 debug_printf ("%.8x", dsd.insn_buf[i]);
3018 debug_printf (" at %s\n", paddress (gdbarch, to + i * 4));
3019 }
3020 write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
3021 (ULONGEST) dsd.insn_buf[i]);
3022 }
3023 }
3024 else
3025 {
3026 dsc = NULL;
3027 }
3028
3029 return dsc.release ();
3030 }
3031
3032 /* Implement the "displaced_step_fixup" gdbarch method. */
3033
3034 void
3035 aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
3036 struct displaced_step_closure *dsc_,
3037 CORE_ADDR from, CORE_ADDR to,
3038 struct regcache *regs)
3039 {
3040 aarch64_displaced_step_closure *dsc = (aarch64_displaced_step_closure *) dsc_;
3041
3042 if (dsc->cond)
3043 {
3044 ULONGEST pc;
3045
3046 regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc);
3047 if (pc - to == 8)
3048 {
3049 /* Condition is true. */
3050 }
3051 else if (pc - to == 4)
3052 {
3053 /* Condition is false. */
3054 dsc->pc_adjust = 4;
3055 }
3056 else
3057 gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
3058 }
3059
3060 if (dsc->pc_adjust != 0)
3061 {
3062 if (debug_displaced)
3063 {
3064 debug_printf ("displaced: fixup: set PC to %s:%d\n",
3065 paddress (gdbarch, from), dsc->pc_adjust);
3066 }
3067 regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
3068 from + dsc->pc_adjust);
3069 }
3070 }
3071
3072 /* Implement the "displaced_step_hw_singlestep" gdbarch method. */
3073
3074 int
3075 aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch,
3076 struct displaced_step_closure *closure)
3077 {
3078 return 1;
3079 }
3080
3081 /* Get the correct target description for the given VQ value.
3082 If VQ is zero then it is assumed SVE is not supported.
3083 (It is not possible to set VQ to zero on an SVE system). */
3084
3085 const target_desc *
3086 aarch64_read_description (uint64_t vq, bool pauth_p)
3087 {
3088 if (vq > AARCH64_MAX_SVE_VQ)
3089 error (_("VQ is %" PRIu64 ", maximum supported value is %d"), vq,
3090 AARCH64_MAX_SVE_VQ);
3091
3092 struct target_desc *tdesc = tdesc_aarch64_list[vq][pauth_p];
3093
3094 if (tdesc == NULL)
3095 {
3096 tdesc = aarch64_create_target_description (vq, pauth_p);
3097 tdesc_aarch64_list[vq][pauth_p] = tdesc;
3098 }
3099
3100 return tdesc;
3101 }
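
/* Usage sketch (illustrative): a target reporting an SVE vector
   length of 32 bytes and no pointer authentication would call
   aarch64_read_description (sve_vq_from_vl (32), false), i.e. VQ == 2,
   and get back a cached description with 256-bit Z registers.  */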
3102
3103 /* Return the VQ used when creating the target description TDESC. */
3104
3105 static uint64_t
3106 aarch64_get_tdesc_vq (const struct target_desc *tdesc)
3107 {
3108 const struct tdesc_feature *feature_sve;
3109
3110 if (!tdesc_has_registers (tdesc))
3111 return 0;
3112
3113 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
3114
3115 if (feature_sve == nullptr)
3116 return 0;
3117
3118 uint64_t vl = tdesc_register_bitsize (feature_sve,
3119 aarch64_sve_register_names[0]) / 8;
3120 return sve_vq_from_vl (vl);
3121 }
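
/* For example, if the description advertises z0 as 2048 bits wide,
   then vl is 256 bytes and the result is VQ == 16, the architectural
   maximum (AARCH64_MAX_SVE_VQ).  */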
3122
3123 /* Add all the expected register sets into GDBARCH. */
3124
3125 static void
3126 aarch64_add_reggroups (struct gdbarch *gdbarch)
3127 {
3128 reggroup_add (gdbarch, general_reggroup);
3129 reggroup_add (gdbarch, float_reggroup);
3130 reggroup_add (gdbarch, system_reggroup);
3131 reggroup_add (gdbarch, vector_reggroup);
3132 reggroup_add (gdbarch, all_reggroup);
3133 reggroup_add (gdbarch, save_reggroup);
3134 reggroup_add (gdbarch, restore_reggroup);
3135 }
3136
3137 /* Implement the "cannot_store_register" gdbarch method. */
3138
3139 static int
3140 aarch64_cannot_store_register (struct gdbarch *gdbarch, int regnum)
3141 {
3142 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3143
3144 if (!tdep->has_pauth ())
3145 return 0;
3146
3147 /* Pointer authentication registers are read-only. */
3148 return (regnum == AARCH64_PAUTH_DMASK_REGNUM (tdep->pauth_reg_base)
3149 || regnum == AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base));
3150 }
3151
3152 /* Initialize the current architecture based on INFO. If possible,
3153 re-use an architecture from ARCHES, which is a list of
3154 architectures already created during this debugging session.
3155
3156 Called e.g. at program startup, when reading a core file, and when
3157 reading a binary file. */
3158
3159 static struct gdbarch *
3160 aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
3161 {
3162 const struct tdesc_feature *feature_core, *feature_fpu, *feature_sve;
3163 const struct tdesc_feature *feature_pauth;
3164 bool valid_p = true;
3165 int i, num_regs = 0, num_pseudo_regs = 0;
3166 int first_pauth_regnum = -1, pauth_ra_state_offset = -1;
3167
3168 /* Use the vector length passed via the target info. Here -1 is used for no
3169 SVE, and 0 is unset. If unset then use the vector length from the existing
3170 tdesc. */
3171 uint64_t vq = 0;
3172 if (info.id == (int *) -1)
3173 vq = 0;
3174 else if (info.id != 0)
3175 vq = (uint64_t) info.id;
3176 else
3177 vq = aarch64_get_tdesc_vq (info.target_desc);
3178
3179 if (vq > AARCH64_MAX_SVE_VQ)
3180 internal_error (__FILE__, __LINE__, _("VQ out of bounds: %s (max %d)"),
3181 pulongest (vq), AARCH64_MAX_SVE_VQ);
3182
3183 /* If there is already a candidate, use it. */
3184 for (gdbarch_list *best_arch = gdbarch_list_lookup_by_info (arches, &info);
3185 best_arch != nullptr;
3186 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
3187 {
3188 struct gdbarch_tdep *tdep = gdbarch_tdep (best_arch->gdbarch);
3189 if (tdep && tdep->vq == vq)
3190 return best_arch->gdbarch;
3191 }
3192
3193 /* Ensure we always have a target descriptor, and that it is for the given VQ
3194 value. */
3195 const struct target_desc *tdesc = info.target_desc;
3196 if (!tdesc_has_registers (tdesc) || vq != aarch64_get_tdesc_vq (tdesc))
3197 tdesc = aarch64_read_description (vq, false);
3198 gdb_assert (tdesc);
3199
3200 feature_core = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");
3201 feature_fpu = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
3202 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
3203 feature_pauth = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.pauth");
3204
3205 if (feature_core == nullptr)
3206 return nullptr;
3207
3208 struct tdesc_arch_data *tdesc_data = tdesc_data_alloc ();
3209
3210 /* Validate the description provides the mandatory core R registers
3211 and allocate their numbers. */
3212 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
3213 valid_p &= tdesc_numbered_register (feature_core, tdesc_data,
3214 AARCH64_X0_REGNUM + i,
3215 aarch64_r_register_names[i]);
3216
3217 num_regs = AARCH64_X0_REGNUM + i;
3218
3219 /* Add the V registers. */
3220 if (feature_fpu != nullptr)
3221 {
3222 if (feature_sve != nullptr)
3223 error (_("Program contains both fpu and SVE features."));
3224
3225 /* Validate the description provides the mandatory V registers
3226 and allocate their numbers. */
3227 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
3228 valid_p &= tdesc_numbered_register (feature_fpu, tdesc_data,
3229 AARCH64_V0_REGNUM + i,
3230 aarch64_v_register_names[i]);
3231
3232 num_regs = AARCH64_V0_REGNUM + i;
3233 }
3234
3235 /* Add the SVE registers. */
3236 if (feature_sve != nullptr)
3237 {
3238 /* Validate the description provides the mandatory SVE registers
3239 and allocate their numbers. */
3240 for (i = 0; i < ARRAY_SIZE (aarch64_sve_register_names); i++)
3241 valid_p &= tdesc_numbered_register (feature_sve, tdesc_data,
3242 AARCH64_SVE_Z0_REGNUM + i,
3243 aarch64_sve_register_names[i]);
3244
3245 num_regs = AARCH64_SVE_Z0_REGNUM + i;
3246 num_pseudo_regs += 32; /* add the Vn register pseudos. */
3247 }
3248
3249 if (feature_fpu != nullptr || feature_sve != nullptr)
3250 {
3251 num_pseudo_regs += 32; /* add the Qn scalar register pseudos */
3252 num_pseudo_regs += 32; /* add the Dn scalar register pseudos */
3253 num_pseudo_regs += 32; /* add the Sn scalar register pseudos */
3254 num_pseudo_regs += 32; /* add the Hn scalar register pseudos */
3255 num_pseudo_regs += 32; /* add the Bn scalar register pseudos */
3256 }
3257
3258 /* Add the pauth registers. */
3259 if (feature_pauth != NULL)
3260 {
3261 first_pauth_regnum = num_regs;
3262 pauth_ra_state_offset = num_pseudo_regs;
3263 /* Validate the descriptor provides the mandatory PAUTH registers and
3264 allocate their numbers. */
3265 for (i = 0; i < ARRAY_SIZE (aarch64_pauth_register_names); i++)
3266 valid_p &= tdesc_numbered_register (feature_pauth, tdesc_data,
3267 first_pauth_regnum + i,
3268 aarch64_pauth_register_names[i]);
3269
3270 num_regs += i;
3271 num_pseudo_regs += 1; /* Count RA_STATE pseudo register. */
3272 }
3273
3274 if (!valid_p)
3275 {
3276 tdesc_data_cleanup (tdesc_data);
3277 return nullptr;
3278 }
3279
3280 /* AArch64 code is always little-endian. */
3281 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
3282
3283 struct gdbarch_tdep *tdep = XCNEW (struct gdbarch_tdep);
3284 struct gdbarch *gdbarch = gdbarch_alloc (&info, tdep);
3285
3286 /* This should be low enough for everything. */
3287 tdep->lowest_pc = 0x20;
3288 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
3289 tdep->jb_elt_size = 8;
3290 tdep->vq = vq;
3291 tdep->pauth_reg_base = first_pauth_regnum;
3292 tdep->pauth_ra_state_regnum = (feature_pauth == NULL) ? -1
3293 : pauth_ra_state_offset + num_regs;
3294
3295 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
3296 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
3297
3298 /* Advance PC across function entry code. */
3299 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
3300
3301 /* The stack grows downward. */
3302 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
3303
3304 /* Breakpoint manipulation. */
3305 set_gdbarch_breakpoint_kind_from_pc (gdbarch,
3306 aarch64_breakpoint::kind_from_pc);
3307 set_gdbarch_sw_breakpoint_from_kind (gdbarch,
3308 aarch64_breakpoint::bp_from_kind);
3309 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
3310 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
3311
3312 /* Information about registers, etc. */
3313 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
3314 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
3315 set_gdbarch_num_regs (gdbarch, num_regs);
3316
3317 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
3318 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
3319 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
3320 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
3321 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
3322 set_tdesc_pseudo_register_reggroup_p (gdbarch,
3323 aarch64_pseudo_register_reggroup_p);
3324 set_gdbarch_cannot_store_register (gdbarch, aarch64_cannot_store_register);
3325
3326 /* ABI */
3327 set_gdbarch_short_bit (gdbarch, 16);
3328 set_gdbarch_int_bit (gdbarch, 32);
3329 set_gdbarch_float_bit (gdbarch, 32);
3330 set_gdbarch_double_bit (gdbarch, 64);
3331 set_gdbarch_long_double_bit (gdbarch, 128);
3332 set_gdbarch_long_bit (gdbarch, 64);
3333 set_gdbarch_long_long_bit (gdbarch, 64);
3334 set_gdbarch_ptr_bit (gdbarch, 64);
3335 set_gdbarch_char_signed (gdbarch, 0);
3336 set_gdbarch_wchar_signed (gdbarch, 0);
3337 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
3338 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
3339 set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);
3340 set_gdbarch_type_align (gdbarch, aarch64_type_align);
3341
3342 /* Internal <-> external register number maps. */
3343 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
3344
3345 /* Returning results. */
3346 set_gdbarch_return_value (gdbarch, aarch64_return_value);
3347
3348 /* Disassembly. */
3349 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
3350
3351 /* Virtual tables. */
3352 set_gdbarch_vbit_in_delta (gdbarch, 1);
3353
3354 /* Register architecture. */
3355 aarch64_add_reggroups (gdbarch);
3356
3357 /* Hook in the ABI-specific overrides, if they have been registered. */
3358 info.target_desc = tdesc;
3359 info.tdesc_data = tdesc_data;
3360 gdbarch_init_osabi (info, gdbarch);
3361
3362 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
3363 /* Register DWARF CFA vendor handler. */
3364 set_gdbarch_execute_dwarf_cfa_vendor_op (gdbarch,
3365 aarch64_execute_dwarf_cfa_vendor_op);
3366
3367 /* Add some default predicates. */
3368 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
3369 dwarf2_append_unwinders (gdbarch);
3370 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
3371
3372 frame_base_set_default (gdbarch, &aarch64_normal_base);
3373
3374 /* Now we have tuned the configuration, set a few final things,
3375 based on what the OS ABI has told us. */
3376
3377 if (tdep->jb_pc >= 0)
3378 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
3379
3380 set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);
3381
3382 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
3383
3384 /* Add standard register aliases. */
3385 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
3386 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
3387 value_of_aarch64_user_reg,
3388 &aarch64_register_aliases[i].regnum);
3389
3390 register_aarch64_ravenscar_ops (gdbarch);
3391
3392 return gdbarch;
3393 }
3394
3395 static void
3396 aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
3397 {
3398 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3399
3400 if (tdep == NULL)
3401 return;
3402
3403 fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
3404 paddress (gdbarch, tdep->lowest_pc));
3405 }
3406
3407 #if GDB_SELF_TEST
3408 namespace selftests
3409 {
3410 static void aarch64_process_record_test (void);
3411 }
3412 #endif
3413
3414 void
3415 _initialize_aarch64_tdep (void)
3416 {
3417 gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
3418 aarch64_dump_tdep);
3419
3420 /* Debug this file's internals. */
3421 add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
3422 Set AArch64 debugging."), _("\
3423 Show AArch64 debugging."), _("\
3424 When on, AArch64 specific debugging is enabled."),
3425 NULL,
3426 show_aarch64_debug,
3427 &setdebuglist, &showdebuglist);
3428
3429 #if GDB_SELF_TEST
3430 selftests::register_test ("aarch64-analyze-prologue",
3431 selftests::aarch64_analyze_prologue_test);
3432 selftests::register_test ("aarch64-process-record",
3433 selftests::aarch64_process_record_test);
3434 #endif
3435 }
3436
3437 /* AArch64 process record-replay related structures, defines etc. */
3438
3439 #define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
3440 do \
3441 { \
3442 unsigned int reg_len = LENGTH; \
3443 if (reg_len) \
3444 { \
3445 REGS = XNEWVEC (uint32_t, reg_len); \
3446 memcpy (&REGS[0], &RECORD_BUF[0], sizeof (uint32_t) * LENGTH); \
3447 } \
3448 } \
3449 while (0)
3450
3451 #define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
3452 do \
3453 { \
3454 unsigned int mem_len = LENGTH; \
3455 if (mem_len) \
3456 { \
3457 MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
3458 memcpy (&MEMS->len, &RECORD_BUF[0], \
3459 sizeof (struct aarch64_mem_r) * LENGTH); \
3460 } \
3461 } \
3462 while (0)
3463
3464 /* AArch64 record/replay structures and enumerations. */
3465
3466 struct aarch64_mem_r
3467 {
3468 uint64_t len; /* Record length. */
3469 uint64_t addr; /* Memory address. */
3470 };
3471
3472 enum aarch64_record_result
3473 {
3474 AARCH64_RECORD_SUCCESS,
3475 AARCH64_RECORD_UNSUPPORTED,
3476 AARCH64_RECORD_UNKNOWN
3477 };
3478
3479 typedef struct insn_decode_record_t
3480 {
3481 struct gdbarch *gdbarch;
3482 struct regcache *regcache;
3483 CORE_ADDR this_addr; /* Address of insn to be recorded. */
3484 uint32_t aarch64_insn; /* Insn to be recorded. */
3485 uint32_t mem_rec_count; /* Count of memory records. */
3486 uint32_t reg_rec_count; /* Count of register records. */
3487 uint32_t *aarch64_regs; /* Registers to be recorded. */
3488 struct aarch64_mem_r *aarch64_mems; /* Memory locations to be recorded. */
3489 } insn_decode_record;
3490
3491 /* Record handler for data processing - register instructions. */
3492
3493 static unsigned int
3494 aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
3495 {
3496 uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
3497 uint32_t record_buf[4];
3498
3499 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3500 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3501 insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);
3502
3503 if (!bit (aarch64_insn_r->aarch64_insn, 28))
3504 {
3505 uint8_t setflags;
3506
3507 /* Logical (shifted register). */
3508 if (insn_bits24_27 == 0x0a)
3509 setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
3510 /* Add/subtract. */
3511 else if (insn_bits24_27 == 0x0b)
3512 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3513 else
3514 return AARCH64_RECORD_UNKNOWN;
3515
3516 record_buf[0] = reg_rd;
3517 aarch64_insn_r->reg_rec_count = 1;
3518 if (setflags)
3519 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3520 }
3521 else
3522 {
3523 if (insn_bits24_27 == 0x0b)
3524 {
3525 /* Data-processing (3 source). */
3526 record_buf[0] = reg_rd;
3527 aarch64_insn_r->reg_rec_count = 1;
3528 }
3529 else if (insn_bits24_27 == 0x0a)
3530 {
3531 if (insn_bits21_23 == 0x00)
3532 {
3533 /* Add/subtract (with carry). */
3534 record_buf[0] = reg_rd;
3535 aarch64_insn_r->reg_rec_count = 1;
3536 if (bit (aarch64_insn_r->aarch64_insn, 29))
3537 {
3538 record_buf[1] = AARCH64_CPSR_REGNUM;
3539 aarch64_insn_r->reg_rec_count = 2;
3540 }
3541 }
3542 else if (insn_bits21_23 == 0x02)
3543 {
3544 /* Conditional compare (register) and conditional compare
3545 (immediate) instructions. */
3546 record_buf[0] = AARCH64_CPSR_REGNUM;
3547 aarch64_insn_r->reg_rec_count = 1;
3548 }
3549 else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
3550 {
3551 /* Conditional select.  */
3552 /* Data-processing (2 source). */
3553 /* Data-processing (1 source). */
3554 record_buf[0] = reg_rd;
3555 aarch64_insn_r->reg_rec_count = 1;
3556 }
3557 else
3558 return AARCH64_RECORD_UNKNOWN;
3559 }
3560 }
3561
3562 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3563 record_buf);
3564 return AARCH64_RECORD_SUCCESS;
3565 }
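
/* Worked example for the handler above (illustrative):

     adds x0, x1, x2   -- bits 24-27 == 0x0b, bit 28 clear, S bit set

   records Rd (x0) and, because the S bit (29) is set, CPSR as well,
   giving reg_rec_count == 2.  A plain 'add x0, x1, x2' records only
   x0.  */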
3566
3567 /* Record handler for data processing - immediate instructions. */
3568
3569 static unsigned int
3570 aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
3571 {
3572 uint8_t reg_rd, insn_bit23, insn_bits24_27, setflags;
3573 uint32_t record_buf[4];
3574
3575 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3576 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3577 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3578
3579 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
3580 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
3581 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
3582 {
3583 record_buf[0] = reg_rd;
3584 aarch64_insn_r->reg_rec_count = 1;
3585 }
3586 else if (insn_bits24_27 == 0x01)
3587 {
3588 /* Add/Subtract (immediate). */
3589 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3590 record_buf[0] = reg_rd;
3591 aarch64_insn_r->reg_rec_count = 1;
3592 if (setflags)
3593 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3594 }
3595 else if (insn_bits24_27 == 0x02 && !insn_bit23)
3596 {
3597 /* Logical (immediate). */
3598 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
3599 record_buf[0] = reg_rd;
3600 aarch64_insn_r->reg_rec_count = 1;
3601 if (setflags)
3602 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3603 }
3604 else
3605 return AARCH64_RECORD_UNKNOWN;
3606
3607 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3608 record_buf);
3609 return AARCH64_RECORD_SUCCESS;
3610 }
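
/* For instance (illustrative): 'movz x5, #1' is move wide (immediate)
   -- bits 24-27 == 0x02 with bit 23 set -- so only x5 is recorded,
   while 'subs x5, x5, #1' is add/subtract (immediate) with the S bit
   set, so both x5 and CPSR are recorded.  */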
3611
3612 /* Record handler for branch, exception generation and system instructions. */
3613
3614 static unsigned int
3615 aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
3616 {
3617 struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
3618 uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
3619 uint32_t record_buf[4];
3620
3621 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3622 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3623 insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3624
3625 if (insn_bits28_31 == 0x0d)
3626 {
3627 /* Exception generation instructions. */
3628 if (insn_bits24_27 == 0x04)
3629 {
3630 if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
3631 && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
3632 && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
3633 {
3634 ULONGEST svc_number;
3635
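/* On AArch64 GNU/Linux the system call number is passed in x8
(raw register 8). */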
3636 regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
3637 &svc_number);
3638 return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
3639 svc_number);
3640 }
3641 else
3642 return AARCH64_RECORD_UNSUPPORTED;
3643 }
3644 /* System instructions. */
3645 else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
3646 {
3647 uint32_t reg_rt, reg_crn;
3648
3649 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3650 reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3651
3652 /* Record Rt for SYSL and MRS instructions. */
3653 if (bit (aarch64_insn_r->aarch64_insn, 21))
3654 {
3655 record_buf[0] = reg_rt;
3656 aarch64_insn_r->reg_rec_count = 1;
3657 }
3658 /* Record the CPSR for HINT and MSR (immediate) instructions. */
3659 else if (reg_crn == 0x02 || reg_crn == 0x04)
3660 {
3661 record_buf[0] = AARCH64_CPSR_REGNUM;
3662 aarch64_insn_r->reg_rec_count = 1;
3663 }
3664 }
3665 /* Unconditional branch (register). */
3666 else if ((insn_bits24_27 & 0x0e) == 0x06)
3667 {
3668 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3669 if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
3670 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3671 }
3672 else
3673 return AARCH64_RECORD_UNKNOWN;
3674 }
3675 /* Unconditional branch (immediate). */
3676 else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
3677 {
3678 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3679 if (bit (aarch64_insn_r->aarch64_insn, 31))
3680 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3681 }
3682 else
3683 /* Compare & branch (immediate), Test & branch (immediate) and
3684 Conditional branch (immediate). */
3685 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3686
3687 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3688 record_buf);
3689 return AARCH64_RECORD_SUCCESS;
3690 }
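
/* A worked example (illustrative, encoding computed by hand): a
   "bl" with zero offset assembles to 0x94000000. Bits 28-31 are
   0x09 and bits 24-27 are 0x04, matching the unconditional branch
   (immediate) case; bit 31 is set, so both the PC and the LR are
   recorded. */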
3691
3692 /* Record handler for advanced SIMD load and store instructions. */
3693
3694 static unsigned int
3695 aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
3696 {
3697 CORE_ADDR address;
3698 uint64_t addr_offset = 0;
3699 uint32_t record_buf[24];
3700 uint64_t record_buf_mem[24];
3701 uint32_t reg_rn, reg_rt;
3702 uint32_t reg_index = 0, mem_index = 0;
3703 uint8_t opcode_bits, size_bits;
3704
3705 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3706 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3707 size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3708 opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3709 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);
3710
3711 if (record_debug)
3712 debug_printf ("Process record: Advanced SIMD load/store\n");
3713
3714 /* Load/store single structure. */
3715 if (bit (aarch64_insn_r->aarch64_insn, 24))
3716 {
3717 uint8_t sindex, scale, selem, esize, replicate = 0;
3718 scale = opcode_bits >> 2;
3719 selem = ((opcode_bits & 0x02)
3720 | bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
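/* scale (opcode bits 14-15) selects the element size, and selem
(opcode bit 13 combined with instruction bit 21) gives the number
of structure elements; the switch below rejects reserved
encodings. */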
3721 switch (scale)
3722 {
3723 case 1:
3724 if (size_bits & 0x01)
3725 return AARCH64_RECORD_UNKNOWN;
3726 break;
3727 case 2:
3728 if ((size_bits >> 1) & 0x01)
3729 return AARCH64_RECORD_UNKNOWN;
3730 if (size_bits & 0x01)
3731 {
3732 if (!((opcode_bits >> 1) & 0x01))
3733 scale = 3;
3734 else
3735 return AARCH64_RECORD_UNKNOWN;
3736 }
3737 break;
3738 case 3:
3739 if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
3740 {
3741 scale = size_bits;
3742 replicate = 1;
3743 break;
3744 }
3745 else
3746 return AARCH64_RECORD_UNKNOWN;
3747 default:
3748 break;
3749 }
3750 esize = 8 << scale;
3751 if (replicate)
3752 for (sindex = 0; sindex < selem; sindex++)
3753 {
3754 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3755 reg_rt = (reg_rt + 1) % 32;
3756 }
3757 else
3758 {
3759 for (sindex = 0; sindex < selem; sindex++)
3760 {
3761 if (bit (aarch64_insn_r->aarch64_insn, 22))
3762 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3763 else
3764 {
3765 record_buf_mem[mem_index++] = esize / 8;
3766 record_buf_mem[mem_index++] = address + addr_offset;
3767 }
3768 addr_offset = addr_offset + (esize / 8);
3769 reg_rt = (reg_rt + 1) % 32;
3770 }
3771 }
3772 }
3773 /* Load/store multiple structure. */
3774 else
3775 {
3776 uint8_t selem, esize, rpt, elements;
3777 uint8_t eindex, rindex;
3778
3779 esize = 8 << size_bits;
3780 if (bit (aarch64_insn_r->aarch64_insn, 30))
3781 elements = 128 / esize;
3782 else
3783 elements = 64 / esize;
3784
3785 switch (opcode_bits)
3786 {
3787 /* LD/ST4 (4 Registers). */
3788 case 0:
3789 rpt = 1;
3790 selem = 4;
3791 break;
3792 /* LD/ST1 (4 Registers). */
3793 case 2:
3794 rpt = 4;
3795 selem = 1;
3796 break;
3797 /* LD/ST3 (3 Registers). */
3798 case 4:
3799 rpt = 1;
3800 selem = 3;
3801 break;
3802 /* LD/ST1 (3 Registers). */
3803 case 6:
3804 rpt = 3;
3805 selem = 1;
3806 break;
3807 /* LD/ST1 (1 Register). */
3808 case 7:
3809 rpt = 1;
3810 selem = 1;
3811 break;
3812 /* LD/ST2 (2 Registers). */
3813 case 8:
3814 rpt = 1;
3815 selem = 2;
3816 break;
3817 /* LD/ST1 (2 Registers). */
3818 case 10:
3819 rpt = 2;
3820 selem = 1;
3821 break;
3822 default:
3823 return AARCH64_RECORD_UNSUPPORTED;
3824 break;
3825 }
3826 for (rindex = 0; rindex < rpt; rindex++)
3827 for (eindex = 0; eindex < elements; eindex++)
3828 {
3829 uint8_t reg_tt, sindex;
3830 reg_tt = (reg_rt + rindex) % 32;
3831 for (sindex = 0; sindex < selem; sindex++)
3832 {
3833 if (bit (aarch64_insn_r->aarch64_insn, 22))
3834 record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
3835 else
3836 {
3837 record_buf_mem[mem_index++] = esize / 8;
3838 record_buf_mem[mem_index++] = address + addr_offset;
3839 }
3840 addr_offset = addr_offset + (esize / 8);
3841 reg_tt = (reg_tt + 1) % 32;
3842 }
3843 }
3844 }
3845
3846 if (bit (aarch64_insn_r->aarch64_insn, 23))
3847 record_buf[reg_index++] = reg_rn;
3848
3849 aarch64_insn_r->reg_rec_count = reg_index;
3850 aarch64_insn_r->mem_rec_count = mem_index / 2;
3851 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3852 record_buf_mem);
3853 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3854 record_buf);
3855 return AARCH64_RECORD_SUCCESS;
3856 }
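
/* A worked example (illustrative, encoding computed by hand):
   "ld1 {v0.8b}, [x0]" assembles to 0x0c407000. Bit 24 is clear, so
   it takes the load/store multiple structure path; opcode 0x7 gives
   rpt = 1 and selem = 1, size 0x0 gives an 8-bit element size, and
   since bit 22 (the load flag) is set, v0 is recorded (once per
   element). The store variant would instead record eight one-byte
   memory ranges starting at the address in x0. */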
3857
3858 /* Record handler for load and store instructions. */
3859
3860 static unsigned int
3861 aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
3862 {
3863 uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
3864 uint8_t insn_bit23, insn_bit21;
3865 uint8_t opc, size_bits, ld_flag, vector_flag;
3866 uint32_t reg_rn, reg_rt, reg_rt2;
3867 uint64_t datasize, offset;
3868 uint32_t record_buf[8];
3869 uint64_t record_buf_mem[8];
3870 CORE_ADDR address;
3871
3872 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3873 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3874 insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
3875 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3876 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3877 ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
3878 vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
3879 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3880 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3881 reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
3882 size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);
3883
3884 /* Load/store exclusive. */
3885 if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
3886 {
3887 if (record_debug)
3888 debug_printf ("Process record: load/store exclusive\n");
3889
3890 if (ld_flag)
3891 {
3892 record_buf[0] = reg_rt;
3893 aarch64_insn_r->reg_rec_count = 1;
3894 if (insn_bit21)
3895 {
3896 record_buf[1] = reg_rt2;
3897 aarch64_insn_r->reg_rec_count = 2;
3898 }
3899 }
3900 else
3901 {
3902 if (insn_bit21)
3903 datasize = (8 << size_bits) * 2;
3904 else
3905 datasize = (8 << size_bits);
3906 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3907 &address);
3908 record_buf_mem[0] = datasize / 8;
3909 record_buf_mem[1] = address;
3910 aarch64_insn_r->mem_rec_count = 1;
3911 if (!insn_bit23)
3912 {
3913 /* Record the status register Rs, written by the store exclusive. */
3914 record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
3915 aarch64_insn_r->reg_rec_count = 1;
3916 }
3917 }
3918 }
3919 /* Load register (literal) instructions. */
3920 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
3921 {
3922 if (record_debug)
3923 debug_printf ("Process record: load register (literal)\n");
3924 if (vector_flag)
3925 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3926 else
3927 record_buf[0] = reg_rt;
3928 aarch64_insn_r->reg_rec_count = 1;
3929 }
3930 /* All types of load/store pair instructions. */
3931 else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
3932 {
3933 if (record_debug)
3934 debug_printf ("Process record: load/store pair\n");
3935
3936 if (ld_flag)
3937 {
3938 if (vector_flag)
3939 {
3940 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3941 record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
3942 }
3943 else
3944 {
3945 record_buf[0] = reg_rt;
3946 record_buf[1] = reg_rt2;
3947 }
3948 aarch64_insn_r->reg_rec_count = 2;
3949 }
3950 else
3951 {
3952 uint16_t imm7_off;
3953 imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
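/* imm7 is a signed offset, scaled below by the access size;
bit 6 is the sign bit. */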
3954 if (!vector_flag)
3955 size_bits = size_bits >> 1;
3956 datasize = 8 << (2 + size_bits);
3957 offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
3958 offset = offset << (2 + size_bits);
3959 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3960 &address);
3961 if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
3962 {
3963 if (imm7_off & 0x40)
3964 address = address - offset;
3965 else
3966 address = address + offset;
3967 }
3968
3969 record_buf_mem[0] = datasize / 8;
3970 record_buf_mem[1] = address;
3971 record_buf_mem[2] = datasize / 8;
3972 record_buf_mem[3] = address + (datasize / 8);
3973 aarch64_insn_r->mem_rec_count = 2;
3974 }
3975 if (bit (aarch64_insn_r->aarch64_insn, 23))
3976 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3977 }
3978 /* Load/store register (unsigned immediate) instructions. */
3979 else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
3980 {
3981 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3982 if (!(opc >> 1))
3983 {
3984 if (opc & 0x01)
3985 ld_flag = 0x01;
3986 else
3987 ld_flag = 0x0;
3988 }
3989 else
3990 {
3991 if (size_bits == 0x3 && vector_flag == 0x0 && opc == 0x2)
3992 {
3993 /* PRFM (immediate) */
3994 return AARCH64_RECORD_SUCCESS;
3995 }
3996 else if (size_bits == 0x2 && vector_flag == 0x0 && opc == 0x2)
3997 {
3998 /* LDRSW (immediate) */
3999 ld_flag = 0x1;
4000 }
4001 else
4002 {
4003 if (opc & 0x01)
4004 ld_flag = 0x01;
4005 else
4006 ld_flag = 0x0;
4007 }
4008 }
4009
4010 if (record_debug)
4011 {
4012 debug_printf ("Process record: load/store (unsigned immediate):"
4013 " size %x V %d opc %x\n", size_bits, vector_flag,
4014 opc);
4015 }
4016
4017 if (!ld_flag)
4018 {
4019 offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
4020 datasize = 8 << size_bits;
4021 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4022 &address);
4023 offset = offset << size_bits;
4024 address = address + offset;
4025
4026 record_buf_mem[0] = datasize >> 3;
4027 record_buf_mem[1] = address;
4028 aarch64_insn_r->mem_rec_count = 1;
4029 }
4030 else
4031 {
4032 if (vector_flag)
4033 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4034 else
4035 record_buf[0] = reg_rt;
4036 aarch64_insn_r->reg_rec_count = 1;
4037 }
4038 }
4039 /* Load/store register (register offset) instructions. */
4040 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
4041 && insn_bits10_11 == 0x02 && insn_bit21)
4042 {
4043 if (record_debug)
4044 debug_printf ("Process record: load/store (register offset)\n");
4045 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
4046 if (!(opc >> 1))
4047 if (opc & 0x01)
4048 ld_flag = 0x01;
4049 else
4050 ld_flag = 0x0;
4051 else
4052 if (size_bits != 0x03)
4053 ld_flag = 0x01;
4054 else
4055 return AARCH64_RECORD_UNKNOWN;
4056
4057 if (!ld_flag)
4058 {
4059 ULONGEST reg_rm_val;
4060
4061 regcache_raw_read_unsigned (aarch64_insn_r->regcache,
4062 bits (aarch64_insn_r->aarch64_insn, 16, 20), &reg_rm_val);
4063 if (bit (aarch64_insn_r->aarch64_insn, 12))
4064 offset = reg_rm_val << size_bits;
4065 else
4066 offset = reg_rm_val;
4067 datasize = 8 << size_bits;
4068 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4069 &address);
4070 address = address + offset;
4071 record_buf_mem[0] = datasize >> 3;
4072 record_buf_mem[1] = address;
4073 aarch64_insn_r->mem_rec_count = 1;
4074 }
4075 else
4076 {
4077 if (vector_flag)
4078 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4079 else
4080 record_buf[0] = reg_rt;
4081 aarch64_insn_r->reg_rec_count = 1;
4082 }
4083 }
4084 /* Load/store register (immediate and unprivileged) instructions. */
4085 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
4086 && !insn_bit21)
4087 {
4088 if (record_debug)
4089 {
4090 debug_printf ("Process record: load/store "
4091 "(immediate and unprivileged)\n");
4092 }
4093 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
4094 if (!(opc >> 1))
4095 if (opc & 0x01)
4096 ld_flag = 0x01;
4097 else
4098 ld_flag = 0x0;
4099 else
4100 if (size_bits != 0x03)
4101 ld_flag = 0x01;
4102 else
4103 return AARCH64_RECORD_UNKNOWN;
4104
4105 if (!ld_flag)
4106 {
4107 uint16_t imm9_off;
4108 imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
4109 offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
4110 datasize = 8 << size_bits;
4111 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4112 &address);
4113 if (insn_bits10_11 != 0x01)
4114 {
4115 if (imm9_off & 0x0100)
4116 address = address - offset;
4117 else
4118 address = address + offset;
4119 }
4120 record_buf_mem[0] = datasize >> 3;
4121 record_buf_mem[1] = address;
4122 aarch64_insn_r->mem_rec_count = 1;
4123 }
4124 else
4125 {
4126 if (vector_flag)
4127 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4128 else
4129 record_buf[0] = reg_rt;
4130 aarch64_insn_r->reg_rec_count = 1;
4131 }
4132 if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
4133 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
4134 }
4135 /* Advanced SIMD load/store instructions. */
4136 else
4137 return aarch64_record_asimd_load_store (aarch64_insn_r);
4138
4139 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
4140 record_buf_mem);
4141 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4142 record_buf);
4143 return AARCH64_RECORD_SUCCESS;
4144 }
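
/* A worked example (illustrative, encodings computed by hand):
   "str x0, [x1, #8]" assembles to 0xf9000420 and takes the
   unsigned-immediate path above with opc = 0x0, so an 8-byte memory
   range at x1 + 8 is recorded; "ldr x0, [x1]" (0xf9400020) has
   opc = 0x1, so the destination register x0 is recorded instead. */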
4145
4146 /* Record handler for data processing SIMD and floating point instructions. */
4147
4148 static unsigned int
4149 aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
4150 {
4151 uint8_t insn_bit21, opcode, rmode, reg_rd;
4152 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
4153 uint8_t insn_bits11_14;
4154 uint32_t record_buf[2];
4155
4156 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
4157 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
4158 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
4159 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
4160 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
4161 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
4162 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
4163 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
4164 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
4165
4166 if (record_debug)
4167 debug_printf ("Process record: data processing SIMD/FP: ");
4168
4169 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
4170 {
4171 /* Floating point - fixed point conversion instructions. */
4172 if (!insn_bit21)
4173 {
4174 if (record_debug)
4175 debug_printf ("FP - fixed point conversion");
4176
4177 if ((opcode >> 1) == 0x0 && rmode == 0x03)
4178 record_buf[0] = reg_rd;
4179 else
4180 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4181 }
4182 /* Floating point - conditional compare instructions. */
4183 else if (insn_bits10_11 == 0x01)
4184 {
4185 if (record_debug)
4186 debug_printf ("FP - conditional compare");
4187
4188 record_buf[0] = AARCH64_CPSR_REGNUM;
4189 }
4190 /* Floating point - data processing (2-source) and
4191 conditional select instructions. */
4192 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
4193 {
4194 if (record_debug)
4195 debug_printf ("FP - DP (2-source)");
4196
4197 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4198 }
4199 else if (insn_bits10_11 == 0x00)
4200 {
4201 /* Floating point - immediate instructions. */
4202 if ((insn_bits12_15 & 0x01) == 0x01
4203 || (insn_bits12_15 & 0x07) == 0x04)
4204 {
4205 if (record_debug)
4206 debug_printf ("FP - immediate");
4207 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4208 }
4209 /* Floating point - compare instructions. */
4210 else if ((insn_bits12_15 & 0x03) == 0x02)
4211 {
4212 if (record_debug)
4213 debug_printf ("FP - compare");
4214 record_buf[0] = AARCH64_CPSR_REGNUM;
4215 }
4216 /* Floating point - integer conversions instructions. */
4217 else if (insn_bits12_15 == 0x00)
4218 {
4219 /* Convert float to integer instruction. */
4220 if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
4221 {
4222 if (record_debug)
4223 debug_printf ("float to int conversion");
4224
4225 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4226 }
4227 /* Convert integer to float instruction. */
4228 else if ((opcode >> 1) == 0x01 && !rmode)
4229 {
4230 if (record_debug)
4231 debug_printf ("int to float conversion");
4232
4233 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4234 }
4235 /* Move float to integer instruction. */
4236 else if ((opcode >> 1) == 0x03)
4237 {
4238 if (record_debug)
4239 debug_printf ("move float to int");
4240
4241 if (!(opcode & 0x01))
4242 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4243 else
4244 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4245 }
4246 else
4247 return AARCH64_RECORD_UNKNOWN;
4248 }
4249 else
4250 return AARCH64_RECORD_UNKNOWN;
4251 }
4252 else
4253 return AARCH64_RECORD_UNKNOWN;
4254 }
4255 else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
4256 {
4257 if (record_debug)
4258 debug_printf ("SIMD copy");
4259
4260 /* Advanced SIMD copy instructions. */
4261 if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
4262 && !bit (aarch64_insn_r->aarch64_insn, 15)
4263 && bit (aarch64_insn_r->aarch64_insn, 10))
4264 {
4265 if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
4266 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4267 else
4268 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4269 }
4270 else
4271 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4272 }
4273 /* All remaining floating point or advanced SIMD instructions. */
4274 else
4275 {
4276 if (record_debug)
4277 debug_printf ("all remain");
4278
4279 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4280 }
4281
4282 if (record_debug)
4283 debug_printf ("\n");
4284
4285 aarch64_insn_r->reg_rec_count++;
4286 gdb_assert (aarch64_insn_r->reg_rec_count == 1);
4287 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4288 record_buf);
4289 return AARCH64_RECORD_SUCCESS;
4290 }
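
/* A worked example (illustrative, encoding computed by hand):
   "fmov x0, d1" assembles to 0x9e660020. Bit 21 is set and bits
   10-15 are zero, so it reaches the integer conversions case;
   opcode is 0x6, so (opcode >> 1) == 0x03 selects "move float to
   int", and since opcode & 0x01 is clear the general register x0
   is recorded. */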
4291
4292 /* Decode the instruction type and invoke its record handler. */
4293
4294 static unsigned int
4295 aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
4296 {
4297 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
4298
4299 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
4300 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
4301 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
4302 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
4303
4304 /* Data processing - immediate instructions. */
4305 if (!ins_bit26 && !ins_bit27 && ins_bit28)
4306 return aarch64_record_data_proc_imm (aarch64_insn_r);
4307
4308 /* Branch, exception generation and system instructions. */
4309 if (ins_bit26 && !ins_bit27 && ins_bit28)
4310 return aarch64_record_branch_except_sys (aarch64_insn_r);
4311
4312 /* Load and store instructions. */
4313 if (!ins_bit25 && ins_bit27)
4314 return aarch64_record_load_store (aarch64_insn_r);
4315
4316 /* Data processing - register instructions. */
4317 if (ins_bit25 && !ins_bit26 && ins_bit27)
4318 return aarch64_record_data_proc_reg (aarch64_insn_r);
4319
4320 /* Data processing - SIMD and floating point instructions. */
4321 if (ins_bit25 && ins_bit26 && ins_bit27)
4322 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
4323
4324 return AARCH64_RECORD_UNSUPPORTED;
4325 }
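
/* For reference, the decode table implied by the tests above, with
   bits 28..25 written most-significant first (x = don't care):
   100x is data processing (immediate), 101x is branch/exception/
   system, x1x0 is load/store, x101 is data processing (register),
   and x111 is data processing (SIMD and floating point). */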
4326
4327 /* Cleans up local record registers and memory allocations. */
4328
4329 static void
4330 deallocate_reg_mem (insn_decode_record *record)
4331 {
4332 xfree (record->aarch64_regs);
4333 xfree (record->aarch64_mems);
4334 }
4335
4336 #if GDB_SELF_TEST
4337 namespace selftests {
4338
4339 static void
4340 aarch64_process_record_test (void)
4341 {
4342 struct gdbarch_info info;
4343 uint32_t ret;
4344
4345 gdbarch_info_init (&info);
4346 info.bfd_arch_info = bfd_scan_arch ("aarch64");
4347
4348 struct gdbarch *gdbarch = gdbarch_find_by_info (info);
4349 SELF_CHECK (gdbarch != NULL);
4350
4351 insn_decode_record aarch64_record;
4352
4353 memset (&aarch64_record, 0, sizeof (insn_decode_record));
4354 aarch64_record.regcache = NULL;
4355 aarch64_record.this_addr = 0;
4356 aarch64_record.gdbarch = gdbarch;
4357
4358 /* 20 00 80 f9 prfm pldl1keep, [x1] */
4359 aarch64_record.aarch64_insn = 0xf9800020;
4360 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4361 SELF_CHECK (ret == AARCH64_RECORD_SUCCESS);
4362 SELF_CHECK (aarch64_record.reg_rec_count == 0);
4363 SELF_CHECK (aarch64_record.mem_rec_count == 0);
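
/* An extra, illustrative check (not in the original test; encoding
   computed by hand): an ADD (immediate) should record only its
   destination register. */
/* 20 04 00 91 add x0, x1, #1 */
aarch64_record.aarch64_insn = 0x91000420;
ret = aarch64_record_decode_insn_handler (&aarch64_record);
SELF_CHECK (ret == AARCH64_RECORD_SUCCESS);
SELF_CHECK (aarch64_record.reg_rec_count == 1);
SELF_CHECK (aarch64_record.mem_rec_count == 0);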
4364
4365 deallocate_reg_mem (&aarch64_record);
4366 }
4367
4368 } // namespace selftests
4369 #endif /* GDB_SELF_TEST */
4370
4371 /* Parse the current instruction and record the values of the registers and
4372 memory that will be changed by the current instruction to record_arch_list.
4373 Return -1 if something is wrong. */
4374
4375 int
4376 aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
4377 CORE_ADDR insn_addr)
4378 {
4379 uint32_t rec_no = 0;
4380 uint8_t insn_size = 4;
4381 uint32_t ret = 0;
4382 gdb_byte buf[insn_size];
4383 insn_decode_record aarch64_record;
4384
4385 memset (&buf[0], 0, insn_size);
4386 memset (&aarch64_record, 0, sizeof (insn_decode_record));
4387 target_read_memory (insn_addr, &buf[0], insn_size);
4388 aarch64_record.aarch64_insn
4389 = (uint32_t) extract_unsigned_integer (&buf[0],
4390 insn_size,
4391 gdbarch_byte_order (gdbarch));
4392 aarch64_record.regcache = regcache;
4393 aarch64_record.this_addr = insn_addr;
4394 aarch64_record.gdbarch = gdbarch;
4395
4396 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4397 if (ret == AARCH64_RECORD_UNSUPPORTED)
4398 {
4399 printf_unfiltered (_("Process record does not support instruction "
4400 "0x%x at address %s.\n"),
4401 aarch64_record.aarch64_insn,
4402 paddress (gdbarch, insn_addr));
4403 ret = -1;
4404 }
4405
4406 if (ret == 0)
4407 {
4408 /* Record registers. */
4409 record_full_arch_list_add_reg (aarch64_record.regcache,
4410 AARCH64_PC_REGNUM);
4411 /* Always record register CPSR. */
4412 record_full_arch_list_add_reg (aarch64_record.regcache,
4413 AARCH64_CPSR_REGNUM);
4414 if (aarch64_record.aarch64_regs)
4415 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
4416 if (record_full_arch_list_add_reg (aarch64_record.regcache,
4417 aarch64_record.aarch64_regs[rec_no]))
4418 ret = -1;
4419
4420 /* Record memories. */
4421 if (aarch64_record.aarch64_mems)
4422 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
4423 if (record_full_arch_list_add_mem
4424 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
4425 aarch64_record.aarch64_mems[rec_no].len))
4426 ret = -1;
4427
4428 if (record_full_arch_list_add_end ())
4429 ret = -1;
4430 }
4431
4432 deallocate_reg_mem (&aarch64_record);
4433 return ret;
4434 }