Rename gdb exception types
deliverable/binutils-gdb.git: gdb/aarch64-tdep.c
/* Common target dependent code for GDB on AArch64 systems.

   Copyright (C) 2009-2019 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"

#include "frame.h"
#include "inferior.h"
#include "gdbcmd.h"
#include "gdbcore.h"
#include "dis-asm.h"
#include "regcache.h"
#include "reggroups.h"
#include "value.h"
#include "arch-utils.h"
#include "osabi.h"
#include "frame-unwind.h"
#include "frame-base.h"
#include "trad-frame.h"
#include "objfiles.h"
#include "dwarf2.h"
#include "dwarf2-frame.h"
#include "gdbtypes.h"
#include "prologue-value.h"
#include "target-descriptions.h"
#include "user-regs.h"
#include "language.h"
#include "infcall.h"
#include "ax.h"
#include "ax-gdb.h"
#include "common/selftest.h"

#include "aarch64-tdep.h"
#include "aarch64-ravenscar-thread.h"

#include "elf-bfd.h"
#include "elf/aarch64.h"

#include "common/vec.h"

#include "record.h"
#include "record-full.h"
#include "arch/aarch64-insn.h"

#include "opcode/aarch64.h"
#include <algorithm>

#define submask(x) ((1L << ((x) + 1)) - 1)
#define bit(obj,st) (((obj) >> (st)) & 1)
#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
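
/* As a worked illustration (values chosen for this comment only):
   0x91004041 encodes "add x1, x2, #16", so bits (0x91004041, 0, 4)
   yields the Rd field (1), bits (0x91004041, 5, 9) the Rn field (2),
   and bit (0x91004041, 31) the 64-bit size flag; submask (4) is 0x1f.  */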

/* A Homogeneous Floating-Point or Short-Vector Aggregate may have at most
   four members.  */
#define HA_MAX_NUM_FLDS		4

/* All possible aarch64 target descriptors.  */
struct target_desc *tdesc_aarch64_list[AARCH64_MAX_SVE_VQ + 1][2/*pauth*/];

/* The standard register names, and all the valid aliases for them.  */
static const struct
{
  const char *const name;
  int regnum;
} aarch64_register_aliases[] =
{
  /* 64-bit register names.  */
  {"fp", AARCH64_FP_REGNUM},
  {"lr", AARCH64_LR_REGNUM},
  {"sp", AARCH64_SP_REGNUM},

  /* 32-bit register names.  */
  {"w0", AARCH64_X0_REGNUM + 0},
  {"w1", AARCH64_X0_REGNUM + 1},
  {"w2", AARCH64_X0_REGNUM + 2},
  {"w3", AARCH64_X0_REGNUM + 3},
  {"w4", AARCH64_X0_REGNUM + 4},
  {"w5", AARCH64_X0_REGNUM + 5},
  {"w6", AARCH64_X0_REGNUM + 6},
  {"w7", AARCH64_X0_REGNUM + 7},
  {"w8", AARCH64_X0_REGNUM + 8},
  {"w9", AARCH64_X0_REGNUM + 9},
  {"w10", AARCH64_X0_REGNUM + 10},
  {"w11", AARCH64_X0_REGNUM + 11},
  {"w12", AARCH64_X0_REGNUM + 12},
  {"w13", AARCH64_X0_REGNUM + 13},
  {"w14", AARCH64_X0_REGNUM + 14},
  {"w15", AARCH64_X0_REGNUM + 15},
  {"w16", AARCH64_X0_REGNUM + 16},
  {"w17", AARCH64_X0_REGNUM + 17},
  {"w18", AARCH64_X0_REGNUM + 18},
  {"w19", AARCH64_X0_REGNUM + 19},
  {"w20", AARCH64_X0_REGNUM + 20},
  {"w21", AARCH64_X0_REGNUM + 21},
  {"w22", AARCH64_X0_REGNUM + 22},
  {"w23", AARCH64_X0_REGNUM + 23},
  {"w24", AARCH64_X0_REGNUM + 24},
  {"w25", AARCH64_X0_REGNUM + 25},
  {"w26", AARCH64_X0_REGNUM + 26},
  {"w27", AARCH64_X0_REGNUM + 27},
  {"w28", AARCH64_X0_REGNUM + 28},
  {"w29", AARCH64_X0_REGNUM + 29},
  {"w30", AARCH64_X0_REGNUM + 30},

  /* specials */
  {"ip0", AARCH64_X0_REGNUM + 16},
  {"ip1", AARCH64_X0_REGNUM + 17}
};

/* The required core 'R' registers.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM!  */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  "pc", "cpsr"
};

/* The FP/SIMD 'V' registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM!  */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  "fpsr",
  "fpcr"
};

/* The SVE 'Z' and 'P' registers.  */
static const char *const aarch64_sve_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_SVE_Z0_REGNUM!  */
  "z0", "z1", "z2", "z3",
  "z4", "z5", "z6", "z7",
  "z8", "z9", "z10", "z11",
  "z12", "z13", "z14", "z15",
  "z16", "z17", "z18", "z19",
  "z20", "z21", "z22", "z23",
  "z24", "z25", "z26", "z27",
  "z28", "z29", "z30", "z31",
  "fpsr", "fpcr",
  "p0", "p1", "p2", "p3",
  "p4", "p5", "p6", "p7",
  "p8", "p9", "p10", "p11",
  "p12", "p13", "p14", "p15",
  "ffr", "vg"
};

static const char *const aarch64_pauth_register_names[] =
{
  /* Authentication mask for data pointer.  */
  "pauth_dmask",
  /* Authentication mask for code pointer.  */
  "pauth_cmask"
};

/* AArch64 prologue cache structure.  */
struct aarch64_prologue_cache
{
  /* The program counter at the start of the function.  It is used to
     identify this frame as a prologue frame.  */
  CORE_ADDR func;

  /* The program counter at the time this frame was created; i.e. where
     this function was called from.  It is used to identify this frame as a
     stub frame.  */
  CORE_ADDR prev_pc;

  /* The stack pointer at the time this frame was created; i.e. the
     caller's stack pointer when this function was called.  It is used
     to identify this frame.  */
  CORE_ADDR prev_sp;

  /* Is the target available to read from?  */
  int available_p;

  /* The frame base for this frame is just prev_sp - frame size.
     FRAMESIZE is the distance from the frame pointer to the
     initial stack pointer.  */
  int framesize;

  /* The register used to hold the frame pointer for this frame.  */
  int framereg;

  /* Saved register offsets.  */
  struct trad_frame_saved_reg *saved_regs;
};

static void
show_aarch64_debug (struct ui_file *file, int from_tty,
		    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
}

namespace {

/* Abstract instruction reader.  */

class abstract_instruction_reader
{
public:
  /* Read in one instruction.  */
  virtual ULONGEST read (CORE_ADDR memaddr, int len,
			 enum bfd_endian byte_order) = 0;
};

/* Instruction reader from real target.  */

class instruction_reader : public abstract_instruction_reader
{
public:
  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    override
  {
    return read_code_unsigned_integer (memaddr, len, byte_order);
  }
};

} // namespace

/* If address signing is enabled, mask off the signature bits from ADDR, using
   the register values in THIS_FRAME.  */

static CORE_ADDR
aarch64_frame_unmask_address (struct gdbarch_tdep *tdep,
			      struct frame_info *this_frame,
			      CORE_ADDR addr)
{
  if (tdep->has_pauth ()
      && frame_unwind_register_unsigned (this_frame,
					 tdep->pauth_ra_state_regnum))
    {
      int cmask_num = AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base);
      CORE_ADDR cmask = frame_unwind_register_unsigned (this_frame, cmask_num);
      addr = addr & ~cmask;
    }

  return addr;
}
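
/* Worked example (illustrative values only): if the unwound CMASK
   register held 0x007f000000000000, a signed LR value of
   0x002a0000004005f0 would be unmasked to 0x00000000004005f0,
   recovering the plain return address.  */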

/* Analyze a prologue, looking for a recognizable stack frame
   and frame pointer.  Scan until we encounter a store that could
   clobber the stack frame unexpectedly, or an unknown instruction.  */

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
			  CORE_ADDR start, CORE_ADDR limit,
			  struct aarch64_prologue_cache *cache,
			  abstract_instruction_reader& reader)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  int i;
  /* Track X registers and D registers in prologue.  */
  pv_t regs[AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT];

  for (i = 0; i < AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT; i++)
    regs[i] = pv_register (i, 0);
  pv_area stack (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));

  for (; start < limit; start += 4)
    {
      uint32_t insn;
      aarch64_inst inst;

      insn = reader.read (start, 4, byte_order_for_code);

      if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
	break;

      if (inst.opcode->iclass == addsub_imm
	  && (inst.opcode->op == OP_ADD
	      || strcmp ("sub", inst.opcode->name) == 0))
	{
	  unsigned rd = inst.operands[0].reg.regno;
	  unsigned rn = inst.operands[1].reg.regno;

	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
	  gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM);

	  if (inst.opcode->op == OP_ADD)
	    {
	      regs[rd] = pv_add_constant (regs[rn],
					  inst.operands[2].imm.value);
	    }
	  else
	    {
	      regs[rd] = pv_add_constant (regs[rn],
					  -inst.operands[2].imm.value);
	    }
	}
      else if (inst.opcode->iclass == pcreladdr
	       && inst.operands[1].type == AARCH64_OPND_ADDR_ADRP)
	{
	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);

	  regs[inst.operands[0].reg.regno] = pv_unknown ();
	}
      else if (inst.opcode->iclass == branch_imm)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->iclass == condbranch)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->iclass == branch_reg)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->iclass == compbranch)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->op == OP_MOVZ)
	{
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
	  regs[inst.operands[0].reg.regno] = pv_unknown ();
	}
      else if (inst.opcode->iclass == log_shift
	       && strcmp (inst.opcode->name, "orr") == 0)
	{
	  unsigned rd = inst.operands[0].reg.regno;
	  unsigned rn = inst.operands[1].reg.regno;
	  unsigned rm = inst.operands[2].reg.regno;

	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn);
	  gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT);

	  if (inst.operands[2].shifter.amount == 0
	      && rn == AARCH64_SP_REGNUM)
	    regs[rd] = regs[rm];
	  else
	    {
	      if (aarch64_debug)
		{
		  debug_printf ("aarch64: prologue analysis gave up "
				"addr=%s opcode=0x%x (orr x register)\n",
				core_addr_to_string_nz (start), insn);
		}
	      break;
	    }
	}
      else if (inst.opcode->op == OP_STUR)
	{
	  unsigned rt = inst.operands[0].reg.regno;
	  unsigned rn = inst.operands[1].addr.base_regno;
	  int is64
	    = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);

	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
	  gdb_assert (!inst.operands[1].addr.offset.is_reg);

	  stack.store (pv_add_constant (regs[rn],
					inst.operands[1].addr.offset.imm),
		       is64 ? 8 : 4, regs[rt]);
	}
      else if ((inst.opcode->iclass == ldstpair_off
		|| (inst.opcode->iclass == ldstpair_indexed
		    && inst.operands[2].addr.preind))
	       && strcmp ("stp", inst.opcode->name) == 0)
	{
	  /* STP with addressing mode Pre-indexed and Base register.  */
	  unsigned rt1;
	  unsigned rt2;
	  unsigned rn = inst.operands[2].addr.base_regno;
	  int32_t imm = inst.operands[2].addr.offset.imm;

	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
		      || inst.operands[0].type == AARCH64_OPND_Ft);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2
		      || inst.operands[1].type == AARCH64_OPND_Ft2);
	  gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7);
	  gdb_assert (!inst.operands[2].addr.offset.is_reg);

	  /* If recording this store would invalidate the store area
	     (perhaps because rn is not known) then we should abandon
	     further prologue analysis.  */
	  if (stack.store_would_trash (pv_add_constant (regs[rn], imm)))
	    break;

	  if (stack.store_would_trash (pv_add_constant (regs[rn], imm + 8)))
	    break;

	  rt1 = inst.operands[0].reg.regno;
	  rt2 = inst.operands[1].reg.regno;
	  if (inst.operands[0].type == AARCH64_OPND_Ft)
	    {
	      /* Only the bottom 64 bits of each V register (the D
		 register) need to be preserved.  */
	      gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
	      rt1 += AARCH64_X_REGISTER_COUNT;
	      rt2 += AARCH64_X_REGISTER_COUNT;
	    }

	  stack.store (pv_add_constant (regs[rn], imm), 8,
		       regs[rt1]);
	  stack.store (pv_add_constant (regs[rn], imm + 8), 8,
		       regs[rt2]);

	  if (inst.operands[2].addr.writeback)
	    regs[rn] = pv_add_constant (regs[rn], imm);

	}
      else if ((inst.opcode->iclass == ldst_imm9 /* Signed immediate.  */
		|| (inst.opcode->iclass == ldst_pos /* Unsigned immediate.  */
		    && (inst.opcode->op == OP_STR_POS
			|| inst.opcode->op == OP_STRF_POS)))
	       && inst.operands[1].addr.base_regno == AARCH64_SP_REGNUM
	       && strcmp ("str", inst.opcode->name) == 0)
	{
	  /* STR (immediate) */
	  unsigned int rt = inst.operands[0].reg.regno;
	  int32_t imm = inst.operands[1].addr.offset.imm;
	  unsigned int rn = inst.operands[1].addr.base_regno;
	  bool is64
	    = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
		      || inst.operands[0].type == AARCH64_OPND_Ft);

	  if (inst.operands[0].type == AARCH64_OPND_Ft)
	    {
	      /* Only the bottom 64 bits of each V register (the D
		 register) need to be preserved.  */
	      gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
	      rt += AARCH64_X_REGISTER_COUNT;
	    }

	  stack.store (pv_add_constant (regs[rn], imm),
		       is64 ? 8 : 4, regs[rt]);
	  if (inst.operands[1].addr.writeback)
	    regs[rn] = pv_add_constant (regs[rn], imm);
	}
      else if (inst.opcode->iclass == testbranch)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->iclass == ic_system)
	{
	  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
	  int ra_state_val = 0;

	  if (insn == 0xd503233f /* paciasp.  */
	      || insn == 0xd503237f  /* pacibsp.  */)
	    {
	      /* Return addresses are mangled.  */
	      ra_state_val = 1;
	    }
	  else if (insn == 0xd50323bf /* autiasp.  */
		   || insn == 0xd50323ff  /* autibsp.  */)
	    {
	      /* Return addresses are not mangled.  */
	      ra_state_val = 0;
	    }
	  else
	    {
	      if (aarch64_debug)
		debug_printf ("aarch64: prologue analysis gave up addr=%s"
			      " opcode=0x%x (iclass)\n",
			      core_addr_to_string_nz (start), insn);
	      break;
	    }

	  if (tdep->has_pauth () && cache != nullptr)
	    trad_frame_set_value (cache->saved_regs,
				  tdep->pauth_ra_state_regnum,
				  ra_state_val);
	}
      else
	{
	  if (aarch64_debug)
	    {
	      debug_printf ("aarch64: prologue analysis gave up addr=%s"
			    " opcode=0x%x\n",
			    core_addr_to_string_nz (start), insn);
	    }
	  break;
	}
    }

  if (cache == NULL)
    return start;

  if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Frame pointer is fp.  Frame size is constant.  */
      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = -regs[AARCH64_FP_REGNUM].k;
    }
  else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Try the stack pointer.  */
      cache->framesize = -regs[AARCH64_SP_REGNUM].k;
      cache->framereg = AARCH64_SP_REGNUM;
    }
  else
    {
      /* We're just out of luck.  We don't know where the frame is.  */
      cache->framereg = -1;
      cache->framesize = 0;
    }

  for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
    {
      CORE_ADDR offset;

      if (stack.find_reg (gdbarch, i, &offset))
	cache->saved_regs[i].addr = offset;
    }

  for (i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
    {
      int regnum = gdbarch_num_regs (gdbarch);
      CORE_ADDR offset;

      if (stack.find_reg (gdbarch, i + AARCH64_X_REGISTER_COUNT,
			  &offset))
	cache->saved_regs[i + regnum + AARCH64_D0_REGNUM].addr = offset;
    }

  return start;
}

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
			  CORE_ADDR start, CORE_ADDR limit,
			  struct aarch64_prologue_cache *cache)
{
  instruction_reader reader;

  return aarch64_analyze_prologue (gdbarch, start, limit, cache,
				   reader);
}
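
/* As an illustration, for the common prologue
     stp x29, x30, [sp, #-32]!
     mov x29, sp
   the analysis above records framereg == AARCH64_FP_REGNUM and
   framesize == 32, with x29 and x30 saved at offsets -32 and -24 from
   the previous SP (the self tests below exercise exactly this kind of
   sequence).  */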

#if GDB_SELF_TEST

namespace selftests {

/* Instruction reader from manually cooked instruction sequences.  */

class instruction_reader_test : public abstract_instruction_reader
{
public:
  template<size_t SIZE>
  explicit instruction_reader_test (const uint32_t (&insns)[SIZE])
    : m_insns (insns), m_insns_size (SIZE)
  {}

  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    override
  {
    SELF_CHECK (len == 4);
    SELF_CHECK (memaddr % 4 == 0);
    SELF_CHECK (memaddr / 4 < m_insns_size);

    return m_insns[memaddr / 4];
  }

private:
  const uint32_t *m_insns;
  size_t m_insns_size;
};

static void
aarch64_analyze_prologue_test (void)
{
  struct gdbarch_info info;

  gdbarch_info_init (&info);
  info.bfd_arch_info = bfd_scan_arch ("aarch64");

  struct gdbarch *gdbarch = gdbarch_find_by_info (info);
  SELF_CHECK (gdbarch != NULL);

  struct aarch64_prologue_cache cache;
  cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);

  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  /* Test the simple prologue in which frame pointer is used.  */
  {
    static const uint32_t insns[] = {
      0xa9af7bfd, /* stp     x29, x30, [sp,#-272]! */
      0x910003fd, /* mov     x29, sp */
      0x97ffffe6, /* bl      0x400580 */
    };
    instruction_reader_test reader (insns);

    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
    SELF_CHECK (end == 4 * 2);

    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 272);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
	if (i == AARCH64_FP_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr == -272);
	else if (i == AARCH64_LR_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr == -264);
	else
	  SELF_CHECK (cache.saved_regs[i].addr == -1);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
	int regnum = gdbarch_num_regs (gdbarch);

	SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
		    == -1);
      }
  }

  /* Test a prologue in which STR is used and frame pointer is not
     used.  */
  {
    static const uint32_t insns[] = {
      0xf81d0ff3, /* str	x19, [sp, #-48]! */
      0xb9002fe0, /* str	w0, [sp, #44] */
      0xf90013e1, /* str	x1, [sp, #32] */
      0xfd000fe0, /* str	d0, [sp, #24] */
      0xaa0203f3, /* mov	x19, x2 */
      0xf94013e0, /* ldr	x0, [sp, #32] */
    };
    instruction_reader_test reader (insns);

    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    SELF_CHECK (end == 4 * 5);

    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 48);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
	if (i == 1)
	  SELF_CHECK (cache.saved_regs[i].addr == -16);
	else if (i == 19)
	  SELF_CHECK (cache.saved_regs[i].addr == -48);
	else
	  SELF_CHECK (cache.saved_regs[i].addr == -1);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
	int regnum = gdbarch_num_regs (gdbarch);

	if (i == 0)
	  SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
		      == -24);
	else
	  SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
		      == -1);
      }
  }

  /* Test a prologue in which there is a return address signing
     instruction.  */
  if (tdep->has_pauth ())
    {
      static const uint32_t insns[] = {
	0xd503233f, /* paciasp */
	0xa9bd7bfd, /* stp	x29, x30, [sp, #-48]! */
	0x910003fd, /* mov	x29, sp */
	0xf801c3f3, /* str	x19, [sp, #28] */
	0xb9401fa0, /* ldr	x19, [x29, #28] */
      };
      instruction_reader_test reader (insns);

      CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache,
						reader);

      SELF_CHECK (end == 4 * 4);
      SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
      SELF_CHECK (cache.framesize == 48);

      for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
	{
	  if (i == 19)
	    SELF_CHECK (cache.saved_regs[i].addr == -20);
	  else if (i == AARCH64_FP_REGNUM)
	    SELF_CHECK (cache.saved_regs[i].addr == -48);
	  else if (i == AARCH64_LR_REGNUM)
	    SELF_CHECK (cache.saved_regs[i].addr == -40);
	  else
	    SELF_CHECK (cache.saved_regs[i].addr == -1);
	}

      if (tdep->has_pauth ())
	{
	  SELF_CHECK (trad_frame_value_p (cache.saved_regs,
					  tdep->pauth_ra_state_regnum));
	  SELF_CHECK (cache.saved_regs[tdep->pauth_ra_state_regnum].addr == 1);
	}
    }
}
} // namespace selftests
#endif /* GDB_SELF_TEST */

/* Implement the "skip_prologue" gdbarch method.  */

static CORE_ADDR
aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  CORE_ADDR func_addr, limit_pc;

  /* See if we can determine the end of the prologue via the symbol
     table.  If so, then return either PC, or the PC after the
     prologue, whichever is greater.  */
  if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
    {
      CORE_ADDR post_prologue_pc
	= skip_prologue_using_sal (gdbarch, func_addr);

      if (post_prologue_pc != 0)
	return std::max (pc, post_prologue_pc);
    }

  /* Can't determine prologue from the symbol table, need to examine
     instructions.  */

  /* Find an upper limit on the function prologue using the debug
     information.  If the debug information could not be used to
     provide that bound, then use an arbitrary large number as the
     upper bound.  */
  limit_pc = skip_prologue_using_sal (gdbarch, pc);
  if (limit_pc == 0)
    limit_pc = pc + 128;	/* Magic.  */

  /* Try disassembling prologue.  */
  return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
}

/* Scan the function prologue for THIS_FRAME and populate the prologue
   cache CACHE.  */

static void
aarch64_scan_prologue (struct frame_info *this_frame,
		       struct aarch64_prologue_cache *cache)
{
  CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
  CORE_ADDR prologue_start;
  CORE_ADDR prologue_end;
  CORE_ADDR prev_pc = get_frame_pc (this_frame);
  struct gdbarch *gdbarch = get_frame_arch (this_frame);

  cache->prev_pc = prev_pc;

  /* Assume we do not find a frame.  */
  cache->framereg = -1;
  cache->framesize = 0;

  if (find_pc_partial_function (block_addr, NULL, &prologue_start,
				&prologue_end))
    {
      struct symtab_and_line sal = find_pc_line (prologue_start, 0);

      if (sal.line == 0)
	{
	  /* No line info so use the current PC.  */
	  prologue_end = prev_pc;
	}
      else if (sal.end < prologue_end)
	{
	  /* The next line begins after the function end.  */
	  prologue_end = sal.end;
	}

      prologue_end = std::min (prologue_end, prev_pc);
      aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
    }
  else
    {
      CORE_ADDR frame_loc;

      frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
      if (frame_loc == 0)
	return;

      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = 16;
      cache->saved_regs[29].addr = 0;
      cache->saved_regs[30].addr = 8;
    }
}

/* Fill in *CACHE with information about the prologue of *THIS_FRAME.  This
   function may throw an exception if the inferior's registers or memory is
   not available.  */

static void
aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
			       struct aarch64_prologue_cache *cache)
{
  CORE_ADDR unwound_fp;
  int reg;

  aarch64_scan_prologue (this_frame, cache);

  if (cache->framereg == -1)
    return;

  unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
  if (unwound_fp == 0)
    return;

  cache->prev_sp = unwound_fp + cache->framesize;

  /* Calculate actual addresses of saved registers using offsets
     determined by aarch64_analyze_prologue.  */
  for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
    if (trad_frame_addr_p (cache->saved_regs, reg))
      cache->saved_regs[reg].addr += cache->prev_sp;

  cache->func = get_frame_func (this_frame);

  cache->available_p = 1;
}

/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  try
    {
      aarch64_make_prologue_cache_1 (this_frame, cache);
    }
  catch (const gdb_exception_error &ex)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
	throw_exception (ex);
    }

  return cache;
}
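
/* Note on the try/catch above: prologue scanning may touch registers or
   memory that are unavailable (e.g. in a trimmed core file), which is
   reported as NOT_AVAILABLE_ERROR.  That case is swallowed so the
   unwinder degrades gracefully (CACHE->available_p stays 0), while any
   other gdb_exception_error is re-thrown unchanged.  */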

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
					   void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  /* Halt the backtrace at "_start".  */
  if (cache->prev_pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
    return UNWIND_OUTERMOST;

  /* We've hit a wall, stop.  */
  if (cache->prev_sp == 0)
    return UNWIND_OUTERMOST;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a normal frame is the current function's starting
   PC and the caller's SP when we were called.  */

static void
aarch64_prologue_this_id (struct frame_info *this_frame,
			  void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    *this_id = frame_id_build_unavailable_stack (cache->func);
  else
    *this_id = frame_id_build (cache->prev_sp, cache->func);
}

/* Implement the "prev_register" frame_unwind method.  */

static struct value *
aarch64_prologue_prev_register (struct frame_info *this_frame,
				void **this_cache, int prev_regnum)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  /* If we are asked to unwind the PC, then we need to return the LR
     instead.  The prologue may save PC, but it will point into this
     frame's prologue, not the next frame's resume location.  */
  if (prev_regnum == AARCH64_PC_REGNUM)
    {
      CORE_ADDR lr;
      struct gdbarch *gdbarch = get_frame_arch (this_frame);
      struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);

      if (tdep->has_pauth ()
	  && trad_frame_value_p (cache->saved_regs,
				 tdep->pauth_ra_state_regnum))
	lr = aarch64_frame_unmask_address (tdep, this_frame, lr);

      return frame_unwind_got_constant (this_frame, prev_regnum, lr);
    }

  /* SP is generally not saved to the stack, but this frame is
     identified by the next frame's stack pointer at the time of the
     call.  The value was already reconstructed into PREV_SP.  */
  /*
         +----------+  ^
         | saved lr |  |
      +->| saved fp |--+
      |  |          |
      |  |          |     <- Previous SP
      |  +----------+
      |  | saved lr |
      +--| saved fp |<- FP
         |          |
         |          |<- SP
         +----------+  */
  if (prev_regnum == AARCH64_SP_REGNUM)
    return frame_unwind_got_constant (this_frame, prev_regnum,
				      cache->prev_sp);

  return trad_frame_get_prev_register (this_frame, cache->saved_regs,
				       prev_regnum);
}

/* AArch64 prologue unwinder.  */
struct frame_unwind aarch64_prologue_unwind =
{
  NORMAL_FRAME,
  aarch64_prologue_frame_unwind_stop_reason,
  aarch64_prologue_this_id,
  aarch64_prologue_prev_register,
  NULL,
  default_frame_sniffer
};

/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  try
    {
      cache->prev_sp = get_frame_register_unsigned (this_frame,
						    AARCH64_SP_REGNUM);
      cache->prev_pc = get_frame_pc (this_frame);
      cache->available_p = 1;
    }
  catch (const gdb_exception_error &ex)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
	throw_exception (ex);
    }

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame,
				       void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a stub frame is the current SP and LR.  */

static void
aarch64_stub_this_id (struct frame_info *this_frame,
		      void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (cache->available_p)
    *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
  else
    *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
}

/* Implement the "sniffer" frame_unwind method.  */

static int
aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
			     struct frame_info *this_frame,
			     void **this_prologue_cache)
{
  CORE_ADDR addr_in_block;
  gdb_byte dummy[4];

  addr_in_block = get_frame_address_in_block (this_frame);
  if (in_plt_section (addr_in_block)
      /* We also use the stub unwinder if the target memory is unreadable
	 to avoid having the prologue unwinder trying to read it.  */
      || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
    return 1;

  return 0;
}

/* AArch64 stub unwinder.  */
struct frame_unwind aarch64_stub_unwind =
{
  NORMAL_FRAME,
  aarch64_stub_frame_unwind_stop_reason,
  aarch64_stub_this_id,
  aarch64_prologue_prev_register,
  NULL,
  aarch64_stub_unwind_sniffer
};

/* Return the frame base address of *THIS_FRAME.  */

static CORE_ADDR
aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  return cache->prev_sp - cache->framesize;
}

/* AArch64 default frame base information.  */
struct frame_base aarch64_normal_base =
{
  &aarch64_prologue_unwind,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base
};

/* Return the value of the REGNUM register in the previous frame of
   *THIS_FRAME.  */

static struct value *
aarch64_dwarf2_prev_register (struct frame_info *this_frame,
			      void **this_cache, int regnum)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (get_frame_arch (this_frame));
  CORE_ADDR lr;

  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      lr = aarch64_frame_unmask_address (tdep, this_frame, lr);
      return frame_unwind_got_constant (this_frame, regnum, lr);

    default:
      internal_error (__FILE__, __LINE__,
		      _("Unexpected register %d"), regnum);
    }
}

static const unsigned char op_lit0 = DW_OP_lit0;
static const unsigned char op_lit1 = DW_OP_lit1;

/* Implement the "init_reg" dwarf2_frame_ops method.  */

static void
aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
			       struct dwarf2_frame_state_reg *reg,
			       struct frame_info *this_frame)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      reg->how = DWARF2_FRAME_REG_FN;
      reg->loc.fn = aarch64_dwarf2_prev_register;
      return;

    case AARCH64_SP_REGNUM:
      reg->how = DWARF2_FRAME_REG_CFA;
      return;
    }

  /* Init pauth registers.  */
  if (tdep->has_pauth ())
    {
      if (regnum == tdep->pauth_ra_state_regnum)
	{
	  /* Initialize RA_STATE to zero.  */
	  reg->how = DWARF2_FRAME_REG_SAVED_VAL_EXP;
	  reg->loc.exp.start = &op_lit0;
	  reg->loc.exp.len = 1;
	  return;
	}
      else if (regnum == AARCH64_PAUTH_DMASK_REGNUM (tdep->pauth_reg_base)
	       || regnum == AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base))
	{
	  reg->how = DWARF2_FRAME_REG_SAME_VALUE;
	  return;
	}
    }
}

/* Implement the execute_dwarf_cfa_vendor_op method.  */

static bool
aarch64_execute_dwarf_cfa_vendor_op (struct gdbarch *gdbarch, gdb_byte op,
				     struct dwarf2_frame_state *fs)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  struct dwarf2_frame_state_reg *ra_state;

  if (tdep->has_pauth () && op == DW_CFA_AARCH64_negate_ra_state)
    {
      /* Allocate RA_STATE column if it's not allocated yet.  */
      fs->regs.alloc_regs (AARCH64_DWARF_PAUTH_RA_STATE + 1);

      /* Toggle the status of RA_STATE between 0 and 1.  */
      ra_state = &(fs->regs.reg[AARCH64_DWARF_PAUTH_RA_STATE]);
      ra_state->how = DWARF2_FRAME_REG_SAVED_VAL_EXP;

      if (ra_state->loc.exp.start == nullptr
	  || ra_state->loc.exp.start == &op_lit0)
	ra_state->loc.exp.start = &op_lit1;
      else
	ra_state->loc.exp.start = &op_lit0;

      ra_state->loc.exp.len = 1;

      return true;
    }

  return false;
}
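
/* For example, CFI that brackets a pointer-signing region
     DW_CFA_AARCH64_negate_ra_state    (emitted after paciasp)
     ...
     DW_CFA_AARCH64_negate_ra_state    (emitted after autiasp)
   leaves RA_STATE evaluating to DW_OP_lit1 between the two ops (return
   address currently signed) and back to DW_OP_lit0 afterwards.  */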

/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

typedef struct
{
  /* Value to pass on stack.  It can be NULL if this item is for stack
     padding.  */
  const gdb_byte *data;

  /* Size in bytes of value to pass on stack.  */
  int len;
} stack_item_t;

DEF_VEC_O (stack_item_t);

/* Return the alignment (in bytes) of the given type.  */

static int
aarch64_type_align (struct type *t)
{
  int n;
  int align;
  int falign;

  t = check_typedef (t);
  switch (TYPE_CODE (t))
    {
    default:
      /* Should never happen.  */
      internal_error (__FILE__, __LINE__, _("unknown type alignment"));
      return 4;

    case TYPE_CODE_PTR:
    case TYPE_CODE_ENUM:
    case TYPE_CODE_INT:
    case TYPE_CODE_FLT:
    case TYPE_CODE_SET:
    case TYPE_CODE_RANGE:
    case TYPE_CODE_BITSTRING:
    case TYPE_CODE_REF:
    case TYPE_CODE_RVALUE_REF:
    case TYPE_CODE_CHAR:
    case TYPE_CODE_BOOL:
      return TYPE_LENGTH (t);

    case TYPE_CODE_ARRAY:
      if (TYPE_VECTOR (t))
	{
	  /* Use the natural alignment for vector types (the same as for
	     the scalar element type), but cap the maximum alignment at
	     128 bits.  */
	  if (TYPE_LENGTH (t) > 16)
	    return 16;
	  else
	    return TYPE_LENGTH (t);
	}
      else
	return aarch64_type_align (TYPE_TARGET_TYPE (t));
    case TYPE_CODE_COMPLEX:
      return aarch64_type_align (TYPE_TARGET_TYPE (t));

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      align = 1;
      for (n = 0; n < TYPE_NFIELDS (t); n++)
	{
	  falign = aarch64_type_align (TYPE_FIELD_TYPE (t, n));
	  if (falign > align)
	    align = falign;
	}
      return align;
    }
}
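
/* For example (hypothetical type): for
     struct { double d; char c; };
   the struct/union arm above returns 8, the largest field alignment,
   while a 32-byte vector type is capped at 16 by the vector arm.  */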

/* Worker function for aapcs_is_vfp_call_or_return_candidate.

   Return the number of registers required, or -1 on failure.

   When encountering a base element, if FUNDAMENTAL_TYPE is not set then set it
   to the element, else fail if the type of this element does not match the
   existing value.  */

static int
aapcs_is_vfp_call_or_return_candidate_1 (struct type *type,
					 struct type **fundamental_type)
{
  if (type == nullptr)
    return -1;

  switch (TYPE_CODE (type))
    {
    case TYPE_CODE_FLT:
      if (TYPE_LENGTH (type) > 16)
	return -1;

      if (*fundamental_type == nullptr)
	*fundamental_type = type;
      else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
	       || TYPE_CODE (type) != TYPE_CODE (*fundamental_type))
	return -1;

      return 1;

    case TYPE_CODE_COMPLEX:
      {
	struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
	if (TYPE_LENGTH (target_type) > 16)
	  return -1;

	if (*fundamental_type == nullptr)
	  *fundamental_type = target_type;
	else if (TYPE_LENGTH (target_type) != TYPE_LENGTH (*fundamental_type)
		 || TYPE_CODE (target_type) != TYPE_CODE (*fundamental_type))
	  return -1;

	return 2;
      }

    case TYPE_CODE_ARRAY:
      {
	if (TYPE_VECTOR (type))
	  {
	    if (TYPE_LENGTH (type) != 8 && TYPE_LENGTH (type) != 16)
	      return -1;

	    if (*fundamental_type == nullptr)
	      *fundamental_type = type;
	    else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
		     || TYPE_CODE (type) != TYPE_CODE (*fundamental_type))
	      return -1;

	    return 1;
	  }
	else
	  {
	    struct type *target_type = TYPE_TARGET_TYPE (type);
	    int count = aapcs_is_vfp_call_or_return_candidate_1
			  (target_type, fundamental_type);

	    if (count == -1)
	      return count;

	    count *= (TYPE_LENGTH (type) / TYPE_LENGTH (target_type));
	    return count;
	  }
      }

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      {
	int count = 0;

	for (int i = 0; i < TYPE_NFIELDS (type); i++)
	  {
	    /* Ignore any static fields.  */
	    if (field_is_static (&TYPE_FIELD (type, i)))
	      continue;

	    struct type *member = check_typedef (TYPE_FIELD_TYPE (type, i));

	    int sub_count = aapcs_is_vfp_call_or_return_candidate_1
			      (member, fundamental_type);
	    if (sub_count == -1)
	      return -1;
	    count += sub_count;
	  }

	/* Ensure there is no padding between the fields (allowing for empty
	   zero length structs).  */
	int ftype_length = (*fundamental_type == nullptr)
			   ? 0 : TYPE_LENGTH (*fundamental_type);
	if (count * ftype_length != TYPE_LENGTH (type))
	  return -1;

	return count;
      }

    default:
      break;
    }

  return -1;
}

/* Return true if an argument, whose type is described by TYPE, can be passed
   or returned in simd/fp registers, provided enough parameter passing
   registers are available.  This is as described in the AAPCS64.

   Upon successful return, *COUNT returns the number of needed registers,
   *FUNDAMENTAL_TYPE contains the type of those registers.

   Candidate as per the AAPCS64 5.4.2.C is either a:
   - float.
   - short-vector.
   - HFA (Homogeneous Floating-point Aggregate, 4.3.5.1).  A Composite type
     where all the members are floats and has at most 4 members.
   - HVA (Homogeneous Short-vector Aggregate, 4.3.5.2).  A Composite type
     where all the members are short vectors and has at most 4 members.
   - Complex (7.1.1)

   Note that HFAs and HVAs can include nested structures and arrays.  */

static bool
aapcs_is_vfp_call_or_return_candidate (struct type *type, int *count,
				       struct type **fundamental_type)
{
  if (type == nullptr)
    return false;

  *fundamental_type = nullptr;

  int ag_count = aapcs_is_vfp_call_or_return_candidate_1 (type,
							  fundamental_type);

  if (ag_count > 0 && ag_count <= HA_MAX_NUM_FLDS)
    {
      *count = ag_count;
      return true;
    }
  else
    return false;
}
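
/* As an illustration, a hypothetical
     struct hfa { float a, b, c; };
   is an HFA: the worker returns a count of 3 with *FUNDAMENTAL_TYPE set
   to float, so the struct can travel in three consecutive V registers.
   Adding an int member, or a fifth float, makes the check fail.  */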

/* AArch64 function call information structure.  */
struct aarch64_call_info
{
  /* the current argument number.  */
  unsigned argnum;

  /* The next general purpose register number, equivalent to NGRN as
     described in the AArch64 Procedure Call Standard.  */
  unsigned ngrn;

  /* The next SIMD and floating point register number, equivalent to
     NSRN as described in the AArch64 Procedure Call Standard.  */
  unsigned nsrn;

  /* The next stacked argument address, equivalent to NSAA as
     described in the AArch64 Procedure Call Standard.  */
  unsigned nsaa;

  /* Stack item vector.  */
  VEC(stack_item_t) *si;
};

/* Pass a value in a sequence of consecutive X registers.  The caller
   is responsible for ensuring sufficient registers are available.  */

static void
pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
	   struct aarch64_call_info *info, struct type *type,
	   struct value *arg)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int len = TYPE_LENGTH (type);
  enum type_code typecode = TYPE_CODE (type);
  int regnum = AARCH64_X0_REGNUM + info->ngrn;
  const bfd_byte *buf = value_contents (arg);

  info->argnum++;

  while (len > 0)
    {
      int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
      CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
						   byte_order);

      /* Adjust sub-word struct/union args when big-endian.  */
      if (byte_order == BFD_ENDIAN_BIG
	  && partial_len < X_REGISTER_SIZE
	  && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
	regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);

      if (aarch64_debug)
	{
	  debug_printf ("arg %d in %s = 0x%s\n", info->argnum,
			gdbarch_register_name (gdbarch, regnum),
			phex (regval, X_REGISTER_SIZE));
	}
      regcache_cooked_write_unsigned (regcache, regnum, regval);
      len -= partial_len;
      buf += partial_len;
      regnum++;
    }
}
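
/* For example (illustrative), a 6-byte struct passed on a big-endian
   target occupies a single X register shifted into the most significant
   bytes: regval <<= (8 - 6) * TARGET_CHAR_BIT.  */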

/* Attempt to marshall a value in a V register.  Return 1 if
   successful, or 0 if insufficient registers are available.  This
   function, unlike the equivalent pass_in_x() function, does not
   handle arguments spread across multiple registers.  */

static int
pass_in_v (struct gdbarch *gdbarch,
	   struct regcache *regcache,
	   struct aarch64_call_info *info,
	   int len, const bfd_byte *buf)
{
  if (info->nsrn < 8)
    {
      int regnum = AARCH64_V0_REGNUM + info->nsrn;
      /* Enough space for a full vector register.  */
      gdb_byte reg[register_size (gdbarch, regnum)];
      gdb_assert (len <= sizeof (reg));

      info->argnum++;
      info->nsrn++;

      memset (reg, 0, sizeof (reg));
      /* PCS C.1, the argument is allocated to the least significant
	 bits of V register.  */
      memcpy (reg, buf, len);
      regcache->cooked_write (regnum, reg);

      if (aarch64_debug)
	{
	  debug_printf ("arg %d in %s\n", info->argnum,
			gdbarch_register_name (gdbarch, regnum));
	}
      return 1;
    }
  info->nsrn = 8;
  return 0;
}
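
/* Usage sketch (illustrative): with INFO->nsrn == 7, an 8-byte double
   lands in the low 64 bits of v7 and nsrn becomes 8; a subsequent call
   falls past the if, pins nsrn at 8, and returns 0 (no register left).  */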

/* Marshall an argument onto the stack.  */

static void
pass_on_stack (struct aarch64_call_info *info, struct type *type,
	       struct value *arg)
{
  const bfd_byte *buf = value_contents (arg);
  int len = TYPE_LENGTH (type);
  int align;
  stack_item_t item;

  info->argnum++;

  align = aarch64_type_align (type);

  /* PCS C.17 The stack should be aligned to the larger of 8 bytes or
     the natural alignment of the argument's type.  */
  align = align_up (align, 8);

  /* The AArch64 PCS requires at most doubleword alignment.  */
  if (align > 16)
    align = 16;

  if (aarch64_debug)
    {
      debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
		    info->nsaa);
    }

  item.len = len;
  item.data = buf;
  VEC_safe_push (stack_item_t, info->si, &item);

  info->nsaa += len;
  if (info->nsaa & (align - 1))
    {
      /* Push stack alignment padding.  */
      int pad = align - (info->nsaa & (align - 1));

      item.len = pad;
      item.data = NULL;

      VEC_safe_push (stack_item_t, info->si, &item);
      info->nsaa += pad;
    }
}
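
/* Worked example (illustrative): with NSAA == 0, pushing a 12-byte
   argument whose alignment rounds up to 8 leaves NSAA == 12; since
   12 & 7 is non-zero, a 4-byte NULL padding item is pushed and NSAA
   becomes 16.  */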

/* Marshall an argument into a sequence of one or more consecutive X
   registers or, if insufficient X registers are available, onto
   the stack.  */

static void
pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
		    struct aarch64_call_info *info, struct type *type,
		    struct value *arg)
{
  int len = TYPE_LENGTH (type);
  int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;

  /* PCS C.13 - Pass in registers if we have enough spare.  */
  if (info->ngrn + nregs <= 8)
    {
      pass_in_x (gdbarch, regcache, info, type, arg);
      info->ngrn += nregs;
    }
  else
    {
      info->ngrn = 8;
      pass_on_stack (info, type, arg);
    }
}
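
/* For example (illustrative): a 16-byte struct needs nregs == 2, so with
   NGRN == 7 it no longer fits (7 + 2 > 8); NGRN is then pinned at 8 and
   the whole struct goes onto the stack instead.  */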

/* Pass a value, which is of type arg_type, in a V register.  Assumes the
   value is an aapcs_is_vfp_call_or_return_candidate and there are enough
   spare V registers.  A return value of false is an error state as the
   value will have been partially passed to the stack.  */
static bool
pass_in_v_vfp_candidate (struct gdbarch *gdbarch, struct regcache *regcache,
			 struct aarch64_call_info *info, struct type *arg_type,
			 struct value *arg)
{
  switch (TYPE_CODE (arg_type))
    {
    case TYPE_CODE_FLT:
      return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type),
			value_contents (arg));
      break;

    case TYPE_CODE_COMPLEX:
      {
	const bfd_byte *buf = value_contents (arg);
	struct type *target_type = check_typedef (TYPE_TARGET_TYPE (arg_type));

	if (!pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type),
			buf))
	  return false;

	return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type),
			  buf + TYPE_LENGTH (target_type));
      }

    case TYPE_CODE_ARRAY:
      if (TYPE_VECTOR (arg_type))
	return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type),
			  value_contents (arg));
      /* fall through.  */

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      for (int i = 0; i < TYPE_NFIELDS (arg_type); i++)
	{
	  /* Don't include static fields.  */
	  if (field_is_static (&TYPE_FIELD (arg_type, i)))
	    continue;

	  struct value *field = value_primitive_field (arg, 0, i, arg_type);
	  struct type *field_type = check_typedef (value_type (field));

	  if (!pass_in_v_vfp_candidate (gdbarch, regcache, info, field_type,
					field))
	    return false;
	}
      return true;

    default:
      return false;
    }
}

/* Implement the "push_dummy_call" gdbarch method.  */

static CORE_ADDR
aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
			 struct regcache *regcache, CORE_ADDR bp_addr,
			 int nargs,
			 struct value **args, CORE_ADDR sp,
			 function_call_return_method return_method,
			 CORE_ADDR struct_addr)
{
  int argnum;
  struct aarch64_call_info info;

  memset (&info, 0, sizeof (info));

  /* We need to know what the type of the called function is in order
     to determine the number of named/anonymous arguments for the
     actual argument placement, and the return type in order to handle
     return value correctly.

     The generic code above us views the decision of return in memory
     or return in registers as a two-stage process.  The language
     handler is consulted first and may decide to return in memory (e.g.
     class with copy constructor returned by value), this will cause
     the generic code to allocate space AND insert an initial leading
     argument.

     If the language code does not decide to pass in memory then the
     target code is consulted.

     If the language code decides to pass in memory we want to move
     the pointer inserted as the initial argument from the argument
     list and into X8, the conventional AArch64 struct return pointer
     register.  */

  /* Set the return address.  For the AArch64, the return breakpoint
     is always at BP_ADDR.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);

  /* If we were given an initial argument for the return slot, lose it.  */
  if (return_method == return_method_hidden_param)
    {
      args++;
      nargs--;
    }

  /* The struct_return pointer occupies X8.  */
  if (return_method != return_method_normal)
    {
      if (aarch64_debug)
	{
	  debug_printf ("struct return in %s = 0x%s\n",
			gdbarch_register_name (gdbarch,
					       AARCH64_STRUCT_RETURN_REGNUM),
			paddress (gdbarch, struct_addr));
	}
      regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
				      struct_addr);
    }

  for (argnum = 0; argnum < nargs; argnum++)
    {
      struct value *arg = args[argnum];
      struct type *arg_type, *fundamental_type;
      int len, elements;

      arg_type = check_typedef (value_type (arg));
      len = TYPE_LENGTH (arg_type);

      /* If arg can be passed in v registers as per the AAPCS64, then do
	 so if there are enough spare registers.  */
      if (aapcs_is_vfp_call_or_return_candidate (arg_type, &elements,
						 &fundamental_type))
	{
	  if (info.nsrn + elements <= 8)
	    {
	      /* We know that we have sufficient registers available
		 therefore this will never need to fall back to the
		 stack.  */
	      if (!pass_in_v_vfp_candidate (gdbarch, regcache, &info, arg_type,
					    arg))
		gdb_assert_not_reached ("Failed to push args");
	    }
	  else
	    {
	      info.nsrn = 8;
	      pass_on_stack (&info, arg_type, arg);
	    }
	  continue;
	}

      switch (TYPE_CODE (arg_type))
	{
	case TYPE_CODE_INT:
	case TYPE_CODE_BOOL:
	case TYPE_CODE_CHAR:
	case TYPE_CODE_RANGE:
	case TYPE_CODE_ENUM:
	  if (len < 4)
	    {
	      /* Promote to 32 bit integer.  */
	      if (TYPE_UNSIGNED (arg_type))
		arg_type = builtin_type (gdbarch)->builtin_uint32;
	      else
		arg_type = builtin_type (gdbarch)->builtin_int32;
	      arg = value_cast (arg_type, arg);
	    }
	  pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
	  break;

	case TYPE_CODE_STRUCT:
	case TYPE_CODE_ARRAY:
	case TYPE_CODE_UNION:
	  if (len > 16)
	    {
	      /* PCS B.7 Aggregates larger than 16 bytes are passed by
		 invisible reference.  */

	      /* Allocate aligned storage.  */
	      sp = align_down (sp - len, 16);

	      /* Write the real data into the stack.  */
	      write_memory (sp, value_contents (arg), len);

	      /* Construct the indirection.  */
	      arg_type = lookup_pointer_type (arg_type);
	      arg = value_from_pointer (arg_type, sp);
	      pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
	    }
	  else
	    /* PCS C.15 / C.18 multiple values pass.  */
	    pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
	  break;

	default:
	  pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
	  break;
	}
    }

  /* Make sure stack retains 16 byte alignment.  */
  if (info.nsaa & 15)
    sp -= 16 - (info.nsaa & 15);

  while (!VEC_empty (stack_item_t, info.si))
    {
      stack_item_t *si = VEC_last (stack_item_t, info.si);

      sp -= si->len;
      if (si->data != NULL)
	write_memory (sp, si->data, si->len);
      VEC_pop (stack_item_t, info.si);
    }

  VEC_free (stack_item_t, info.si);

  /* Finally, update the SP register.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);

  return sp;
}
1817
1818 /* Implement the "frame_align" gdbarch method. */
1819
1820 static CORE_ADDR
1821 aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
1822 {
1823 /* Align the stack to sixteen bytes. */
1824 return sp & ~(CORE_ADDR) 15;
1825 }
1826
1827 /* Return the type for an AdvSISD Q register. */
1828
1829 static struct type *
1830 aarch64_vnq_type (struct gdbarch *gdbarch)
1831 {
1832 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1833
1834 if (tdep->vnq_type == NULL)
1835 {
1836 struct type *t;
1837 struct type *elem;
1838
1839 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
1840 TYPE_CODE_UNION);
1841
1842 elem = builtin_type (gdbarch)->builtin_uint128;
1843 append_composite_type_field (t, "u", elem);
1844
1845 elem = builtin_type (gdbarch)->builtin_int128;
1846 append_composite_type_field (t, "s", elem);
1847
1848 tdep->vnq_type = t;
1849 }
1850
1851 return tdep->vnq_type;
1852 }
1853
1854 /* Return the type for an AdvSISD D register. */
1855
1856 static struct type *
1857 aarch64_vnd_type (struct gdbarch *gdbarch)
1858 {
1859 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1860
1861 if (tdep->vnd_type == NULL)
1862 {
1863 struct type *t;
1864 struct type *elem;
1865
1866 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
1867 TYPE_CODE_UNION);
1868
1869 elem = builtin_type (gdbarch)->builtin_double;
1870 append_composite_type_field (t, "f", elem);
1871
1872 elem = builtin_type (gdbarch)->builtin_uint64;
1873 append_composite_type_field (t, "u", elem);
1874
1875 elem = builtin_type (gdbarch)->builtin_int64;
1876 append_composite_type_field (t, "s", elem);
1877
1878 tdep->vnd_type = t;
1879 }
1880
1881 return tdep->vnd_type;
1882 }
1883
1884 /* Return the type for an AdvSISD S register. */
1885
1886 static struct type *
1887 aarch64_vns_type (struct gdbarch *gdbarch)
1888 {
1889 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1890
1891 if (tdep->vns_type == NULL)
1892 {
1893 struct type *t;
1894 struct type *elem;
1895
1896 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
1897 TYPE_CODE_UNION);
1898
1899 elem = builtin_type (gdbarch)->builtin_float;
1900 append_composite_type_field (t, "f", elem);
1901
1902 elem = builtin_type (gdbarch)->builtin_uint32;
1903 append_composite_type_field (t, "u", elem);
1904
1905 elem = builtin_type (gdbarch)->builtin_int32;
1906 append_composite_type_field (t, "s", elem);
1907
1908 tdep->vns_type = t;
1909 }
1910
1911 return tdep->vns_type;
1912 }
1913
1914 /* Return the type for an AdvSISD H register. */
1915
1916 static struct type *
1917 aarch64_vnh_type (struct gdbarch *gdbarch)
1918 {
1919 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1920
1921 if (tdep->vnh_type == NULL)
1922 {
1923 struct type *t;
1924 struct type *elem;
1925
1926 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
1927 TYPE_CODE_UNION);
1928
1929 elem = builtin_type (gdbarch)->builtin_uint16;
1930 append_composite_type_field (t, "u", elem);
1931
1932 elem = builtin_type (gdbarch)->builtin_int16;
1933 append_composite_type_field (t, "s", elem);
1934
1935 tdep->vnh_type = t;
1936 }
1937
1938 return tdep->vnh_type;
1939 }
1940
1941 /* Return the type for an AdvSIMD B register. */
1942
1943 static struct type *
1944 aarch64_vnb_type (struct gdbarch *gdbarch)
1945 {
1946 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1947
1948 if (tdep->vnb_type == NULL)
1949 {
1950 struct type *t;
1951 struct type *elem;
1952
1953 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
1954 TYPE_CODE_UNION);
1955
1956 elem = builtin_type (gdbarch)->builtin_uint8;
1957 append_composite_type_field (t, "u", elem);
1958
1959 elem = builtin_type (gdbarch)->builtin_int8;
1960 append_composite_type_field (t, "s", elem);
1961
1962 tdep->vnb_type = t;
1963 }
1964
1965 return tdep->vnb_type;
1966 }
1967
1968 /* Return the type for an AdvSIMD V register. */
1969
1970 static struct type *
1971 aarch64_vnv_type (struct gdbarch *gdbarch)
1972 {
1973 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1974
1975 if (tdep->vnv_type == NULL)
1976 {
1977 /* The other AArch64 pseudo registers (Q,D,H,S,B) refer to a single value
1978 slice from the non-pseudo vector registers. However NEON V registers
1979 are always vector registers, and need to be constructed as such. */
1980 const struct builtin_type *bt = builtin_type (gdbarch);
1981
1982 struct type *t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnv",
1983 TYPE_CODE_UNION);
1984
1985 struct type *sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
1986 TYPE_CODE_UNION);
1987 append_composite_type_field (sub, "f",
1988 init_vector_type (bt->builtin_double, 2));
1989 append_composite_type_field (sub, "u",
1990 init_vector_type (bt->builtin_uint64, 2));
1991 append_composite_type_field (sub, "s",
1992 init_vector_type (bt->builtin_int64, 2));
1993 append_composite_type_field (t, "d", sub);
1994
1995 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
1996 TYPE_CODE_UNION);
1997 append_composite_type_field (sub, "f",
1998 init_vector_type (bt->builtin_float, 4));
1999 append_composite_type_field (sub, "u",
2000 init_vector_type (bt->builtin_uint32, 4));
2001 append_composite_type_field (sub, "s",
2002 init_vector_type (bt->builtin_int32, 4));
2003 append_composite_type_field (t, "s", sub);
2004
2005 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
2006 TYPE_CODE_UNION);
2007 append_composite_type_field (sub, "u",
2008 init_vector_type (bt->builtin_uint16, 8));
2009 append_composite_type_field (sub, "s",
2010 init_vector_type (bt->builtin_int16, 8));
2011 append_composite_type_field (t, "h", sub);
2012
2013 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
2014 TYPE_CODE_UNION);
2015 append_composite_type_field (sub, "u",
2016 init_vector_type (bt->builtin_uint8, 16));
2017 append_composite_type_field (sub, "s",
2018 init_vector_type (bt->builtin_int8, 16));
2019 append_composite_type_field (t, "b", sub);
2020
2021 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
2022 TYPE_CODE_UNION);
2023 append_composite_type_field (sub, "u",
2024 init_vector_type (bt->builtin_uint128, 1));
2025 append_composite_type_field (sub, "s",
2026 init_vector_type (bt->builtin_int128, 1));
2027 append_composite_type_field (t, "q", sub);
2028
2029 tdep->vnv_type = t;
2030 }
2031
2032 return tdep->vnv_type;
2033 }
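
/* Usage sketch: the unions built above let a user slice a V register
   by element width from the CLI, e.g. "print $v0.d.f[1]" for the
   second double lane or "print $v0.b.u[15]" for the top unsigned
   byte.  The "d", "s", "h", "b" and "q" fields correspond to the
   sub-unions appended to T above.  */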
2034
2035 /* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
2036
2037 static int
2038 aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
2039 {
2040 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2041
2042 if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
2043 return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;
2044
2045 if (reg == AARCH64_DWARF_SP)
2046 return AARCH64_SP_REGNUM;
2047
2048 if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
2049 return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;
2050
2051 if (reg == AARCH64_DWARF_SVE_VG)
2052 return AARCH64_SVE_VG_REGNUM;
2053
2054 if (reg == AARCH64_DWARF_SVE_FFR)
2055 return AARCH64_SVE_FFR_REGNUM;
2056
2057 if (reg >= AARCH64_DWARF_SVE_P0 && reg <= AARCH64_DWARF_SVE_P0 + 15)
2058 return AARCH64_SVE_P0_REGNUM + reg - AARCH64_DWARF_SVE_P0;
2059
2060 if (reg >= AARCH64_DWARF_SVE_Z0 && reg <= AARCH64_DWARF_SVE_Z0 + 31)
2061 return AARCH64_SVE_Z0_REGNUM + reg - AARCH64_DWARF_SVE_Z0;
2062
2063 if (tdep->has_pauth ())
2064 {
2065 if (reg >= AARCH64_DWARF_PAUTH_DMASK && reg <= AARCH64_DWARF_PAUTH_CMASK)
2066 return tdep->pauth_reg_base + reg - AARCH64_DWARF_PAUTH_DMASK;
2067
2068 if (reg == AARCH64_DWARF_PAUTH_RA_STATE)
2069 return tdep->pauth_ra_state_regnum;
2070 }
2071
2072 return -1;
2073 }
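
/* For example, under the AAPCS64 DWARF numbering, DWARF register 0
   (AARCH64_DWARF_X0) maps to X0, 31 (AARCH64_DWARF_SP) to SP and 64
   (AARCH64_DWARF_V0) to V0.  Anything not handled above yields -1 so
   callers can diagnose an unknown DWARF register number.  */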
2074
2075 /* Implement the "print_insn" gdbarch method. */
2076
2077 static int
2078 aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
2079 {
2080 info->symbols = NULL;
2081 return default_print_insn (memaddr, info);
2082 }
2083
2084 /* AArch64 BRK software debug mode instruction.
2085 Note that AArch64 code is always little-endian.
2086 1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000. */
2087 constexpr gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
2088
2089 typedef BP_MANIPULATION (aarch64_default_breakpoint) aarch64_breakpoint;
2090
2091 /* Extract from an array REGS containing the (raw) register state a
2092 function return value of type TYPE, and copy that, in virtual
2093 format, into VALBUF. */
2094
2095 static void
2096 aarch64_extract_return_value (struct type *type, struct regcache *regs,
2097 gdb_byte *valbuf)
2098 {
2099 struct gdbarch *gdbarch = regs->arch ();
2100 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2101 int elements;
2102 struct type *fundamental_type;
2103
2104 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2105 &fundamental_type))
2106 {
2107 int len = TYPE_LENGTH (fundamental_type);
2108
2109 for (int i = 0; i < elements; i++)
2110 {
2111 int regno = AARCH64_V0_REGNUM + i;
2112 /* Enough space for a full vector register. */
2113 gdb_byte buf[register_size (gdbarch, regno)];
2114 gdb_assert (len <= sizeof (buf));
2115
2116 if (aarch64_debug)
2117 {
2118 debug_printf ("read HFA or HVA return value element %d from %s\n",
2119 i + 1,
2120 gdbarch_register_name (gdbarch, regno));
2121 }
2122 regs->cooked_read (regno, buf);
2123
2124 memcpy (valbuf, buf, len);
2125 valbuf += len;
2126 }
2127 }
2128 else if (TYPE_CODE (type) == TYPE_CODE_INT
2129 || TYPE_CODE (type) == TYPE_CODE_CHAR
2130 || TYPE_CODE (type) == TYPE_CODE_BOOL
2131 || TYPE_CODE (type) == TYPE_CODE_PTR
2132 || TYPE_IS_REFERENCE (type)
2133 || TYPE_CODE (type) == TYPE_CODE_ENUM)
2134 {
2135 /* If the type is a plain integer, then the access is
2136 straightforward. Otherwise we have to play around a bit
2137 more. */
2138 int len = TYPE_LENGTH (type);
2139 int regno = AARCH64_X0_REGNUM;
2140 ULONGEST tmp;
2141
2142 while (len > 0)
2143 {
2144 /* By using store_unsigned_integer we avoid having to do
2145 anything special for small big-endian values. */
2146 regcache_cooked_read_unsigned (regs, regno++, &tmp);
2147 store_unsigned_integer (valbuf,
2148 (len > X_REGISTER_SIZE
2149 ? X_REGISTER_SIZE : len), byte_order, tmp);
2150 len -= X_REGISTER_SIZE;
2151 valbuf += X_REGISTER_SIZE;
2152 }
2153 }
2154 else
2155 {
2156 /* For a structure or union the behaviour is as if the value had
2157 been stored to word-aligned memory and then loaded into
2158 registers with 64-bit load instruction(s). */
2159 int len = TYPE_LENGTH (type);
2160 int regno = AARCH64_X0_REGNUM;
2161 bfd_byte buf[X_REGISTER_SIZE];
2162
2163 while (len > 0)
2164 {
2165 regs->cooked_read (regno++, buf);
2166 memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2167 len -= X_REGISTER_SIZE;
2168 valbuf += X_REGISTER_SIZE;
2169 }
2170 }
2171 }
2172
2173
2174 /* Will a function return an aggregate type in memory or in a
2175 register? Return 0 if an aggregate type can be returned in a
2176 register, 1 if it must be returned in memory. */
2177
2178 static int
2179 aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
2180 {
2181 type = check_typedef (type);
2182 int elements;
2183 struct type *fundamental_type;
2184
2185 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2186 &fundamental_type))
2187 {
2188 /* v0-v7 are used to return values and one register is allocated
2189 for each member. However, an HFA or HVA has at most four members. */
2190 return 0;
2191 }
2192
2193 if (TYPE_LENGTH (type) > 16)
2194 {
2195 /* PCS B.6 Aggregates larger than 16 bytes are passed by
2196 invisible reference. */
2197
2198 return 1;
2199 }
2200
2201 return 0;
2202 }
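
/* Two illustrative cases: "struct { double a, b; }" is an HFA, so it
   is returned in D0/D1 and we return 0 above, while
   "struct { char c[24]; }" exceeds 16 bytes, so the caller passes the
   result address in X8 and we return 1.  */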
2203
2204 /* Write into appropriate registers a function return value of type
2205 TYPE, given in virtual format. */
2206
2207 static void
2208 aarch64_store_return_value (struct type *type, struct regcache *regs,
2209 const gdb_byte *valbuf)
2210 {
2211 struct gdbarch *gdbarch = regs->arch ();
2212 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2213 int elements;
2214 struct type *fundamental_type;
2215
2216 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2217 &fundamental_type))
2218 {
2219 int len = TYPE_LENGTH (fundamental_type);
2220
2221 for (int i = 0; i < elements; i++)
2222 {
2223 int regno = AARCH64_V0_REGNUM + i;
2224 /* Enough space for a full vector register. */
2225 gdb_byte tmpbuf[register_size (gdbarch, regno)];
2226 gdb_assert (len <= sizeof (tmpbuf));
2227
2228 if (aarch64_debug)
2229 {
2230 debug_printf ("write HFA or HVA return value element %d to %s\n",
2231 i + 1,
2232 gdbarch_register_name (gdbarch, regno));
2233 }
2234
2235 memcpy (tmpbuf, valbuf,
2236 len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
2237 regs->cooked_write (regno, tmpbuf);
2238 valbuf += len;
2239 }
2240 }
2241 else if (TYPE_CODE (type) == TYPE_CODE_INT
2242 || TYPE_CODE (type) == TYPE_CODE_CHAR
2243 || TYPE_CODE (type) == TYPE_CODE_BOOL
2244 || TYPE_CODE (type) == TYPE_CODE_PTR
2245 || TYPE_IS_REFERENCE (type)
2246 || TYPE_CODE (type) == TYPE_CODE_ENUM)
2247 {
2248 if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
2249 {
2250 /* Values of one word or less are zero/sign-extended and
2251 returned in X0. */
2252 bfd_byte tmpbuf[X_REGISTER_SIZE];
2253 LONGEST val = unpack_long (type, valbuf);
2254
2255 store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
2256 regs->cooked_write (AARCH64_X0_REGNUM, tmpbuf);
2257 }
2258 else
2259 {
2260 /* Integral values greater than one word are stored in
2261 consecutive registers starting with X0. This will always
2262 be a multiple of the register size. */
2263 int len = TYPE_LENGTH (type);
2264 int regno = AARCH64_X0_REGNUM;
2265
2266 while (len > 0)
2267 {
2268 regs->cooked_write (regno++, valbuf);
2269 len -= X_REGISTER_SIZE;
2270 valbuf += X_REGISTER_SIZE;
2271 }
2272 }
2273 }
2274 else
2275 {
2276 /* For a structure or union the behaviour is as if the value had
2277 been stored to word-aligned memory and then loaded into
2278 registers with 64-bit load instruction(s). */
2279 int len = TYPE_LENGTH (type);
2280 int regno = AARCH64_X0_REGNUM;
2281 bfd_byte tmpbuf[X_REGISTER_SIZE];
2282
2283 while (len > 0)
2284 {
2285 memcpy (tmpbuf, valbuf,
2286 len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2287 regs->cooked_write (regno++, tmpbuf);
2288 len -= X_REGISTER_SIZE;
2289 valbuf += X_REGISTER_SIZE;
2290 }
2291 }
2292 }
2293
2294 /* Implement the "return_value" gdbarch method. */
2295
2296 static enum return_value_convention
2297 aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
2298 struct type *valtype, struct regcache *regcache,
2299 gdb_byte *readbuf, const gdb_byte *writebuf)
2300 {
2301
2302 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
2303 || TYPE_CODE (valtype) == TYPE_CODE_UNION
2304 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
2305 {
2306 if (aarch64_return_in_memory (gdbarch, valtype))
2307 {
2308 if (aarch64_debug)
2309 debug_printf ("return value in memory\n");
2310 return RETURN_VALUE_STRUCT_CONVENTION;
2311 }
2312 }
2313
2314 if (writebuf)
2315 aarch64_store_return_value (valtype, regcache, writebuf);
2316
2317 if (readbuf)
2318 aarch64_extract_return_value (valtype, regcache, readbuf);
2319
2320 if (aarch64_debug)
2321 debug_printf ("return value in registers\n");
2322
2323 return RETURN_VALUE_REGISTER_CONVENTION;
2324 }
2325
2326 /* Implement the "get_longjmp_target" gdbarch method. */
2327
2328 static int
2329 aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2330 {
2331 CORE_ADDR jb_addr;
2332 gdb_byte buf[X_REGISTER_SIZE];
2333 struct gdbarch *gdbarch = get_frame_arch (frame);
2334 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2335 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2336
2337 jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
2338
2339 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
2340 X_REGISTER_SIZE))
2341 return 0;
2342
2343 *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
2344 return 1;
2345 }
2346
2347 /* Implement the "gen_return_address" gdbarch method. */
2348
2349 static void
2350 aarch64_gen_return_address (struct gdbarch *gdbarch,
2351 struct agent_expr *ax, struct axs_value *value,
2352 CORE_ADDR scope)
2353 {
2354 value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
2355 value->kind = axs_lvalue_register;
2356 value->u.reg = AARCH64_LR_REGNUM;
2357 }
2358 \f
2359
2360 /* Return the pseudo register name corresponding to register regnum. */
2361
2362 static const char *
2363 aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
2364 {
2365 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2366
2367 static const char *const q_name[] =
2368 {
2369 "q0", "q1", "q2", "q3",
2370 "q4", "q5", "q6", "q7",
2371 "q8", "q9", "q10", "q11",
2372 "q12", "q13", "q14", "q15",
2373 "q16", "q17", "q18", "q19",
2374 "q20", "q21", "q22", "q23",
2375 "q24", "q25", "q26", "q27",
2376 "q28", "q29", "q30", "q31",
2377 };
2378
2379 static const char *const d_name[] =
2380 {
2381 "d0", "d1", "d2", "d3",
2382 "d4", "d5", "d6", "d7",
2383 "d8", "d9", "d10", "d11",
2384 "d12", "d13", "d14", "d15",
2385 "d16", "d17", "d18", "d19",
2386 "d20", "d21", "d22", "d23",
2387 "d24", "d25", "d26", "d27",
2388 "d28", "d29", "d30", "d31",
2389 };
2390
2391 static const char *const s_name[] =
2392 {
2393 "s0", "s1", "s2", "s3",
2394 "s4", "s5", "s6", "s7",
2395 "s8", "s9", "s10", "s11",
2396 "s12", "s13", "s14", "s15",
2397 "s16", "s17", "s18", "s19",
2398 "s20", "s21", "s22", "s23",
2399 "s24", "s25", "s26", "s27",
2400 "s28", "s29", "s30", "s31",
2401 };
2402
2403 static const char *const h_name[] =
2404 {
2405 "h0", "h1", "h2", "h3",
2406 "h4", "h5", "h6", "h7",
2407 "h8", "h9", "h10", "h11",
2408 "h12", "h13", "h14", "h15",
2409 "h16", "h17", "h18", "h19",
2410 "h20", "h21", "h22", "h23",
2411 "h24", "h25", "h26", "h27",
2412 "h28", "h29", "h30", "h31",
2413 };
2414
2415 static const char *const b_name[] =
2416 {
2417 "b0", "b1", "b2", "b3",
2418 "b4", "b5", "b6", "b7",
2419 "b8", "b9", "b10", "b11",
2420 "b12", "b13", "b14", "b15",
2421 "b16", "b17", "b18", "b19",
2422 "b20", "b21", "b22", "b23",
2423 "b24", "b25", "b26", "b27",
2424 "b28", "b29", "b30", "b31",
2425 };
2426
2427 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
2428
2429 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
2430 return q_name[p_regnum - AARCH64_Q0_REGNUM];
2431
2432 if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
2433 return d_name[p_regnum - AARCH64_D0_REGNUM];
2434
2435 if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
2436 return s_name[p_regnum - AARCH64_S0_REGNUM];
2437
2438 if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
2439 return h_name[p_regnum - AARCH64_H0_REGNUM];
2440
2441 if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
2442 return b_name[p_regnum - AARCH64_B0_REGNUM];
2443
2444 if (tdep->has_sve ())
2445 {
2446 static const char *const sve_v_name[] =
2447 {
2448 "v0", "v1", "v2", "v3",
2449 "v4", "v5", "v6", "v7",
2450 "v8", "v9", "v10", "v11",
2451 "v12", "v13", "v14", "v15",
2452 "v16", "v17", "v18", "v19",
2453 "v20", "v21", "v22", "v23",
2454 "v24", "v25", "v26", "v27",
2455 "v28", "v29", "v30", "v31",
2456 };
2457
2458 if (p_regnum >= AARCH64_SVE_V0_REGNUM
2459 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2460 return sve_v_name[p_regnum - AARCH64_SVE_V0_REGNUM];
2461 }
2462
2463 /* RA_STATE is used for unwinding only. Do not assign it a name - this
2464 prevents it from being read by methods such as
2465 mi_cmd_trace_frame_collected. */
2466 if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
2467 return "";
2468
2469 internal_error (__FILE__, __LINE__,
2470 _("aarch64_pseudo_register_name: bad register number %d"),
2471 p_regnum);
2472 }
2473
2474 /* Implement the "pseudo_register_type" tdesc_arch_data method. */
2475
2476 static struct type *
2477 aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
2478 {
2479 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2480
2481 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
2482
2483 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
2484 return aarch64_vnq_type (gdbarch);
2485
2486 if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
2487 return aarch64_vnd_type (gdbarch);
2488
2489 if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
2490 return aarch64_vns_type (gdbarch);
2491
2492 if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
2493 return aarch64_vnh_type (gdbarch);
2494
2495 if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
2496 return aarch64_vnb_type (gdbarch);
2497
2498 if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
2499 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2500 return aarch64_vnv_type (gdbarch);
2501
2502 if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
2503 return builtin_type (gdbarch)->builtin_uint64;
2504
2505 internal_error (__FILE__, __LINE__,
2506 _("aarch64_pseudo_register_type: bad register number %d"),
2507 p_regnum);
2508 }
2509
2510 /* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2511
2512 static int
2513 aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
2514 struct reggroup *group)
2515 {
2516 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2517
2518 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
2519
2520 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
2521 return group == all_reggroup || group == vector_reggroup;
2522 else if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
2523 return (group == all_reggroup || group == vector_reggroup
2524 || group == float_reggroup);
2525 else if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
2526 return (group == all_reggroup || group == vector_reggroup
2527 || group == float_reggroup);
2528 else if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
2529 return group == all_reggroup || group == vector_reggroup;
2530 else if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
2531 return group == all_reggroup || group == vector_reggroup;
2532 else if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
2533 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2534 return group == all_reggroup || group == vector_reggroup;
2535 /* RA_STATE is used for unwinding only. Do not assign it to any groups. */
2536 if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
2537 return 0;
2538
2539 return group == all_reggroup;
2540 }
2541
2542 /* Helper for aarch64_pseudo_read_value. */
2543
2544 static struct value *
2545 aarch64_pseudo_read_value_1 (struct gdbarch *gdbarch,
2546 readable_regcache *regcache, int regnum_offset,
2547 int regsize, struct value *result_value)
2548 {
2549 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
2550
2551 /* Enough space for a full vector register. */
2552 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2553 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2554
2555 if (regcache->raw_read (v_regnum, reg_buf) != REG_VALID)
2556 mark_value_bytes_unavailable (result_value, 0,
2557 TYPE_LENGTH (value_type (result_value)));
2558 else
2559 memcpy (value_contents_raw (result_value), reg_buf, regsize);
2560
2561 return result_value;
2562 }
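
/* For instance, a read of the pseudo register D5 reaches this helper
   with REGNUM_OFFSET == 5 and REGSIZE == D_REGISTER_SIZE (8 bytes);
   the low eight bytes of the raw V5 register (Z5 on SVE targets) are
   copied into the result value.  */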
2563
2564 /* Implement the "pseudo_register_read_value" gdbarch method. */
2565
2566 static struct value *
2567 aarch64_pseudo_read_value (struct gdbarch *gdbarch, readable_regcache *regcache,
2568 int regnum)
2569 {
2570 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2571 struct value *result_value = allocate_value (register_type (gdbarch, regnum));
2572
2573 VALUE_LVAL (result_value) = lval_register;
2574 VALUE_REGNUM (result_value) = regnum;
2575
2576 regnum -= gdbarch_num_regs (gdbarch);
2577
2578 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2579 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2580 regnum - AARCH64_Q0_REGNUM,
2581 Q_REGISTER_SIZE, result_value);
2582
2583 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2584 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2585 regnum - AARCH64_D0_REGNUM,
2586 D_REGISTER_SIZE, result_value);
2587
2588 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2589 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2590 regnum - AARCH64_S0_REGNUM,
2591 S_REGISTER_SIZE, result_value);
2592
2593 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2594 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2595 regnum - AARCH64_H0_REGNUM,
2596 H_REGISTER_SIZE, result_value);
2597
2598 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2599 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2600 regnum - AARCH64_B0_REGNUM,
2601 B_REGISTER_SIZE, result_value);
2602
2603 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2604 && regnum < AARCH64_SVE_V0_REGNUM + 32)
2605 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2606 regnum - AARCH64_SVE_V0_REGNUM,
2607 V_REGISTER_SIZE, result_value);
2608
2609 gdb_assert_not_reached ("regnum out of bounds");
2610 }
2611
2612 /* Helper for aarch64_pseudo_write. */
2613
2614 static void
2615 aarch64_pseudo_write_1 (struct gdbarch *gdbarch, struct regcache *regcache,
2616 int regnum_offset, int regsize, const gdb_byte *buf)
2617 {
2618 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
2619
2620 /* Enough space for a full vector register. */
2621 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2622 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2623
2624 /* Ensure the register buffer is zero; we want GDB writes of the
2625 various 'scalar' pseudo registers to behave like architectural
2626 writes: register-width bytes are written and the remainder is
2627 set to zero. */
2628 memset (reg_buf, 0, register_size (gdbarch, AARCH64_V0_REGNUM));
2629
2630 memcpy (reg_buf, buf, regsize);
2631 regcache->raw_write (v_regnum, reg_buf);
2632 }
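
/* For example, "set $s3 = 1.0f" writes four bytes into the zeroed
   full-width buffer and stores the whole buffer back to V3, so the
   upper lanes of V3 subsequently read as zero, just as an
   architectural write to S3 would leave them.  */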
2633
2634 /* Implement the "pseudo_register_write" gdbarch method. */
2635
2636 static void
2637 aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2638 int regnum, const gdb_byte *buf)
2639 {
2640 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2641 regnum -= gdbarch_num_regs (gdbarch);
2642
2643 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2644 return aarch64_pseudo_write_1 (gdbarch, regcache,
2645 regnum - AARCH64_Q0_REGNUM, Q_REGISTER_SIZE,
2646 buf);
2647
2648 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2649 return aarch64_pseudo_write_1 (gdbarch, regcache,
2650 regnum - AARCH64_D0_REGNUM, D_REGISTER_SIZE,
2651 buf);
2652
2653 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2654 return aarch64_pseudo_write_1 (gdbarch, regcache,
2655 regnum - AARCH64_S0_REGNUM, S_REGISTER_SIZE,
2656 buf);
2657
2658 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2659 return aarch64_pseudo_write_1 (gdbarch, regcache,
2660 regnum - AARCH64_H0_REGNUM, H_REGISTER_SIZE,
2661 buf);
2662
2663 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2664 return aarch64_pseudo_write_1 (gdbarch, regcache,
2665 regnum - AARCH64_B0_REGNUM, B_REGISTER_SIZE,
2666 buf);
2667
2668 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2669 && regnum < AARCH64_SVE_V0_REGNUM + 32)
2670 return aarch64_pseudo_write_1 (gdbarch, regcache,
2671 regnum - AARCH64_SVE_V0_REGNUM,
2672 V_REGISTER_SIZE, buf);
2673
2674 gdb_assert_not_reached ("regnum out of bounds");
2675 }
2676
2677 /* Callback function for user_reg_add. */
2678
2679 static struct value *
2680 value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
2681 {
2682 const int *reg_p = (const int *) baton;
2683
2684 return value_of_register (*reg_p, frame);
2685 }
2686 \f
2687
2688 /* Implement the "software_single_step" gdbarch method, needed to
2689 single step through atomic sequences on AArch64. */
2690
2691 static std::vector<CORE_ADDR>
2692 aarch64_software_single_step (struct regcache *regcache)
2693 {
2694 struct gdbarch *gdbarch = regcache->arch ();
2695 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2696 const int insn_size = 4;
2697 const int atomic_sequence_length = 16; /* Instruction sequence length. */
2698 CORE_ADDR pc = regcache_read_pc (regcache);
2699 CORE_ADDR breaks[2] = { CORE_ADDR_MAX, CORE_ADDR_MAX };
2700 CORE_ADDR loc = pc;
2701 CORE_ADDR closing_insn = 0;
2702 uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
2703 byte_order_for_code);
2704 int index;
2705 int insn_count;
2706 int bc_insn_count = 0; /* Conditional branch instruction count. */
2707 int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed). */
2708 aarch64_inst inst;
2709
2710 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2711 return {};
2712
2713 /* Look for a Load Exclusive instruction which begins the sequence. */
2714 if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
2715 return {};
2716
2717 for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
2718 {
2719 loc += insn_size;
2720 insn = read_memory_unsigned_integer (loc, insn_size,
2721 byte_order_for_code);
2722
2723 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2724 return {};
2725 /* Check if the instruction is a conditional branch. */
2726 if (inst.opcode->iclass == condbranch)
2727 {
2728 gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);
2729
2730 if (bc_insn_count >= 1)
2731 return {};
2732
2733 /* It is, so we'll try to set a breakpoint at the destination. */
2734 breaks[1] = loc + inst.operands[0].imm.value;
2735
2736 bc_insn_count++;
2737 last_breakpoint++;
2738 }
2739
2740 /* Look for the Store Exclusive which closes the atomic sequence. */
2741 if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
2742 {
2743 closing_insn = loc;
2744 break;
2745 }
2746 }
2747
2748 /* We didn't find a closing Store Exclusive instruction; fall back. */
2749 if (!closing_insn)
2750 return {};
2751
2752 /* Insert breakpoint after the end of the atomic sequence. */
2753 breaks[0] = loc + insn_size;
2754
2755 /* Check for duplicated breakpoints, and also check that the second
2756 breakpoint is not within the atomic sequence. */
2757 if (last_breakpoint
2758 && (breaks[1] == breaks[0]
2759 || (breaks[1] >= pc && breaks[1] <= closing_insn)))
2760 last_breakpoint = 0;
2761
2762 std::vector<CORE_ADDR> next_pcs;
2763
2764 /* Insert the breakpoint at the end of the sequence, and one at the
2765 destination of the conditional branch, if it exists. */
2766 for (index = 0; index <= last_breakpoint; index++)
2767 next_pcs.push_back (breaks[index]);
2768
2769 return next_pcs;
2770 }
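
/* A typical sequence this function must step over looks like the
   following (illustrative only):

       loop:  ldaxr  w1, [x0]       ; Load Exclusive opens the sequence
              add    w1, w1, #1
              stlxr  w2, w1, [x0]   ; Store Exclusive closes it
              cbnz   w2, loop

   A breakpoint trap inside the sequence would clear the exclusive
   monitor and could make the Store Exclusive fail on every iteration,
   so in the common case the single breakpoint (breaks[0]) goes after
   the Store Exclusive.  */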
2771
2772 struct aarch64_displaced_step_closure : public displaced_step_closure
2773 {
2774 /* True when a conditional instruction, such as B.COND, TBZ, etc.,
2775 is being displaced stepped. */
2776 int cond = 0;
2777
2778 /* PC adjustment offset after displaced stepping. */
2779 int32_t pc_adjust = 0;
2780 };
2781
2782 /* Data when visiting instructions for displaced stepping. */
2783
2784 struct aarch64_displaced_step_data
2785 {
2786 struct aarch64_insn_data base;
2787
2788 /* The address at which the instruction will be executed. */
2789 CORE_ADDR new_addr;
2790 /* Buffer of instructions to be copied to NEW_ADDR to execute. */
2791 uint32_t insn_buf[DISPLACED_MODIFIED_INSNS];
2792 /* Number of instructions in INSN_BUF. */
2793 unsigned insn_count;
2794 /* Registers when doing displaced stepping. */
2795 struct regcache *regs;
2796
2797 aarch64_displaced_step_closure *dsc;
2798 };
2799
2800 /* Implementation of aarch64_insn_visitor method "b". */
2801
2802 static void
2803 aarch64_displaced_step_b (const int is_bl, const int32_t offset,
2804 struct aarch64_insn_data *data)
2805 {
2806 struct aarch64_displaced_step_data *dsd
2807 = (struct aarch64_displaced_step_data *) data;
2808 int64_t new_offset = data->insn_addr - dsd->new_addr + offset;
2809
2810 if (can_encode_int32 (new_offset, 28))
2811 {
2812 /* Emit B rather than BL, because executing BL on a new address
2813 will get the wrong address into LR. In order to avoid this,
2814 we emit B, and update LR if the instruction is BL. */
2815 emit_b (dsd->insn_buf, 0, new_offset);
2816 dsd->insn_count++;
2817 }
2818 else
2819 {
2820 /* Write NOP. */
2821 emit_nop (dsd->insn_buf);
2822 dsd->insn_count++;
2823 dsd->dsc->pc_adjust = offset;
2824 }
2825
2826 if (is_bl)
2827 {
2828 /* Update LR. */
2829 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
2830 data->insn_addr + 4);
2831 }
2832 }
2833
2834 /* Implementation of aarch64_insn_visitor method "b_cond". */
2835
2836 static void
2837 aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
2838 struct aarch64_insn_data *data)
2839 {
2840 struct aarch64_displaced_step_data *dsd
2841 = (struct aarch64_displaced_step_data *) data;
2842
2843 /* GDB has to fix up the PC after displaced stepping this instruction
2844 differently depending on whether the condition is true or false.
2845 Instead of checking COND against the condition flags, we can emit
2846 the following instructions, and GDB can tell how to fix up the PC
2847 from the resulting PC value.
2848
2849 B.COND TAKEN ; If cond is true, then jump to TAKEN.
2850 INSN1 ;
2851 TAKEN:
2852 INSN2
2853 */
2854
2855 emit_bcond (dsd->insn_buf, cond, 8);
2856 dsd->dsc->cond = 1;
2857 dsd->dsc->pc_adjust = offset;
2858 dsd->insn_count = 1;
2859 }
2860
2861 /* Dynamically allocate a new register. If we know the register
2862 statically, we should make it a global as above instead of using this
2863 helper function. */
2864
2865 static struct aarch64_register
2866 aarch64_register (unsigned num, int is64)
2867 {
2868 return (struct aarch64_register) { num, is64 };
2869 }
2870
2871 /* Implementation of aarch64_insn_visitor method "cb". */
2872
2873 static void
2874 aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
2875 const unsigned rn, int is64,
2876 struct aarch64_insn_data *data)
2877 {
2878 struct aarch64_displaced_step_data *dsd
2879 = (struct aarch64_displaced_step_data *) data;
2880
2881 /* The offset is out of range for a compare and branch
2882 instruction. We can use the following instructions instead:
2883
2884 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
2885 INSN1 ;
2886 TAKEN:
2887 INSN2
2888 */
2889 emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
2890 dsd->insn_count = 1;
2891 dsd->dsc->cond = 1;
2892 dsd->dsc->pc_adjust = offset;
2893 }
2894
2895 /* Implementation of aarch64_insn_visitor method "tb". */
2896
2897 static void
2898 aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
2899 const unsigned rt, unsigned bit,
2900 struct aarch64_insn_data *data)
2901 {
2902 struct aarch64_displaced_step_data *dsd
2903 = (struct aarch64_displaced_step_data *) data;
2904
2905 /* The offset is out of range for a test bit and branch
2906 instruction. We can use the following instructions instead:
2907
2908 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
2909 INSN1 ;
2910 TAKEN:
2911 INSN2
2912
2913 */
2914 emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
2915 dsd->insn_count = 1;
2916 dsd->dsc->cond = 1;
2917 dsd->dsc->pc_adjust = offset;
2918 }
2919
2920 /* Implementation of aarch64_insn_visitor method "adr". */
2921
2922 static void
2923 aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
2924 const int is_adrp, struct aarch64_insn_data *data)
2925 {
2926 struct aarch64_displaced_step_data *dsd
2927 = (struct aarch64_displaced_step_data *) data;
2928 /* We know exactly the address the ADR{P,} instruction will compute.
2929 We can just write it to the destination register. */
2930 CORE_ADDR address = data->insn_addr + offset;
2931
2932 if (is_adrp)
2933 {
2934 /* Clear the lower 12 bits of the offset to get the 4K page. */
2935 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2936 address & ~0xfff);
2937 }
2938 else
2939 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2940 address);
2941
2942 dsd->dsc->pc_adjust = 4;
2943 emit_nop (dsd->insn_buf);
2944 dsd->insn_count = 1;
2945 }
2946
2947 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
2948
2949 static void
2950 aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
2951 const unsigned rt, const int is64,
2952 struct aarch64_insn_data *data)
2953 {
2954 struct aarch64_displaced_step_data *dsd
2955 = (struct aarch64_displaced_step_data *) data;
2956 CORE_ADDR address = data->insn_addr + offset;
2957 struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };
2958
2959 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
2960 address);
2961
2962 if (is_sw)
2963 dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
2964 aarch64_register (rt, 1), zero);
2965 else
2966 dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
2967 aarch64_register (rt, 1), zero);
2968
2969 dsd->dsc->pc_adjust = 4;
2970 }
2971
2972 /* Implementation of aarch64_insn_visitor method "others". */
2973
2974 static void
2975 aarch64_displaced_step_others (const uint32_t insn,
2976 struct aarch64_insn_data *data)
2977 {
2978 struct aarch64_displaced_step_data *dsd
2979 = (struct aarch64_displaced_step_data *) data;
2980
2981 aarch64_emit_insn (dsd->insn_buf, insn);
2982 dsd->insn_count = 1;
2983
2984 if ((insn & 0xfffffc1f) == 0xd65f0000)
2985 {
2986 /* RET */
2987 dsd->dsc->pc_adjust = 0;
2988 }
2989 else
2990 dsd->dsc->pc_adjust = 4;
2991 }
2992
2993 static const struct aarch64_insn_visitor visitor =
2994 {
2995 aarch64_displaced_step_b,
2996 aarch64_displaced_step_b_cond,
2997 aarch64_displaced_step_cb,
2998 aarch64_displaced_step_tb,
2999 aarch64_displaced_step_adr,
3000 aarch64_displaced_step_ldr_literal,
3001 aarch64_displaced_step_others,
3002 };
3003
3004 /* Implement the "displaced_step_copy_insn" gdbarch method. */
3005
3006 struct displaced_step_closure *
3007 aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
3008 CORE_ADDR from, CORE_ADDR to,
3009 struct regcache *regs)
3010 {
3011 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
3012 uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
3013 struct aarch64_displaced_step_data dsd;
3014 aarch64_inst inst;
3015
3016 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
3017 return NULL;
3018
3019 /* Look for a Load Exclusive instruction which begins the sequence. */
3020 if (inst.opcode->iclass == ldstexcl && bit (insn, 22))
3021 {
3022 /* We can't displaced-step atomic sequences. */
3023 return NULL;
3024 }
3025
3026 std::unique_ptr<aarch64_displaced_step_closure> dsc
3027 (new aarch64_displaced_step_closure);
3028 dsd.base.insn_addr = from;
3029 dsd.new_addr = to;
3030 dsd.regs = regs;
3031 dsd.dsc = dsc.get ();
3032 dsd.insn_count = 0;
3033 aarch64_relocate_instruction (insn, &visitor,
3034 (struct aarch64_insn_data *) &dsd);
3035 gdb_assert (dsd.insn_count <= DISPLACED_MODIFIED_INSNS);
3036
3037 if (dsd.insn_count != 0)
3038 {
3039 int i;
3040
3041 /* The instruction can be relocated to the scratch pad. Copy the
3042 relocated instruction(s) there. */
3043 for (i = 0; i < dsd.insn_count; i++)
3044 {
3045 if (debug_displaced)
3046 {
3047 debug_printf ("displaced: writing insn ");
3048 debug_printf ("%.8x", dsd.insn_buf[i]);
3049 debug_printf (" at %s\n", paddress (gdbarch, to + i * 4));
3050 }
3051 write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
3052 (ULONGEST) dsd.insn_buf[i]);
3053 }
3054 }
3055 else
3056 {
3057 dsc = NULL;
3058 }
3059
3060 return dsc.release ();
3061 }
3062
3063 /* Implement the "displaced_step_fixup" gdbarch method. */
3064
3065 void
3066 aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
3067 struct displaced_step_closure *dsc_,
3068 CORE_ADDR from, CORE_ADDR to,
3069 struct regcache *regs)
3070 {
3071 aarch64_displaced_step_closure *dsc = (aarch64_displaced_step_closure *) dsc_;
3072
3073 if (dsc->cond)
3074 {
3075 ULONGEST pc;
3076
3077 regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc);
3078 if (pc - to == 8)
3079 {
3080 /* Condition is true. */
3081 }
3082 else if (pc - to == 4)
3083 {
3084 /* Condition is false. */
3085 dsc->pc_adjust = 4;
3086 }
3087 else
3088 gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
3089 }
3090
3091 if (dsc->pc_adjust != 0)
3092 {
3093 if (debug_displaced)
3094 {
3095 debug_printf ("displaced: fixup: set PC to %s:%d\n",
3096 paddress (gdbarch, from), dsc->pc_adjust);
3097 }
3098 regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
3099 from + dsc->pc_adjust);
3100 }
3101 }
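
/* To see why the PC comparison above works, recall that for a
   conditional instruction the scratch pad holds a single branch with
   a +8 target (see aarch64_displaced_step_b_cond, _cb and _tb).  If
   the condition held, the branch was taken and the PC stops at
   TO + 8; otherwise execution fell through one instruction to TO + 4.
   Any other value indicates a corrupted scratch pad.  */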
3102
3103 /* Implement the "displaced_step_hw_singlestep" gdbarch method. */
3104
3105 int
3106 aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch,
3107 struct displaced_step_closure *closure)
3108 {
3109 return 1;
3110 }
3111
3112 /* Get the correct target description for the given VQ value.
3113 If VQ is zero then it is assumed SVE is not supported.
3114 (It is not possible to set VQ to zero on an SVE system). */
3115
3116 const target_desc *
3117 aarch64_read_description (uint64_t vq, bool pauth_p)
3118 {
3119 if (vq > AARCH64_MAX_SVE_VQ)
3120 error (_("VQ is %" PRIu64 ", maximum supported value is %d"), vq,
3121 AARCH64_MAX_SVE_VQ);
3122
3123 struct target_desc *tdesc = tdesc_aarch64_list[vq][pauth_p];
3124
3125 if (tdesc == NULL)
3126 {
3127 tdesc = aarch64_create_target_description (vq, pauth_p);
3128 tdesc_aarch64_list[vq][pauth_p] = tdesc;
3129 }
3130
3131 return tdesc;
3132 }
3133
3134 /* Return the VQ used when creating the target description TDESC. */
3135
3136 static uint64_t
3137 aarch64_get_tdesc_vq (const struct target_desc *tdesc)
3138 {
3139 const struct tdesc_feature *feature_sve;
3140
3141 if (!tdesc_has_registers (tdesc))
3142 return 0;
3143
3144 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
3145
3146 if (feature_sve == nullptr)
3147 return 0;
3148
3149 uint64_t vl = tdesc_register_bitsize (feature_sve,
3150 aarch64_sve_register_names[0]) / 8;
3151 return sve_vq_from_vl (vl);
3152 }
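
/* Worked example: on a target with 256-bit SVE vectors the Z0 entry
   in the description has a bitsize of 256, so VL is 256 / 8 == 32
   bytes and sve_vq_from_vl reports 32 / 16 == 2 quadwords.  */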
3153
3154 /* Add all the expected register sets into GDBARCH. */
3155
3156 static void
3157 aarch64_add_reggroups (struct gdbarch *gdbarch)
3158 {
3159 reggroup_add (gdbarch, general_reggroup);
3160 reggroup_add (gdbarch, float_reggroup);
3161 reggroup_add (gdbarch, system_reggroup);
3162 reggroup_add (gdbarch, vector_reggroup);
3163 reggroup_add (gdbarch, all_reggroup);
3164 reggroup_add (gdbarch, save_reggroup);
3165 reggroup_add (gdbarch, restore_reggroup);
3166 }
3167
3168 /* Implement the "cannot_store_register" gdbarch method. */
3169
3170 static int
3171 aarch64_cannot_store_register (struct gdbarch *gdbarch, int regnum)
3172 {
3173 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3174
3175 if (!tdep->has_pauth ())
3176 return 0;
3177
3178 /* Pointer authentication registers are read-only. */
3179 return (regnum == AARCH64_PAUTH_DMASK_REGNUM (tdep->pauth_reg_base)
3180 || regnum == AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base));
3181 }
3182
3183 /* Initialize the current architecture based on INFO. If possible,
3184 re-use an architecture from ARCHES, which is a list of
3185 architectures already created during this debugging session.
3186
3187 Called e.g. at program startup, when reading a core file, and when
3188 reading a binary file. */
3189
3190 static struct gdbarch *
3191 aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
3192 {
3193 struct gdbarch_tdep *tdep;
3194 struct gdbarch *gdbarch;
3195 struct gdbarch_list *best_arch;
3196 struct tdesc_arch_data *tdesc_data = NULL;
3197 const struct target_desc *tdesc = info.target_desc;
3198 int i;
3199 int valid_p = 1;
3200 const struct tdesc_feature *feature_core;
3201 const struct tdesc_feature *feature_fpu;
3202 const struct tdesc_feature *feature_sve;
3203 const struct tdesc_feature *feature_pauth;
3204 int num_regs = 0;
3205 int num_pseudo_regs = 0;
3206 int first_pauth_regnum = -1;
3207 int pauth_ra_state_offset = -1;
3208
3209 /* Ensure we always have a target description. */
3210 if (!tdesc_has_registers (tdesc))
3211 tdesc = aarch64_read_description (0, false);
3212 gdb_assert (tdesc);
3213
3214 feature_core = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");
3215 feature_fpu = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
3216 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
3217 feature_pauth = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.pauth");
3218
3219 if (feature_core == NULL)
3220 return NULL;
3221
3222 tdesc_data = tdesc_data_alloc ();
3223
3224 /* Validate the description provides the mandatory core R registers
3225 and allocate their numbers. */
3226 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
3227 valid_p &= tdesc_numbered_register (feature_core, tdesc_data,
3228 AARCH64_X0_REGNUM + i,
3229 aarch64_r_register_names[i]);
3230
3231 num_regs = AARCH64_X0_REGNUM + i;
3232
3233 /* Add the V registers. */
3234 if (feature_fpu != NULL)
3235 {
3236 if (feature_sve != NULL)
3237 error (_("Program contains both fpu and SVE features."));
3238
3239 /* Validate the description provides the mandatory V registers
3240 and allocate their numbers. */
3241 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
3242 valid_p &= tdesc_numbered_register (feature_fpu, tdesc_data,
3243 AARCH64_V0_REGNUM + i,
3244 aarch64_v_register_names[i]);
3245
3246 num_regs = AARCH64_V0_REGNUM + i;
3247 }
3248
3249 /* Add the SVE registers. */
3250 if (feature_sve != NULL)
3251 {
3252 /* Validate the description provides the mandatory SVE registers
3253 and allocate their numbers. */
3254 for (i = 0; i < ARRAY_SIZE (aarch64_sve_register_names); i++)
3255 valid_p &= tdesc_numbered_register (feature_sve, tdesc_data,
3256 AARCH64_SVE_Z0_REGNUM + i,
3257 aarch64_sve_register_names[i]);
3258
3259 num_regs = AARCH64_SVE_Z0_REGNUM + i;
3260 num_pseudo_regs += 32; /* Add the Vn register pseudos. */
3261 }
3262
3263 if (feature_fpu != NULL || feature_sve != NULL)
3264 {
3265 num_pseudo_regs += 32; /* Add the Qn scalar register pseudos. */
3266 num_pseudo_regs += 32; /* Add the Dn scalar register pseudos. */
3267 num_pseudo_regs += 32; /* Add the Sn scalar register pseudos. */
3268 num_pseudo_regs += 32; /* Add the Hn scalar register pseudos. */
3269 num_pseudo_regs += 32; /* Add the Bn scalar register pseudos. */
3270 }
3271
3272 /* Add the pauth registers. */
3273 if (feature_pauth != NULL)
3274 {
3275 first_pauth_regnum = num_regs;
3276 pauth_ra_state_offset = num_pseudo_regs;
3277 /* Validate the descriptor provides the mandatory PAUTH registers and
3278 allocate their numbers. */
3279 for (i = 0; i < ARRAY_SIZE (aarch64_pauth_register_names); i++)
3280 valid_p &= tdesc_numbered_register (feature_pauth, tdesc_data,
3281 first_pauth_regnum + i,
3282 aarch64_pauth_register_names[i]);
3283
3284 num_regs += i;
3285 num_pseudo_regs += 1; /* Count RA_STATE pseudo register. */
3286 }
3287
3288 if (!valid_p)
3289 {
3290 tdesc_data_cleanup (tdesc_data);
3291 return NULL;
3292 }
3293
3294 /* AArch64 code is always little-endian. */
3295 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
3296
3297 /* If there is already a candidate, use it. */
3298 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
3299 best_arch != NULL;
3300 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
3301 {
3302 /* Found a match. */
3303 break;
3304 }
3305
3306 if (best_arch != NULL)
3307 {
3308 if (tdesc_data != NULL)
3309 tdesc_data_cleanup (tdesc_data);
3310 return best_arch->gdbarch;
3311 }
3312
3313 tdep = XCNEW (struct gdbarch_tdep);
3314 gdbarch = gdbarch_alloc (&info, tdep);
3315
3316 /* This should be low enough for everything. */
3317 tdep->lowest_pc = 0x20;
3318 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
3319 tdep->jb_elt_size = 8;
3320 tdep->vq = aarch64_get_tdesc_vq (tdesc);
3321 tdep->pauth_reg_base = first_pauth_regnum;
3322 tdep->pauth_ra_state_regnum = (feature_pauth == NULL) ? -1
3323 : pauth_ra_state_offset + num_regs;
3324
3325
3326 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
3327 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
3328
3329 /* Advance PC across function entry code. */
3330 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
3331
3332 /* The stack grows downward. */
3333 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
3334
3335 /* Breakpoint manipulation. */
3336 set_gdbarch_breakpoint_kind_from_pc (gdbarch,
3337 aarch64_breakpoint::kind_from_pc);
3338 set_gdbarch_sw_breakpoint_from_kind (gdbarch,
3339 aarch64_breakpoint::bp_from_kind);
3340 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
3341 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
3342
3343 /* Information about registers, etc. */
3344 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
3345 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
3346 set_gdbarch_num_regs (gdbarch, num_regs);
3347
3348 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
3349 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
3350 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
3351 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
3352 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
3353 set_tdesc_pseudo_register_reggroup_p (gdbarch,
3354 aarch64_pseudo_register_reggroup_p);
3355 set_gdbarch_cannot_store_register (gdbarch, aarch64_cannot_store_register);
3356
3357 /* ABI */
3358 set_gdbarch_short_bit (gdbarch, 16);
3359 set_gdbarch_int_bit (gdbarch, 32);
3360 set_gdbarch_float_bit (gdbarch, 32);
3361 set_gdbarch_double_bit (gdbarch, 64);
3362 set_gdbarch_long_double_bit (gdbarch, 128);
3363 set_gdbarch_long_bit (gdbarch, 64);
3364 set_gdbarch_long_long_bit (gdbarch, 64);
3365 set_gdbarch_ptr_bit (gdbarch, 64);
3366 set_gdbarch_char_signed (gdbarch, 0);
3367 set_gdbarch_wchar_signed (gdbarch, 0);
3368 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
3369 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
3370 set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);
3371
3372 /* Internal <-> external register number maps. */
3373 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
3374
3375 /* Returning results. */
3376 set_gdbarch_return_value (gdbarch, aarch64_return_value);
3377
3378 /* Disassembly. */
3379 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
3380
3381 /* Virtual tables. */
3382 set_gdbarch_vbit_in_delta (gdbarch, 1);
3383
3384 /* Register architecture. */
3385 aarch64_add_reggroups (gdbarch);
3386
3387 /* Hook in the ABI-specific overrides, if they have been registered. */
3388 info.target_desc = tdesc;
3389 info.tdesc_data = tdesc_data;
3390 gdbarch_init_osabi (info, gdbarch);
3391
3392 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
3393 /* Register DWARF CFA vendor handler. */
3394 set_gdbarch_execute_dwarf_cfa_vendor_op (gdbarch,
3395 aarch64_execute_dwarf_cfa_vendor_op);
3396
3397 /* Add some default predicates. */
3398 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
3399 dwarf2_append_unwinders (gdbarch);
3400 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
3401
3402 frame_base_set_default (gdbarch, &aarch64_normal_base);
3403
3404 /* Now that we have tuned the configuration, set a few final things
3405 based on what the OS ABI has told us. */
3406
3407 if (tdep->jb_pc >= 0)
3408 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
3409
3410 set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);
3411
3412 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
3413
3414 /* Add standard register aliases. */
3415 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
3416 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
3417 value_of_aarch64_user_reg,
3418 &aarch64_register_aliases[i].regnum);
3419
3420 register_aarch64_ravenscar_ops (gdbarch);
3421
3422 return gdbarch;
3423 }
3424
3425 static void
3426 aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
3427 {
3428 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3429
3430 if (tdep == NULL)
3431 return;
3432
3433 fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
3434 paddress (gdbarch, tdep->lowest_pc));
3435 }
3436
3437 #if GDB_SELF_TEST
3438 namespace selftests
3439 {
3440 static void aarch64_process_record_test (void);
3441 }
3442 #endif
3443
3444 void
3445 _initialize_aarch64_tdep (void)
3446 {
3447 gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
3448 aarch64_dump_tdep);
3449
3450 /* Debug this file's internals. */
3451 add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
3452 Set AArch64 debugging."), _("\
3453 Show AArch64 debugging."), _("\
3454 When on, AArch64 specific debugging is enabled."),
3455 NULL,
3456 show_aarch64_debug,
3457 &setdebuglist, &showdebuglist);
3458
3459 #if GDB_SELF_TEST
3460 selftests::register_test ("aarch64-analyze-prologue",
3461 selftests::aarch64_analyze_prologue_test);
3462 selftests::register_test ("aarch64-process-record",
3463 selftests::aarch64_process_record_test);
3464 selftests::record_xml_tdesc ("aarch64.xml",
3465 aarch64_create_target_description (0, false));
3466 #endif
3467 }
3468
3469 /* AArch64 process record-replay related structures, defines etc. */
3470
3471 #define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
3472 do \
3473 { \
3474 unsigned int reg_len = LENGTH; \
3475 if (reg_len) \
3476 { \
3477 REGS = XNEWVEC (uint32_t, reg_len); \
3478 memcpy (&REGS[0], &RECORD_BUF[0], sizeof (uint32_t) * LENGTH); \
3479 } \
3480 } \
3481 while (0)
3482
3483 #define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
3484 do \
3485 { \
3486 unsigned int mem_len = LENGTH; \
3487 if (mem_len) \
3488 { \
3489 MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
3490 memcpy (&MEMS->len, &RECORD_BUF[0], \
3491 sizeof (struct aarch64_mem_r) * LENGTH); \
3492 } \
3493 } \
3494 while (0)
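
/* Both macros follow the pattern used by every record handler below:
   the handler fills a small local record_buf[] (register numbers) or
   record_buf_mem[] (length/address pairs) array, bumps the matching
   count in the insn_decode_record, and then REG_ALLOC / MEM_ALLOC
   heap-allocate and copy the final list.  */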
3495
3496 /* AArch64 record/replay structures and enumerations. */
3497
3498 struct aarch64_mem_r
3499 {
3500 uint64_t len; /* Record length. */
3501 uint64_t addr; /* Memory address. */
3502 };
3503
3504 enum aarch64_record_result
3505 {
3506 AARCH64_RECORD_SUCCESS,
3507 AARCH64_RECORD_UNSUPPORTED,
3508 AARCH64_RECORD_UNKNOWN
3509 };
3510
3511 typedef struct insn_decode_record_t
3512 {
3513 struct gdbarch *gdbarch;
3514 struct regcache *regcache;
3515 CORE_ADDR this_addr; /* Address of insn to be recorded. */
3516 uint32_t aarch64_insn; /* Insn to be recorded. */
3517 uint32_t mem_rec_count; /* Count of memory records. */
3518 uint32_t reg_rec_count; /* Count of register records. */
3519 uint32_t *aarch64_regs; /* Registers to be recorded. */
3520 struct aarch64_mem_r *aarch64_mems; /* Memory locations to be recorded. */
3521 } insn_decode_record;
3522
3523 /* Record handler for data processing - register instructions. */
3524
3525 static unsigned int
3526 aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
3527 {
3528 uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
3529 uint32_t record_buf[4];
3530
3531 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3532 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3533 insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);
3534
3535 if (!bit (aarch64_insn_r->aarch64_insn, 28))
3536 {
3537 uint8_t setflags;
3538
3539 /* Logical (shifted register). */
3540 if (insn_bits24_27 == 0x0a)
3541 setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
3542 /* Add/subtract. */
3543 else if (insn_bits24_27 == 0x0b)
3544 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3545 else
3546 return AARCH64_RECORD_UNKNOWN;
3547
3548 record_buf[0] = reg_rd;
3549 aarch64_insn_r->reg_rec_count = 1;
3550 if (setflags)
3551 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3552 }
3553 else
3554 {
3555 if (insn_bits24_27 == 0x0b)
3556 {
3557 /* Data-processing (3 source). */
3558 record_buf[0] = reg_rd;
3559 aarch64_insn_r->reg_rec_count = 1;
3560 }
3561 else if (insn_bits24_27 == 0x0a)
3562 {
3563 if (insn_bits21_23 == 0x00)
3564 {
3565 /* Add/subtract (with carry). */
3566 record_buf[0] = reg_rd;
3567 aarch64_insn_r->reg_rec_count = 1;
3568 if (bit (aarch64_insn_r->aarch64_insn, 29))
3569 {
3570 record_buf[1] = AARCH64_CPSR_REGNUM;
3571 aarch64_insn_r->reg_rec_count = 2;
3572 }
3573 }
3574 else if (insn_bits21_23 == 0x02)
3575 {
3576 /* Conditional compare (register) and conditional compare
3577 (immediate) instructions. */
3578 record_buf[0] = AARCH64_CPSR_REGNUM;
3579 aarch64_insn_r->reg_rec_count = 1;
3580 }
3581 else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
3582 {
3583 /* Conditional select. */
3584 /* Data-processing (2 source). */
3585 /* Data-processing (1 source). */
3586 record_buf[0] = reg_rd;
3587 aarch64_insn_r->reg_rec_count = 1;
3588 }
3589 else
3590 return AARCH64_RECORD_UNKNOWN;
3591 }
3592 }
3593
3594 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3595 record_buf);
3596 return AARCH64_RECORD_SUCCESS;
3597 }
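
/* Illustrative decode: ADDS x0, x1, x2 has bit 28 clear, bits 24-27
   equal to 0x0b (add/subtract) and bit 29 (the S flag) set, so the
   handler above records both X0 (the destination) and the CPSR.  */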
3598
3599 /* Record handler for data processing - immediate instructions. */
3600
3601 static unsigned int
3602 aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
3603 {
3604 uint8_t reg_rd, insn_bit23, insn_bits24_27, setflags;
3605 uint32_t record_buf[4];
3606
3607 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3608 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3609 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3610
3611 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
3612 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
3613 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
3614 {
3615 record_buf[0] = reg_rd;
3616 aarch64_insn_r->reg_rec_count = 1;
3617 }
3618 else if (insn_bits24_27 == 0x01)
3619 {
3620 /* Add/Subtract (immediate). */
3621 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3622 record_buf[0] = reg_rd;
3623 aarch64_insn_r->reg_rec_count = 1;
3624 if (setflags)
3625 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3626 }
3627 else if (insn_bits24_27 == 0x02 && !insn_bit23)
3628 {
3629 /* Logical (immediate). */
3630 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
3631 record_buf[0] = reg_rd;
3632 aarch64_insn_r->reg_rec_count = 1;
3633 if (setflags)
3634 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3635 }
3636 else
3637 return AARCH64_RECORD_UNKNOWN;
3638
3639 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3640 record_buf);
3641 return AARCH64_RECORD_SUCCESS;
3642 }

/* Record handler for branch, exception generation and system
   instructions.  */

static unsigned int
aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
  uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
  uint32_t record_buf[4];

  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
  insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);

  if (insn_bits28_31 == 0x0d)
    {
      /* Exception generation instructions.  */
      if (insn_bits24_27 == 0x04)
        {
          if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
              && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
              && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
            {
              ULONGEST svc_number;

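              /* This is an SVC instruction: on GNU/Linux the supervisor
                 call number is passed in x8 (raw register 8), so fetch
                 it and hand it to the OS-specific syscall record
                 hook.  */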
              regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
                                          &svc_number);
              return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
                                                   svc_number);
            }
          else
            return AARCH64_RECORD_UNSUPPORTED;
        }
      /* System instructions.  */
      else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
        {
          uint32_t reg_rt, reg_crn;

          reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
          reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);

          /* Record Rt in case of SYSL and MRS instructions.  */
          if (bit (aarch64_insn_r->aarch64_insn, 21))
            {
              record_buf[0] = reg_rt;
              aarch64_insn_r->reg_rec_count = 1;
            }
          /* Record CPSR for HINT and MSR (immediate) instructions.  */
          else if (reg_crn == 0x02 || reg_crn == 0x04)
            {
              record_buf[0] = AARCH64_CPSR_REGNUM;
              aarch64_insn_r->reg_rec_count = 1;
            }
        }
      /* Unconditional branch (register).  */
      else if ((insn_bits24_27 & 0x0e) == 0x06)
        {
          record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
          if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
            record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
        }
      else
        return AARCH64_RECORD_UNKNOWN;
    }
  /* Unconditional branch (immediate).  */
  else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
    {
      record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
      if (bit (aarch64_insn_r->aarch64_insn, 31))
        record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
    }
  else
    /* Compare & branch (immediate), Test & branch (immediate) and
       Conditional branch (immediate).  */
    record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;

  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
             record_buf);
  return AARCH64_RECORD_SUCCESS;
}

/* Record handler for advanced SIMD load and store instructions.  */

static unsigned int
aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
{
  CORE_ADDR address;
  uint64_t addr_offset = 0;
  uint32_t record_buf[24];
  uint64_t record_buf_mem[24];
  uint32_t reg_rn, reg_rt;
  uint32_t reg_index = 0, mem_index = 0;
  uint8_t opcode_bits, size_bits;

  reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
  size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
  opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);

  if (record_debug)
    debug_printf ("Process record: Advanced SIMD load/store\n");

  /* Load/store single structure.  */
  if (bit (aarch64_insn_r->aarch64_insn, 24))
    {
      uint8_t sindex, scale, selem, esize, replicate = 0;

      scale = opcode_bits >> 2;
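      /* Number of structure elements per access: bit 13 of the opcode
         field supplies the high bit and the R bit (bit 21) the low bit,
         giving a selem of 1 to 4.  */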
      selem = ((opcode_bits & 0x02)
               | bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
      switch (scale)
        {
        case 1:
          if (size_bits & 0x01)
            return AARCH64_RECORD_UNKNOWN;
          break;
        case 2:
          if ((size_bits >> 1) & 0x01)
            return AARCH64_RECORD_UNKNOWN;
          if (size_bits & 0x01)
            {
              if (!((opcode_bits >> 1) & 0x01))
                scale = 3;
              else
                return AARCH64_RECORD_UNKNOWN;
            }
          break;
        case 3:
          if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
            {
              scale = size_bits;
              replicate = 1;
              break;
            }
          else
            return AARCH64_RECORD_UNKNOWN;
        default:
          break;
        }
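      /* Element size in bits; each element transferred below therefore
         occupies esize / 8 bytes.  */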
      esize = 8 << scale;
      if (replicate)
        for (sindex = 0; sindex < selem; sindex++)
          {
            record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
            reg_rt = (reg_rt + 1) % 32;
          }
      else
        {
          for (sindex = 0; sindex < selem; sindex++)
            {
              if (bit (aarch64_insn_r->aarch64_insn, 22))
                record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
              else
                {
                  record_buf_mem[mem_index++] = esize / 8;
                  record_buf_mem[mem_index++] = address + addr_offset;
                }
              addr_offset = addr_offset + (esize / 8);
              reg_rt = (reg_rt + 1) % 32;
            }
        }
    }
  /* Load/store multiple structure.  */
  else
    {
      uint8_t selem, esize, rpt, elements;
      uint8_t eindex, rindex;

      esize = 8 << size_bits;
      if (bit (aarch64_insn_r->aarch64_insn, 30))
        elements = 128 / esize;
      else
        elements = 64 / esize;

      switch (opcode_bits)
        {
        /* LD/ST4 (4 Registers).  */
        case 0:
          rpt = 1;
          selem = 4;
          break;
        /* LD/ST1 (4 Registers).  */
        case 2:
          rpt = 4;
          selem = 1;
          break;
        /* LD/ST3 (3 Registers).  */
        case 4:
          rpt = 1;
          selem = 3;
          break;
        /* LD/ST1 (3 Registers).  */
        case 6:
          rpt = 3;
          selem = 1;
          break;
        /* LD/ST1 (1 Register).  */
        case 7:
          rpt = 1;
          selem = 1;
          break;
        /* LD/ST2 (2 Registers).  */
        case 8:
          rpt = 1;
          selem = 2;
          break;
        /* LD/ST1 (2 Registers).  */
        case 10:
          rpt = 2;
          selem = 1;
          break;
        default:
          return AARCH64_RECORD_UNSUPPORTED;
          break;
        }
      for (rindex = 0; rindex < rpt; rindex++)
        for (eindex = 0; eindex < elements; eindex++)
          {
            uint8_t reg_tt, sindex;
            reg_tt = (reg_rt + rindex) % 32;
            for (sindex = 0; sindex < selem; sindex++)
              {
                if (bit (aarch64_insn_r->aarch64_insn, 22))
                  record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
                else
                  {
                    record_buf_mem[mem_index++] = esize / 8;
                    record_buf_mem[mem_index++] = address + addr_offset;
                  }
                addr_offset = addr_offset + (esize / 8);
                reg_tt = (reg_tt + 1) % 32;
              }
          }
    }

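  /* The post-indexed variants (bit 23 set) write back to the base
     register, so record Rn as well.  */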
  if (bit (aarch64_insn_r->aarch64_insn, 23))
    record_buf[reg_index++] = reg_rn;

  aarch64_insn_r->reg_rec_count = reg_index;
  aarch64_insn_r->mem_rec_count = mem_index / 2;
  MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
             record_buf_mem);
  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
             record_buf);
  return AARCH64_RECORD_SUCCESS;
}

/* Record handler for load and store instructions.  */

static unsigned int
aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
{
  uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
  uint8_t insn_bit23, insn_bit21;
  uint8_t opc, size_bits, ld_flag, vector_flag;
  uint32_t reg_rn, reg_rt, reg_rt2;
  uint64_t datasize, offset;
  uint32_t record_buf[8];
  uint64_t record_buf_mem[8];
  CORE_ADDR address;

  insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
  insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
  insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
  ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
  vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
  reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
  reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
  size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);

  /* Load/store exclusive.  */
  if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
    {
      if (record_debug)
        debug_printf ("Process record: load/store exclusive\n");

      if (ld_flag)
        {
          record_buf[0] = reg_rt;
          aarch64_insn_r->reg_rec_count = 1;
          if (insn_bit21)
            {
              record_buf[1] = reg_rt2;
              aarch64_insn_r->reg_rec_count = 2;
            }
        }
      else
        {
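          /* Bit 21 selects the pair forms (e.g. STXP/STLXP), which
             transfer two registers' worth of data.  */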
          if (insn_bit21)
            datasize = (8 << size_bits) * 2;
          else
            datasize = (8 << size_bits);
          regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
                                      &address);
          record_buf_mem[0] = datasize / 8;
          record_buf_mem[1] = address;
          aarch64_insn_r->mem_rec_count = 1;
          if (!insn_bit23)
            {
              /* Save register rs.  */
              record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
              aarch64_insn_r->reg_rec_count = 1;
            }
        }
    }
  /* Load register (literal) instructions decoding.  */
  else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
    {
      if (record_debug)
        debug_printf ("Process record: load register (literal)\n");
      if (vector_flag)
        record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
      else
        record_buf[0] = reg_rt;
      aarch64_insn_r->reg_rec_count = 1;
    }
  /* All types of load/store pair instructions decoding.  */
  else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
    {
      if (record_debug)
        debug_printf ("Process record: load/store pair\n");

      if (ld_flag)
        {
          if (vector_flag)
            {
              record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
              record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
            }
          else
            {
              record_buf[0] = reg_rt;
              record_buf[1] = reg_rt2;
            }
          aarch64_insn_r->reg_rec_count = 2;
        }
      else
        {
          uint16_t imm7_off;

          imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
          if (!vector_flag)
            size_bits = size_bits >> 1;
          datasize = 8 << (2 + size_bits);
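          /* imm7 is a signed two's-complement field scaled by the
             access size: compute its magnitude here and apply the sign
             when adjusting the address below.  */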
          offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
          offset = offset << (2 + size_bits);
          regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
                                      &address);
          if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
            {
              if (imm7_off & 0x40)
                address = address - offset;
              else
                address = address + offset;
            }

          record_buf_mem[0] = datasize / 8;
          record_buf_mem[1] = address;
          record_buf_mem[2] = datasize / 8;
          record_buf_mem[3] = address + (datasize / 8);
          aarch64_insn_r->mem_rec_count = 2;
        }
      if (bit (aarch64_insn_r->aarch64_insn, 23))
        record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
    }
  /* Load/store register (unsigned immediate) instructions.  */
  else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
    {
      opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
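      /* opc with bit 1 clear is a plain store or load; otherwise the
         encoding covers prefetch (PRFM) and the sign-extending loads
         handled below.  */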
      if (!(opc >> 1))
        {
          if (opc & 0x01)
            ld_flag = 0x01;
          else
            ld_flag = 0x0;
        }
      else
        {
          if (size_bits == 0x3 && vector_flag == 0x0 && opc == 0x2)
            {
              /* PRFM (immediate).  */
              return AARCH64_RECORD_SUCCESS;
            }
          else if (size_bits == 0x2 && vector_flag == 0x0 && opc == 0x2)
            {
              /* LDRSW (immediate).  */
              ld_flag = 0x1;
            }
          else
            {
              if (opc & 0x01)
                ld_flag = 0x01;
              else
                ld_flag = 0x0;
            }
        }

      if (record_debug)
        {
          debug_printf ("Process record: load/store (unsigned immediate):"
                        " size %x V %d opc %x\n", size_bits, vector_flag,
                        opc);
        }

      if (!ld_flag)
        {
          offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
          datasize = 8 << size_bits;
          regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
                                      &address);
          offset = offset << size_bits;
          address = address + offset;

          record_buf_mem[0] = datasize >> 3;
          record_buf_mem[1] = address;
          aarch64_insn_r->mem_rec_count = 1;
        }
      else
        {
          if (vector_flag)
            record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
          else
            record_buf[0] = reg_rt;
          aarch64_insn_r->reg_rec_count = 1;
        }
    }
  /* Load/store register (register offset) instructions.  */
  else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
           && insn_bits10_11 == 0x02 && insn_bit21)
    {
      if (record_debug)
        debug_printf ("Process record: load/store (register offset)\n");
      opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
      if (!(opc >> 1))
        {
          if (opc & 0x01)
            ld_flag = 0x01;
          else
            ld_flag = 0x0;
        }
      else
        {
          if (size_bits != 0x03)
            ld_flag = 0x01;
          else
            return AARCH64_RECORD_UNKNOWN;
        }

      if (!ld_flag)
        {
          ULONGEST reg_rm_val;

          regcache_raw_read_unsigned (aarch64_insn_r->regcache,
                                      bits (aarch64_insn_r->aarch64_insn,
                                            16, 20),
                                      &reg_rm_val);
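          /* Bit 12 is the S bit: when set, the index register is scaled
             by the access size.  */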
          if (bit (aarch64_insn_r->aarch64_insn, 12))
            offset = reg_rm_val << size_bits;
          else
            offset = reg_rm_val;
          datasize = 8 << size_bits;
          regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
                                      &address);
          address = address + offset;
          record_buf_mem[0] = datasize >> 3;
          record_buf_mem[1] = address;
          aarch64_insn_r->mem_rec_count = 1;
        }
      else
        {
          if (vector_flag)
            record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
          else
            record_buf[0] = reg_rt;
          aarch64_insn_r->reg_rec_count = 1;
        }
    }
  /* Load/store register (immediate and unprivileged) instructions.  */
  else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
           && !insn_bit21)
    {
      if (record_debug)
        {
          debug_printf ("Process record: load/store "
                        "(immediate and unprivileged)\n");
        }
      opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
      if (!(opc >> 1))
        {
          if (opc & 0x01)
            ld_flag = 0x01;
          else
            ld_flag = 0x0;
        }
      else
        {
          if (size_bits != 0x03)
            ld_flag = 0x01;
          else
            return AARCH64_RECORD_UNKNOWN;
        }

      if (!ld_flag)
        {
          uint16_t imm9_off;

          imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
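          /* imm9 is a signed 9-bit offset; as with imm7 above, compute
             the magnitude here and apply the sign below.  These forms
             are not scaled by the access size.  */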
          offset = ((imm9_off & 0x0100)
                    ? (((~imm9_off) & 0x01ff) + 1) : imm9_off);
          datasize = 8 << size_bits;
          regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
                                      &address);
          if (insn_bits10_11 != 0x01)
            {
              if (imm9_off & 0x0100)
                address = address - offset;
              else
                address = address + offset;
            }
          record_buf_mem[0] = datasize >> 3;
          record_buf_mem[1] = address;
          aarch64_insn_r->mem_rec_count = 1;
        }
      else
        {
          if (vector_flag)
            record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
          else
            record_buf[0] = reg_rt;
          aarch64_insn_r->reg_rec_count = 1;
        }
      if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
        record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
    }
  /* Advanced SIMD load/store instructions.  */
  else
    return aarch64_record_asimd_load_store (aarch64_insn_r);

  MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
             record_buf_mem);
  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
             record_buf);
  return AARCH64_RECORD_SUCCESS;
}

/* Record handler for data processing SIMD and floating point
   instructions.  */

static unsigned int
aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
{
  uint8_t insn_bit21, opcode, rmode, reg_rd;
  uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
  uint8_t insn_bits11_14;
  uint32_t record_buf[2];

  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
  insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
  insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
  insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
  opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
  rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
  reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);

  if (record_debug)
    debug_printf ("Process record: data processing SIMD/FP: ");

  if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
    {
      /* Floating point - fixed point conversion instructions.  */
      if (!insn_bit21)
        {
          if (record_debug)
            debug_printf ("FP - fixed point conversion");

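          /* opcode<2:1> == 0 with rmode == 3 selects FCVTZS/FCVTZU to a
             general register; the remaining fixed-point forms
             (SCVTF/UCVTF) write a SIMD/FP register.  */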
          if ((opcode >> 1) == 0x0 && rmode == 0x03)
            record_buf[0] = reg_rd;
          else
            record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
        }
      /* Floating point - conditional compare instructions.  */
      else if (insn_bits10_11 == 0x01)
        {
          if (record_debug)
            debug_printf ("FP - conditional compare");

          record_buf[0] = AARCH64_CPSR_REGNUM;
        }
      /* Floating point - data processing (2-source) and
         conditional select instructions.  */
      else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
        {
          if (record_debug)
            debug_printf ("FP - DP (2-source)");

          record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
        }
      else if (insn_bits10_11 == 0x00)
        {
          /* Floating point - immediate instructions.  */
          if ((insn_bits12_15 & 0x01) == 0x01
              || (insn_bits12_15 & 0x07) == 0x04)
            {
              if (record_debug)
                debug_printf ("FP - immediate");
              record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
            }
          /* Floating point - compare instructions.  */
          else if ((insn_bits12_15 & 0x03) == 0x02)
            {
              if (record_debug)
                debug_printf ("FP - compare");
              record_buf[0] = AARCH64_CPSR_REGNUM;
            }
          /* Floating point - integer conversions instructions.  */
          else if (insn_bits12_15 == 0x00)
            {
              /* Convert float to integer instruction.  */
              if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
                {
                  if (record_debug)
                    debug_printf ("float to int conversion");

                  record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
                }
              /* Convert integer to float instruction.  */
              else if ((opcode >> 1) == 0x01 && !rmode)
                {
                  if (record_debug)
                    debug_printf ("int to float conversion");

                  record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
                }
              /* Move float to integer instruction.  */
              else if ((opcode >> 1) == 0x03)
                {
                  if (record_debug)
                    debug_printf ("move float to int");

                  if (!(opcode & 0x01))
                    record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
                  else
                    record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
                }
              else
                return AARCH64_RECORD_UNKNOWN;
            }
          else
            return AARCH64_RECORD_UNKNOWN;
        }
      else
        return AARCH64_RECORD_UNKNOWN;
    }
  else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
    {
      if (record_debug)
        debug_printf ("SIMD copy");

      /* Advanced SIMD copy instructions.  */
      if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
          && !bit (aarch64_insn_r->aarch64_insn, 15)
          && bit (aarch64_insn_r->aarch64_insn, 10))
        {
          if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
            record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
          else
            record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
        }
      else
        record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
    }
  /* All remaining floating point or advanced SIMD instructions.  */
  else
    {
      if (record_debug)
        debug_printf ("all remain");

      record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
    }

  if (record_debug)
    debug_printf ("\n");

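  /* Every path above that did not bail out records exactly one
     register.  */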
  aarch64_insn_r->reg_rec_count++;
  gdb_assert (aarch64_insn_r->reg_rec_count == 1);
  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
             record_buf);
  return AARCH64_RECORD_SUCCESS;
}

/* Decode the type of the given instruction and invoke the matching
   record handler.  */

static unsigned int
aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
{
  uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;

  ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
  ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
  ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
  ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);

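  /* Bits 25-28 select the top-level encoding group of the A64
     instruction set; dispatch on them.  */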
  /* Data processing - immediate instructions.  */
  if (!ins_bit26 && !ins_bit27 && ins_bit28)
    return aarch64_record_data_proc_imm (aarch64_insn_r);

  /* Branch, exception generation and system instructions.  */
  if (ins_bit26 && !ins_bit27 && ins_bit28)
    return aarch64_record_branch_except_sys (aarch64_insn_r);

  /* Load and store instructions.  */
  if (!ins_bit25 && ins_bit27)
    return aarch64_record_load_store (aarch64_insn_r);

  /* Data processing - register instructions.  */
  if (ins_bit25 && !ins_bit26 && ins_bit27)
    return aarch64_record_data_proc_reg (aarch64_insn_r);

  /* Data processing - SIMD and floating point instructions.  */
  if (ins_bit25 && ins_bit26 && ins_bit27)
    return aarch64_record_data_proc_simd_fp (aarch64_insn_r);

  return AARCH64_RECORD_UNSUPPORTED;
}

/* Cleans up local record registers and memory allocations.  */

static void
deallocate_reg_mem (insn_decode_record *record)
{
  xfree (record->aarch64_regs);
  xfree (record->aarch64_mems);
}

#if GDB_SELF_TEST
namespace selftests {

static void
aarch64_process_record_test (void)
{
  struct gdbarch_info info;
  uint32_t ret;

  gdbarch_info_init (&info);
  info.bfd_arch_info = bfd_scan_arch ("aarch64");

  struct gdbarch *gdbarch = gdbarch_find_by_info (info);
  SELF_CHECK (gdbarch != NULL);

  insn_decode_record aarch64_record;

  memset (&aarch64_record, 0, sizeof (insn_decode_record));
  aarch64_record.regcache = NULL;
  aarch64_record.this_addr = 0;
  aarch64_record.gdbarch = gdbarch;

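  /* PRFM is a memory-hint instruction: recording it must succeed while
     noting no register or memory changes.  */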
  /* 20 00 80 f9	prfm	pldl1keep, [x1] */
  aarch64_record.aarch64_insn = 0xf9800020;
  ret = aarch64_record_decode_insn_handler (&aarch64_record);
  SELF_CHECK (ret == AARCH64_RECORD_SUCCESS);
  SELF_CHECK (aarch64_record.reg_rec_count == 0);
  SELF_CHECK (aarch64_record.mem_rec_count == 0);

  deallocate_reg_mem (&aarch64_record);
}

} // namespace selftests
#endif /* GDB_SELF_TEST */

/* Parse the current instruction and record, in record_arch_list, the
   values of the registers and memory that it will change.  Return -1
   if something goes wrong.  */

int
aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
                        CORE_ADDR insn_addr)
{
  uint32_t rec_no = 0;
  uint8_t insn_size = 4;
  uint32_t ret = 0;
  gdb_byte buf[insn_size];
  insn_decode_record aarch64_record;

  memset (&buf[0], 0, insn_size);
  memset (&aarch64_record, 0, sizeof (insn_decode_record));
  target_read_memory (insn_addr, &buf[0], insn_size);
  aarch64_record.aarch64_insn
    = (uint32_t) extract_unsigned_integer (&buf[0],
                                           insn_size,
                                           gdbarch_byte_order (gdbarch));
  aarch64_record.regcache = regcache;
  aarch64_record.this_addr = insn_addr;
  aarch64_record.gdbarch = gdbarch;

  ret = aarch64_record_decode_insn_handler (&aarch64_record);
  if (ret == AARCH64_RECORD_UNSUPPORTED)
    {
      printf_unfiltered (_("Process record does not support instruction "
                           "0x%0x at address %s.\n"),
                         aarch64_record.aarch64_insn,
                         paddress (gdbarch, insn_addr));
      ret = -1;
    }

  if (ret == 0)
    {
      /* Record registers.  */
      record_full_arch_list_add_reg (aarch64_record.regcache,
                                     AARCH64_PC_REGNUM);
      /* Always record register CPSR.  */
      record_full_arch_list_add_reg (aarch64_record.regcache,
                                     AARCH64_CPSR_REGNUM);
      if (aarch64_record.aarch64_regs)
        for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
          if (record_full_arch_list_add_reg (aarch64_record.regcache,
                                             aarch64_record.aarch64_regs[rec_no]))
            ret = -1;

      /* Record memories.  */
      if (aarch64_record.aarch64_mems)
        for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
          if (record_full_arch_list_add_mem
              ((CORE_ADDR) aarch64_record.aarch64_mems[rec_no].addr,
               aarch64_record.aarch64_mems[rec_no].len))
            ret = -1;

      if (record_full_arch_list_add_end ())
        ret = -1;
    }

  deallocate_reg_mem (&aarch64_record);
  return ret;
}