/* Common target dependent code for GDB on AArch64 systems.

   Copyright (C) 2009-2020 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"

#include "frame.h"
#include "gdbcmd.h"
#include "gdbcore.h"
#include "dis-asm.h"
#include "regcache.h"
#include "reggroups.h"
#include "value.h"
#include "arch-utils.h"
#include "osabi.h"
#include "frame-unwind.h"
#include "frame-base.h"
#include "trad-frame.h"
#include "objfiles.h"
#include "dwarf2.h"
#include "dwarf2-frame.h"
#include "gdbtypes.h"
#include "prologue-value.h"
#include "target-descriptions.h"
#include "user-regs.h"
#include "ax-gdb.h"
#include "gdbsupport/selftest.h"

#include "aarch64-tdep.h"
#include "aarch64-ravenscar-thread.h"

#include "record.h"
#include "record-full.h"
#include "arch/aarch64-insn.h"
#include "gdbarch.h"

#include "opcode/aarch64.h"
#include <algorithm>

#define submask(x) ((1L << ((x) + 1)) - 1)
#define bit(obj,st) (((obj) >> (st)) & 1)
#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
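
/* For example, bits (insn, 0, 4) extracts the five-bit field occupying
   bits 0..4 of INSN inclusive (a typical Rd slot in an A64 encoding),
   and bit (insn, 31) tests the top bit.  The field names here are
   illustrative only.  */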

/* A Homogeneous Floating-Point or Short-Vector Aggregate may have at most
   four members.  */
#define HA_MAX_NUM_FLDS 4

/* All possible aarch64 target descriptors.  */
struct target_desc *tdesc_aarch64_list[AARCH64_MAX_SVE_VQ + 1][2/*pauth*/];

/* The standard register names, and all the valid aliases for them.  */
static const struct
{
  const char *const name;
  int regnum;
} aarch64_register_aliases[] =
{
  /* 64-bit register names.  */
  {"fp", AARCH64_FP_REGNUM},
  {"lr", AARCH64_LR_REGNUM},
  {"sp", AARCH64_SP_REGNUM},

  /* 32-bit register names.  */
  {"w0", AARCH64_X0_REGNUM + 0},
  {"w1", AARCH64_X0_REGNUM + 1},
  {"w2", AARCH64_X0_REGNUM + 2},
  {"w3", AARCH64_X0_REGNUM + 3},
  {"w4", AARCH64_X0_REGNUM + 4},
  {"w5", AARCH64_X0_REGNUM + 5},
  {"w6", AARCH64_X0_REGNUM + 6},
  {"w7", AARCH64_X0_REGNUM + 7},
  {"w8", AARCH64_X0_REGNUM + 8},
  {"w9", AARCH64_X0_REGNUM + 9},
  {"w10", AARCH64_X0_REGNUM + 10},
  {"w11", AARCH64_X0_REGNUM + 11},
  {"w12", AARCH64_X0_REGNUM + 12},
  {"w13", AARCH64_X0_REGNUM + 13},
  {"w14", AARCH64_X0_REGNUM + 14},
  {"w15", AARCH64_X0_REGNUM + 15},
  {"w16", AARCH64_X0_REGNUM + 16},
  {"w17", AARCH64_X0_REGNUM + 17},
  {"w18", AARCH64_X0_REGNUM + 18},
  {"w19", AARCH64_X0_REGNUM + 19},
  {"w20", AARCH64_X0_REGNUM + 20},
  {"w21", AARCH64_X0_REGNUM + 21},
  {"w22", AARCH64_X0_REGNUM + 22},
  {"w23", AARCH64_X0_REGNUM + 23},
  {"w24", AARCH64_X0_REGNUM + 24},
  {"w25", AARCH64_X0_REGNUM + 25},
  {"w26", AARCH64_X0_REGNUM + 26},
  {"w27", AARCH64_X0_REGNUM + 27},
  {"w28", AARCH64_X0_REGNUM + 28},
  {"w29", AARCH64_X0_REGNUM + 29},
  {"w30", AARCH64_X0_REGNUM + 30},

  /* specials */
  {"ip0", AARCH64_X0_REGNUM + 16},
  {"ip1", AARCH64_X0_REGNUM + 17}
};

/* The required core 'R' registers.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM!  */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  "pc", "cpsr"
};

/* The FP/SIMD 'V' registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM!  */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  "fpsr",
  "fpcr"
};

/* The SVE 'Z' and 'P' registers.  */
static const char *const aarch64_sve_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_SVE_Z0_REGNUM!  */
  "z0", "z1", "z2", "z3",
  "z4", "z5", "z6", "z7",
  "z8", "z9", "z10", "z11",
  "z12", "z13", "z14", "z15",
  "z16", "z17", "z18", "z19",
  "z20", "z21", "z22", "z23",
  "z24", "z25", "z26", "z27",
  "z28", "z29", "z30", "z31",
  "fpsr", "fpcr",
  "p0", "p1", "p2", "p3",
  "p4", "p5", "p6", "p7",
  "p8", "p9", "p10", "p11",
  "p12", "p13", "p14", "p15",
  "ffr", "vg"
};

static const char *const aarch64_pauth_register_names[] =
{
  /* Authentication mask for data pointer.  */
  "pauth_dmask",
  /* Authentication mask for code pointer.  */
  "pauth_cmask"
};

/* AArch64 prologue cache structure.  */
struct aarch64_prologue_cache
{
  /* The program counter at the start of the function.  It is used to
     identify this frame as a prologue frame.  */
  CORE_ADDR func;

  /* The program counter at the time this frame was created; i.e. where
     this function was called from.  It is used to identify this frame as a
     stub frame.  */
  CORE_ADDR prev_pc;

  /* The stack pointer at the time this frame was created; i.e. the
     caller's stack pointer when this function was called.  It is used
     to identify this frame.  */
  CORE_ADDR prev_sp;

  /* Is the target available to read from?  */
  int available_p;

  /* The frame base for this frame is just prev_sp - frame size.
     FRAMESIZE is the distance from the frame pointer to the
     initial stack pointer.  */
  int framesize;

  /* The register used to hold the frame pointer for this frame.  */
  int framereg;

  /* Saved register offsets.  */
  struct trad_frame_saved_reg *saved_regs;
};

static void
show_aarch64_debug (struct ui_file *file, int from_tty,
                    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
}

namespace {

/* Abstract instruction reader.  */

class abstract_instruction_reader
{
public:
  /* Read in one instruction.  */
  virtual ULONGEST read (CORE_ADDR memaddr, int len,
                         enum bfd_endian byte_order) = 0;
};

/* Instruction reader from real target.  */

class instruction_reader : public abstract_instruction_reader
{
public:
  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    override
  {
    return read_code_unsigned_integer (memaddr, len, byte_order);
  }
};

} // namespace

/* If address signing is enabled, mask off the signature bits from the link
   register, which is passed by value in ADDR, using the register values in
   THIS_FRAME.  */
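
/* For illustration only: if the code mask register held, say,
   0xff00000000000000, then a signed LR value of 0xaa000000004005c4
   would unmask to 0x00000000004005c4.  That mask value is purely
   hypothetical; the real mask depends on the target's virtual address
   configuration.  */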

static CORE_ADDR
aarch64_frame_unmask_lr (struct gdbarch_tdep *tdep,
                         struct frame_info *this_frame, CORE_ADDR addr)
{
  if (tdep->has_pauth ()
      && frame_unwind_register_unsigned (this_frame,
                                         tdep->pauth_ra_state_regnum))
    {
      int cmask_num = AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base);
      CORE_ADDR cmask = frame_unwind_register_unsigned (this_frame, cmask_num);
      addr = addr & ~cmask;

      /* Record in the frame that the link register required unmasking.  */
      set_frame_previous_pc_masked (this_frame);
    }

  return addr;
}

/* Implement the "get_pc_address_flags" gdbarch method.  */

static std::string
aarch64_get_pc_address_flags (frame_info *frame, CORE_ADDR pc)
{
  if (pc != 0 && get_frame_pc_masked (frame))
    return "PAC";

  return "";
}

/* Analyze a prologue, looking for a recognizable stack frame
   and frame pointer.  Scan until we encounter a store that could
   clobber the stack frame unexpectedly, or an unknown instruction.  */
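
/* A typical frame-setting prologue that this analysis recognizes (taken
   from the self tests further down in this file) looks like:

     stp x29, x30, [sp, #-272]!
     mov x29, sp

   which saves the frame pointer and link register while allocating 272
   bytes of stack, then establishes x29 as the frame pointer.  */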

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
                          CORE_ADDR start, CORE_ADDR limit,
                          struct aarch64_prologue_cache *cache,
                          abstract_instruction_reader& reader)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  int i;
  /* Track X registers and D registers in prologue.  */
  pv_t regs[AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT];

  for (i = 0; i < AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT; i++)
    regs[i] = pv_register (i, 0);
  pv_area stack (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));

  for (; start < limit; start += 4)
    {
      uint32_t insn;
      aarch64_inst inst;

      insn = reader.read (start, 4, byte_order_for_code);

      if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
        break;

      if (inst.opcode->iclass == addsub_imm
          && (inst.opcode->op == OP_ADD
              || strcmp ("sub", inst.opcode->name) == 0))
        {
          unsigned rd = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].reg.regno;

          gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM);

          if (inst.opcode->op == OP_ADD)
            {
              regs[rd] = pv_add_constant (regs[rn],
                                          inst.operands[2].imm.value);
            }
          else
            {
              regs[rd] = pv_add_constant (regs[rn],
                                          -inst.operands[2].imm.value);
            }
        }
      else if (inst.opcode->iclass == pcreladdr
               && inst.operands[1].type == AARCH64_OPND_ADDR_ADRP)
        {
          gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);

          regs[inst.operands[0].reg.regno] = pv_unknown ();
        }
      else if (inst.opcode->iclass == branch_imm)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == condbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == branch_reg)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == compbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->op == OP_MOVZ)
        {
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
          regs[inst.operands[0].reg.regno] = pv_unknown ();
        }
      else if (inst.opcode->iclass == log_shift
               && strcmp (inst.opcode->name, "orr") == 0)
        {
          unsigned rd = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].reg.regno;
          unsigned rm = inst.operands[2].reg.regno;

          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT);

          if (inst.operands[2].shifter.amount == 0
              && rn == AARCH64_SP_REGNUM)
            regs[rd] = regs[rm];
          else
            {
              if (aarch64_debug)
                {
                  debug_printf ("aarch64: prologue analysis gave up "
                                "addr=%s opcode=0x%x (orr x register)\n",
                                core_addr_to_string_nz (start), insn);
                }
              break;
            }
        }
      else if (inst.opcode->op == OP_STUR)
        {
          unsigned rt = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].addr.base_regno;
          int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);

          gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
          gdb_assert (!inst.operands[1].addr.offset.is_reg);

          stack.store
            (pv_add_constant (regs[rn], inst.operands[1].addr.offset.imm),
             size, regs[rt]);
        }
      else if ((inst.opcode->iclass == ldstpair_off
                || (inst.opcode->iclass == ldstpair_indexed
                    && inst.operands[2].addr.preind))
               && strcmp ("stp", inst.opcode->name) == 0)
        {
          /* STP with addressing mode Pre-indexed and Base register.  */
          unsigned rt1;
          unsigned rt2;
          unsigned rn = inst.operands[2].addr.base_regno;
          int32_t imm = inst.operands[2].addr.offset.imm;
          int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);

          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
                      || inst.operands[0].type == AARCH64_OPND_Ft);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2
                      || inst.operands[1].type == AARCH64_OPND_Ft2);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7);
          gdb_assert (!inst.operands[2].addr.offset.is_reg);

          /* If recording this store would invalidate the store area
             (perhaps because rn is not known) then we should abandon
             further prologue analysis.  */
          if (stack.store_would_trash (pv_add_constant (regs[rn], imm)))
            break;

          if (stack.store_would_trash (pv_add_constant (regs[rn], imm + 8)))
            break;

          rt1 = inst.operands[0].reg.regno;
          rt2 = inst.operands[1].reg.regno;
          if (inst.operands[0].type == AARCH64_OPND_Ft)
            {
              rt1 += AARCH64_X_REGISTER_COUNT;
              rt2 += AARCH64_X_REGISTER_COUNT;
            }

          stack.store (pv_add_constant (regs[rn], imm), size, regs[rt1]);
          stack.store (pv_add_constant (regs[rn], imm + size), size, regs[rt2]);

          if (inst.operands[2].addr.writeback)
            regs[rn] = pv_add_constant (regs[rn], imm);
        }
      else if ((inst.opcode->iclass == ldst_imm9 /* Signed immediate.  */
                || (inst.opcode->iclass == ldst_pos /* Unsigned immediate.  */
                    && (inst.opcode->op == OP_STR_POS
                        || inst.opcode->op == OP_STRF_POS)))
               && inst.operands[1].addr.base_regno == AARCH64_SP_REGNUM
               && strcmp ("str", inst.opcode->name) == 0)
        {
          /* STR (immediate) */
          unsigned int rt = inst.operands[0].reg.regno;
          int32_t imm = inst.operands[1].addr.offset.imm;
          unsigned int rn = inst.operands[1].addr.base_regno;
          int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
                      || inst.operands[0].type == AARCH64_OPND_Ft);

          if (inst.operands[0].type == AARCH64_OPND_Ft)
            rt += AARCH64_X_REGISTER_COUNT;

          stack.store (pv_add_constant (regs[rn], imm), size, regs[rt]);
          if (inst.operands[1].addr.writeback)
            regs[rn] = pv_add_constant (regs[rn], imm);
        }
      else if (inst.opcode->iclass == testbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == ic_system)
        {
          struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
          int ra_state_val = 0;

          if (insn == 0xd503233f /* paciasp.  */
              || insn == 0xd503237f /* pacibsp.  */)
            {
              /* Return addresses are mangled.  */
              ra_state_val = 1;
            }
          else if (insn == 0xd50323bf /* autiasp.  */
                   || insn == 0xd50323ff /* autibsp.  */)
            {
              /* Return addresses are not mangled.  */
              ra_state_val = 0;
            }
          else
            {
              if (aarch64_debug)
                debug_printf ("aarch64: prologue analysis gave up addr=%s"
                              " opcode=0x%x (iclass)\n",
                              core_addr_to_string_nz (start), insn);
              break;
            }

          if (tdep->has_pauth () && cache != nullptr)
            trad_frame_set_value (cache->saved_regs,
                                  tdep->pauth_ra_state_regnum,
                                  ra_state_val);
        }
      else
        {
          if (aarch64_debug)
            {
              debug_printf ("aarch64: prologue analysis gave up addr=%s"
                            " opcode=0x%x\n",
                            core_addr_to_string_nz (start), insn);
            }
          break;
        }
    }

  if (cache == NULL)
    return start;

  if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Frame pointer is fp.  Frame size is constant.  */
      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = -regs[AARCH64_FP_REGNUM].k;
    }
  else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Try the stack pointer.  */
      cache->framesize = -regs[AARCH64_SP_REGNUM].k;
      cache->framereg = AARCH64_SP_REGNUM;
    }
  else
    {
      /* We're just out of luck.  We don't know where the frame is.  */
      cache->framereg = -1;
      cache->framesize = 0;
    }

  for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
    {
      CORE_ADDR offset;

      if (stack.find_reg (gdbarch, i, &offset))
        cache->saved_regs[i].addr = offset;
    }

  for (i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
    {
      int regnum = gdbarch_num_regs (gdbarch);
      CORE_ADDR offset;

      if (stack.find_reg (gdbarch, i + AARCH64_X_REGISTER_COUNT,
                          &offset))
        cache->saved_regs[i + regnum + AARCH64_D0_REGNUM].addr = offset;
    }

  return start;
}

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
                          CORE_ADDR start, CORE_ADDR limit,
                          struct aarch64_prologue_cache *cache)
{
  instruction_reader reader;

  return aarch64_analyze_prologue (gdbarch, start, limit, cache,
                                   reader);
}

#if GDB_SELF_TEST

namespace selftests {

/* Instruction reader from manually cooked instruction sequences.  */

class instruction_reader_test : public abstract_instruction_reader
{
public:
  template<size_t SIZE>
  explicit instruction_reader_test (const uint32_t (&insns)[SIZE])
    : m_insns (insns), m_insns_size (SIZE)
  {}

  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    override
  {
    SELF_CHECK (len == 4);
    SELF_CHECK (memaddr % 4 == 0);
    SELF_CHECK (memaddr / 4 < m_insns_size);

    return m_insns[memaddr / 4];
  }

private:
  const uint32_t *m_insns;
  size_t m_insns_size;
};

static void
aarch64_analyze_prologue_test (void)
{
  struct gdbarch_info info;

  gdbarch_info_init (&info);
  info.bfd_arch_info = bfd_scan_arch ("aarch64");

  struct gdbarch *gdbarch = gdbarch_find_by_info (info);
  SELF_CHECK (gdbarch != NULL);

  struct aarch64_prologue_cache cache;
  cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);

  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  /* Test the simple prologue in which frame pointer is used.  */
  {
    static const uint32_t insns[] = {
      0xa9af7bfd, /* stp x29, x30, [sp,#-272]! */
      0x910003fd, /* mov x29, sp */
      0x97ffffe6, /* bl 0x400580 */
    };
    instruction_reader_test reader (insns);

    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
    SELF_CHECK (end == 4 * 2);

    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 272);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
        if (i == AARCH64_FP_REGNUM)
          SELF_CHECK (cache.saved_regs[i].addr == -272);
        else if (i == AARCH64_LR_REGNUM)
          SELF_CHECK (cache.saved_regs[i].addr == -264);
        else
          SELF_CHECK (cache.saved_regs[i].addr == -1);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
        int regnum = gdbarch_num_regs (gdbarch);

        SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
                    == -1);
      }
  }

  /* Test a prologue in which STR is used and frame pointer is not
     used.  */
  {
    static const uint32_t insns[] = {
      0xf81d0ff3, /* str x19, [sp, #-48]! */
      0xb9002fe0, /* str w0, [sp, #44] */
      0xf90013e1, /* str x1, [sp, #32] */
      0xfd000fe0, /* str d0, [sp, #24] */
      0xaa0203f3, /* mov x19, x2 */
      0xf94013e0, /* ldr x0, [sp, #32] */
    };
    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    SELF_CHECK (end == 4 * 5);

    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 48);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
        if (i == 1)
          SELF_CHECK (cache.saved_regs[i].addr == -16);
        else if (i == 19)
          SELF_CHECK (cache.saved_regs[i].addr == -48);
        else
          SELF_CHECK (cache.saved_regs[i].addr == -1);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
        int regnum = gdbarch_num_regs (gdbarch);

        if (i == 0)
          SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
                      == -24);
        else
          SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
                      == -1);
      }
  }

  /* Test a prologue in which there is a return address signing
     instruction.  */
  if (tdep->has_pauth ())
    {
      static const uint32_t insns[] = {
        0xd503233f, /* paciasp */
        0xa9bd7bfd, /* stp x29, x30, [sp, #-48]! */
        0x910003fd, /* mov x29, sp */
        0xf801c3f3, /* str x19, [sp, #28] */
        0xb9401fa0, /* ldr x19, [x29, #28] */
      };
      instruction_reader_test reader (insns);

      trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
      CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache,
                                                reader);

      SELF_CHECK (end == 4 * 4);
      SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
      SELF_CHECK (cache.framesize == 48);

      for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
        {
          if (i == 19)
            SELF_CHECK (cache.saved_regs[i].addr == -20);
          else if (i == AARCH64_FP_REGNUM)
            SELF_CHECK (cache.saved_regs[i].addr == -48);
          else if (i == AARCH64_LR_REGNUM)
            SELF_CHECK (cache.saved_regs[i].addr == -40);
          else
            SELF_CHECK (cache.saved_regs[i].addr == -1);
        }

      if (tdep->has_pauth ())
        {
          SELF_CHECK (trad_frame_value_p (cache.saved_regs,
                                          tdep->pauth_ra_state_regnum));
          SELF_CHECK (cache.saved_regs[tdep->pauth_ra_state_regnum].addr == 1);
        }
    }
}
} // namespace selftests
#endif /* GDB_SELF_TEST */

/* Implement the "skip_prologue" gdbarch method.  */

static CORE_ADDR
aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  CORE_ADDR func_addr, limit_pc;

  /* See if we can determine the end of the prologue via the symbol
     table.  If so, then return either PC, or the PC after the
     prologue, whichever is greater.  */
  if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
    {
      CORE_ADDR post_prologue_pc
        = skip_prologue_using_sal (gdbarch, func_addr);

      if (post_prologue_pc != 0)
        return std::max (pc, post_prologue_pc);
    }

  /* Can't determine prologue from the symbol table, need to examine
     instructions.  */

  /* Find an upper limit on the function prologue using the debug
     information.  If the debug information could not be used to
     provide that bound, then use an arbitrary large number as the
     upper bound.  */
  limit_pc = skip_prologue_using_sal (gdbarch, pc);
  if (limit_pc == 0)
    limit_pc = pc + 128; /* Magic.  */

  /* Try disassembling prologue.  */
  return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
}

/* Scan the function prologue for THIS_FRAME and populate the prologue
   cache CACHE.  */

static void
aarch64_scan_prologue (struct frame_info *this_frame,
                       struct aarch64_prologue_cache *cache)
{
  CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
  CORE_ADDR prologue_start;
  CORE_ADDR prologue_end;
  CORE_ADDR prev_pc = get_frame_pc (this_frame);
  struct gdbarch *gdbarch = get_frame_arch (this_frame);

  cache->prev_pc = prev_pc;

  /* Assume we do not find a frame.  */
  cache->framereg = -1;
  cache->framesize = 0;

  if (find_pc_partial_function (block_addr, NULL, &prologue_start,
                                &prologue_end))
    {
      struct symtab_and_line sal = find_pc_line (prologue_start, 0);

      if (sal.line == 0)
        {
          /* No line info so use the current PC.  */
          prologue_end = prev_pc;
        }
      else if (sal.end < prologue_end)
        {
          /* The next line begins after the function end.  */
          prologue_end = sal.end;
        }

      prologue_end = std::min (prologue_end, prev_pc);
      aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
    }
  else
    {
      CORE_ADDR frame_loc;

      frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
      if (frame_loc == 0)
        return;

      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = 16;
      cache->saved_regs[29].addr = 0;
      cache->saved_regs[30].addr = 8;
    }
}

/* Fill in *CACHE with information about the prologue of *THIS_FRAME.  This
   function may throw an exception if the inferior's registers or memory is
   not available.  */

static void
aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
                               struct aarch64_prologue_cache *cache)
{
  CORE_ADDR unwound_fp;
  int reg;

  aarch64_scan_prologue (this_frame, cache);

  if (cache->framereg == -1)
    return;

  unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
  if (unwound_fp == 0)
    return;

  cache->prev_sp = unwound_fp + cache->framesize;

  /* Calculate actual addresses of saved registers using offsets
     determined by aarch64_analyze_prologue.  */
  for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
    if (trad_frame_addr_p (cache->saved_regs, reg))
      cache->saved_regs[reg].addr += cache->prev_sp;

  cache->func = get_frame_func (this_frame);

  cache->available_p = 1;
}

/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  try
    {
      aarch64_make_prologue_cache_1 (this_frame, cache);
    }
  catch (const gdb_exception_error &ex)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
        throw;
    }

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
                                           void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  /* Halt the backtrace at "_start".  */
  if (cache->prev_pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
    return UNWIND_OUTERMOST;

  /* We've hit a wall, stop.  */
  if (cache->prev_sp == 0)
    return UNWIND_OUTERMOST;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a normal frame is the current function's starting
   PC and the caller's SP when we were called.  */

static void
aarch64_prologue_this_id (struct frame_info *this_frame,
                          void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    *this_id = frame_id_build_unavailable_stack (cache->func);
  else
    *this_id = frame_id_build (cache->prev_sp, cache->func);
}

/* Implement the "prev_register" frame_unwind method.  */

static struct value *
aarch64_prologue_prev_register (struct frame_info *this_frame,
                                void **this_cache, int prev_regnum)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  /* If we are asked to unwind the PC, then we need to return the LR
     instead.  The prologue may save PC, but it will point into this
     frame's prologue, not the next frame's resume location.  */
  if (prev_regnum == AARCH64_PC_REGNUM)
    {
      CORE_ADDR lr;
      struct gdbarch *gdbarch = get_frame_arch (this_frame);
      struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);

      if (tdep->has_pauth ()
          && trad_frame_value_p (cache->saved_regs,
                                 tdep->pauth_ra_state_regnum))
        lr = aarch64_frame_unmask_lr (tdep, this_frame, lr);

      return frame_unwind_got_constant (this_frame, prev_regnum, lr);
    }

  /* SP is generally not saved to the stack, but this frame is
     identified by the next frame's stack pointer at the time of the
     call.  The value was already reconstructed into PREV_SP.  */
  /*
         +----------+  ^
         | saved lr |  |
      +->| saved fp |--+
      |  |          |
      |  |          |     <- Previous SP
      |  +----------+
      |  | saved lr |
      +--| saved fp |<- FP
         |          |
         |          |<- SP
         +----------+  */
  if (prev_regnum == AARCH64_SP_REGNUM)
    return frame_unwind_got_constant (this_frame, prev_regnum,
                                      cache->prev_sp);

  return trad_frame_get_prev_register (this_frame, cache->saved_regs,
                                       prev_regnum);
}

/* AArch64 prologue unwinder.  */
struct frame_unwind aarch64_prologue_unwind =
{
  NORMAL_FRAME,
  aarch64_prologue_frame_unwind_stop_reason,
  aarch64_prologue_this_id,
  aarch64_prologue_prev_register,
  NULL,
  default_frame_sniffer
};

/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  try
    {
      cache->prev_sp = get_frame_register_unsigned (this_frame,
                                                    AARCH64_SP_REGNUM);
      cache->prev_pc = get_frame_pc (this_frame);
      cache->available_p = 1;
    }
  catch (const gdb_exception_error &ex)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
        throw;
    }

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame,
                                       void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a stub frame is the current SP and LR.  */

static void
aarch64_stub_this_id (struct frame_info *this_frame,
                      void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (cache->available_p)
    *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
  else
    *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
}

/* Implement the "sniffer" frame_unwind method.  */

static int
aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
                             struct frame_info *this_frame,
                             void **this_prologue_cache)
{
  CORE_ADDR addr_in_block;
  gdb_byte dummy[4];

  addr_in_block = get_frame_address_in_block (this_frame);
  if (in_plt_section (addr_in_block)
      /* We also use the stub unwinder if the target memory is unreadable
         to avoid having the prologue unwinder trying to read it.  */
      || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
    return 1;

  return 0;
}

/* AArch64 stub unwinder.  */
struct frame_unwind aarch64_stub_unwind =
{
  NORMAL_FRAME,
  aarch64_stub_frame_unwind_stop_reason,
  aarch64_stub_this_id,
  aarch64_prologue_prev_register,
  NULL,
  aarch64_stub_unwind_sniffer
};

/* Return the frame base address of *THIS_FRAME.  */

static CORE_ADDR
aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  return cache->prev_sp - cache->framesize;
}

/* AArch64 default frame base information.  */
struct frame_base aarch64_normal_base =
{
  &aarch64_prologue_unwind,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base
};

/* Return the value of the REGNUM register in the previous frame of
   *THIS_FRAME.  */

static struct value *
aarch64_dwarf2_prev_register (struct frame_info *this_frame,
                              void **this_cache, int regnum)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (get_frame_arch (this_frame));
  CORE_ADDR lr;

  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      lr = aarch64_frame_unmask_lr (tdep, this_frame, lr);
      return frame_unwind_got_constant (this_frame, regnum, lr);

    default:
      internal_error (__FILE__, __LINE__,
                      _("Unexpected register %d"), regnum);
    }
}

static const unsigned char op_lit0 = DW_OP_lit0;
static const unsigned char op_lit1 = DW_OP_lit1;

/* Implement the "init_reg" dwarf2_frame_ops method.  */

static void
aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
                               struct dwarf2_frame_state_reg *reg,
                               struct frame_info *this_frame)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      reg->how = DWARF2_FRAME_REG_FN;
      reg->loc.fn = aarch64_dwarf2_prev_register;
      return;

    case AARCH64_SP_REGNUM:
      reg->how = DWARF2_FRAME_REG_CFA;
      return;
    }

  /* Init pauth registers.  */
  if (tdep->has_pauth ())
    {
      if (regnum == tdep->pauth_ra_state_regnum)
        {
          /* Initialize RA_STATE to zero.  */
          reg->how = DWARF2_FRAME_REG_SAVED_VAL_EXP;
          reg->loc.exp.start = &op_lit0;
          reg->loc.exp.len = 1;
          return;
        }
      else if (regnum == AARCH64_PAUTH_DMASK_REGNUM (tdep->pauth_reg_base)
               || regnum == AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base))
        {
          reg->how = DWARF2_FRAME_REG_SAME_VALUE;
          return;
        }
    }
}

/* Implement the execute_dwarf_cfa_vendor_op method.  */
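
/* DW_CFA_AARCH64_negate_ra_state flips whether the return address in
   the current frame is signed.  GDB models the two states with the
   one-byte DWARF expressions DW_OP_lit0 (not signed) and DW_OP_lit1
   (signed) declared above, so toggling just swaps which literal the
   RA_STATE column's expression points at.  */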

static bool
aarch64_execute_dwarf_cfa_vendor_op (struct gdbarch *gdbarch, gdb_byte op,
                                     struct dwarf2_frame_state *fs)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  struct dwarf2_frame_state_reg *ra_state;

  if (op == DW_CFA_AARCH64_negate_ra_state)
    {
      /* On systems without pauth, treat as a nop.  */
      if (!tdep->has_pauth ())
        return true;

      /* Allocate RA_STATE column if it's not allocated yet.  */
      fs->regs.alloc_regs (AARCH64_DWARF_PAUTH_RA_STATE + 1);

      /* Toggle the status of RA_STATE between 0 and 1.  */
      ra_state = &(fs->regs.reg[AARCH64_DWARF_PAUTH_RA_STATE]);
      ra_state->how = DWARF2_FRAME_REG_SAVED_VAL_EXP;

      if (ra_state->loc.exp.start == nullptr
          || ra_state->loc.exp.start == &op_lit0)
        ra_state->loc.exp.start = &op_lit1;
      else
        ra_state->loc.exp.start = &op_lit0;

      ra_state->loc.exp.len = 1;

      return true;
    }

  return false;
}

/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

struct stack_item_t
{
  /* Value to pass on stack.  It can be NULL if this item is for stack
     padding.  */
  const gdb_byte *data;

  /* Size in bytes of value to pass on stack.  */
  int len;
};

/* Implement the gdbarch type alignment method, overrides the generic
   alignment algorithm for anything that is aarch64 specific.  */
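
/* For instance, a 32-byte vector type is capped at 16-byte alignment
   by the rule below, while an 8-byte vector keeps its natural 8-byte
   alignment.  Non-vector types return 0 so the generic code decides.  */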

static ULONGEST
aarch64_type_align (gdbarch *gdbarch, struct type *t)
{
  t = check_typedef (t);
  if (TYPE_CODE (t) == TYPE_CODE_ARRAY && TYPE_VECTOR (t))
    {
      /* Use the natural alignment for vector types (the same for
         scalar type), but the maximum alignment is 128-bit.  */
      if (TYPE_LENGTH (t) > 16)
        return 16;
      else
        return TYPE_LENGTH (t);
    }

  /* Allow the common code to calculate the alignment.  */
  return 0;
}

/* Worker function for aapcs_is_vfp_call_or_return_candidate.

   Return the number of registers required, or -1 on failure.

   When encountering a base element, if FUNDAMENTAL_TYPE is not set then set it
   to the element, else fail if the type of this element does not match the
   existing value.  */

static int
aapcs_is_vfp_call_or_return_candidate_1 (struct type *type,
                                         struct type **fundamental_type)
{
  if (type == nullptr)
    return -1;

  switch (TYPE_CODE (type))
    {
    case TYPE_CODE_FLT:
      if (TYPE_LENGTH (type) > 16)
        return -1;

      if (*fundamental_type == nullptr)
        *fundamental_type = type;
      else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
               || TYPE_CODE (type) != TYPE_CODE (*fundamental_type))
        return -1;

      return 1;

    case TYPE_CODE_COMPLEX:
      {
        struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
        if (TYPE_LENGTH (target_type) > 16)
          return -1;

        if (*fundamental_type == nullptr)
          *fundamental_type = target_type;
        else if (TYPE_LENGTH (target_type) != TYPE_LENGTH (*fundamental_type)
                 || TYPE_CODE (target_type) != TYPE_CODE (*fundamental_type))
          return -1;

        return 2;
      }

    case TYPE_CODE_ARRAY:
      {
        if (TYPE_VECTOR (type))
          {
            if (TYPE_LENGTH (type) != 8 && TYPE_LENGTH (type) != 16)
              return -1;

            if (*fundamental_type == nullptr)
              *fundamental_type = type;
            else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
                     || TYPE_CODE (type) != TYPE_CODE (*fundamental_type))
              return -1;

            return 1;
          }
        else
          {
            struct type *target_type = TYPE_TARGET_TYPE (type);
            int count = aapcs_is_vfp_call_or_return_candidate_1
                          (target_type, fundamental_type);

            if (count == -1)
              return count;

            count *= (TYPE_LENGTH (type) / TYPE_LENGTH (target_type));
            return count;
          }
      }

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      {
        int count = 0;

        for (int i = 0; i < TYPE_NFIELDS (type); i++)
          {
            /* Ignore any static fields.  */
            if (field_is_static (&TYPE_FIELD (type, i)))
              continue;

            struct type *member = check_typedef (TYPE_FIELD_TYPE (type, i));

            int sub_count = aapcs_is_vfp_call_or_return_candidate_1
                              (member, fundamental_type);
            if (sub_count == -1)
              return -1;
            count += sub_count;
          }

        /* Ensure there is no padding between the fields (allowing for empty
           zero length structs).  */
        int ftype_length = (*fundamental_type == nullptr)
                             ? 0 : TYPE_LENGTH (*fundamental_type);
        if (count * ftype_length != TYPE_LENGTH (type))
          return -1;

        return count;
      }

    default:
      break;
    }

  return -1;
}

/* Return true if an argument, whose type is described by TYPE, can be passed or
   returned in simd/fp registers, providing enough parameter passing registers
   are available.  This is as described in the AAPCS64.

   Upon successful return, *COUNT returns the number of needed registers,
   *FUNDAMENTAL_TYPE contains the type of those registers.

   Candidate as per the AAPCS64 5.4.2.C is either a:
   - float.
   - short-vector.
   - HFA (Homogeneous Floating-point Aggregate, 4.3.5.1).  A Composite type
     where all the members are floats and has at most 4 members.
   - HVA (Homogeneous Short-vector Aggregate, 4.3.5.2).  A Composite type
     where all the members are short vectors and has at most 4 members.
   - Complex (7.1.1)

   Note that HFAs and HVAs can include nested structures and arrays.  */
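
/* For example, under these rules struct { float x, y, z; } is an HFA:
   each member resolves to the same fundamental float type, so *COUNT
   comes back as 3 and the value travels in three consecutive V
   registers.  A struct mixing a float and a double fails the
   fundamental-type check and is not a candidate.  */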

static bool
aapcs_is_vfp_call_or_return_candidate (struct type *type, int *count,
                                       struct type **fundamental_type)
{
  if (type == nullptr)
    return false;

  *fundamental_type = nullptr;

  int ag_count = aapcs_is_vfp_call_or_return_candidate_1 (type,
                                                          fundamental_type);

  if (ag_count > 0 && ag_count <= HA_MAX_NUM_FLDS)
    {
      *count = ag_count;
      return true;
    }
  else
    return false;
}

/* AArch64 function call information structure.  */
struct aarch64_call_info
{
  /* The current argument number.  */
  unsigned argnum = 0;

  /* The next general purpose register number, equivalent to NGRN as
     described in the AArch64 Procedure Call Standard.  */
  unsigned ngrn = 0;

  /* The next SIMD and floating point register number, equivalent to
     NSRN as described in the AArch64 Procedure Call Standard.  */
  unsigned nsrn = 0;

  /* The next stacked argument address, equivalent to NSAA as
     described in the AArch64 Procedure Call Standard.  */
  unsigned nsaa = 0;

  /* Stack item vector.  */
  std::vector<stack_item_t> si;
};

/* Pass a value in a sequence of consecutive X registers.  The caller
   is responsible for ensuring sufficient registers are available.  */
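
/* For example, a 16-byte struct with NGRN = 2 is split into two 8-byte
   chunks written to x2 and x3; an integer no larger than a register
   occupies a single X register.  */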

static void
pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
           struct aarch64_call_info *info, struct type *type,
           struct value *arg)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int len = TYPE_LENGTH (type);
  enum type_code typecode = TYPE_CODE (type);
  int regnum = AARCH64_X0_REGNUM + info->ngrn;
  const bfd_byte *buf = value_contents (arg);

  info->argnum++;

  while (len > 0)
    {
      int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
      CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
                                                   byte_order);

      /* Adjust sub-word struct/union args when big-endian.  */
      if (byte_order == BFD_ENDIAN_BIG
          && partial_len < X_REGISTER_SIZE
          && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
        regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);

      if (aarch64_debug)
        {
          debug_printf ("arg %d in %s = 0x%s\n", info->argnum,
                        gdbarch_register_name (gdbarch, regnum),
                        phex (regval, X_REGISTER_SIZE));
        }
      regcache_cooked_write_unsigned (regcache, regnum, regval);
      len -= partial_len;
      buf += partial_len;
      regnum++;
    }
}

/* Attempt to marshall a value in a V register.  Return 1 if
   successful, or 0 if insufficient registers are available.  This
   function, unlike the equivalent pass_in_x() function, does not
   handle arguments spread across multiple registers.  */
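
/* Per PCS rule C.1 (cited in the body below), a scalar shorter than
   the vector register is placed in the register's least significant
   bytes: e.g. a 4-byte float lands in the low 4 bytes of the V
   register, with the remaining bytes zeroed.  */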

static int
pass_in_v (struct gdbarch *gdbarch,
           struct regcache *regcache,
           struct aarch64_call_info *info,
           int len, const bfd_byte *buf)
{
  if (info->nsrn < 8)
    {
      int regnum = AARCH64_V0_REGNUM + info->nsrn;
      /* Enough space for a full vector register.  */
      gdb_byte reg[register_size (gdbarch, regnum)];
      gdb_assert (len <= sizeof (reg));

      info->argnum++;
      info->nsrn++;

      memset (reg, 0, sizeof (reg));
      /* PCS C.1, the argument is allocated to the least significant
         bits of V register.  */
      memcpy (reg, buf, len);
      regcache->cooked_write (regnum, reg);

      if (aarch64_debug)
        {
          debug_printf ("arg %d in %s\n", info->argnum,
                        gdbarch_register_name (gdbarch, regnum));
        }
      return 1;
    }
  info->nsrn = 8;
  return 0;
}

/* Marshall an argument onto the stack.  */
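
/* As a worked example of the padding logic below: pushing a 12-byte
   struct with 8-byte alignment advances NSAA from 0 to 12 and then
   pushes a 4-byte padding item, so the next slot starts at 16 and is
   again 8-byte aligned.  */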

static void
pass_on_stack (struct aarch64_call_info *info, struct type *type,
               struct value *arg)
{
  const bfd_byte *buf = value_contents (arg);
  int len = TYPE_LENGTH (type);
  int align;
  stack_item_t item;

  info->argnum++;

  align = type_align (type);

  /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
     natural alignment of the argument's type.  */
  align = align_up (align, 8);

  /* The AArch64 PCS requires at most doubleword alignment.  */
  if (align > 16)
    align = 16;

  if (aarch64_debug)
    {
      debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
                    info->nsaa);
    }

  item.len = len;
  item.data = buf;
  info->si.push_back (item);

  info->nsaa += len;
  if (info->nsaa & (align - 1))
    {
      /* Push stack alignment padding.  */
      int pad = align - (info->nsaa & (align - 1));

      item.len = pad;
      item.data = NULL;

      info->si.push_back (item);
      info->nsaa += pad;
    }
}

/* Marshall an argument into a sequence of one or more consecutive X
   registers or, if insufficient X registers are available then onto
   the stack.  */
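
/* Note the all-or-nothing rule here: if an argument needs more X
   registers than remain (say a 16-byte value with only x7 free), NGRN
   is bumped straight to 8 and the whole argument goes to the stack;
   it is never split between registers and stack.  */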

static void
pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
                    struct aarch64_call_info *info, struct type *type,
                    struct value *arg)
{
  int len = TYPE_LENGTH (type);
  int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;

  /* PCS C.13 - Pass in registers if we have enough spare */
  if (info->ngrn + nregs <= 8)
    {
      pass_in_x (gdbarch, regcache, info, type, arg);
      info->ngrn += nregs;
    }
  else
    {
      info->ngrn = 8;
      pass_on_stack (info, type, arg);
    }
}

/* Pass a value, which is of type arg_type, in a V register.  Assumes value is
   an aapcs_is_vfp_call_or_return_candidate and there are enough spare V
   registers.  A return value of false is an error state as the value will have
   been partially passed to the stack.  */
static bool
pass_in_v_vfp_candidate (struct gdbarch *gdbarch, struct regcache *regcache,
                         struct aarch64_call_info *info, struct type *arg_type,
                         struct value *arg)
{
  switch (TYPE_CODE (arg_type))
    {
    case TYPE_CODE_FLT:
      return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type),
                        value_contents (arg));
      break;

    case TYPE_CODE_COMPLEX:
      {
        const bfd_byte *buf = value_contents (arg);
        struct type *target_type = check_typedef (TYPE_TARGET_TYPE (arg_type));

        if (!pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type),
                        buf))
          return false;

        return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type),
                          buf + TYPE_LENGTH (target_type));
      }

    case TYPE_CODE_ARRAY:
      if (TYPE_VECTOR (arg_type))
        return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type),
                          value_contents (arg));
      /* fall through.  */

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      for (int i = 0; i < TYPE_NFIELDS (arg_type); i++)
        {
          /* Don't include static fields.  */
          if (field_is_static (&TYPE_FIELD (arg_type, i)))
            continue;

          struct value *field = value_primitive_field (arg, 0, i, arg_type);
          struct type *field_type = check_typedef (value_type (field));

          if (!pass_in_v_vfp_candidate (gdbarch, regcache, info, field_type,
                                        field))
            return false;
        }
      return true;

    default:
      return false;
    }
}

/* Implement the "push_dummy_call" gdbarch method.  */

static CORE_ADDR
aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
                         struct regcache *regcache, CORE_ADDR bp_addr,
                         int nargs,
                         struct value **args, CORE_ADDR sp,
                         function_call_return_method return_method,
                         CORE_ADDR struct_addr)
{
  int argnum;
  struct aarch64_call_info info;

  /* We need to know what the type of the called function is in order
     to determine the number of named/anonymous arguments for the
     actual argument placement, and the return type in order to handle
     return value correctly.

     The generic code above us views the decision of return in memory
     or return in registers as a two stage process.  The language
     handler is consulted first and may decide to return in memory (eg
     class with copy constructor returned by value), this will cause
     the generic code to allocate space AND insert an initial leading
     argument.

     If the language code does not decide to pass in memory then the
     target code is consulted.

     If the language code decides to pass in memory we want to move
     the pointer inserted as the initial argument from the argument
     list and into X8, the conventional AArch64 struct return pointer
     register.  */

  /* Set the return address.  For the AArch64, the return breakpoint
     is always at BP_ADDR.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);

  /* If we were given an initial argument for the return slot, lose it.  */
  if (return_method == return_method_hidden_param)
    {
      args++;
      nargs--;
    }

  /* The struct_return pointer occupies X8.  */
  if (return_method != return_method_normal)
    {
      if (aarch64_debug)
        {
          debug_printf ("struct return in %s = 0x%s\n",
                        gdbarch_register_name (gdbarch,
                                               AARCH64_STRUCT_RETURN_REGNUM),
                        paddress (gdbarch, struct_addr));
        }
      regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
                                      struct_addr);
    }

  for (argnum = 0; argnum < nargs; argnum++)
    {
      struct value *arg = args[argnum];
      struct type *arg_type, *fundamental_type;
      int len, elements;

      arg_type = check_typedef (value_type (arg));
      len = TYPE_LENGTH (arg_type);

      /* If arg can be passed in v registers as per the AAPCS64, then do so
         if there are enough spare registers.  */
      if (aapcs_is_vfp_call_or_return_candidate (arg_type, &elements,
                                                 &fundamental_type))
        {
          if (info.nsrn + elements <= 8)
            {
              /* We know that we have sufficient registers available,
                 therefore this will never need to fall back to the
                 stack.  */
              if (!pass_in_v_vfp_candidate (gdbarch, regcache, &info, arg_type,
                                            arg))
                gdb_assert_not_reached ("Failed to push args");
            }
          else
            {
              info.nsrn = 8;
              pass_on_stack (&info, arg_type, arg);
            }
          continue;
        }

      switch (TYPE_CODE (arg_type))
        {
        case TYPE_CODE_INT:
        case TYPE_CODE_BOOL:
        case TYPE_CODE_CHAR:
        case TYPE_CODE_RANGE:
        case TYPE_CODE_ENUM:
          if (len < 4)
            {
              /* Promote to 32 bit integer.  */
              if (TYPE_UNSIGNED (arg_type))
                arg_type = builtin_type (gdbarch)->builtin_uint32;
              else
                arg_type = builtin_type (gdbarch)->builtin_int32;
              arg = value_cast (arg_type, arg);
            }
          pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;

        case TYPE_CODE_STRUCT:
        case TYPE_CODE_ARRAY:
        case TYPE_CODE_UNION:
          if (len > 16)
            {
              /* PCS B.7 Aggregates larger than 16 bytes are passed by
                 invisible reference.  */

              /* Allocate aligned storage.  */
              sp = align_down (sp - len, 16);

              /* Write the real data into the stack.  */
              write_memory (sp, value_contents (arg), len);

              /* Construct the indirection.  */
              arg_type = lookup_pointer_type (arg_type);
              arg = value_from_pointer (arg_type, sp);
              pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
            }
          else
            /* PCS C.15 / C.18 multiple values pass.  */
            pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;

        default:
          pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;
        }
    }

  /* Make sure stack retains 16 byte alignment.  */
  if (info.nsaa & 15)
    sp -= 16 - (info.nsaa & 15);

  while (!info.si.empty ())
    {
      const stack_item_t &si = info.si.back ();

      sp -= si.len;
      if (si.data != NULL)
        write_memory (sp, si.data, si.len);
      info.si.pop_back ();
    }

  /* Finally, update the SP register.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);

  return sp;
}

/* Implement the "frame_align" gdbarch method.  */

static CORE_ADDR
aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
{
  /* Align the stack to sixteen bytes.  */
  return sp & ~(CORE_ADDR) 15;
}

/* Return the type for an AdvSIMD Q register.  */

static struct type *
aarch64_vnq_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnq_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint128;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int128;
      append_composite_type_field (t, "s", elem);

      tdep->vnq_type = t;
    }

  return tdep->vnq_type;
}

/* Return the type for an AdvSIMD D register.  */

static struct type *
aarch64_vnd_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnd_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_double;
      append_composite_type_field (t, "f", elem);

      elem = builtin_type (gdbarch)->builtin_uint64;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int64;
      append_composite_type_field (t, "s", elem);

      tdep->vnd_type = t;
    }

  return tdep->vnd_type;
}

/* Return the type for an AdvSIMD S register.  */

static struct type *
aarch64_vns_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vns_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_float;
      append_composite_type_field (t, "f", elem);

      elem = builtin_type (gdbarch)->builtin_uint32;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int32;
      append_composite_type_field (t, "s", elem);

      tdep->vns_type = t;
    }

  return tdep->vns_type;
}

/* Return the type for an AdvSIMD H register.  */

static struct type *
aarch64_vnh_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnh_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_half;
      append_composite_type_field (t, "f", elem);

      elem = builtin_type (gdbarch)->builtin_uint16;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int16;
      append_composite_type_field (t, "s", elem);

      tdep->vnh_type = t;
    }

  return tdep->vnh_type;
}

/* Return the type for an AdvSIMD B register.  */

static struct type *
aarch64_vnb_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnb_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint8;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int8;
      append_composite_type_field (t, "s", elem);

      tdep->vnb_type = t;
    }

  return tdep->vnb_type;
}
1927
1928 /* Return the type for an AdvSIMD V register.  */
1929
1930 static struct type *
1931 aarch64_vnv_type (struct gdbarch *gdbarch)
1932 {
1933 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1934
1935 if (tdep->vnv_type == NULL)
1936 {
1937       /* The other AArch64 pseudo registers (Q,D,H,S,B) refer to a single
1938 	 value slice of the non-pseudo vector registers.  However, NEON V
1939 	 registers are always vector registers, and need constructing as such.  */
1940 const struct builtin_type *bt = builtin_type (gdbarch);
1941
1942 struct type *t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnv",
1943 TYPE_CODE_UNION);
1944
1945 struct type *sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
1946 TYPE_CODE_UNION);
1947 append_composite_type_field (sub, "f",
1948 init_vector_type (bt->builtin_double, 2));
1949 append_composite_type_field (sub, "u",
1950 init_vector_type (bt->builtin_uint64, 2));
1951 append_composite_type_field (sub, "s",
1952 init_vector_type (bt->builtin_int64, 2));
1953 append_composite_type_field (t, "d", sub);
1954
1955 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
1956 TYPE_CODE_UNION);
1957 append_composite_type_field (sub, "f",
1958 init_vector_type (bt->builtin_float, 4));
1959 append_composite_type_field (sub, "u",
1960 init_vector_type (bt->builtin_uint32, 4));
1961 append_composite_type_field (sub, "s",
1962 init_vector_type (bt->builtin_int32, 4));
1963 append_composite_type_field (t, "s", sub);
1964
1965 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
1966 TYPE_CODE_UNION);
1967 append_composite_type_field (sub, "f",
1968 init_vector_type (bt->builtin_half, 8));
1969 append_composite_type_field (sub, "u",
1970 init_vector_type (bt->builtin_uint16, 8));
1971 append_composite_type_field (sub, "s",
1972 init_vector_type (bt->builtin_int16, 8));
1973 append_composite_type_field (t, "h", sub);
1974
1975 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
1976 TYPE_CODE_UNION);
1977 append_composite_type_field (sub, "u",
1978 init_vector_type (bt->builtin_uint8, 16));
1979 append_composite_type_field (sub, "s",
1980 init_vector_type (bt->builtin_int8, 16));
1981 append_composite_type_field (t, "b", sub);
1982
1983 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
1984 TYPE_CODE_UNION);
1985 append_composite_type_field (sub, "u",
1986 init_vector_type (bt->builtin_uint128, 1));
1987 append_composite_type_field (sub, "s",
1988 init_vector_type (bt->builtin_int128, 1));
1989 append_composite_type_field (t, "q", sub);
1990
1991 tdep->vnv_type = t;
1992 }
1993
1994 return tdep->vnv_type;
1995 }
1996
1997 /* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
1998
1999 static int
2000 aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
2001 {
2002 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2003
2004 if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
2005 return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;
2006
2007 if (reg == AARCH64_DWARF_SP)
2008 return AARCH64_SP_REGNUM;
2009
2010 if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
2011 return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;
2012
2013 if (reg == AARCH64_DWARF_SVE_VG)
2014 return AARCH64_SVE_VG_REGNUM;
2015
2016 if (reg == AARCH64_DWARF_SVE_FFR)
2017 return AARCH64_SVE_FFR_REGNUM;
2018
2019 if (reg >= AARCH64_DWARF_SVE_P0 && reg <= AARCH64_DWARF_SVE_P0 + 15)
2020 return AARCH64_SVE_P0_REGNUM + reg - AARCH64_DWARF_SVE_P0;
2021
2022 if (reg >= AARCH64_DWARF_SVE_Z0 && reg <= AARCH64_DWARF_SVE_Z0 + 15)
2023 return AARCH64_SVE_Z0_REGNUM + reg - AARCH64_DWARF_SVE_Z0;
2024
2025 if (tdep->has_pauth ())
2026 {
2027 if (reg >= AARCH64_DWARF_PAUTH_DMASK && reg <= AARCH64_DWARF_PAUTH_CMASK)
2028 return tdep->pauth_reg_base + reg - AARCH64_DWARF_PAUTH_DMASK;
2029
2030 if (reg == AARCH64_DWARF_PAUTH_RA_STATE)
2031 return tdep->pauth_ra_state_regnum;
2032 }
2033
2034 return -1;
2035 }
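
/* Worked examples of the mapping above, using the register numbers from
   the "DWARF for the ARM 64-bit Architecture" specification as encoded
   in the AARCH64_DWARF_* constants: DWARF register 2 maps to
   AARCH64_X0_REGNUM + 2 (X2), DWARF register 31 is the SP, and DWARF
   register 66 maps to AARCH64_V0_REGNUM + 2 (V2).  An unrecognized
   number yields -1, which GDB treats as "no such register".  */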
2036
2037 /* Implement the "print_insn" gdbarch method. */
2038
2039 static int
2040 aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
2041 {
2042 info->symbols = NULL;
2043 return default_print_insn (memaddr, info);
2044 }
2045
2046 /* AArch64 BRK software debug mode instruction.
2047 Note that AArch64 code is always little-endian.
2048 1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000. */
2049 constexpr gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
2050
2051 typedef BP_MANIPULATION (aarch64_default_breakpoint) aarch64_breakpoint;
2052
2053 /* Extract from an array REGS containing the (raw) register state a
2054 function return value of type TYPE, and copy that, in virtual
2055 format, into VALBUF. */
2056
2057 static void
2058 aarch64_extract_return_value (struct type *type, struct regcache *regs,
2059 gdb_byte *valbuf)
2060 {
2061 struct gdbarch *gdbarch = regs->arch ();
2062 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2063 int elements;
2064 struct type *fundamental_type;
2065
2066 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2067 &fundamental_type))
2068 {
2069 int len = TYPE_LENGTH (fundamental_type);
2070
2071 for (int i = 0; i < elements; i++)
2072 {
2073 int regno = AARCH64_V0_REGNUM + i;
2074 /* Enough space for a full vector register. */
2075 gdb_byte buf[register_size (gdbarch, regno)];
2076 gdb_assert (len <= sizeof (buf));
2077
2078 if (aarch64_debug)
2079 {
2080 debug_printf ("read HFA or HVA return value element %d from %s\n",
2081 i + 1,
2082 gdbarch_register_name (gdbarch, regno));
2083 }
2084 regs->cooked_read (regno, buf);
2085
2086 memcpy (valbuf, buf, len);
2087 valbuf += len;
2088 }
2089 }
2090 else if (TYPE_CODE (type) == TYPE_CODE_INT
2091 || TYPE_CODE (type) == TYPE_CODE_CHAR
2092 || TYPE_CODE (type) == TYPE_CODE_BOOL
2093 || TYPE_CODE (type) == TYPE_CODE_PTR
2094 || TYPE_IS_REFERENCE (type)
2095 || TYPE_CODE (type) == TYPE_CODE_ENUM)
2096 {
2097       /* If the type is a plain integer, then the access is
2098 	 straightforward.  Otherwise we have to play around a bit
2099 	 more.  */
2100 int len = TYPE_LENGTH (type);
2101 int regno = AARCH64_X0_REGNUM;
2102 ULONGEST tmp;
2103
2104 while (len > 0)
2105 {
2106 /* By using store_unsigned_integer we avoid having to do
2107 anything special for small big-endian values. */
2108 regcache_cooked_read_unsigned (regs, regno++, &tmp);
2109 store_unsigned_integer (valbuf,
2110 (len > X_REGISTER_SIZE
2111 ? X_REGISTER_SIZE : len), byte_order, tmp);
2112 len -= X_REGISTER_SIZE;
2113 valbuf += X_REGISTER_SIZE;
2114 }
2115 }
2116 else
2117 {
2118 /* For a structure or union the behaviour is as if the value had
2119 been stored to word-aligned memory and then loaded into
2120 registers with 64-bit load instruction(s). */
2121 int len = TYPE_LENGTH (type);
2122 int regno = AARCH64_X0_REGNUM;
2123 bfd_byte buf[X_REGISTER_SIZE];
2124
2125 while (len > 0)
2126 {
2127 regs->cooked_read (regno++, buf);
2128 memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2129 len -= X_REGISTER_SIZE;
2130 valbuf += X_REGISTER_SIZE;
2131 }
2132 }
2133 }
2134
2135
2136 /* Will a function return an aggregate type in memory or in a
2137 register? Return 0 if an aggregate type can be returned in a
2138 register, 1 if it must be returned in memory. */
2139
2140 static int
2141 aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
2142 {
2143 type = check_typedef (type);
2144 int elements;
2145 struct type *fundamental_type;
2146
2147 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2148 &fundamental_type))
2149 {
2150       /* V0-V7 are used to return values, and one register is allocated
2151 	 per member.  An HFA or HVA has at most four members.  */
2152 return 0;
2153 }
2154
2155 if (TYPE_LENGTH (type) > 16)
2156 {
2157       /* PCS B.6: Aggregates larger than 16 bytes are passed by
2158 	 invisible reference.  */
2159
2160 return 1;
2161 }
2162
2163 return 0;
2164 }
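
/* Illustrative cases for the classification above, assuming the
   standard AAPCS64 rules: "struct { double a, b, c; }" is 24 bytes but
   is an HFA of three doubles, so it is returned in V0-V2 and we return
   0; "struct { char buf[24]; }" is not an HFA and exceeds 16 bytes, so
   it is returned via invisible reference and we return 1.  */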
2165
2166 /* Write into appropriate registers a function return value of type
2167 TYPE, given in virtual format. */
2168
2169 static void
2170 aarch64_store_return_value (struct type *type, struct regcache *regs,
2171 const gdb_byte *valbuf)
2172 {
2173 struct gdbarch *gdbarch = regs->arch ();
2174 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2175 int elements;
2176 struct type *fundamental_type;
2177
2178 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2179 &fundamental_type))
2180 {
2181 int len = TYPE_LENGTH (fundamental_type);
2182
2183 for (int i = 0; i < elements; i++)
2184 {
2185 int regno = AARCH64_V0_REGNUM + i;
2186 /* Enough space for a full vector register. */
2187 gdb_byte tmpbuf[register_size (gdbarch, regno)];
2188 gdb_assert (len <= sizeof (tmpbuf));
2189
2190 if (aarch64_debug)
2191 {
2192 debug_printf ("write HFA or HVA return value element %d to %s\n",
2193 i + 1,
2194 gdbarch_register_name (gdbarch, regno));
2195 }
2196
2197 memcpy (tmpbuf, valbuf,
2198 len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
2199 regs->cooked_write (regno, tmpbuf);
2200 valbuf += len;
2201 }
2202 }
2203 else if (TYPE_CODE (type) == TYPE_CODE_INT
2204 || TYPE_CODE (type) == TYPE_CODE_CHAR
2205 || TYPE_CODE (type) == TYPE_CODE_BOOL
2206 || TYPE_CODE (type) == TYPE_CODE_PTR
2207 || TYPE_IS_REFERENCE (type)
2208 || TYPE_CODE (type) == TYPE_CODE_ENUM)
2209 {
2210 if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
2211 {
2212 	  /* Values of one word or less are zero/sign-extended and
2213 	     returned in X0.  */
2214 bfd_byte tmpbuf[X_REGISTER_SIZE];
2215 LONGEST val = unpack_long (type, valbuf);
2216
2217 store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
2218 regs->cooked_write (AARCH64_X0_REGNUM, tmpbuf);
2219 }
2220 else
2221 {
2222 	  /* Integral values greater than one word are stored in
2223 	     consecutive registers starting with X0.  This will always
2224 	     be a multiple of the register size.  */
2225 int len = TYPE_LENGTH (type);
2226 int regno = AARCH64_X0_REGNUM;
2227
2228 while (len > 0)
2229 {
2230 regs->cooked_write (regno++, valbuf);
2231 len -= X_REGISTER_SIZE;
2232 valbuf += X_REGISTER_SIZE;
2233 }
2234 }
2235 }
2236 else
2237 {
2238 /* For a structure or union the behaviour is as if the value had
2239 been stored to word-aligned memory and then loaded into
2240 registers with 64-bit load instruction(s). */
2241 int len = TYPE_LENGTH (type);
2242 int regno = AARCH64_X0_REGNUM;
2243 bfd_byte tmpbuf[X_REGISTER_SIZE];
2244
2245 while (len > 0)
2246 {
2247 memcpy (tmpbuf, valbuf,
2248 len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2249 regs->cooked_write (regno++, tmpbuf);
2250 len -= X_REGISTER_SIZE;
2251 valbuf += X_REGISTER_SIZE;
2252 }
2253 }
2254 }
2255
2256 /* Implement the "return_value" gdbarch method. */
2257
2258 static enum return_value_convention
2259 aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
2260 struct type *valtype, struct regcache *regcache,
2261 gdb_byte *readbuf, const gdb_byte *writebuf)
2262 {
2263
2264 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
2265 || TYPE_CODE (valtype) == TYPE_CODE_UNION
2266 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
2267 {
2268 if (aarch64_return_in_memory (gdbarch, valtype))
2269 {
2270 if (aarch64_debug)
2271 debug_printf ("return value in memory\n");
2272 return RETURN_VALUE_STRUCT_CONVENTION;
2273 }
2274 }
2275
2276 if (writebuf)
2277 aarch64_store_return_value (valtype, regcache, writebuf);
2278
2279 if (readbuf)
2280 aarch64_extract_return_value (valtype, regcache, readbuf);
2281
2282 if (aarch64_debug)
2283 debug_printf ("return value in registers\n");
2284
2285 return RETURN_VALUE_REGISTER_CONVENTION;
2286 }
2287
2288 /* Implement the "get_longjmp_target" gdbarch method. */
2289
2290 static int
2291 aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2292 {
2293 CORE_ADDR jb_addr;
2294 gdb_byte buf[X_REGISTER_SIZE];
2295 struct gdbarch *gdbarch = get_frame_arch (frame);
2296 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2297 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2298
2299 jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
2300
2301 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
2302 X_REGISTER_SIZE))
2303 return 0;
2304
2305 *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
2306 return 1;
2307 }
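
/* A worked example (JB_PC and JB_ELT_SIZE are OS-ABI specific; the
   values here are hypothetical): with jb_pc == 11 and jb_elt_size == 8,
   the saved PC would be read from jb_addr + 88, i.e. the twelfth 64-bit
   slot of the jmp_buf whose address the inferior passed in X0.  */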
2308
2309 /* Implement the "gen_return_address" gdbarch method. */
2310
2311 static void
2312 aarch64_gen_return_address (struct gdbarch *gdbarch,
2313 struct agent_expr *ax, struct axs_value *value,
2314 CORE_ADDR scope)
2315 {
2316 value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
2317 value->kind = axs_lvalue_register;
2318 value->u.reg = AARCH64_LR_REGNUM;
2319 }
2320 \f
2321
2322 /* Return the pseudo register name corresponding to register regnum. */
2323
2324 static const char *
2325 aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
2326 {
2327 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2328
2329 static const char *const q_name[] =
2330 {
2331 "q0", "q1", "q2", "q3",
2332 "q4", "q5", "q6", "q7",
2333 "q8", "q9", "q10", "q11",
2334 "q12", "q13", "q14", "q15",
2335 "q16", "q17", "q18", "q19",
2336 "q20", "q21", "q22", "q23",
2337 "q24", "q25", "q26", "q27",
2338 "q28", "q29", "q30", "q31",
2339 };
2340
2341 static const char *const d_name[] =
2342 {
2343 "d0", "d1", "d2", "d3",
2344 "d4", "d5", "d6", "d7",
2345 "d8", "d9", "d10", "d11",
2346 "d12", "d13", "d14", "d15",
2347 "d16", "d17", "d18", "d19",
2348 "d20", "d21", "d22", "d23",
2349 "d24", "d25", "d26", "d27",
2350 "d28", "d29", "d30", "d31",
2351 };
2352
2353 static const char *const s_name[] =
2354 {
2355 "s0", "s1", "s2", "s3",
2356 "s4", "s5", "s6", "s7",
2357 "s8", "s9", "s10", "s11",
2358 "s12", "s13", "s14", "s15",
2359 "s16", "s17", "s18", "s19",
2360 "s20", "s21", "s22", "s23",
2361 "s24", "s25", "s26", "s27",
2362 "s28", "s29", "s30", "s31",
2363 };
2364
2365 static const char *const h_name[] =
2366 {
2367 "h0", "h1", "h2", "h3",
2368 "h4", "h5", "h6", "h7",
2369 "h8", "h9", "h10", "h11",
2370 "h12", "h13", "h14", "h15",
2371 "h16", "h17", "h18", "h19",
2372 "h20", "h21", "h22", "h23",
2373 "h24", "h25", "h26", "h27",
2374 "h28", "h29", "h30", "h31",
2375 };
2376
2377 static const char *const b_name[] =
2378 {
2379 "b0", "b1", "b2", "b3",
2380 "b4", "b5", "b6", "b7",
2381 "b8", "b9", "b10", "b11",
2382 "b12", "b13", "b14", "b15",
2383 "b16", "b17", "b18", "b19",
2384 "b20", "b21", "b22", "b23",
2385 "b24", "b25", "b26", "b27",
2386 "b28", "b29", "b30", "b31",
2387 };
2388
2389 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
2390
2391 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
2392 return q_name[p_regnum - AARCH64_Q0_REGNUM];
2393
2394 if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
2395 return d_name[p_regnum - AARCH64_D0_REGNUM];
2396
2397 if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
2398 return s_name[p_regnum - AARCH64_S0_REGNUM];
2399
2400 if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
2401 return h_name[p_regnum - AARCH64_H0_REGNUM];
2402
2403 if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
2404 return b_name[p_regnum - AARCH64_B0_REGNUM];
2405
2406 if (tdep->has_sve ())
2407 {
2408 static const char *const sve_v_name[] =
2409 {
2410 "v0", "v1", "v2", "v3",
2411 "v4", "v5", "v6", "v7",
2412 "v8", "v9", "v10", "v11",
2413 "v12", "v13", "v14", "v15",
2414 "v16", "v17", "v18", "v19",
2415 "v20", "v21", "v22", "v23",
2416 "v24", "v25", "v26", "v27",
2417 "v28", "v29", "v30", "v31",
2418 };
2419
2420 if (p_regnum >= AARCH64_SVE_V0_REGNUM
2421 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2422 return sve_v_name[p_regnum - AARCH64_SVE_V0_REGNUM];
2423 }
2424
2425 /* RA_STATE is used for unwinding only. Do not assign it a name - this
2426 prevents it from being read by methods such as
2427 mi_cmd_trace_frame_collected. */
2428 if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
2429 return "";
2430
2431 internal_error (__FILE__, __LINE__,
2432 _("aarch64_pseudo_register_name: bad register number %d"),
2433 p_regnum);
2434 }
2435
2436 /* Implement the "pseudo_register_type" tdesc_arch_data method. */
2437
2438 static struct type *
2439 aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
2440 {
2441 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2442
2443 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
2444
2445 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
2446 return aarch64_vnq_type (gdbarch);
2447
2448 if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
2449 return aarch64_vnd_type (gdbarch);
2450
2451 if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
2452 return aarch64_vns_type (gdbarch);
2453
2454 if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
2455 return aarch64_vnh_type (gdbarch);
2456
2457 if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
2458 return aarch64_vnb_type (gdbarch);
2459
2460 if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
2461 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2462 return aarch64_vnv_type (gdbarch);
2463
2464 if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
2465 return builtin_type (gdbarch)->builtin_uint64;
2466
2467 internal_error (__FILE__, __LINE__,
2468 _("aarch64_pseudo_register_type: bad register number %d"),
2469 p_regnum);
2470 }
2471
2472 /* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2473
2474 static int
2475 aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
2476 struct reggroup *group)
2477 {
2478 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2479
2480 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
2481
2482 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
2483 return group == all_reggroup || group == vector_reggroup;
2484 else if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
2485 return (group == all_reggroup || group == vector_reggroup
2486 || group == float_reggroup);
2487 else if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
2488 return (group == all_reggroup || group == vector_reggroup
2489 || group == float_reggroup);
2490 else if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
2491 return group == all_reggroup || group == vector_reggroup;
2492 else if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
2493 return group == all_reggroup || group == vector_reggroup;
2494 else if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
2495 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2496 return group == all_reggroup || group == vector_reggroup;
2497 /* RA_STATE is used for unwinding only. Do not assign it to any groups. */
2498 if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
2499 return 0;
2500
2501 return group == all_reggroup;
2502 }
2503
2504 /* Helper for aarch64_pseudo_read_value. */
2505
2506 static struct value *
2507 aarch64_pseudo_read_value_1 (struct gdbarch *gdbarch,
2508 readable_regcache *regcache, int regnum_offset,
2509 int regsize, struct value *result_value)
2510 {
2511 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
2512
2513 /* Enough space for a full vector register. */
2514 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2515 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2516
2517 if (regcache->raw_read (v_regnum, reg_buf) != REG_VALID)
2518 mark_value_bytes_unavailable (result_value, 0,
2519 TYPE_LENGTH (value_type (result_value)));
2520 else
2521 memcpy (value_contents_raw (result_value), reg_buf, regsize);
2522
2523 return result_value;
2524 }
2525
2526 /* Implement the "pseudo_register_read_value" gdbarch method. */
2527
2528 static struct value *
2529 aarch64_pseudo_read_value (struct gdbarch *gdbarch, readable_regcache *regcache,
2530 int regnum)
2531 {
2532 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2533 struct value *result_value = allocate_value (register_type (gdbarch, regnum));
2534
2535 VALUE_LVAL (result_value) = lval_register;
2536 VALUE_REGNUM (result_value) = regnum;
2537
2538 regnum -= gdbarch_num_regs (gdbarch);
2539
2540 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2541 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2542 regnum - AARCH64_Q0_REGNUM,
2543 Q_REGISTER_SIZE, result_value);
2544
2545 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2546 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2547 regnum - AARCH64_D0_REGNUM,
2548 D_REGISTER_SIZE, result_value);
2549
2550 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2551 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2552 regnum - AARCH64_S0_REGNUM,
2553 S_REGISTER_SIZE, result_value);
2554
2555 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2556 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2557 regnum - AARCH64_H0_REGNUM,
2558 H_REGISTER_SIZE, result_value);
2559
2560 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2561 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2562 regnum - AARCH64_B0_REGNUM,
2563 B_REGISTER_SIZE, result_value);
2564
2565 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2566 && regnum < AARCH64_SVE_V0_REGNUM + 32)
2567 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2568 regnum - AARCH64_SVE_V0_REGNUM,
2569 V_REGISTER_SIZE, result_value);
2570
2571   gdb_assert_not_reached ("regnum out of bounds");
2572 }
2573
2574 /* Helper for aarch64_pseudo_write. */
2575
2576 static void
2577 aarch64_pseudo_write_1 (struct gdbarch *gdbarch, struct regcache *regcache,
2578 int regnum_offset, int regsize, const gdb_byte *buf)
2579 {
2580 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
2581
2582 /* Enough space for a full vector register. */
2583 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2584 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2585
2586   /* Ensure the register buffer is zero.  We want GDB writes of the
2587      various 'scalar' pseudo registers to behave like architectural
2588      writes: register-width bytes are written and the remainder is
2589      set to zero.  */
2590 memset (reg_buf, 0, register_size (gdbarch, AARCH64_V0_REGNUM));
2591
2592 memcpy (reg_buf, buf, regsize);
2593 regcache->raw_write (v_regnum, reg_buf);
2594 }
2595
2596 /* Implement the "pseudo_register_write" gdbarch method. */
2597
2598 static void
2599 aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2600 int regnum, const gdb_byte *buf)
2601 {
2602 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2603 regnum -= gdbarch_num_regs (gdbarch);
2604
2605 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2606 return aarch64_pseudo_write_1 (gdbarch, regcache,
2607 regnum - AARCH64_Q0_REGNUM, Q_REGISTER_SIZE,
2608 buf);
2609
2610 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2611 return aarch64_pseudo_write_1 (gdbarch, regcache,
2612 regnum - AARCH64_D0_REGNUM, D_REGISTER_SIZE,
2613 buf);
2614
2615 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2616 return aarch64_pseudo_write_1 (gdbarch, regcache,
2617 regnum - AARCH64_S0_REGNUM, S_REGISTER_SIZE,
2618 buf);
2619
2620 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2621 return aarch64_pseudo_write_1 (gdbarch, regcache,
2622 regnum - AARCH64_H0_REGNUM, H_REGISTER_SIZE,
2623 buf);
2624
2625 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2626 return aarch64_pseudo_write_1 (gdbarch, regcache,
2627 regnum - AARCH64_B0_REGNUM, B_REGISTER_SIZE,
2628 buf);
2629
2630 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2631 && regnum < AARCH64_SVE_V0_REGNUM + 32)
2632 return aarch64_pseudo_write_1 (gdbarch, regcache,
2633 regnum - AARCH64_SVE_V0_REGNUM,
2634 V_REGISTER_SIZE, buf);
2635
2636   gdb_assert_not_reached ("regnum out of bounds");
2637 }
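
/* An illustrative effect of the zero-filled buffer above: the CLI
   command "set $s0.f = 1.0" reaches aarch64_pseudo_write_1 with
   regsize == S_REGISTER_SIZE (4), so bytes 0-3 of V0 receive
   0x3f800000 (1.0f) and bytes 4-15 are cleared, exactly as an
   architectural write to S0 would behave.  */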
2638
2639 /* Callback function for user_reg_add. */
2640
2641 static struct value *
2642 value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
2643 {
2644 const int *reg_p = (const int *) baton;
2645
2646 return value_of_register (*reg_p, frame);
2647 }
2648 \f
2649
2650 /* Implement the "software_single_step" gdbarch method, needed to
2651 single step through atomic sequences on AArch64. */
2652
2653 static std::vector<CORE_ADDR>
2654 aarch64_software_single_step (struct regcache *regcache)
2655 {
2656 struct gdbarch *gdbarch = regcache->arch ();
2657 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2658 const int insn_size = 4;
2659 const int atomic_sequence_length = 16; /* Instruction sequence length. */
2660 CORE_ADDR pc = regcache_read_pc (regcache);
2661 CORE_ADDR breaks[2] = { CORE_ADDR_MAX, CORE_ADDR_MAX };
2662 CORE_ADDR loc = pc;
2663 CORE_ADDR closing_insn = 0;
2664 uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
2665 byte_order_for_code);
2666 int index;
2667 int insn_count;
2668 int bc_insn_count = 0; /* Conditional branch instruction count. */
2669 int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed). */
2670 aarch64_inst inst;
2671
2672 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2673 return {};
2674
2675 /* Look for a Load Exclusive instruction which begins the sequence. */
2676 if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
2677 return {};
2678
2679 for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
2680 {
2681 loc += insn_size;
2682 insn = read_memory_unsigned_integer (loc, insn_size,
2683 byte_order_for_code);
2684
2685 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2686 return {};
2687 /* Check if the instruction is a conditional branch. */
2688 if (inst.opcode->iclass == condbranch)
2689 {
2690 gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);
2691
2692 if (bc_insn_count >= 1)
2693 return {};
2694
2695 /* It is, so we'll try to set a breakpoint at the destination. */
2696 breaks[1] = loc + inst.operands[0].imm.value;
2697
2698 bc_insn_count++;
2699 last_breakpoint++;
2700 }
2701
2702 /* Look for the Store Exclusive which closes the atomic sequence. */
2703 if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
2704 {
2705 closing_insn = loc;
2706 break;
2707 }
2708 }
2709
2710   /* We didn't find a closing Store Exclusive instruction; fall back.  */
2711 if (!closing_insn)
2712 return {};
2713
2714 /* Insert breakpoint after the end of the atomic sequence. */
2715 breaks[0] = loc + insn_size;
2716
2717 /* Check for duplicated breakpoints, and also check that the second
2718 breakpoint is not within the atomic sequence. */
2719 if (last_breakpoint
2720 && (breaks[1] == breaks[0]
2721 || (breaks[1] >= pc && breaks[1] <= closing_insn)))
2722 last_breakpoint = 0;
2723
2724 std::vector<CORE_ADDR> next_pcs;
2725
2726 /* Insert the breakpoint at the end of the sequence, and one at the
2727 destination of the conditional branch, if it exists. */
2728 for (index = 0; index <= last_breakpoint; index++)
2729 next_pcs.push_back (breaks[index]);
2730
2731 return next_pcs;
2732 }
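
/* An illustrative atomic sequence (a typical compare-and-swap loop;
   addresses invented) together with the breakpoints the code above
   would choose:

     0x400500  ldaxr w2, [x0]      <- load exclusive opens the sequence
     0x400504  cmp   w2, w1
     0x400508  b.ne  0x400514      <- conditional: breaks[1] = 0x400514
     0x40050c  stlxr w3, w4, [x0]  <- store exclusive closes it
     0x400510  cbnz  w3, 0x400500  <- breaks[0] = closing insn + 4

   Execution then continues to one of the two breakpoints, so the
   exclusive monitor is never perturbed by a single-step trap inside
   the sequence.  */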
2733
2734 struct aarch64_displaced_step_copy_insn_closure : public displaced_step_copy_insn_closure
2735 {
2736   /* True when a conditional instruction, such as B.COND or TBZ, is
2737      being displaced-stepped.  */
2738 int cond = 0;
2739
2740 /* PC adjustment offset after displaced stepping. */
2741 int32_t pc_adjust = 0;
2742 };
2743
2744 /* Data when visiting instructions for displaced stepping. */
2745
2746 struct aarch64_displaced_step_data
2747 {
2748 struct aarch64_insn_data base;
2749
2750   /* The address at which the instruction will be executed.  */
2751 CORE_ADDR new_addr;
2752 /* Buffer of instructions to be copied to NEW_ADDR to execute. */
2753 uint32_t insn_buf[AARCH64_DISPLACED_MODIFIED_INSNS];
2754 /* Number of instructions in INSN_BUF. */
2755 unsigned insn_count;
2756 /* Registers when doing displaced stepping. */
2757 struct regcache *regs;
2758
2759 aarch64_displaced_step_copy_insn_closure *dsc;
2760 };
2761
2762 /* Implementation of aarch64_insn_visitor method "b". */
2763
2764 static void
2765 aarch64_displaced_step_b (const int is_bl, const int32_t offset,
2766 struct aarch64_insn_data *data)
2767 {
2768 struct aarch64_displaced_step_data *dsd
2769 = (struct aarch64_displaced_step_data *) data;
2770 int64_t new_offset = data->insn_addr - dsd->new_addr + offset;
2771
2772 if (can_encode_int32 (new_offset, 28))
2773 {
2774       /* Emit B rather than BL, because executing BL from the scratch
2775 	 address would put the wrong return address into LR.  To avoid
2776 	 this, we emit B, and update LR by hand if the instruction is BL.  */
2777 emit_b (dsd->insn_buf, 0, new_offset);
2778 dsd->insn_count++;
2779 }
2780 else
2781 {
2782 /* Write NOP. */
2783 emit_nop (dsd->insn_buf);
2784 dsd->insn_count++;
2785 dsd->dsc->pc_adjust = offset;
2786 }
2787
2788 if (is_bl)
2789 {
2790 /* Update LR. */
2791 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
2792 data->insn_addr + 4);
2793 }
2794 }
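
/* A worked example (addresses invented): a BL at from == 0x400000 with
   offset == 0x1000 targets 0x401000.  Executed from a scratch pad at
   to == 0x500000, the replacement B needs
   new_offset == 0x400000 - 0x500000 + 0x1000 == -0xff000, which still
   fits in the 28-bit branch range, so a single B is emitted and LR is
   set to 0x400004 by hand.  */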
2795
2796 /* Implementation of aarch64_insn_visitor method "b_cond". */
2797
2798 static void
2799 aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
2800 struct aarch64_insn_data *data)
2801 {
2802 struct aarch64_displaced_step_data *dsd
2803 = (struct aarch64_displaced_step_data *) data;
2804
2805   /* GDB has to fix up the PC after displaced-stepping this instruction
2806      differently, according to whether the condition is true or false.
2807      Instead of checking COND against the condition flags, we can emit
2808      the following instructions, and GDB can tell how to fix up the PC
2809      from the resulting PC value.
2810
2811 B.COND TAKEN ; If cond is true, then jump to TAKEN.
2812 INSN1 ;
2813 TAKEN:
2814 INSN2
2815 */
2816
2817 emit_bcond (dsd->insn_buf, cond, 8);
2818 dsd->dsc->cond = 1;
2819 dsd->dsc->pc_adjust = offset;
2820 dsd->insn_count = 1;
2821 }
2822
2823 /* Build an aarch64_register operand on the fly.  If the register is
2824    known statically, it is better to define it as a named constant
2825    instead of using this helper function.  */
2826
2827 static struct aarch64_register
2828 aarch64_register (unsigned num, int is64)
2829 {
2830 return (struct aarch64_register) { num, is64 };
2831 }
2832
2833 /* Implementation of aarch64_insn_visitor method "cb". */
2834
2835 static void
2836 aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
2837 const unsigned rn, int is64,
2838 struct aarch64_insn_data *data)
2839 {
2840 struct aarch64_displaced_step_data *dsd
2841 = (struct aarch64_displaced_step_data *) data;
2842
2843 /* The offset is out of range for a compare and branch
2844 instruction. We can use the following instructions instead:
2845
2846 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
2847 INSN1 ;
2848 TAKEN:
2849 INSN2
2850 */
2851 emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
2852 dsd->insn_count = 1;
2853 dsd->dsc->cond = 1;
2854 dsd->dsc->pc_adjust = offset;
2855 }
2856
2857 /* Implementation of aarch64_insn_visitor method "tb". */
2858
2859 static void
2860 aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
2861 const unsigned rt, unsigned bit,
2862 struct aarch64_insn_data *data)
2863 {
2864 struct aarch64_displaced_step_data *dsd
2865 = (struct aarch64_displaced_step_data *) data;
2866
2867   /* The offset is out of range for a test bit and branch
2868      instruction.  We can use the following instructions instead:
2869
2870 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
2871 INSN1 ;
2872 TAKEN:
2873 INSN2
2874
2875 */
2876 emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
2877 dsd->insn_count = 1;
2878 dsd->dsc->cond = 1;
2879 dsd->dsc->pc_adjust = offset;
2880 }
2881
2882 /* Implementation of aarch64_insn_visitor method "adr". */
2883
2884 static void
2885 aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
2886 const int is_adrp, struct aarch64_insn_data *data)
2887 {
2888 struct aarch64_displaced_step_data *dsd
2889 = (struct aarch64_displaced_step_data *) data;
2890 /* We know exactly the address the ADR{P,} instruction will compute.
2891 We can just write it to the destination register. */
2892 CORE_ADDR address = data->insn_addr + offset;
2893
2894 if (is_adrp)
2895 {
2896       /* Clear the lower 12 bits of the address to get its 4K page.  */
2897 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2898 address & ~0xfff);
2899 }
2900 else
2901 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2902 address);
2903
2904 dsd->dsc->pc_adjust = 4;
2905 emit_nop (dsd->insn_buf);
2906 dsd->insn_count = 1;
2907 }
2908
2909 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
2910
2911 static void
2912 aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
2913 const unsigned rt, const int is64,
2914 struct aarch64_insn_data *data)
2915 {
2916 struct aarch64_displaced_step_data *dsd
2917 = (struct aarch64_displaced_step_data *) data;
2918 CORE_ADDR address = data->insn_addr + offset;
2919 struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };
2920
2921 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
2922 address);
2923
2924 if (is_sw)
2925 dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
2926 aarch64_register (rt, 1), zero);
2927 else
2928 dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
2929 aarch64_register (rt, 1), zero);
2930
2931 dsd->dsc->pc_adjust = 4;
2932 }
2933
2934 /* Implementation of aarch64_insn_visitor method "others". */
2935
2936 static void
2937 aarch64_displaced_step_others (const uint32_t insn,
2938 struct aarch64_insn_data *data)
2939 {
2940 struct aarch64_displaced_step_data *dsd
2941 = (struct aarch64_displaced_step_data *) data;
2942
2943 aarch64_emit_insn (dsd->insn_buf, insn);
2944 dsd->insn_count = 1;
2945
2946 if ((insn & 0xfffffc1f) == 0xd65f0000)
2947 {
2948 /* RET */
2949 dsd->dsc->pc_adjust = 0;
2950 }
2951 else
2952 dsd->dsc->pc_adjust = 4;
2953 }
2954
2955 static const struct aarch64_insn_visitor visitor =
2956 {
2957 aarch64_displaced_step_b,
2958 aarch64_displaced_step_b_cond,
2959 aarch64_displaced_step_cb,
2960 aarch64_displaced_step_tb,
2961 aarch64_displaced_step_adr,
2962 aarch64_displaced_step_ldr_literal,
2963 aarch64_displaced_step_others,
2964 };
2965
2966 /* Implement the "displaced_step_copy_insn" gdbarch method. */
2967
2968 displaced_step_copy_insn_closure_up
2969 aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
2970 CORE_ADDR from, CORE_ADDR to,
2971 struct regcache *regs)
2972 {
2973 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2974 uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
2975 struct aarch64_displaced_step_data dsd;
2976 aarch64_inst inst;
2977
2978 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2979 return NULL;
2980
2981 /* Look for a Load Exclusive instruction which begins the sequence. */
2982 if (inst.opcode->iclass == ldstexcl && bit (insn, 22))
2983 {
2984 /* We can't displaced step atomic sequences. */
2985 return NULL;
2986 }
2987
2988 std::unique_ptr<aarch64_displaced_step_copy_insn_closure> dsc
2989 (new aarch64_displaced_step_copy_insn_closure);
2990 dsd.base.insn_addr = from;
2991 dsd.new_addr = to;
2992 dsd.regs = regs;
2993 dsd.dsc = dsc.get ();
2994 dsd.insn_count = 0;
2995 aarch64_relocate_instruction (insn, &visitor,
2996 (struct aarch64_insn_data *) &dsd);
2997 gdb_assert (dsd.insn_count <= AARCH64_DISPLACED_MODIFIED_INSNS);
2998
2999 if (dsd.insn_count != 0)
3000 {
3001 int i;
3002
3003       /* The instruction can be relocated to the scratch pad.  Copy the
3004 	 relocated instruction(s) there.  */
3005 for (i = 0; i < dsd.insn_count; i++)
3006 {
3007 if (debug_displaced)
3008 {
3009 debug_printf ("displaced: writing insn ");
3010 debug_printf ("%.8x", dsd.insn_buf[i]);
3011 debug_printf (" at %s\n", paddress (gdbarch, to + i * 4));
3012 }
3013 write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
3014 (ULONGEST) dsd.insn_buf[i]);
3015 }
3016 }
3017 else
3018 {
3019 dsc = NULL;
3020 }
3021
3022   /* This is a workaround for a problem with g++ 4.8.  */
3023 return displaced_step_copy_insn_closure_up (dsc.release ());
3024 }
3025
3026 /* Implement the "displaced_step_fixup" gdbarch method. */
3027
3028 void
3029 aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
3030 struct displaced_step_copy_insn_closure *dsc_,
3031 CORE_ADDR from, CORE_ADDR to,
3032 struct regcache *regs)
3033 {
3034 aarch64_displaced_step_copy_insn_closure *dsc = (aarch64_displaced_step_copy_insn_closure *) dsc_;
3035
3036 if (dsc->cond)
3037 {
3038 ULONGEST pc;
3039
3040 regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc);
3041 if (pc - to == 8)
3042 {
3043 /* Condition is true. */
3044 }
3045 else if (pc - to == 4)
3046 {
3047 /* Condition is false. */
3048 dsc->pc_adjust = 4;
3049 }
3050 else
3051 gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
3052 }
3053
3054 if (dsc->pc_adjust != 0)
3055 {
3056 if (debug_displaced)
3057 {
3058 debug_printf ("displaced: fixup: set PC to %s:%d\n",
3059 paddress (gdbarch, from), dsc->pc_adjust);
3060 }
3061 regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
3062 from + dsc->pc_adjust);
3063 }
3064 }
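
/* A worked example (addresses invented): a "b.eq" copied from
   from == 0x400100 to a scratch pad at to == 0x7f0000.  If the
   condition held, the scratch-pad B.COND skipped to to + 8, so
   pc - to == 8 and pc_adjust keeps the original branch offset;
   otherwise pc - to == 4, pc_adjust is forced to 4, and the PC is
   rewritten to 0x400104, the instruction after the original branch.  */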
3065
3066 /* Implement the "displaced_step_hw_singlestep" gdbarch method. */
3067
3068 int
3069 aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch,
3070 struct displaced_step_copy_insn_closure *closure)
3071 {
3072 return 1;
3073 }
3074
3075 /* Get the correct target description for the given VQ value.
3076 If VQ is zero then it is assumed SVE is not supported.
3077 (It is not possible to set VQ to zero on an SVE system). */
3078
3079 const target_desc *
3080 aarch64_read_description (uint64_t vq, bool pauth_p)
3081 {
3082 if (vq > AARCH64_MAX_SVE_VQ)
3083 error (_("VQ is %" PRIu64 ", maximum supported value is %d"), vq,
3084 AARCH64_MAX_SVE_VQ);
3085
3086 struct target_desc *tdesc = tdesc_aarch64_list[vq][pauth_p];
3087
3088 if (tdesc == NULL)
3089 {
3090 tdesc = aarch64_create_target_description (vq, pauth_p);
3091 tdesc_aarch64_list[vq][pauth_p] = tdesc;
3092 }
3093
3094 return tdesc;
3095 }
3096
3097 /* Return the VQ used when creating the target description TDESC. */
3098
3099 static uint64_t
3100 aarch64_get_tdesc_vq (const struct target_desc *tdesc)
3101 {
3102 const struct tdesc_feature *feature_sve;
3103
3104 if (!tdesc_has_registers (tdesc))
3105 return 0;
3106
3107 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
3108
3109 if (feature_sve == nullptr)
3110 return 0;
3111
3112 uint64_t vl = tdesc_register_bitsize (feature_sve,
3113 aarch64_sve_register_names[0]) / 8;
3114 return sve_vq_from_vl (vl);
3115 }
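
/* A worked example of the arithmetic above: a description whose SVE Z
   registers are 512 bits wide gives vl == 512 / 8 == 64 bytes, and
   sve_vq_from_vl yields VQ == 64 / 16 == 4, i.e. four 128-bit
   quadwords per vector register.  */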
3116
3117 /* Add all the expected register sets into GDBARCH. */
3118
3119 static void
3120 aarch64_add_reggroups (struct gdbarch *gdbarch)
3121 {
3122 reggroup_add (gdbarch, general_reggroup);
3123 reggroup_add (gdbarch, float_reggroup);
3124 reggroup_add (gdbarch, system_reggroup);
3125 reggroup_add (gdbarch, vector_reggroup);
3126 reggroup_add (gdbarch, all_reggroup);
3127 reggroup_add (gdbarch, save_reggroup);
3128 reggroup_add (gdbarch, restore_reggroup);
3129 }
3130
3131 /* Implement the "cannot_store_register" gdbarch method. */
3132
3133 static int
3134 aarch64_cannot_store_register (struct gdbarch *gdbarch, int regnum)
3135 {
3136 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3137
3138 if (!tdep->has_pauth ())
3139 return 0;
3140
3141 /* Pointer authentication registers are read-only. */
3142 return (regnum == AARCH64_PAUTH_DMASK_REGNUM (tdep->pauth_reg_base)
3143 || regnum == AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base));
3144 }
3145
3146 /* Initialize the current architecture based on INFO. If possible,
3147 re-use an architecture from ARCHES, which is a list of
3148 architectures already created during this debugging session.
3149
3150 Called e.g. at program startup, when reading a core file, and when
3151 reading a binary file. */
3152
3153 static struct gdbarch *
3154 aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
3155 {
3156 const struct tdesc_feature *feature_core, *feature_fpu, *feature_sve;
3157 const struct tdesc_feature *feature_pauth;
3158 bool valid_p = true;
3159 int i, num_regs = 0, num_pseudo_regs = 0;
3160 int first_pauth_regnum = -1, pauth_ra_state_offset = -1;
3161
3162   /* Use the vector length passed via the target info.  Here -1 is used
3163      for no SVE, and 0 means unset.  If unset, use the vector length from
3164      the existing tdesc.  */
3165 uint64_t vq = 0;
3166 if (info.id == (int *) -1)
3167 vq = 0;
3168 else if (info.id != 0)
3169 vq = (uint64_t) info.id;
3170 else
3171 vq = aarch64_get_tdesc_vq (info.target_desc);
3172
3173 if (vq > AARCH64_MAX_SVE_VQ)
3174 internal_error (__FILE__, __LINE__, _("VQ out of bounds: %s (max %d)"),
3175 pulongest (vq), AARCH64_MAX_SVE_VQ);
3176
3177 /* If there is already a candidate, use it. */
3178 for (gdbarch_list *best_arch = gdbarch_list_lookup_by_info (arches, &info);
3179 best_arch != nullptr;
3180 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
3181 {
3182 struct gdbarch_tdep *tdep = gdbarch_tdep (best_arch->gdbarch);
3183 if (tdep && tdep->vq == vq)
3184 return best_arch->gdbarch;
3185 }
3186
3187 /* Ensure we always have a target descriptor, and that it is for the given VQ
3188 value. */
3189 const struct target_desc *tdesc = info.target_desc;
3190 if (!tdesc_has_registers (tdesc) || vq != aarch64_get_tdesc_vq (tdesc))
3191 tdesc = aarch64_read_description (vq, false);
3192 gdb_assert (tdesc);
3193
3194   feature_core = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");
3195 feature_fpu = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
3196 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
3197 feature_pauth = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.pauth");
3198
3199 if (feature_core == nullptr)
3200 return nullptr;
3201
3202 struct tdesc_arch_data *tdesc_data = tdesc_data_alloc ();
3203
3204 /* Validate the description provides the mandatory core R registers
3205 and allocate their numbers. */
3206 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
3207 valid_p &= tdesc_numbered_register (feature_core, tdesc_data,
3208 AARCH64_X0_REGNUM + i,
3209 aarch64_r_register_names[i]);
3210
3211 num_regs = AARCH64_X0_REGNUM + i;
3212
3213 /* Add the V registers. */
3214 if (feature_fpu != nullptr)
3215 {
3216 if (feature_sve != nullptr)
3217 error (_("Program contains both fpu and SVE features."));
3218
3219 /* Validate the description provides the mandatory V registers
3220 and allocate their numbers. */
3221 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
3222 valid_p &= tdesc_numbered_register (feature_fpu, tdesc_data,
3223 AARCH64_V0_REGNUM + i,
3224 aarch64_v_register_names[i]);
3225
3226 num_regs = AARCH64_V0_REGNUM + i;
3227 }
3228
3229 /* Add the SVE registers. */
3230 if (feature_sve != nullptr)
3231 {
3232 /* Validate the description provides the mandatory SVE registers
3233 and allocate their numbers. */
3234 for (i = 0; i < ARRAY_SIZE (aarch64_sve_register_names); i++)
3235 valid_p &= tdesc_numbered_register (feature_sve, tdesc_data,
3236 AARCH64_SVE_Z0_REGNUM + i,
3237 aarch64_sve_register_names[i]);
3238
3239 num_regs = AARCH64_SVE_Z0_REGNUM + i;
3240 num_pseudo_regs += 32; /* add the Vn register pseudos. */
3241 }
3242
3243 if (feature_fpu != nullptr || feature_sve != nullptr)
3244 {
3245       num_pseudo_regs += 32;	/* add the Qn scalar register pseudos.  */
3246       num_pseudo_regs += 32;	/* add the Dn scalar register pseudos.  */
3247       num_pseudo_regs += 32;	/* add the Sn scalar register pseudos.  */
3248       num_pseudo_regs += 32;	/* add the Hn scalar register pseudos.  */
3249       num_pseudo_regs += 32;	/* add the Bn scalar register pseudos.  */
3250 }
3251
3252 /* Add the pauth registers. */
3253 if (feature_pauth != NULL)
3254 {
3255 first_pauth_regnum = num_regs;
3256 pauth_ra_state_offset = num_pseudo_regs;
3257 /* Validate the descriptor provides the mandatory PAUTH registers and
3258 allocate their numbers. */
3259 for (i = 0; i < ARRAY_SIZE (aarch64_pauth_register_names); i++)
3260 valid_p &= tdesc_numbered_register (feature_pauth, tdesc_data,
3261 first_pauth_regnum + i,
3262 aarch64_pauth_register_names[i]);
3263
3264 num_regs += i;
3265 num_pseudo_regs += 1; /* Count RA_STATE pseudo register. */
3266 }
3267
3268 if (!valid_p)
3269 {
3270 tdesc_data_cleanup (tdesc_data);
3271 return nullptr;
3272 }
3273
3274 /* AArch64 code is always little-endian. */
3275 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
3276
3277 struct gdbarch_tdep *tdep = XCNEW (struct gdbarch_tdep);
3278 struct gdbarch *gdbarch = gdbarch_alloc (&info, tdep);
3279
3280 /* This should be low enough for everything. */
3281 tdep->lowest_pc = 0x20;
3282 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
3283 tdep->jb_elt_size = 8;
3284 tdep->vq = vq;
3285 tdep->pauth_reg_base = first_pauth_regnum;
3286 tdep->pauth_ra_state_regnum = (feature_pauth == NULL) ? -1
3287 : pauth_ra_state_offset + num_regs;
3288
3289 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
3290 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
3291
3292 /* Advance PC across function entry code. */
3293 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
3294
3295 /* The stack grows downward. */
3296 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
3297
3298 /* Breakpoint manipulation. */
3299 set_gdbarch_breakpoint_kind_from_pc (gdbarch,
3300 aarch64_breakpoint::kind_from_pc);
3301 set_gdbarch_sw_breakpoint_from_kind (gdbarch,
3302 aarch64_breakpoint::bp_from_kind);
3303 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
3304 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
3305
3306 /* Information about registers, etc. */
3307 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
3308 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
3309 set_gdbarch_num_regs (gdbarch, num_regs);
3310
3311 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
3312 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
3313 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
3314 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
3315 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
3316 set_tdesc_pseudo_register_reggroup_p (gdbarch,
3317 aarch64_pseudo_register_reggroup_p);
3318 set_gdbarch_cannot_store_register (gdbarch, aarch64_cannot_store_register);
3319
3320 /* ABI */
3321 set_gdbarch_short_bit (gdbarch, 16);
3322 set_gdbarch_int_bit (gdbarch, 32);
3323 set_gdbarch_float_bit (gdbarch, 32);
3324 set_gdbarch_double_bit (gdbarch, 64);
3325 set_gdbarch_long_double_bit (gdbarch, 128);
3326 set_gdbarch_long_bit (gdbarch, 64);
3327 set_gdbarch_long_long_bit (gdbarch, 64);
3328 set_gdbarch_ptr_bit (gdbarch, 64);
3329 set_gdbarch_char_signed (gdbarch, 0);
3330 set_gdbarch_wchar_signed (gdbarch, 0);
3331 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
3332 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
3333 set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);
3334 set_gdbarch_type_align (gdbarch, aarch64_type_align);
3335
3336 /* Internal <-> external register number maps. */
3337 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
3338
3339 /* Returning results. */
3340 set_gdbarch_return_value (gdbarch, aarch64_return_value);
3341
3342 /* Disassembly. */
3343 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
3344
3345 /* Virtual tables. */
3346 set_gdbarch_vbit_in_delta (gdbarch, 1);
3347
3348 /* Register architecture. */
3349 aarch64_add_reggroups (gdbarch);
3350
3351 /* Hook in the ABI-specific overrides, if they have been registered. */
3352 info.target_desc = tdesc;
3353 info.tdesc_data = tdesc_data;
3354 gdbarch_init_osabi (info, gdbarch);
3355
3356 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
3357 /* Register DWARF CFA vendor handler. */
3358 set_gdbarch_execute_dwarf_cfa_vendor_op (gdbarch,
3359 aarch64_execute_dwarf_cfa_vendor_op);
3360
3361 /* Add some default predicates. */
3362 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
3363 dwarf2_append_unwinders (gdbarch);
3364 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
3365
3366 frame_base_set_default (gdbarch, &aarch64_normal_base);
3367
3368   /* Now that we have tuned the configuration, set a few final things
3369      based on what the OS ABI has told us.  */
3370
3371 if (tdep->jb_pc >= 0)
3372 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
3373
3374 set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);
3375
3376 set_gdbarch_get_pc_address_flags (gdbarch, aarch64_get_pc_address_flags);
3377
3378 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
3379
3380 /* Add standard register aliases. */
3381 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
3382 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
3383 value_of_aarch64_user_reg,
3384 &aarch64_register_aliases[i].regnum);
3385
3386 register_aarch64_ravenscar_ops (gdbarch);
3387
3388 return gdbarch;
3389 }
3390
3391 static void
3392 aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
3393 {
3394 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3395
3396 if (tdep == NULL)
3397 return;
3398
3399 fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
3400 paddress (gdbarch, tdep->lowest_pc));
3401 }
3402
3403 #if GDB_SELF_TEST
3404 namespace selftests
3405 {
3406 static void aarch64_process_record_test (void);
3407 }
3408 #endif
3409
3410 void
3411 _initialize_aarch64_tdep (void)
3412 {
3413 gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
3414 aarch64_dump_tdep);
3415
3416 /* Debug this file's internals. */
3417 add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
3418 Set AArch64 debugging."), _("\
3419 Show AArch64 debugging."), _("\
3420 When on, AArch64 specific debugging is enabled."),
3421 NULL,
3422 show_aarch64_debug,
3423 &setdebuglist, &showdebuglist);
3424
3425 #if GDB_SELF_TEST
3426 selftests::register_test ("aarch64-analyze-prologue",
3427 selftests::aarch64_analyze_prologue_test);
3428 selftests::register_test ("aarch64-process-record",
3429 selftests::aarch64_process_record_test);
3430 #endif
3431 }
3432
3433 /* AArch64 process record-replay related structures, defines etc. */
3434
3435 #define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
3436 do \
3437 { \
3438 unsigned int reg_len = LENGTH; \
3439 if (reg_len) \
3440 { \
3441 REGS = XNEWVEC (uint32_t, reg_len); \
3442 	  memcpy (&REGS[0], &RECORD_BUF[0], sizeof (uint32_t) * LENGTH); \
3443 } \
3444 } \
3445 while (0)
3446
3447 #define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
3448 do \
3449 { \
3450 unsigned int mem_len = LENGTH; \
3451 if (mem_len) \
3452 { \
3453 MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
3454 	  memcpy (&MEMS->len, &RECORD_BUF[0], \
3455 		  sizeof (struct aarch64_mem_r) * LENGTH); \
3456 } \
3457 } \
3458 while (0)
3459
3460 /* AArch64 record/replay structures and enumerations. */
3461
3462 struct aarch64_mem_r
3463 {
3464 uint64_t len; /* Record length. */
3465 uint64_t addr; /* Memory address. */
3466 };
3467
3468 enum aarch64_record_result
3469 {
3470 AARCH64_RECORD_SUCCESS,
3471 AARCH64_RECORD_UNSUPPORTED,
3472 AARCH64_RECORD_UNKNOWN
3473 };
3474
3475 typedef struct insn_decode_record_t
3476 {
3477 struct gdbarch *gdbarch;
3478 struct regcache *regcache;
3479 CORE_ADDR this_addr; /* Address of insn to be recorded. */
3480 uint32_t aarch64_insn; /* Insn to be recorded. */
3481 uint32_t mem_rec_count; /* Count of memory records. */
3482 uint32_t reg_rec_count; /* Count of register records. */
3483 uint32_t *aarch64_regs; /* Registers to be recorded. */
3484 struct aarch64_mem_r *aarch64_mems; /* Memory locations to be recorded. */
3485 } insn_decode_record;
3486
3487 /* Record handler for data processing - register instructions. */
3488
3489 static unsigned int
3490 aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
3491 {
3492 uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
3493 uint32_t record_buf[4];
3494
3495 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3496 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3497 insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);
3498
3499 if (!bit (aarch64_insn_r->aarch64_insn, 28))
3500 {
3501 uint8_t setflags;
3502
3503 /* Logical (shifted register). */
3504 if (insn_bits24_27 == 0x0a)
3505 setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
3506 /* Add/subtract. */
3507 else if (insn_bits24_27 == 0x0b)
3508 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3509 else
3510 return AARCH64_RECORD_UNKNOWN;
3511
3512 record_buf[0] = reg_rd;
3513 aarch64_insn_r->reg_rec_count = 1;
3514 if (setflags)
3515 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3516 }
3517 else
3518 {
3519 if (insn_bits24_27 == 0x0b)
3520 {
3521 /* Data-processing (3 source). */
3522 record_buf[0] = reg_rd;
3523 aarch64_insn_r->reg_rec_count = 1;
3524 }
3525 else if (insn_bits24_27 == 0x0a)
3526 {
3527 if (insn_bits21_23 == 0x00)
3528 {
3529 /* Add/subtract (with carry). */
3530 record_buf[0] = reg_rd;
3531 aarch64_insn_r->reg_rec_count = 1;
3532 if (bit (aarch64_insn_r->aarch64_insn, 29))
3533 {
3534 record_buf[1] = AARCH64_CPSR_REGNUM;
3535 aarch64_insn_r->reg_rec_count = 2;
3536 }
3537 }
3538 else if (insn_bits21_23 == 0x02)
3539 {
3540 /* Conditional compare (register) and conditional compare
3541 (immediate) instructions. */
3542 record_buf[0] = AARCH64_CPSR_REGNUM;
3543 aarch64_insn_r->reg_rec_count = 1;
3544 }
3545 else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
3546 {
3547 /* Conditional select. */
3548 /* Data-processing (2 source). */
3549 /* Data-processing (1 source). */
3550 record_buf[0] = reg_rd;
3551 aarch64_insn_r->reg_rec_count = 1;
3552 }
3553 else
3554 return AARCH64_RECORD_UNKNOWN;
3555 }
3556 }
3557
3558 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3559 record_buf);
3560 return AARCH64_RECORD_SUCCESS;
3561 }
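
/* An illustrative decode for the handler above: "adds x0, x1, x2"
   encodes as 0xab020020, so bit 28 is clear, insn_bits24_27 is 0x0b
   (add/subtract) and bit 29 (the S flag) is set; record_buf therefore
   collects both X0 and AARCH64_CPSR_REGNUM before REG_ALLOC copies
   them into the record.  */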
3562
3563 /* Record handler for data processing - immediate instructions. */
3564
3565 static unsigned int
3566 aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
3567 {
3568 uint8_t reg_rd, insn_bit23, insn_bits24_27, setflags;
3569 uint32_t record_buf[4];
3570
3571 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3572 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3573 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3574
3575 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
3576 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
3577 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
3578 {
3579 record_buf[0] = reg_rd;
3580 aarch64_insn_r->reg_rec_count = 1;
3581 }
3582 else if (insn_bits24_27 == 0x01)
3583 {
3584 /* Add/Subtract (immediate). */
3585 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3586 record_buf[0] = reg_rd;
3587 aarch64_insn_r->reg_rec_count = 1;
3588 if (setflags)
3589 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3590 }
3591 else if (insn_bits24_27 == 0x02 && !insn_bit23)
3592 {
3593 /* Logical (immediate). */
3594 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
3595 record_buf[0] = reg_rd;
3596 aarch64_insn_r->reg_rec_count = 1;
3597 if (setflags)
3598 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3599 }
3600 else
3601 return AARCH64_RECORD_UNKNOWN;
3602
3603 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3604 record_buf);
3605 return AARCH64_RECORD_SUCCESS;
3606 }
3607
3608 /* Record handler for branch, exception generation and system instructions. */
3609
3610 static unsigned int
3611 aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
3612 {
3613 struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
3614 uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
3615 uint32_t record_buf[4];
3616
3617 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3618 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3619 insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3620
3621 if (insn_bits28_31 == 0x0d)
3622 {
3623 /* Exception generation instructions. */
3624 if (insn_bits24_27 == 0x04)
3625 {
3626 if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
3627 && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
3628 && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
3629 {
3630 ULONGEST svc_number;
3631
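/* Raw register 8 is x8, which holds the syscall number under the
   AArch64 Linux syscall convention.  */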
3632 regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
3633 &svc_number);
3634 return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
3635 svc_number);
3636 }
3637 else
3638 return AARCH64_RECORD_UNSUPPORTED;
3639 }
3640 /* System instructions. */
3641 else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
3642 {
3643 uint32_t reg_rt, reg_crn;
3644
3645 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3646 reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3647
3648 /* Record Rt for SYSL and MRS instructions. */
3649 if (bit (aarch64_insn_r->aarch64_insn, 21))
3650 {
3651 record_buf[0] = reg_rt;
3652 aarch64_insn_r->reg_rec_count = 1;
3653 }
3654 /* Record CPSR for HINT and MSR (immediate) instructions. */
3655 else if (reg_crn == 0x02 || reg_crn == 0x04)
3656 {
3657 record_buf[0] = AARCH64_CPSR_REGNUM;
3658 aarch64_insn_r->reg_rec_count = 1;
3659 }
3660 }
3661 /* Unconditional branch (register). */
3662 else if ((insn_bits24_27 & 0x0e) == 0x06)
3663 {
3664 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3665 if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
3666 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3667 }
3668 else
3669 return AARCH64_RECORD_UNKNOWN;
3670 }
3671 /* Unconditional branch (immediate). */
3672 else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
3673 {
3674 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3675 if (bit (aarch64_insn_r->aarch64_insn, 31))
3676 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3677 }
3678 else
3679 /* Compare & branch (immediate), Test & branch (immediate) and
3680 Conditional branch (immediate). */
3681 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3682
3683 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3684 record_buf);
3685 return AARCH64_RECORD_SUCCESS;
3686 }
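/* For example, "b target" records only the PC, while "bl target"
   (bit 31 set) records PC and LR; likewise "br x0" records PC and
   "blr x0" records PC and LR.  Compare & branch, test & branch and
   conditional branches can only change the PC.  */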
3687
3688 /* Record handler for advanced SIMD load and store instructions. */
3689
3690 static unsigned int
3691 aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
3692 {
3693 CORE_ADDR address;
3694 uint64_t addr_offset = 0;
3695 uint32_t record_buf[24];
3696 uint64_t record_buf_mem[24];
3697 uint32_t reg_rn, reg_rt;
3698 uint32_t reg_index = 0, mem_index = 0;
3699 uint8_t opcode_bits, size_bits;
3700
3701 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3702 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3703 size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3704 opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3705 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);
3706
3707 if (record_debug)
3708 debug_printf ("Process record: Advanced SIMD load/store\n");
3709
3710 /* Load/store single structure. */
3711 if (bit (aarch64_insn_r->aarch64_insn, 24))
3712 {
3713 uint8_t sindex, scale, selem, esize, replicate = 0;
3714 scale = opcode_bits >> 2;
3715 selem = ((opcode_bits & 0x02)
3716 | bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
3717 switch (scale)
3718 {
3719 case 1:
3720 if (size_bits & 0x01)
3721 return AARCH64_RECORD_UNKNOWN;
3722 break;
3723 case 2:
3724 if ((size_bits >> 1) & 0x01)
3725 return AARCH64_RECORD_UNKNOWN;
3726 if (size_bits & 0x01)
3727 {
3728 if (!((opcode_bits >> 1) & 0x01))
3729 scale = 3;
3730 else
3731 return AARCH64_RECORD_UNKNOWN;
3732 }
3733 break;
3734 case 3:
3735 if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
3736 {
3737 scale = size_bits;
3738 replicate = 1;
3739 break;
3740 }
3741 else
3742 return AARCH64_RECORD_UNKNOWN;
3743 default:
3744 break;
3745 }
3746 esize = 8 << scale;
3747 if (replicate)
3748 for (sindex = 0; sindex < selem; sindex++)
3749 {
3750 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3751 reg_rt = (reg_rt + 1) % 32;
3752 }
3753 else
3754 {
3755 for (sindex = 0; sindex < selem; sindex++)
3756 {
3757 if (bit (aarch64_insn_r->aarch64_insn, 22))
3758 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3759 else
3760 {
3761 record_buf_mem[mem_index++] = esize / 8;
3762 record_buf_mem[mem_index++] = address + addr_offset;
3763 }
3764 addr_offset = addr_offset + (esize / 8);
3765 reg_rt = (reg_rt + 1) % 32;
3766 }
3767 }
3768 }
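/* For example, the lane load "ld1 {v0.b}[2], [x0]" records v0, while
   the store form "st1 {v0.b}[2], [x0]" instead records one byte of
   memory at the address held in x0.  */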
3769 /* Load/store multiple structure. */
3770 else
3771 {
3772 uint8_t selem, esize, rpt, elements;
3773 uint8_t eindex, rindex;
3774
3775 esize = 8 << size_bits;
3776 if (bit (aarch64_insn_r->aarch64_insn, 30))
3777 elements = 128 / esize;
3778 else
3779 elements = 64 / esize;
3780
3781 switch (opcode_bits)
3782 {
3783 /* LD/ST4 (4 Registers). */
3784 case 0:
3785 rpt = 1;
3786 selem = 4;
3787 break;
3788 /* LD/ST1 (4 Registers). */
3789 case 2:
3790 rpt = 4;
3791 selem = 1;
3792 break;
3793 /* LD/ST3 (3 Registers). */
3794 case 4:
3795 rpt = 1;
3796 selem = 3;
3797 break;
3798 /* LD/ST1 (3 Registers). */
3799 case 6:
3800 rpt = 3;
3801 selem = 1;
3802 break;
3803 /* LD/ST1 (1 Register). */
3804 case 7:
3805 rpt = 1;
3806 selem = 1;
3807 break;
3808 /* LD/ST2 (2 Registers). */
3809 case 8:
3810 rpt = 1;
3811 selem = 2;
3812 break;
3813 /* LD/ST1 (2 Registers). */
3814 case 10:
3815 rpt = 2;
3816 selem = 1;
3817 break;
3818 default:
3819 return AARCH64_RECORD_UNSUPPORTED;
3820 break;
3821 }
3822 for (rindex = 0; rindex < rpt; rindex++)
3823 for (eindex = 0; eindex < elements; eindex++)
3824 {
3825 uint8_t reg_tt, sindex;
3826 reg_tt = (reg_rt + rindex) % 32;
3827 for (sindex = 0; sindex < selem; sindex++)
3828 {
3829 if (bit (aarch64_insn_r->aarch64_insn, 22))
3830 record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
3831 else
3832 {
3833 record_buf_mem[mem_index++] = esize / 8;
3834 record_buf_mem[mem_index++] = address + addr_offset;
3835 }
3836 addr_offset = addr_offset + (esize / 8);
3837 reg_tt = (reg_tt + 1) % 32;
3838 }
3839 }
3840 }
3841
3842 if (bit (aarch64_insn_r->aarch64_insn, 23))
3843 record_buf[reg_index++] = reg_rn;
3844
3845 aarch64_insn_r->reg_rec_count = reg_index;
3846 aarch64_insn_r->mem_rec_count = mem_index / 2;
3847 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3848 record_buf_mem);
3849 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3850 record_buf);
3851 return AARCH64_RECORD_SUCCESS;
3852 }
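/* For example, "st1 {v0.16b}, [x0]" runs the multiple-structure loop
   above with rpt = 1, selem = 1 and sixteen 1-byte elements, recording
   sixteen 1-byte memory entries starting at the address in x0; the
   post-indexed form "st1 {v0.16b}, [x0], #16" (bit 23 set)
   additionally records x0 for the writeback.  */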
3853
3854 /* Record handler for load and store instructions. */
3855
3856 static unsigned int
3857 aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
3858 {
3859 uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
3860 uint8_t insn_bit23, insn_bit21;
3861 uint8_t opc, size_bits, ld_flag, vector_flag;
3862 uint32_t reg_rn, reg_rt, reg_rt2;
3863 uint64_t datasize, offset;
3864 uint32_t record_buf[8];
3865 uint64_t record_buf_mem[8];
3866 CORE_ADDR address;
3867
3868 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3869 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3870 insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
3871 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3872 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3873 ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
3874 vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
3875 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3876 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3877 reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
3878 size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);
3879
3880 /* Load/store exclusive. */
3881 if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
3882 {
3883 if (record_debug)
3884 debug_printf ("Process record: load/store exclusive\n");
3885
3886 if (ld_flag)
3887 {
3888 record_buf[0] = reg_rt;
3889 aarch64_insn_r->reg_rec_count = 1;
3890 if (insn_bit21)
3891 {
3892 record_buf[1] = reg_rt2;
3893 aarch64_insn_r->reg_rec_count = 2;
3894 }
3895 }
3896 else
3897 {
3898 if (insn_bit21)
3899 datasize = (8 << size_bits) * 2;
3900 else
3901 datasize = (8 << size_bits);
3902 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3903 &address);
3904 record_buf_mem[0] = datasize / 8;
3905 record_buf_mem[1] = address;
3906 aarch64_insn_r->mem_rec_count = 1;
3907 if (!insn_bit23)
3908 {
3909 /* Save register rs. */
3910 record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
3911 aarch64_insn_r->reg_rec_count = 1;
3912 }
3913 }
3914 }
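/* For example, "stxr w3, x1, [x0]" takes the store path above: it
   records 8 bytes of memory at the address in x0 plus the status
   register w3 (Rs, bits 16-20), which receives the success/failure
   result of the exclusive store.  */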
3915 /* Decoding of load register (literal) instructions. */
3916 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
3917 {
3918 if (record_debug)
3919 debug_printf ("Process record: load register (literal)\n");
3920 if (vector_flag)
3921 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3922 else
3923 record_buf[0] = reg_rt;
3924 aarch64_insn_r->reg_rec_count = 1;
3925 }
3926 /* Decoding of all types of load/store pair instructions. */
3927 else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
3928 {
3929 if (record_debug)
3930 debug_printf ("Process record: load/store pair\n");
3931
3932 if (ld_flag)
3933 {
3934 if (vector_flag)
3935 {
3936 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3937 record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
3938 }
3939 else
3940 {
3941 record_buf[0] = reg_rt;
3942 record_buf[1] = reg_rt2;
3943 }
3944 aarch64_insn_r->reg_rec_count = 2;
3945 }
3946 else
3947 {
3948 uint16_t imm7_off;
3949 imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
3950 if (!vector_flag)
3951 size_bits = size_bits >> 1;
3952 datasize = 8 << (2 + size_bits);
3953 offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
3954 offset = offset << (2 + size_bits);
3955 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3956 &address);
3957 if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
3958 {
3959 if (imm7_off & 0x40)
3960 address = address - offset;
3961 else
3962 address = address + offset;
3963 }
3964
3965 record_buf_mem[0] = datasize / 8;
3966 record_buf_mem[1] = address;
3967 record_buf_mem[2] = datasize / 8;
3968 record_buf_mem[3] = address + (datasize / 8);
3969 aarch64_insn_r->mem_rec_count = 2;
3970 }
3971 if (bit (aarch64_insn_r->aarch64_insn, 23))
3972 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3973 }
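/* For example, "stp x1, x2, [sp, #-16]!" (pre-indexed, bit 23 set)
   records two 8-byte memory entries starting at the decremented SP,
   and records SP itself for the writeback.  */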
3974 /* Load/store register (unsigned immediate) instructions. */
3975 else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
3976 {
3977 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3978 if (!(opc >> 1))
3979 {
3980 if (opc & 0x01)
3981 ld_flag = 0x01;
3982 else
3983 ld_flag = 0x0;
3984 }
3985 else
3986 {
3987 if (size_bits == 0x3 && vector_flag == 0x0 && opc == 0x2)
3988 {
3989 /* PRFM (immediate) */
3990 return AARCH64_RECORD_SUCCESS;
3991 }
3992 else if (size_bits == 0x2 && vector_flag == 0x0 && opc == 0x2)
3993 {
3994 /* LDRSW (immediate) */
3995 ld_flag = 0x1;
3996 }
3997 else
3998 {
3999 if (opc & 0x01)
4000 ld_flag = 0x01;
4001 else
4002 ld_flag = 0x0;
4003 }
4004 }
4005
4006 if (record_debug)
4007 {
4008 debug_printf ("Process record: load/store (unsigned immediate):"
4009 " size %x V %d opc %x\n", size_bits, vector_flag,
4010 opc);
4011 }
4012
4013 if (!ld_flag)
4014 {
4015 offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
4016 datasize = 8 << size_bits;
4017 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4018 &address);
4019 offset = offset << size_bits;
4020 address = address + offset;
4021
4022 record_buf_mem[0] = datasize >> 3;
4023 record_buf_mem[1] = address;
4024 aarch64_insn_r->mem_rec_count = 1;
4025 }
4026 else
4027 {
4028 if (vector_flag)
4029 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4030 else
4031 record_buf[0] = reg_rt;
4032 aarch64_insn_r->reg_rec_count = 1;
4033 }
4034 }
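/* For example, "str x1, [x2, #16]" stores 8 bytes: the imm12 field
   (2 here) is scaled left by size_bits (3 for a 64-bit store), so the
   code above records 8 bytes of memory at x2 + 16.  */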
4035 /* Load/store register (register offset) instructions. */
4036 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
4037 && insn_bits10_11 == 0x02 && insn_bit21)
4038 {
4039 if (record_debug)
4040 debug_printf ("Process record: load/store (register offset)\n");
4041 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
4042 if (!(opc >> 1))
4043 if (opc & 0x01)
4044 ld_flag = 0x01;
4045 else
4046 ld_flag = 0x0;
4047 else
4048 if (size_bits != 0x03)
4049 ld_flag = 0x01;
4050 else
4051 return AARCH64_RECORD_UNKNOWN;
4052
4053 if (!ld_flag)
4054 {
4055 ULONGEST reg_rm_val;
4056
4057 regcache_raw_read_unsigned (aarch64_insn_r->regcache,
4058 bits (aarch64_insn_r->aarch64_insn, 16, 20), &reg_rm_val);
4059 if (bit (aarch64_insn_r->aarch64_insn, 12))
4060 offset = reg_rm_val << size_bits;
4061 else
4062 offset = reg_rm_val;
4063 datasize = 8 << size_bits;
4064 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4065 &address);
4066 address = address + offset;
4067 record_buf_mem[0] = datasize >> 3;
4068 record_buf_mem[1] = address;
4069 aarch64_insn_r->mem_rec_count = 1;
4070 }
4071 else
4072 {
4073 if (vector_flag)
4074 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4075 else
4076 record_buf[0] = reg_rt;
4077 aarch64_insn_r->reg_rec_count = 1;
4078 }
4079 }
4080 /* Load/store register (immediate and unprivileged) instructions. */
4081 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
4082 && !insn_bit21)
4083 {
4084 if (record_debug)
4085 {
4086 debug_printf ("Process record: load/store "
4087 "(immediate and unprivileged)\n");
4088 }
4089 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
4090 if (!(opc >> 1))
4091 if (opc & 0x01)
4092 ld_flag = 0x01;
4093 else
4094 ld_flag = 0x0;
4095 else
4096 if (size_bits != 0x03)
4097 ld_flag = 0x01;
4098 else
4099 return AARCH64_RECORD_UNKNOWN;
4100
4101 if (!ld_flag)
4102 {
4103 uint16_t imm9_off;
4104 imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
4105 offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
4106 datasize = 8 << size_bits;
4107 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4108 &address);
4109 if (insn_bits10_11 != 0x01)
4110 {
4111 if (imm9_off & 0x0100)
4112 address = address - offset;
4113 else
4114 address = address + offset;
4115 }
4116 record_buf_mem[0] = datasize >> 3;
4117 record_buf_mem[1] = address;
4118 aarch64_insn_r->mem_rec_count = 1;
4119 }
4120 else
4121 {
4122 if (vector_flag)
4123 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4124 else
4125 record_buf[0] = reg_rt;
4126 aarch64_insn_r->reg_rec_count = 1;
4127 }
4128 if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
4129 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
4130 }
4131 /* Advanced SIMD load/store instructions. */
4132 else
4133 return aarch64_record_asimd_load_store (aarch64_insn_r);
4134
4135 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
4136 record_buf_mem);
4137 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4138 record_buf);
4139 return AARCH64_RECORD_SUCCESS;
4140 }
4141
4142 /* Record handler for data processing SIMD and floating point instructions. */
4143
4144 static unsigned int
4145 aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
4146 {
4147 uint8_t insn_bit21, opcode, rmode, reg_rd;
4148 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
4149 uint8_t insn_bits11_14;
4150 uint32_t record_buf[2];
4151
4152 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
4153 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
4154 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
4155 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
4156 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
4157 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
4158 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
4159 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
4160 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
4161
4162 if (record_debug)
4163 debug_printf ("Process record: data processing SIMD/FP: ");
4164
4165 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
4166 {
4167 /* Floating point - fixed point conversion instructions. */
4168 if (!insn_bit21)
4169 {
4170 if (record_debug)
4171 debug_printf ("FP - fixed point conversion");
4172
4173 if ((opcode >> 1) == 0x0 && rmode == 0x03)
4174 record_buf[0] = reg_rd;
4175 else
4176 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4177 }
4178 /* Floating point - conditional compare instructions. */
4179 else if (insn_bits10_11 == 0x01)
4180 {
4181 if (record_debug)
4182 debug_printf ("FP - conditional compare");
4183
4184 record_buf[0] = AARCH64_CPSR_REGNUM;
4185 }
4186 /* Floating point - data processing (2-source) and
4187 conditional select instructions. */
4188 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
4189 {
4190 if (record_debug)
4191 debug_printf ("FP - DP (2-source)");
4192
4193 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4194 }
4195 else if (insn_bits10_11 == 0x00)
4196 {
4197 /* Floating point - immediate instructions. */
4198 if ((insn_bits12_15 & 0x01) == 0x01
4199 || (insn_bits12_15 & 0x07) == 0x04)
4200 {
4201 if (record_debug)
4202 debug_printf ("FP - immediate");
4203 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4204 }
4205 /* Floating point - compare instructions. */
4206 else if ((insn_bits12_15 & 0x03) == 0x02)
4207 {
4208 if (record_debug)
4209 debug_printf ("FP - compare");
4210 record_buf[0] = AARCH64_CPSR_REGNUM;
4211 }
4212 /* Floating point - integer conversions instructions. */
4213 else if (insn_bits12_15 == 0x00)
4214 {
4215 /* Convert float to integer instruction. */
4216 if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
4217 {
4218 if (record_debug)
4219 debug_printf ("float to int conversion");
4220
4221 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4222 }
4223 /* Convert integer to float instruction. */
4224 else if ((opcode >> 1) == 0x01 && !rmode)
4225 {
4226 if (record_debug)
4227 debug_printf ("int to float conversion");
4228
4229 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4230 }
4231 /* Move float to integer instruction. */
4232 else if ((opcode >> 1) == 0x03)
4233 {
4234 if (record_debug)
4235 debug_printf ("move float to int");
4236
4237 if (!(opcode & 0x01))
4238 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4239 else
4240 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4241 }
4242 else
4243 return AARCH64_RECORD_UNKNOWN;
4244 }
4245 else
4246 return AARCH64_RECORD_UNKNOWN;
4247 }
4248 else
4249 return AARCH64_RECORD_UNKNOWN;
4250 }
4251 else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
4252 {
4253 if (record_debug)
4254 debug_printf ("SIMD copy");
4255
4256 /* Advanced SIMD copy instructions. */
4257 if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
4258 && !bit (aarch64_insn_r->aarch64_insn, 15)
4259 && bit (aarch64_insn_r->aarch64_insn, 10))
4260 {
4261 if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
4262 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4263 else
4264 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4265 }
4266 else
4267 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4268 }
4269 /* All remaining floating point or advanced SIMD instructions. */
4270 else
4271 {
4272 if (record_debug)
4273 debug_printf ("all remaining SIMD/FP");
4274
4275 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4276 }
4277
4278 if (record_debug)
4279 debug_printf ("\n");
4280
4281 aarch64_insn_r->reg_rec_count++;
4282 gdb_assert (aarch64_insn_r->reg_rec_count == 1);
4283 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4284 record_buf);
4285 return AARCH64_RECORD_SUCCESS;
4286 }
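/* For example, "fcvtzs x0, d0" (float-to-integer conversion) records
   x0, whereas "scvtf d0, x0" (integer-to-float) records d0 through its
   V-register number.  */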
4287
4288 /* Decodes the instruction type and invokes the corresponding record handler. */
4289
4290 static unsigned int
4291 aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
4292 {
4293 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
4294
4295 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
4296 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
4297 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
4298 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
4299
4300 /* Data processing - immediate instructions. */
4301 if (!ins_bit26 && !ins_bit27 && ins_bit28)
4302 return aarch64_record_data_proc_imm (aarch64_insn_r);
4303
4304 /* Branch, exception generation and system instructions. */
4305 if (ins_bit26 && !ins_bit27 && ins_bit28)
4306 return aarch64_record_branch_except_sys (aarch64_insn_r);
4307
4308 /* Load and store instructions. */
4309 if (!ins_bit25 && ins_bit27)
4310 return aarch64_record_load_store (aarch64_insn_r);
4311
4312 /* Data processing - register instructions. */
4313 if (ins_bit25 && !ins_bit26 && ins_bit27)
4314 return aarch64_record_data_proc_reg (aarch64_insn_r);
4315
4316 /* Data processing - SIMD and floating point instructions. */
4317 if (ins_bit25 && ins_bit26 && ins_bit27)
4318 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
4319
4320 return AARCH64_RECORD_UNSUPPORTED;
4321 }
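/* The tests above follow the top-level A64 encoding map (bits 28-25 of
   the instruction): 100x selects data processing (immediate), 101x
   branches/exception/system, x1x0 loads and stores, x101 data
   processing (register), and x111 SIMD and floating point.  */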
4322
4323 /* Frees the register and memory record buffers. */
4324
4325 static void
4326 deallocate_reg_mem (insn_decode_record *record)
4327 {
4328 xfree (record->aarch64_regs);
4329 xfree (record->aarch64_mems);
4330 }
4331
4332 #if GDB_SELF_TEST
4333 namespace selftests {
4334
4335 static void
4336 aarch64_process_record_test (void)
4337 {
4338 struct gdbarch_info info;
4339 uint32_t ret;
4340
4341 gdbarch_info_init (&info);
4342 info.bfd_arch_info = bfd_scan_arch ("aarch64");
4343
4344 struct gdbarch *gdbarch = gdbarch_find_by_info (info);
4345 SELF_CHECK (gdbarch != NULL);
4346
4347 insn_decode_record aarch64_record;
4348
4349 memset (&aarch64_record, 0, sizeof (insn_decode_record));
4350 aarch64_record.regcache = NULL;
4351 aarch64_record.this_addr = 0;
4352 aarch64_record.gdbarch = gdbarch;
4353
4354 /* 20 00 80 f9 prfm pldl1keep, [x1] */
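/* PRFM is a prefetch hint: it writes neither registers nor memory, so
   a successful record should log nothing.  */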
4355 aarch64_record.aarch64_insn = 0xf9800020;
4356 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4357 SELF_CHECK (ret == AARCH64_RECORD_SUCCESS);
4358 SELF_CHECK (aarch64_record.reg_rec_count == 0);
4359 SELF_CHECK (aarch64_record.mem_rec_count == 0);
4360
4361 deallocate_reg_mem (&aarch64_record);
4362 }
4363
4364 } // namespace selftests
4365 #endif /* GDB_SELF_TEST */
4366
4367 /* Parse the current instruction and record the values of the registers and
4368 memory that will be changed by the current instruction to record_arch_list.
4369 Return -1 if something is wrong. */
4370
4371 int
4372 aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
4373 CORE_ADDR insn_addr)
4374 {
4375 uint32_t rec_no = 0;
4376 uint8_t insn_size = 4;
4377 uint32_t ret = 0;
4378 gdb_byte buf[insn_size];
4379 insn_decode_record aarch64_record;
4380
4381 memset (&buf[0], 0, insn_size);
4382 memset (&aarch64_record, 0, sizeof (insn_decode_record));
4383 target_read_memory (insn_addr, &buf[0], insn_size);
4384 aarch64_record.aarch64_insn
4385 = (uint32_t) extract_unsigned_integer (&buf[0],
4386 insn_size,
4387 gdbarch_byte_order (gdbarch));
4388 aarch64_record.regcache = regcache;
4389 aarch64_record.this_addr = insn_addr;
4390 aarch64_record.gdbarch = gdbarch;
4391
4392 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4393 if (ret == AARCH64_RECORD_UNSUPPORTED)
4394 {
4395 printf_unfiltered (_("Process record does not support instruction "
4396 "0x%0x at address %s.\n"),
4397 aarch64_record.aarch64_insn,
4398 paddress (gdbarch, insn_addr));
4399 ret = -1;
4400 }
4401
4402 if (ret == 0)
4403 {
4404 /* Record registers. */
4405 record_full_arch_list_add_reg (aarch64_record.regcache,
4406 AARCH64_PC_REGNUM);
4407 /* Always record register CPSR. */
4408 record_full_arch_list_add_reg (aarch64_record.regcache,
4409 AARCH64_CPSR_REGNUM);
4410 if (aarch64_record.aarch64_regs)
4411 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
4412 if (record_full_arch_list_add_reg (aarch64_record.regcache,
4413 aarch64_record.aarch64_regs[rec_no]))
4414 ret = -1;
4415
4416 /* Record memories. */
4417 if (aarch64_record.aarch64_mems)
4418 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
4419 if (record_full_arch_list_add_mem
4420 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
4421 aarch64_record.aarch64_mems[rec_no].len))
4422 ret = -1;
4423
4424 if (record_full_arch_list_add_end ())
4425 ret = -1;
4426 }
4427
4428 deallocate_reg_mem (&aarch64_record);
4429 return ret;
4430 }
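/* aarch64_process_record is installed as the process_record gdbarch
   hook (via set_gdbarch_process_record in this file's gdbarch init),
   so that "record full" can log the state an instruction will clobber
   before it executes.  */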