/* Common target dependent code for GDB on AArch64 systems.

   Copyright (C) 2009-2019 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"

#include "frame.h"
#include "inferior.h"
#include "gdbcmd.h"
#include "gdbcore.h"
#include "dis-asm.h"
#include "regcache.h"
#include "reggroups.h"
#include "value.h"
#include "arch-utils.h"
#include "osabi.h"
#include "frame-unwind.h"
#include "frame-base.h"
#include "trad-frame.h"
#include "objfiles.h"
#include "dwarf2.h"
#include "dwarf2-frame.h"
#include "gdbtypes.h"
#include "prologue-value.h"
#include "target-descriptions.h"
#include "user-regs.h"
#include "language.h"
#include "infcall.h"
#include "ax.h"
#include "ax-gdb.h"
#include "common/selftest.h"

#include "aarch64-tdep.h"
#include "aarch64-ravenscar-thread.h"

#include "elf-bfd.h"
#include "elf/aarch64.h"

#include "common/vec.h"

#include "record.h"
#include "record-full.h"
#include "arch/aarch64-insn.h"

#include "opcode/aarch64.h"
#include <algorithm>

#define submask(x) ((1L << ((x) + 1)) - 1)
#define bit(obj,st) (((obj) >> (st)) & 1)
#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
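
/* Illustrative example (editor's sketch, not part of the original source):
   for the instruction word 0x910003fd ("mov x29, sp", an alias of
   "add x29, sp, #0"), the helpers above extract fields as follows:

     submask (3)              == 0xf  -- the low four bits set
     bit (0x910003fd, 31)     == 1    -- the sf field; a 64-bit operation
     bits (0x910003fd, 5, 9)  == 31   -- the Rn field, i.e. the stack
                                         pointer  */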

/* A Homogeneous Floating-Point or Short-Vector Aggregate may have at most
   four members.  */
#define HA_MAX_NUM_FLDS 4

/* All possible aarch64 target descriptors.  */
struct target_desc *tdesc_aarch64_list[AARCH64_MAX_SVE_VQ + 1][2/*pauth*/];

/* The standard register names, and all the valid aliases for them.  */
static const struct
{
  const char *const name;
  int regnum;
} aarch64_register_aliases[] =
{
  /* 64-bit register names.  */
  {"fp", AARCH64_FP_REGNUM},
  {"lr", AARCH64_LR_REGNUM},
  {"sp", AARCH64_SP_REGNUM},

  /* 32-bit register names.  */
  {"w0", AARCH64_X0_REGNUM + 0},
  {"w1", AARCH64_X0_REGNUM + 1},
  {"w2", AARCH64_X0_REGNUM + 2},
  {"w3", AARCH64_X0_REGNUM + 3},
  {"w4", AARCH64_X0_REGNUM + 4},
  {"w5", AARCH64_X0_REGNUM + 5},
  {"w6", AARCH64_X0_REGNUM + 6},
  {"w7", AARCH64_X0_REGNUM + 7},
  {"w8", AARCH64_X0_REGNUM + 8},
  {"w9", AARCH64_X0_REGNUM + 9},
  {"w10", AARCH64_X0_REGNUM + 10},
  {"w11", AARCH64_X0_REGNUM + 11},
  {"w12", AARCH64_X0_REGNUM + 12},
  {"w13", AARCH64_X0_REGNUM + 13},
  {"w14", AARCH64_X0_REGNUM + 14},
  {"w15", AARCH64_X0_REGNUM + 15},
  {"w16", AARCH64_X0_REGNUM + 16},
  {"w17", AARCH64_X0_REGNUM + 17},
  {"w18", AARCH64_X0_REGNUM + 18},
  {"w19", AARCH64_X0_REGNUM + 19},
  {"w20", AARCH64_X0_REGNUM + 20},
  {"w21", AARCH64_X0_REGNUM + 21},
  {"w22", AARCH64_X0_REGNUM + 22},
  {"w23", AARCH64_X0_REGNUM + 23},
  {"w24", AARCH64_X0_REGNUM + 24},
  {"w25", AARCH64_X0_REGNUM + 25},
  {"w26", AARCH64_X0_REGNUM + 26},
  {"w27", AARCH64_X0_REGNUM + 27},
  {"w28", AARCH64_X0_REGNUM + 28},
  {"w29", AARCH64_X0_REGNUM + 29},
  {"w30", AARCH64_X0_REGNUM + 30},

  /* specials */
  {"ip0", AARCH64_X0_REGNUM + 16},
  {"ip1", AARCH64_X0_REGNUM + 17}
};

/* The required core 'R' registers.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM!  */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  "pc", "cpsr"
};

/* The FP/SIMD 'V' registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM!  */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  "fpsr",
  "fpcr"
};

/* The SVE 'Z' and 'P' registers.  */
static const char *const aarch64_sve_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_SVE_Z0_REGNUM!  */
  "z0", "z1", "z2", "z3",
  "z4", "z5", "z6", "z7",
  "z8", "z9", "z10", "z11",
  "z12", "z13", "z14", "z15",
  "z16", "z17", "z18", "z19",
  "z20", "z21", "z22", "z23",
  "z24", "z25", "z26", "z27",
  "z28", "z29", "z30", "z31",
  "fpsr", "fpcr",
  "p0", "p1", "p2", "p3",
  "p4", "p5", "p6", "p7",
  "p8", "p9", "p10", "p11",
  "p12", "p13", "p14", "p15",
  "ffr", "vg"
};
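
/* Note on "vg" above: the SVE vector granule register holds the current
   vector length in units of 64 bits, so for example a 256-bit SVE
   implementation reports vg == 4.  */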

static const char *const aarch64_pauth_register_names[] =
{
  /* Authentication mask for data pointer.  */
  "pauth_dmask",
  /* Authentication mask for code pointer.  */
  "pauth_cmask"
};

/* AArch64 prologue cache structure.  */
struct aarch64_prologue_cache
{
  /* The program counter at the start of the function.  It is used to
     identify this frame as a prologue frame.  */
  CORE_ADDR func;

  /* The program counter at the time this frame was created; i.e. where
     this function was called from.  It is used to identify this frame as a
     stub frame.  */
  CORE_ADDR prev_pc;

  /* The stack pointer at the time this frame was created; i.e. the
     caller's stack pointer when this function was called.  It is used
     to identify this frame.  */
  CORE_ADDR prev_sp;

  /* Is the target available to read from?  */
  int available_p;

  /* The frame base for this frame is just prev_sp - frame size.
     FRAMESIZE is the distance from the frame pointer to the
     initial stack pointer.  */
  int framesize;

  /* The register used to hold the frame pointer for this frame.  */
  int framereg;

  /* Saved register offsets.  */
  struct trad_frame_saved_reg *saved_regs;
};
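
/* For a canonical prologue such as

     stp x29, x30, [sp, #-32]!
     mov x29, sp

   the analysis below fills this cache with framereg == AARCH64_FP_REGNUM,
   framesize == 32, and saved_regs offsets of -32 for x29 and -24 for x30,
   relative to prev_sp and later rebased to absolute addresses (an
   illustrative walk-through mirroring the selftests further down).  */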

static void
show_aarch64_debug (struct ui_file *file, int from_tty,
                    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
}

namespace {

/* Abstract instruction reader.  */

class abstract_instruction_reader
{
public:
  /* Read in one instruction.  */
  virtual ULONGEST read (CORE_ADDR memaddr, int len,
                         enum bfd_endian byte_order) = 0;
};

/* Instruction reader from real target.  */

class instruction_reader : public abstract_instruction_reader
{
public:
  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    override
  {
    return read_code_unsigned_integer (memaddr, len, byte_order);
  }
};

} // namespace
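
/* The reader abstraction above exists so that aarch64_analyze_prologue
   below can be driven either by live target memory (instruction_reader)
   or by the canned instruction buffers used in the GDB_SELF_TEST code
   further down in this file.  */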

/* If address signing is enabled, mask off the signature bits from ADDR, using
   the register values in THIS_FRAME.  */

static CORE_ADDR
aarch64_frame_unmask_address (struct gdbarch_tdep *tdep,
                              struct frame_info *this_frame,
                              CORE_ADDR addr)
{
  if (tdep->has_pauth ()
      && frame_unwind_register_unsigned (this_frame,
                                         tdep->pauth_ra_state_regnum))
    {
      int cmask_num = AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base);
      CORE_ADDR cmask = frame_unwind_register_unsigned (this_frame, cmask_num);
      addr = addr & ~cmask;
    }

  return addr;
}
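
/* For illustration (the values are hypothetical): if CMASK were
   0x007f000000000000 and the unwound, PAC-signed LR were
   0x002a000000401234, then

     0x002a000000401234 & ~0x007f000000000000 == 0x0000000000401234

   i.e. the pointer-authentication field is cleared and the canonical
   return address is recovered.  */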

/* Analyze a prologue, looking for a recognizable stack frame
   and frame pointer.  Scan until we encounter a store that could
   clobber the stack frame unexpectedly, or an unknown instruction.  */

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
                          CORE_ADDR start, CORE_ADDR limit,
                          struct aarch64_prologue_cache *cache,
                          abstract_instruction_reader& reader)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  int i;
  /* Track X registers and D registers in prologue.  */
  pv_t regs[AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT];

  for (i = 0; i < AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT; i++)
    regs[i] = pv_register (i, 0);
  pv_area stack (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));

  for (; start < limit; start += 4)
    {
      uint32_t insn;
      aarch64_inst inst;

      insn = reader.read (start, 4, byte_order_for_code);

      if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
        break;

      if (inst.opcode->iclass == addsub_imm
          && (inst.opcode->op == OP_ADD
              || strcmp ("sub", inst.opcode->name) == 0))
        {
          unsigned rd = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].reg.regno;

          gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM);

          if (inst.opcode->op == OP_ADD)
            {
              regs[rd] = pv_add_constant (regs[rn],
                                          inst.operands[2].imm.value);
            }
          else
            {
              regs[rd] = pv_add_constant (regs[rn],
                                          -inst.operands[2].imm.value);
            }
        }
      else if (inst.opcode->iclass == pcreladdr
               && inst.operands[1].type == AARCH64_OPND_ADDR_ADRP)
        {
          gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);

          regs[inst.operands[0].reg.regno] = pv_unknown ();
        }
      else if (inst.opcode->iclass == branch_imm)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == condbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == branch_reg)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == compbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->op == OP_MOVZ)
        {
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
          regs[inst.operands[0].reg.regno] = pv_unknown ();
        }
      else if (inst.opcode->iclass == log_shift
               && strcmp (inst.opcode->name, "orr") == 0)
        {
          unsigned rd = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].reg.regno;
          unsigned rm = inst.operands[2].reg.regno;

          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT);

          if (inst.operands[2].shifter.amount == 0
              && rn == AARCH64_SP_REGNUM)
            regs[rd] = regs[rm];
          else
            {
              if (aarch64_debug)
                {
                  debug_printf ("aarch64: prologue analysis gave up "
                                "addr=%s opcode=0x%x (orr x register)\n",
                                core_addr_to_string_nz (start), insn);
                }
              break;
            }
        }
      else if (inst.opcode->op == OP_STUR)
        {
          unsigned rt = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].addr.base_regno;
          int is64
            = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);

          gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
          gdb_assert (!inst.operands[1].addr.offset.is_reg);

          stack.store (pv_add_constant (regs[rn],
                                        inst.operands[1].addr.offset.imm),
                       is64 ? 8 : 4, regs[rt]);
        }
      else if ((inst.opcode->iclass == ldstpair_off
                || (inst.opcode->iclass == ldstpair_indexed
                    && inst.operands[2].addr.preind))
               && strcmp ("stp", inst.opcode->name) == 0)
        {
          /* STP with addressing mode Pre-indexed and Base register.  */
          unsigned rt1;
          unsigned rt2;
          unsigned rn = inst.operands[2].addr.base_regno;
          int32_t imm = inst.operands[2].addr.offset.imm;

          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
                      || inst.operands[0].type == AARCH64_OPND_Ft);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2
                      || inst.operands[1].type == AARCH64_OPND_Ft2);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7);
          gdb_assert (!inst.operands[2].addr.offset.is_reg);

          /* If recording this store would invalidate the store area
             (perhaps because rn is not known) then we should abandon
             further prologue analysis.  */
          if (stack.store_would_trash (pv_add_constant (regs[rn], imm)))
            break;

          if (stack.store_would_trash (pv_add_constant (regs[rn], imm + 8)))
            break;

          rt1 = inst.operands[0].reg.regno;
          rt2 = inst.operands[1].reg.regno;
          if (inst.operands[0].type == AARCH64_OPND_Ft)
            {
              /* Only the bottom 64 bits of each V register (the D
                 register) need to be preserved.  */
              gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
              rt1 += AARCH64_X_REGISTER_COUNT;
              rt2 += AARCH64_X_REGISTER_COUNT;
            }

          stack.store (pv_add_constant (regs[rn], imm), 8,
                       regs[rt1]);
          stack.store (pv_add_constant (regs[rn], imm + 8), 8,
                       regs[rt2]);

          if (inst.operands[2].addr.writeback)
            regs[rn] = pv_add_constant (regs[rn], imm);

        }
      else if ((inst.opcode->iclass == ldst_imm9 /* Signed immediate.  */
                || (inst.opcode->iclass == ldst_pos /* Unsigned immediate.  */
                    && (inst.opcode->op == OP_STR_POS
                        || inst.opcode->op == OP_STRF_POS)))
               && inst.operands[1].addr.base_regno == AARCH64_SP_REGNUM
               && strcmp ("str", inst.opcode->name) == 0)
        {
          /* STR (immediate) */
          unsigned int rt = inst.operands[0].reg.regno;
          int32_t imm = inst.operands[1].addr.offset.imm;
          unsigned int rn = inst.operands[1].addr.base_regno;
          bool is64
            = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
                      || inst.operands[0].type == AARCH64_OPND_Ft);

          if (inst.operands[0].type == AARCH64_OPND_Ft)
            {
              /* Only the bottom 64 bits of each V register (the D
                 register) need to be preserved.  */
              gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
              rt += AARCH64_X_REGISTER_COUNT;
            }

          stack.store (pv_add_constant (regs[rn], imm),
                       is64 ? 8 : 4, regs[rt]);
          if (inst.operands[1].addr.writeback)
            regs[rn] = pv_add_constant (regs[rn], imm);
        }
      else if (inst.opcode->iclass == testbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == ic_system)
        {
          struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
          int ra_state_val = 0;

          if (insn == 0xd503233f /* paciasp.  */
              || insn == 0xd503237f /* pacibsp.  */)
            {
              /* Return addresses are mangled.  */
              ra_state_val = 1;
            }
          else if (insn == 0xd50323bf /* autiasp.  */
                   || insn == 0xd50323ff /* autibsp.  */)
            {
              /* Return addresses are not mangled.  */
              ra_state_val = 0;
            }
          else
            {
              if (aarch64_debug)
                debug_printf ("aarch64: prologue analysis gave up addr=%s"
                              " opcode=0x%x (iclass)\n",
                              core_addr_to_string_nz (start), insn);
              break;
            }

          if (tdep->has_pauth () && cache != nullptr)
            trad_frame_set_value (cache->saved_regs,
                                  tdep->pauth_ra_state_regnum,
                                  ra_state_val);
        }
      else
        {
          if (aarch64_debug)
            {
              debug_printf ("aarch64: prologue analysis gave up addr=%s"
                            " opcode=0x%x\n",
                            core_addr_to_string_nz (start), insn);
            }
          break;
        }
    }

  if (cache == NULL)
    return start;

  if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Frame pointer is fp.  Frame size is constant.  */
      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = -regs[AARCH64_FP_REGNUM].k;
    }
  else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Try the stack pointer.  */
      cache->framesize = -regs[AARCH64_SP_REGNUM].k;
      cache->framereg = AARCH64_SP_REGNUM;
    }
  else
    {
      /* We're just out of luck.  We don't know where the frame is.  */
      cache->framereg = -1;
      cache->framesize = 0;
    }

  for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
    {
      CORE_ADDR offset;

      if (stack.find_reg (gdbarch, i, &offset))
        cache->saved_regs[i].addr = offset;
    }

  for (i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
    {
      int regnum = gdbarch_num_regs (gdbarch);
      CORE_ADDR offset;

      if (stack.find_reg (gdbarch, i + AARCH64_X_REGISTER_COUNT,
                          &offset))
        cache->saved_regs[i + regnum + AARCH64_D0_REGNUM].addr = offset;
    }

  return start;
}

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
                          CORE_ADDR start, CORE_ADDR limit,
                          struct aarch64_prologue_cache *cache)
{
  instruction_reader reader;

  return aarch64_analyze_prologue (gdbarch, start, limit, cache,
                                   reader);
}

#if GDB_SELF_TEST

namespace selftests {

/* Instruction reader from manually cooked instruction sequences.  */

class instruction_reader_test : public abstract_instruction_reader
{
public:
  template<size_t SIZE>
  explicit instruction_reader_test (const uint32_t (&insns)[SIZE])
    : m_insns (insns), m_insns_size (SIZE)
  {}

  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    override
  {
    SELF_CHECK (len == 4);
    SELF_CHECK (memaddr % 4 == 0);
    SELF_CHECK (memaddr / 4 < m_insns_size);

    return m_insns[memaddr / 4];
  }

private:
  const uint32_t *m_insns;
  size_t m_insns_size;
};

static void
aarch64_analyze_prologue_test (void)
{
  struct gdbarch_info info;

  gdbarch_info_init (&info);
  info.bfd_arch_info = bfd_scan_arch ("aarch64");

  struct gdbarch *gdbarch = gdbarch_find_by_info (info);
  SELF_CHECK (gdbarch != NULL);

  struct aarch64_prologue_cache cache;
  cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);

  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  /* Test the simple prologue in which frame pointer is used.  */
  {
    static const uint32_t insns[] = {
      0xa9af7bfd, /* stp     x29, x30, [sp,#-272]! */
      0x910003fd, /* mov     x29, sp */
      0x97ffffe6, /* bl      0x400580 */
    };
    instruction_reader_test reader (insns);

    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
    SELF_CHECK (end == 4 * 2);

    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 272);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
        if (i == AARCH64_FP_REGNUM)
          SELF_CHECK (cache.saved_regs[i].addr == -272);
        else if (i == AARCH64_LR_REGNUM)
          SELF_CHECK (cache.saved_regs[i].addr == -264);
        else
          SELF_CHECK (cache.saved_regs[i].addr == -1);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
        int regnum = gdbarch_num_regs (gdbarch);

        SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
                    == -1);
      }
  }

  /* Test a prologue in which STR is used and frame pointer is not
     used.  */
  {
    static const uint32_t insns[] = {
      0xf81d0ff3, /* str     x19, [sp, #-48]! */
      0xb9002fe0, /* str     w0, [sp, #44] */
      0xf90013e1, /* str     x1, [sp, #32] */
      0xfd000fe0, /* str     d0, [sp, #24] */
      0xaa0203f3, /* mov     x19, x2 */
      0xf94013e0, /* ldr     x0, [sp, #32] */
    };
    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    SELF_CHECK (end == 4 * 5);

    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 48);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
        if (i == 1)
          SELF_CHECK (cache.saved_regs[i].addr == -16);
        else if (i == 19)
          SELF_CHECK (cache.saved_regs[i].addr == -48);
        else
          SELF_CHECK (cache.saved_regs[i].addr == -1);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
        int regnum = gdbarch_num_regs (gdbarch);

        if (i == 0)
          SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
                      == -24);
        else
          SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
                      == -1);
      }
  }

  /* Test a prologue in which there is a return address signing
     instruction.  */
  if (tdep->has_pauth ())
    {
      static const uint32_t insns[] = {
        0xd503233f, /* paciasp */
        0xa9bd7bfd, /* stp     x29, x30, [sp, #-48]! */
        0x910003fd, /* mov     x29, sp */
        0xf801c3f3, /* str     x19, [sp, #28] */
        0xb9401fa0, /* ldr     x19, [x29, #28] */
      };
      instruction_reader_test reader (insns);

      trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
      CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache,
                                                reader);

      SELF_CHECK (end == 4 * 4);
      SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
      SELF_CHECK (cache.framesize == 48);

      for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
        {
          if (i == 19)
            SELF_CHECK (cache.saved_regs[i].addr == -20);
          else if (i == AARCH64_FP_REGNUM)
            SELF_CHECK (cache.saved_regs[i].addr == -48);
          else if (i == AARCH64_LR_REGNUM)
            SELF_CHECK (cache.saved_regs[i].addr == -40);
          else
            SELF_CHECK (cache.saved_regs[i].addr == -1);
        }

      if (tdep->has_pauth ())
        {
          SELF_CHECK (trad_frame_value_p (cache.saved_regs,
                                          tdep->pauth_ra_state_regnum));
          SELF_CHECK (cache.saved_regs[tdep->pauth_ra_state_regnum].addr == 1);
        }
    }
}
} // namespace selftests
#endif /* GDB_SELF_TEST */

/* Implement the "skip_prologue" gdbarch method.  */

static CORE_ADDR
aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  CORE_ADDR func_addr, limit_pc;

  /* See if we can determine the end of the prologue via the symbol
     table.  If so, then return either PC, or the PC after the
     prologue, whichever is greater.  */
  if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
    {
      CORE_ADDR post_prologue_pc
        = skip_prologue_using_sal (gdbarch, func_addr);

      if (post_prologue_pc != 0)
        return std::max (pc, post_prologue_pc);
    }

  /* Can't determine prologue from the symbol table, need to examine
     instructions.  */

  /* Find an upper limit on the function prologue using the debug
     information.  If the debug information could not be used to
     provide that bound, then use an arbitrary large number as the
     upper bound.  */
  limit_pc = skip_prologue_using_sal (gdbarch, pc);
  if (limit_pc == 0)
    limit_pc = pc + 128;        /* Magic.  */

  /* Try disassembling prologue.  */
  return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
}
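
/* For example (the addresses are hypothetical): for a function at
   0x400560 whose prologue is "stp x29, x30, [sp, #-32]!; mov x29, sp"
   followed by a bl, and for which no usable line-table entry exists, the
   disassembly path above would return 0x400568, the address just past
   the two prologue instructions.  */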

/* Scan the function prologue for THIS_FRAME and populate the prologue
   cache CACHE.  */

static void
aarch64_scan_prologue (struct frame_info *this_frame,
                       struct aarch64_prologue_cache *cache)
{
  CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
  CORE_ADDR prologue_start;
  CORE_ADDR prologue_end;
  CORE_ADDR prev_pc = get_frame_pc (this_frame);
  struct gdbarch *gdbarch = get_frame_arch (this_frame);

  cache->prev_pc = prev_pc;

  /* Assume we do not find a frame.  */
  cache->framereg = -1;
  cache->framesize = 0;

  if (find_pc_partial_function (block_addr, NULL, &prologue_start,
                                &prologue_end))
    {
      struct symtab_and_line sal = find_pc_line (prologue_start, 0);

      if (sal.line == 0)
        {
          /* No line info so use the current PC.  */
          prologue_end = prev_pc;
        }
      else if (sal.end < prologue_end)
        {
          /* The next line begins after the function end.  */
          prologue_end = sal.end;
        }

      prologue_end = std::min (prologue_end, prev_pc);
      aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
    }
  else
    {
      CORE_ADDR frame_loc;

      frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
      if (frame_loc == 0)
        return;

      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = 16;
      cache->saved_regs[29].addr = 0;
      cache->saved_regs[30].addr = 8;
    }
}

/* Fill in *CACHE with information about the prologue of *THIS_FRAME.  This
   function may throw an exception if the inferior's registers or memory is
   not available.  */

static void
aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
                               struct aarch64_prologue_cache *cache)
{
  CORE_ADDR unwound_fp;
  int reg;

  aarch64_scan_prologue (this_frame, cache);

  if (cache->framereg == -1)
    return;

  unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
  if (unwound_fp == 0)
    return;

  cache->prev_sp = unwound_fp + cache->framesize;

  /* Calculate actual addresses of saved registers using offsets
     determined by aarch64_analyze_prologue.  */
  for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
    if (trad_frame_addr_p (cache->saved_regs, reg))
      cache->saved_regs[reg].addr += cache->prev_sp;

  cache->func = get_frame_func (this_frame);

  cache->available_p = 1;
}

/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  try
    {
      aarch64_make_prologue_cache_1 (this_frame, cache);
    }
  catch (const gdb_exception_error &ex)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
        throw;
    }

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
                                           void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  /* Halt the backtrace at "_start".  */
  if (cache->prev_pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
    return UNWIND_OUTERMOST;

  /* We've hit a wall, stop.  */
  if (cache->prev_sp == 0)
    return UNWIND_OUTERMOST;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a normal frame is the current function's starting
   PC and the caller's SP when we were called.  */

static void
aarch64_prologue_this_id (struct frame_info *this_frame,
                          void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    *this_id = frame_id_build_unavailable_stack (cache->func);
  else
    *this_id = frame_id_build (cache->prev_sp, cache->func);
}

/* Implement the "prev_register" frame_unwind method.  */

static struct value *
aarch64_prologue_prev_register (struct frame_info *this_frame,
                                void **this_cache, int prev_regnum)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  /* If we are asked to unwind the PC, then we need to return the LR
     instead.  The prologue may save PC, but it will point into this
     frame's prologue, not the next frame's resume location.  */
  if (prev_regnum == AARCH64_PC_REGNUM)
    {
      CORE_ADDR lr;
      struct gdbarch *gdbarch = get_frame_arch (this_frame);
      struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);

      if (tdep->has_pauth ()
          && trad_frame_value_p (cache->saved_regs,
                                 tdep->pauth_ra_state_regnum))
        lr = aarch64_frame_unmask_address (tdep, this_frame, lr);

      return frame_unwind_got_constant (this_frame, prev_regnum, lr);
    }

  /* SP is generally not saved to the stack, but this frame is
     identified by the next frame's stack pointer at the time of the
     call.  The value was already reconstructed into PREV_SP.  */
  /*
         +----------+  ^
         | saved lr |  |
      +->| saved fp |--+
      |  |          |
      |  |          |     <- Previous SP
      |  +----------+
      |  | saved lr |
      +--| saved fp |<- FP
         |          |
         |          |<- SP
         +----------+  */
  if (prev_regnum == AARCH64_SP_REGNUM)
    return frame_unwind_got_constant (this_frame, prev_regnum,
                                      cache->prev_sp);

  return trad_frame_get_prev_register (this_frame, cache->saved_regs,
                                       prev_regnum);
}

/* AArch64 prologue unwinder.  */
struct frame_unwind aarch64_prologue_unwind =
{
  NORMAL_FRAME,
  aarch64_prologue_frame_unwind_stop_reason,
  aarch64_prologue_this_id,
  aarch64_prologue_prev_register,
  NULL,
  default_frame_sniffer
};

/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  try
    {
      cache->prev_sp = get_frame_register_unsigned (this_frame,
                                                    AARCH64_SP_REGNUM);
      cache->prev_pc = get_frame_pc (this_frame);
      cache->available_p = 1;
    }
  catch (const gdb_exception_error &ex)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
        throw;
    }

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame,
                                       void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a stub frame is the current SP and LR.  */

static void
aarch64_stub_this_id (struct frame_info *this_frame,
                      void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (cache->available_p)
    *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
  else
    *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
}

/* Implement the "sniffer" frame_unwind method.  */

static int
aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
                             struct frame_info *this_frame,
                             void **this_prologue_cache)
{
  CORE_ADDR addr_in_block;
  gdb_byte dummy[4];

  addr_in_block = get_frame_address_in_block (this_frame);
  if (in_plt_section (addr_in_block)
      /* We also use the stub unwinder if the target memory is unreadable
         to avoid having the prologue unwinder trying to read it.  */
      || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
    return 1;

  return 0;
}

/* AArch64 stub unwinder.  */
struct frame_unwind aarch64_stub_unwind =
{
  NORMAL_FRAME,
  aarch64_stub_frame_unwind_stop_reason,
  aarch64_stub_this_id,
  aarch64_prologue_prev_register,
  NULL,
  aarch64_stub_unwind_sniffer
};

/* Return the frame base address of *THIS_FRAME.  */

static CORE_ADDR
aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  return cache->prev_sp - cache->framesize;
}

/* AArch64 default frame base information.  */
struct frame_base aarch64_normal_base =
{
  &aarch64_prologue_unwind,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base
};

/* Return the value of the REGNUM register in the previous frame of
   *THIS_FRAME.  */

static struct value *
aarch64_dwarf2_prev_register (struct frame_info *this_frame,
                              void **this_cache, int regnum)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (get_frame_arch (this_frame));
  CORE_ADDR lr;

  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      lr = aarch64_frame_unmask_address (tdep, this_frame, lr);
      return frame_unwind_got_constant (this_frame, regnum, lr);

    default:
      internal_error (__FILE__, __LINE__,
                      _("Unexpected register %d"), regnum);
    }
}

static const unsigned char op_lit0 = DW_OP_lit0;
static const unsigned char op_lit1 = DW_OP_lit1;

/* Implement the "init_reg" dwarf2_frame_ops method.  */

static void
aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
                               struct dwarf2_frame_state_reg *reg,
                               struct frame_info *this_frame)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      reg->how = DWARF2_FRAME_REG_FN;
      reg->loc.fn = aarch64_dwarf2_prev_register;
      return;

    case AARCH64_SP_REGNUM:
      reg->how = DWARF2_FRAME_REG_CFA;
      return;
    }

  /* Init pauth registers.  */
  if (tdep->has_pauth ())
    {
      if (regnum == tdep->pauth_ra_state_regnum)
        {
          /* Initialize RA_STATE to zero.  */
          reg->how = DWARF2_FRAME_REG_SAVED_VAL_EXP;
          reg->loc.exp.start = &op_lit0;
          reg->loc.exp.len = 1;
          return;
        }
      else if (regnum == AARCH64_PAUTH_DMASK_REGNUM (tdep->pauth_reg_base)
               || regnum == AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base))
        {
          reg->how = DWARF2_FRAME_REG_SAME_VALUE;
          return;
        }
    }
}

/* Implement the execute_dwarf_cfa_vendor_op method.  */

static bool
aarch64_execute_dwarf_cfa_vendor_op (struct gdbarch *gdbarch, gdb_byte op,
                                     struct dwarf2_frame_state *fs)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  struct dwarf2_frame_state_reg *ra_state;

  if (tdep->has_pauth () && op == DW_CFA_AARCH64_negate_ra_state)
    {
      /* Allocate RA_STATE column if it's not allocated yet.  */
      fs->regs.alloc_regs (AARCH64_DWARF_PAUTH_RA_STATE + 1);

      /* Toggle the status of RA_STATE between 0 and 1.  */
      ra_state = &(fs->regs.reg[AARCH64_DWARF_PAUTH_RA_STATE]);
      ra_state->how = DWARF2_FRAME_REG_SAVED_VAL_EXP;

      if (ra_state->loc.exp.start == nullptr
          || ra_state->loc.exp.start == &op_lit0)
        ra_state->loc.exp.start = &op_lit1;
      else
        ra_state->loc.exp.start = &op_lit0;

      ra_state->loc.exp.len = 1;

      return true;
    }

  return false;
}
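
/* For illustration: a compiler emits DW_CFA_AARCH64_negate_ra_state (a
   ".cfi_negate_ra_state" directive) each time the signing state of the
   return address changes, so a pauth-protected function typically looks
   like:

     paciasp          // .cfi_negate_ra_state -> RA_STATE flips to 1
     stp x29, x30, [sp, #-32]!
     ...
     autiasp          // .cfi_negate_ra_state -> RA_STATE flips back to 0
     ret

   Each occurrence of the opcode lands in the toggle above.  */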

/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

struct stack_item_t
{
  /* Value to pass on stack.  It can be NULL if this item is for stack
     padding.  */
  const gdb_byte *data;

  /* Size in bytes of value to pass on stack.  */
  int len;
};

/* Implement the gdbarch type alignment method; overrides the generic
   alignment algorithm for anything that is aarch64 specific.  */

static ULONGEST
aarch64_type_align (gdbarch *gdbarch, struct type *t)
{
  t = check_typedef (t);
  if (TYPE_CODE (t) == TYPE_CODE_ARRAY && TYPE_VECTOR (t))
    {
      /* Use the natural alignment for vector types (the same as for
         scalar types), but the maximum alignment is 128-bit.  */
      if (TYPE_LENGTH (t) > 16)
        return 16;
      else
        return TYPE_LENGTH (t);
    }

  /* Allow the common code to calculate the alignment.  */
  return 0;
}
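
/* For example, a GCC-style 32-byte vector such as
   "int v __attribute__ ((vector_size (32)))" would naturally be 32-byte
   aligned, but the hook above caps its alignment at 16 bytes; an 8-byte
   vector keeps its natural 8-byte alignment.  */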

/* Worker function for aapcs_is_vfp_call_or_return_candidate.

   Return the number of registers required, or -1 on failure.

   When encountering a base element, if FUNDAMENTAL_TYPE is not set then set it
   to the element, else fail if the type of this element does not match the
   existing value.  */

static int
aapcs_is_vfp_call_or_return_candidate_1 (struct type *type,
                                         struct type **fundamental_type)
{
  if (type == nullptr)
    return -1;

  switch (TYPE_CODE (type))
    {
    case TYPE_CODE_FLT:
      if (TYPE_LENGTH (type) > 16)
        return -1;

      if (*fundamental_type == nullptr)
        *fundamental_type = type;
      else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
               || TYPE_CODE (type) != TYPE_CODE (*fundamental_type))
        return -1;

      return 1;

    case TYPE_CODE_COMPLEX:
      {
        struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
        if (TYPE_LENGTH (target_type) > 16)
          return -1;

        if (*fundamental_type == nullptr)
          *fundamental_type = target_type;
        else if (TYPE_LENGTH (target_type) != TYPE_LENGTH (*fundamental_type)
                 || TYPE_CODE (target_type) != TYPE_CODE (*fundamental_type))
          return -1;

        return 2;
      }

    case TYPE_CODE_ARRAY:
      {
        if (TYPE_VECTOR (type))
          {
            if (TYPE_LENGTH (type) != 8 && TYPE_LENGTH (type) != 16)
              return -1;

            if (*fundamental_type == nullptr)
              *fundamental_type = type;
            else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
                     || TYPE_CODE (type) != TYPE_CODE (*fundamental_type))
              return -1;

            return 1;
          }
        else
          {
            struct type *target_type = TYPE_TARGET_TYPE (type);
            int count = aapcs_is_vfp_call_or_return_candidate_1
                          (target_type, fundamental_type);

            if (count == -1)
              return count;

            count *= (TYPE_LENGTH (type) / TYPE_LENGTH (target_type));
            return count;
          }
      }

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      {
        int count = 0;

        for (int i = 0; i < TYPE_NFIELDS (type); i++)
          {
            /* Ignore any static fields.  */
            if (field_is_static (&TYPE_FIELD (type, i)))
              continue;

            struct type *member = check_typedef (TYPE_FIELD_TYPE (type, i));

            int sub_count = aapcs_is_vfp_call_or_return_candidate_1
                              (member, fundamental_type);
            if (sub_count == -1)
              return -1;
            count += sub_count;
          }

        /* Ensure there is no padding between the fields (allowing for empty
           zero-length structs).  */
        int ftype_length = (*fundamental_type == nullptr)
                           ? 0 : TYPE_LENGTH (*fundamental_type);
        if (count * ftype_length != TYPE_LENGTH (type))
          return -1;

        return count;
      }

    default:
      break;
    }

  return -1;
}

/* Return true if an argument, whose type is described by TYPE, can be passed
   or returned in simd/fp registers, providing enough parameter passing
   registers are available.  This is as described in the AAPCS64.

   Upon successful return, *COUNT returns the number of needed registers,
   *FUNDAMENTAL_TYPE contains the type of those registers.

   Candidate as per the AAPCS64 5.4.2.C is either a:
   - float.
   - short-vector.
   - HFA (Homogeneous Floating-point Aggregate, 4.3.5.1).  A composite type
     where all the members are floats and which has at most 4 members.
   - HVA (Homogeneous Short-vector Aggregate, 4.3.5.2).  A composite type
     where all the members are short vectors and which has at most 4 members.
   - Complex (7.1.1)

   Note that HFAs and HVAs can include nested structures and arrays.  */

static bool
aapcs_is_vfp_call_or_return_candidate (struct type *type, int *count,
                                       struct type **fundamental_type)
{
  if (type == nullptr)
    return false;

  *fundamental_type = nullptr;

  int ag_count = aapcs_is_vfp_call_or_return_candidate_1 (type,
                                                          fundamental_type);

  if (ag_count > 0 && ag_count <= HA_MAX_NUM_FLDS)
    {
      *count = ag_count;
      return true;
    }
  else
    return false;
}
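
/* Worked examples (the types are illustrative):

     struct { double x; double y; }   -> HFA, count 2, fundamental double
     struct { float v[3]; }           -> HFA, count 3, fundamental float
     _Complex double                  -> candidate, count 2
     struct { float f; double d; }    -> not a candidate (mixed base types)
     struct { double d[5]; }          -> not a candidate (more than
                                         HA_MAX_NUM_FLDS members)  */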

/* AArch64 function call information structure.  */
struct aarch64_call_info
{
  /* The current argument number.  */
  unsigned argnum = 0;

  /* The next general purpose register number, equivalent to NGRN as
     described in the AArch64 Procedure Call Standard.  */
  unsigned ngrn = 0;

  /* The next SIMD and floating point register number, equivalent to
     NSRN as described in the AArch64 Procedure Call Standard.  */
  unsigned nsrn = 0;

  /* The next stacked argument address, equivalent to NSAA as
     described in the AArch64 Procedure Call Standard.  */
  unsigned nsaa = 0;

  /* Stack item vector.  */
  std::vector<stack_item_t> si;
};

/* Pass a value in a sequence of consecutive X registers.  The caller
   is responsible for ensuring sufficient registers are available.  */

static void
pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
           struct aarch64_call_info *info, struct type *type,
           struct value *arg)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int len = TYPE_LENGTH (type);
  enum type_code typecode = TYPE_CODE (type);
  int regnum = AARCH64_X0_REGNUM + info->ngrn;
  const bfd_byte *buf = value_contents (arg);

  info->argnum++;

  while (len > 0)
    {
      int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
      CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
                                                   byte_order);

      /* Adjust sub-word struct/union args when big-endian.  */
      if (byte_order == BFD_ENDIAN_BIG
          && partial_len < X_REGISTER_SIZE
          && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
        regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);

      if (aarch64_debug)
        {
          debug_printf ("arg %d in %s = 0x%s\n", info->argnum,
                        gdbarch_register_name (gdbarch, regnum),
                        phex (regval, X_REGISTER_SIZE));
        }
      regcache_cooked_write_unsigned (regcache, regnum, regval);
      len -= partial_len;
      buf += partial_len;
      regnum++;
    }
}
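
/* For example, a 12-byte struct passed by value occupies two X registers:
   the first 8 bytes go into the next free X register and the remaining 4
   into the following one.  On a big-endian target those trailing 4 bytes
   are additionally shifted into the most significant half of the register
   by the adjustment above.  */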

/* Attempt to marshall a value in a V register.  Return 1 if
   successful, or 0 if insufficient registers are available.  This
   function, unlike the equivalent pass_in_x function, does not
   handle arguments spread across multiple registers.  */

static int
pass_in_v (struct gdbarch *gdbarch,
           struct regcache *regcache,
           struct aarch64_call_info *info,
           int len, const bfd_byte *buf)
{
  if (info->nsrn < 8)
    {
      int regnum = AARCH64_V0_REGNUM + info->nsrn;
      /* Enough space for a full vector register.  */
      gdb_byte reg[register_size (gdbarch, regnum)];
      gdb_assert (len <= sizeof (reg));

      info->argnum++;
      info->nsrn++;

      memset (reg, 0, sizeof (reg));
      /* PCS C.1, the argument is allocated to the least significant
         bits of the V register.  */
      memcpy (reg, buf, len);
      regcache->cooked_write (regnum, reg);

      if (aarch64_debug)
        {
          debug_printf ("arg %d in %s\n", info->argnum,
                        gdbarch_register_name (gdbarch, regnum));
        }
      return 1;
    }
  info->nsrn = 8;
  return 0;
}

/* Marshall an argument onto the stack.  */

static void
pass_on_stack (struct aarch64_call_info *info, struct type *type,
               struct value *arg)
{
  const bfd_byte *buf = value_contents (arg);
  int len = TYPE_LENGTH (type);
  int align;
  stack_item_t item;

  info->argnum++;

  align = type_align (type);

  /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
     natural alignment of the argument's type.  */
  align = align_up (align, 8);

  /* The AArch64 PCS requires at most doubleword alignment.  */
  if (align > 16)
    align = 16;

  if (aarch64_debug)
    {
      debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
                    info->nsaa);
    }

  item.len = len;
  item.data = buf;
  info->si.push_back (item);

  info->nsaa += len;
  if (info->nsaa & (align - 1))
    {
      /* Push stack alignment padding.  */
      int pad = align - (info->nsaa & (align - 1));

      item.len = pad;
      item.data = NULL;

      info->si.push_back (item);
      info->nsaa += pad;
    }
}
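
/* Worked example: pushing a 12-byte struct with 8-byte alignment when
   NSAA is 0.  The data item advances NSAA to 12; 12 & 7 is non-zero, so a
   4-byte padding item (data == NULL) is pushed and NSAA becomes 16, the
   next 8-byte-aligned offset.  */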

/* Marshall an argument into a sequence of one or more consecutive X
   registers or, if insufficient X registers are available, then onto
   the stack.  */

static void
pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
                    struct aarch64_call_info *info, struct type *type,
                    struct value *arg)
{
  int len = TYPE_LENGTH (type);
  int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;

  /* PCS C.13 - Pass in registers if we have enough spare.  */
  if (info->ngrn + nregs <= 8)
    {
      pass_in_x (gdbarch, regcache, info, type, arg);
      info->ngrn += nregs;
    }
  else
    {
      info->ngrn = 8;
      pass_on_stack (info, type, arg);
    }
}

/* Pass a value, which is of type arg_type, in a V register.  Assumes the
   value is an aapcs_is_vfp_call_or_return_candidate and there are enough
   spare V registers.  A return value of false is an error state, as the
   value will have been partially passed to the stack.  */
static bool
pass_in_v_vfp_candidate (struct gdbarch *gdbarch, struct regcache *regcache,
                         struct aarch64_call_info *info, struct type *arg_type,
                         struct value *arg)
{
  switch (TYPE_CODE (arg_type))
    {
    case TYPE_CODE_FLT:
      return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type),
                        value_contents (arg));
      break;

    case TYPE_CODE_COMPLEX:
      {
        const bfd_byte *buf = value_contents (arg);
        struct type *target_type = check_typedef (TYPE_TARGET_TYPE (arg_type));

        if (!pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type),
                        buf))
          return false;

        return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type),
                          buf + TYPE_LENGTH (target_type));
      }

    case TYPE_CODE_ARRAY:
      if (TYPE_VECTOR (arg_type))
        return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type),
                          value_contents (arg));
      /* fall through.  */

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      for (int i = 0; i < TYPE_NFIELDS (arg_type); i++)
        {
          /* Don't include static fields.  */
          if (field_is_static (&TYPE_FIELD (arg_type, i)))
            continue;

          struct value *field = value_primitive_field (arg, 0, i, arg_type);
          struct type *field_type = check_typedef (value_type (field));

          if (!pass_in_v_vfp_candidate (gdbarch, regcache, info, field_type,
                                        field))
            return false;
        }
      return true;

    default:
      return false;
    }
}

/* Implement the "push_dummy_call" gdbarch method.  */

static CORE_ADDR
aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
                         struct regcache *regcache, CORE_ADDR bp_addr,
                         int nargs,
                         struct value **args, CORE_ADDR sp,
                         function_call_return_method return_method,
                         CORE_ADDR struct_addr)
{
  int argnum;
  struct aarch64_call_info info;

  /* We need to know what the type of the called function is in order
     to determine the number of named/anonymous arguments for the
     actual argument placement, and the return type in order to handle
     return value correctly.

     The generic code above us views the decision of return in memory
     or return in registers as a two-stage process.  The language
     handler is consulted first and may decide to return in memory (e.g.
     a class with a copy constructor returned by value); this will cause
     the generic code to allocate space AND insert an initial leading
     argument.

     If the language code does not decide to pass in memory then the
     target code is consulted.

     If the language code decides to pass in memory we want to move
     the pointer inserted as the initial argument from the argument
     list and into X8, the conventional AArch64 struct return pointer
     register.  */

  /* Set the return address.  For the AArch64, the return breakpoint
     is always at BP_ADDR.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);

  /* If we were given an initial argument for the return slot, lose it.  */
  if (return_method == return_method_hidden_param)
    {
      args++;
      nargs--;
    }

  /* The struct_return pointer occupies X8.  */
  if (return_method != return_method_normal)
    {
      if (aarch64_debug)
        {
          debug_printf ("struct return in %s = 0x%s\n",
                        gdbarch_register_name (gdbarch,
                                               AARCH64_STRUCT_RETURN_REGNUM),
                        paddress (gdbarch, struct_addr));
        }
      regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
                                      struct_addr);
    }

  for (argnum = 0; argnum < nargs; argnum++)
    {
      struct value *arg = args[argnum];
      struct type *arg_type, *fundamental_type;
      int len, elements;

      arg_type = check_typedef (value_type (arg));
      len = TYPE_LENGTH (arg_type);

      /* If arg can be passed in v registers as per the AAPCS64, then do so
         if there are enough spare registers.  */
      if (aapcs_is_vfp_call_or_return_candidate (arg_type, &elements,
                                                 &fundamental_type))
        {
          if (info.nsrn + elements <= 8)
            {
              /* We know that we have sufficient registers available,
                 therefore this will never need to fall back to the
                 stack.  */
              if (!pass_in_v_vfp_candidate (gdbarch, regcache, &info, arg_type,
                                            arg))
                gdb_assert_not_reached ("Failed to push args");
            }
          else
            {
              info.nsrn = 8;
              pass_on_stack (&info, arg_type, arg);
            }
          continue;
        }

      switch (TYPE_CODE (arg_type))
        {
        case TYPE_CODE_INT:
        case TYPE_CODE_BOOL:
        case TYPE_CODE_CHAR:
        case TYPE_CODE_RANGE:
        case TYPE_CODE_ENUM:
          if (len < 4)
            {
              /* Promote to 32 bit integer.  */
              if (TYPE_UNSIGNED (arg_type))
                arg_type = builtin_type (gdbarch)->builtin_uint32;
              else
                arg_type = builtin_type (gdbarch)->builtin_int32;
              arg = value_cast (arg_type, arg);
            }
          pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;

        case TYPE_CODE_STRUCT:
        case TYPE_CODE_ARRAY:
        case TYPE_CODE_UNION:
          if (len > 16)
            {
              /* PCS B.7 Aggregates larger than 16 bytes are passed by
                 invisible reference.  */

              /* Allocate aligned storage.  */
              sp = align_down (sp - len, 16);

              /* Write the real data into the stack.  */
              write_memory (sp, value_contents (arg), len);

              /* Construct the indirection.  */
              arg_type = lookup_pointer_type (arg_type);
              arg = value_from_pointer (arg_type, sp);
              pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
            }
          else
            /* PCS C.15 / C.18 multiple values pass.  */
            pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;

        default:
          pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;
        }
    }

  /* Make sure stack retains 16 byte alignment.  */
  if (info.nsaa & 15)
    sp -= 16 - (info.nsaa & 15);

  while (!info.si.empty ())
    {
      const stack_item_t &si = info.si.back ();

      sp -= si.len;
      if (si.data != NULL)
        write_memory (sp, si.data, si.len);
      info.si.pop_back ();
    }

  /* Finally, update the SP register.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);

  return sp;
}
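
/* For example, an inferior call to a function declared as
   "double f (double a, int b, struct big s)", where sizeof (struct big)
   exceeds 16 bytes, would be marshalled as: A in v0 (a VFP candidate),
   B in w0, and S copied onto the stack with a pointer to the copy passed
   in x1 by invisible reference.  */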

/* Implement the "frame_align" gdbarch method.  */

static CORE_ADDR
aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
{
  /* Align the stack to sixteen bytes.  */
  return sp & ~(CORE_ADDR) 15;
}
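
/* E.g. an incoming SP of 0x7ffffff9 is rounded down to 0x7ffffff0.  */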
1785
1786 /* Return the type for an AdvSISD Q register. */
1787
1788 static struct type *
1789 aarch64_vnq_type (struct gdbarch *gdbarch)
1790 {
1791 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1792
1793 if (tdep->vnq_type == NULL)
1794 {
1795 struct type *t;
1796 struct type *elem;
1797
1798 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
1799 TYPE_CODE_UNION);
1800
1801 elem = builtin_type (gdbarch)->builtin_uint128;
1802 append_composite_type_field (t, "u", elem);
1803
1804 elem = builtin_type (gdbarch)->builtin_int128;
1805 append_composite_type_field (t, "s", elem);
1806
1807 tdep->vnq_type = t;
1808 }
1809
1810 return tdep->vnq_type;
1811 }
1812
1813 /* Return the type for an AdvSISD D register. */
1814
1815 static struct type *
1816 aarch64_vnd_type (struct gdbarch *gdbarch)
1817 {
1818 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1819
1820 if (tdep->vnd_type == NULL)
1821 {
1822 struct type *t;
1823 struct type *elem;
1824
1825 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
1826 TYPE_CODE_UNION);
1827
1828 elem = builtin_type (gdbarch)->builtin_double;
1829 append_composite_type_field (t, "f", elem);
1830
1831 elem = builtin_type (gdbarch)->builtin_uint64;
1832 append_composite_type_field (t, "u", elem);
1833
1834 elem = builtin_type (gdbarch)->builtin_int64;
1835 append_composite_type_field (t, "s", elem);
1836
1837 tdep->vnd_type = t;
1838 }
1839
1840 return tdep->vnd_type;
1841 }
1842
1843 /* Return the type for an AdvSISD S register. */
1844
1845 static struct type *
1846 aarch64_vns_type (struct gdbarch *gdbarch)
1847 {
1848 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1849
1850 if (tdep->vns_type == NULL)
1851 {
1852 struct type *t;
1853 struct type *elem;
1854
1855 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
1856 TYPE_CODE_UNION);
1857
1858 elem = builtin_type (gdbarch)->builtin_float;
1859 append_composite_type_field (t, "f", elem);
1860
1861 elem = builtin_type (gdbarch)->builtin_uint32;
1862 append_composite_type_field (t, "u", elem);
1863
1864 elem = builtin_type (gdbarch)->builtin_int32;
1865 append_composite_type_field (t, "s", elem);
1866
1867 tdep->vns_type = t;
1868 }
1869
1870 return tdep->vns_type;
1871 }
1872
1873 /* Return the type for an AdvSISD H register. */
1874
1875 static struct type *
1876 aarch64_vnh_type (struct gdbarch *gdbarch)
1877 {
1878 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1879
1880 if (tdep->vnh_type == NULL)
1881 {
1882 struct type *t;
1883 struct type *elem;
1884
1885 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
1886 TYPE_CODE_UNION);
1887
1888 elem = builtin_type (gdbarch)->builtin_half;
1889 append_composite_type_field (t, "f", elem);
1890
1891 elem = builtin_type (gdbarch)->builtin_uint16;
1892 append_composite_type_field (t, "u", elem);
1893
1894 elem = builtin_type (gdbarch)->builtin_int16;
1895 append_composite_type_field (t, "s", elem);
1896
1897 tdep->vnh_type = t;
1898 }
1899
1900 return tdep->vnh_type;
1901 }
1902
1903 /* Return the type for an AdvSISD B register. */
1904
1905 static struct type *
1906 aarch64_vnb_type (struct gdbarch *gdbarch)
1907 {
1908 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1909
1910 if (tdep->vnb_type == NULL)
1911 {
1912 struct type *t;
1913 struct type *elem;
1914
1915 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
1916 TYPE_CODE_UNION);
1917
1918 elem = builtin_type (gdbarch)->builtin_uint8;
1919 append_composite_type_field (t, "u", elem);
1920
1921 elem = builtin_type (gdbarch)->builtin_int8;
1922 append_composite_type_field (t, "s", elem);
1923
1924 tdep->vnb_type = t;
1925 }
1926
1927 return tdep->vnb_type;
1928 }
1929
1930 /* Return the type for an AdvSIMD V register. */
1931
1932 static struct type *
1933 aarch64_vnv_type (struct gdbarch *gdbarch)
1934 {
1935 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1936
1937 if (tdep->vnv_type == NULL)
1938 {
1939 /* The other AArch64 pseudo registers (Q, D, H, S, B) each refer to a single
1940 value slice of the underlying non-pseudo vector registers. NEON V registers,
1941 however, are always vector registers, and need to be constructed as such. */
1942 const struct builtin_type *bt = builtin_type (gdbarch);
1943
1944 struct type *t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnv",
1945 TYPE_CODE_UNION);
1946
1947 struct type *sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
1948 TYPE_CODE_UNION);
1949 append_composite_type_field (sub, "f",
1950 init_vector_type (bt->builtin_double, 2));
1951 append_composite_type_field (sub, "u",
1952 init_vector_type (bt->builtin_uint64, 2));
1953 append_composite_type_field (sub, "s",
1954 init_vector_type (bt->builtin_int64, 2));
1955 append_composite_type_field (t, "d", sub);
1956
1957 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
1958 TYPE_CODE_UNION);
1959 append_composite_type_field (sub, "f",
1960 init_vector_type (bt->builtin_float, 4));
1961 append_composite_type_field (sub, "u",
1962 init_vector_type (bt->builtin_uint32, 4));
1963 append_composite_type_field (sub, "s",
1964 init_vector_type (bt->builtin_int32, 4));
1965 append_composite_type_field (t, "s", sub);
1966
1967 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
1968 TYPE_CODE_UNION);
1969 append_composite_type_field (sub, "f",
1970 init_vector_type (bt->builtin_half, 8));
1971 append_composite_type_field (sub, "u",
1972 init_vector_type (bt->builtin_uint16, 8));
1973 append_composite_type_field (sub, "s",
1974 init_vector_type (bt->builtin_int16, 8));
1975 append_composite_type_field (t, "h", sub);
1976
1977 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
1978 TYPE_CODE_UNION);
1979 append_composite_type_field (sub, "u",
1980 init_vector_type (bt->builtin_uint8, 16));
1981 append_composite_type_field (sub, "s",
1982 init_vector_type (bt->builtin_int8, 16));
1983 append_composite_type_field (t, "b", sub);
1984
1985 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
1986 TYPE_CODE_UNION);
1987 append_composite_type_field (sub, "u",
1988 init_vector_type (bt->builtin_uint128, 1));
1989 append_composite_type_field (sub, "s",
1990 init_vector_type (bt->builtin_int128, 1));
1991 append_composite_type_field (t, "q", sub);
1992
1993 tdep->vnv_type = t;
1994 }
1995
1996 return tdep->vnv_type;
1997 }
1998
1999 /* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
2000
2001 static int
2002 aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
2003 {
2004 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2005
2006 if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
2007 return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;
2008
2009 if (reg == AARCH64_DWARF_SP)
2010 return AARCH64_SP_REGNUM;
2011
2012 if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
2013 return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;
2014
2015 if (reg == AARCH64_DWARF_SVE_VG)
2016 return AARCH64_SVE_VG_REGNUM;
2017
2018 if (reg == AARCH64_DWARF_SVE_FFR)
2019 return AARCH64_SVE_FFR_REGNUM;
2020
2021 if (reg >= AARCH64_DWARF_SVE_P0 && reg <= AARCH64_DWARF_SVE_P0 + 15)
2022 return AARCH64_SVE_P0_REGNUM + reg - AARCH64_DWARF_SVE_P0;
2023
2024 if (reg >= AARCH64_DWARF_SVE_Z0 && reg <= AARCH64_DWARF_SVE_Z0 + 15)
2025 return AARCH64_SVE_Z0_REGNUM + reg - AARCH64_DWARF_SVE_Z0;
2026
2027 if (tdep->has_pauth ())
2028 {
2029 if (reg >= AARCH64_DWARF_PAUTH_DMASK && reg <= AARCH64_DWARF_PAUTH_CMASK)
2030 return tdep->pauth_reg_base + reg - AARCH64_DWARF_PAUTH_DMASK;
2031
2032 if (reg == AARCH64_DWARF_PAUTH_RA_STATE)
2033 return tdep->pauth_ra_state_regnum;
2034 }
2035
2036 return -1;
2037 }
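/* Worked example: the AArch64 DWARF numbering places V0 at 64, so a
   DWARF register number of 65 (V1) maps to AARCH64_V0_REGNUM + 1, while
   a number outside every range above falls through to the -1 "unknown
   register" result.  */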
2038
2039 /* Implement the "print_insn" gdbarch method. */
2040
2041 static int
2042 aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
2043 {
2044 info->symbols = NULL;
2045 return default_print_insn (memaddr, info);
2046 }
2047
2048 /* AArch64 BRK software debug mode instruction.
2049 Note that AArch64 code is always little-endian.
2050 1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000. */
2051 constexpr gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
2052
2053 typedef BP_MANIPULATION (aarch64_default_breakpoint) aarch64_breakpoint;
2054
2055 /* Extract from an array REGS containing the (raw) register state a
2056 function return value of type TYPE, and copy that, in virtual
2057 format, into VALBUF. */
2058
2059 static void
2060 aarch64_extract_return_value (struct type *type, struct regcache *regs,
2061 gdb_byte *valbuf)
2062 {
2063 struct gdbarch *gdbarch = regs->arch ();
2064 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2065 int elements;
2066 struct type *fundamental_type;
2067
2068 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2069 &fundamental_type))
2070 {
2071 int len = TYPE_LENGTH (fundamental_type);
2072
2073 for (int i = 0; i < elements; i++)
2074 {
2075 int regno = AARCH64_V0_REGNUM + i;
2076 /* Enough space for a full vector register. */
2077 gdb_byte buf[register_size (gdbarch, regno)];
2078 gdb_assert (len <= sizeof (buf));
2079
2080 if (aarch64_debug)
2081 {
2082 debug_printf ("read HFA or HVA return value element %d from %s\n",
2083 i + 1,
2084 gdbarch_register_name (gdbarch, regno));
2085 }
2086 regs->cooked_read (regno, buf);
2087
2088 memcpy (valbuf, buf, len);
2089 valbuf += len;
2090 }
2091 }
2092 else if (TYPE_CODE (type) == TYPE_CODE_INT
2093 || TYPE_CODE (type) == TYPE_CODE_CHAR
2094 || TYPE_CODE (type) == TYPE_CODE_BOOL
2095 || TYPE_CODE (type) == TYPE_CODE_PTR
2096 || TYPE_IS_REFERENCE (type)
2097 || TYPE_CODE (type) == TYPE_CODE_ENUM)
2098 {
2099 /* If the type is a plain integer, then the access is
2100 straightforward. Otherwise we have to play around a bit
2101 more. */
2102 int len = TYPE_LENGTH (type);
2103 int regno = AARCH64_X0_REGNUM;
2104 ULONGEST tmp;
2105
2106 while (len > 0)
2107 {
2108 /* By using store_unsigned_integer we avoid having to do
2109 anything special for small big-endian values. */
2110 regcache_cooked_read_unsigned (regs, regno++, &tmp);
2111 store_unsigned_integer (valbuf,
2112 (len > X_REGISTER_SIZE
2113 ? X_REGISTER_SIZE : len), byte_order, tmp);
2114 len -= X_REGISTER_SIZE;
2115 valbuf += X_REGISTER_SIZE;
2116 }
2117 }
2118 else
2119 {
2120 /* For a structure or union the behaviour is as if the value had
2121 been stored to word-aligned memory and then loaded into
2122 registers with 64-bit load instruction(s). */
2123 int len = TYPE_LENGTH (type);
2124 int regno = AARCH64_X0_REGNUM;
2125 bfd_byte buf[X_REGISTER_SIZE];
2126
2127 while (len > 0)
2128 {
2129 regs->cooked_read (regno++, buf);
2130 memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2131 len -= X_REGISTER_SIZE;
2132 valbuf += X_REGISTER_SIZE;
2133 }
2134 }
2135 }
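/* Illustrative case: for a function returning
   struct { float x, y, z; } (an HFA of three floats),
   aapcs_is_vfp_call_or_return_candidate reports ELEMENTS == 3 with a
   4-byte fundamental type, so the loop above copies the low four bytes
   of each of v0, v1 and v2 into VALBUF in turn.  */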
2136
2137
2138 /* Will a function return an aggregate type in memory or in a
2139 register? Return 0 if an aggregate type can be returned in a
2140 register, 1 if it must be returned in memory. */
2141
2142 static int
2143 aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
2144 {
2145 type = check_typedef (type);
2146 int elements;
2147 struct type *fundamental_type;
2148
2149 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2150 &fundamental_type))
2151 {
2152 /* v0-v7 are used to return values, and one register is allocated
2153 per member. However, an HFA or HVA has at most four members. */
2154 return 0;
2155 }
2156
2157 if (TYPE_LENGTH (type) > 16)
2158 {
2159 /* PCS B.6 Aggregates larger than 16 bytes are passed by
2160 invisible reference. */
2161
2162 return 1;
2163 }
2164
2165 return 0;
2166 }
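/* For instance, struct { long a, b; } (16 bytes) is returned in
   registers, whereas struct { long a, b, c; } (24 bytes) exceeds the
   16-byte limit and is returned in memory via the AAPCS64 indirect
   result register, X8.  */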
2167
2168 /* Write into appropriate registers a function return value of type
2169 TYPE, given in virtual format. */
2170
2171 static void
2172 aarch64_store_return_value (struct type *type, struct regcache *regs,
2173 const gdb_byte *valbuf)
2174 {
2175 struct gdbarch *gdbarch = regs->arch ();
2176 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2177 int elements;
2178 struct type *fundamental_type;
2179
2180 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2181 &fundamental_type))
2182 {
2183 int len = TYPE_LENGTH (fundamental_type);
2184
2185 for (int i = 0; i < elements; i++)
2186 {
2187 int regno = AARCH64_V0_REGNUM + i;
2188 /* Enough space for a full vector register. */
2189 gdb_byte tmpbuf[register_size (gdbarch, regno)];
2190 gdb_assert (len <= sizeof (tmpbuf));
2191
2192 if (aarch64_debug)
2193 {
2194 debug_printf ("write HFA or HVA return value element %d to %s\n",
2195 i + 1,
2196 gdbarch_register_name (gdbarch, regno));
2197 }
2198
2199 memcpy (tmpbuf, valbuf,
2200 len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
2201 regs->cooked_write (regno, tmpbuf);
2202 valbuf += len;
2203 }
2204 }
2205 else if (TYPE_CODE (type) == TYPE_CODE_INT
2206 || TYPE_CODE (type) == TYPE_CODE_CHAR
2207 || TYPE_CODE (type) == TYPE_CODE_BOOL
2208 || TYPE_CODE (type) == TYPE_CODE_PTR
2209 || TYPE_IS_REFERENCE (type)
2210 || TYPE_CODE (type) == TYPE_CODE_ENUM)
2211 {
2212 if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
2213 {
2214 /* Values of one word or less are zero/sign-extended and
2215 returned in x0. */
2216 bfd_byte tmpbuf[X_REGISTER_SIZE];
2217 LONGEST val = unpack_long (type, valbuf);
2218
2219 store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
2220 regs->cooked_write (AARCH64_X0_REGNUM, tmpbuf);
2221 }
2222 else
2223 {
2224 /* Integral values greater than one word are stored in
2225 consecutive registers starting with x0. This will always
2226 be a multiple of the register size. */
2227 int len = TYPE_LENGTH (type);
2228 int regno = AARCH64_X0_REGNUM;
2229
2230 while (len > 0)
2231 {
2232 regs->cooked_write (regno++, valbuf);
2233 len -= X_REGISTER_SIZE;
2234 valbuf += X_REGISTER_SIZE;
2235 }
2236 }
2237 }
2238 else
2239 {
2240 /* For a structure or union the behaviour is as if the value had
2241 been stored to word-aligned memory and then loaded into
2242 registers with 64-bit load instruction(s). */
2243 int len = TYPE_LENGTH (type);
2244 int regno = AARCH64_X0_REGNUM;
2245 bfd_byte tmpbuf[X_REGISTER_SIZE];
2246
2247 while (len > 0)
2248 {
2249 memcpy (tmpbuf, valbuf,
2250 len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2251 regs->cooked_write (regno++, tmpbuf);
2252 len -= X_REGISTER_SIZE;
2253 valbuf += X_REGISTER_SIZE;
2254 }
2255 }
2256 }
2257
2258 /* Implement the "return_value" gdbarch method. */
2259
2260 static enum return_value_convention
2261 aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
2262 struct type *valtype, struct regcache *regcache,
2263 gdb_byte *readbuf, const gdb_byte *writebuf)
2264 {
2265
2266 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
2267 || TYPE_CODE (valtype) == TYPE_CODE_UNION
2268 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
2269 {
2270 if (aarch64_return_in_memory (gdbarch, valtype))
2271 {
2272 if (aarch64_debug)
2273 debug_printf ("return value in memory\n");
2274 return RETURN_VALUE_STRUCT_CONVENTION;
2275 }
2276 }
2277
2278 if (writebuf)
2279 aarch64_store_return_value (valtype, regcache, writebuf);
2280
2281 if (readbuf)
2282 aarch64_extract_return_value (valtype, regcache, readbuf);
2283
2284 if (aarch64_debug)
2285 debug_printf ("return value in registers\n");
2286
2287 return RETURN_VALUE_REGISTER_CONVENTION;
2288 }
2289
2290 /* Implement the "get_longjmp_target" gdbarch method. */
2291
2292 static int
2293 aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2294 {
2295 CORE_ADDR jb_addr;
2296 gdb_byte buf[X_REGISTER_SIZE];
2297 struct gdbarch *gdbarch = get_frame_arch (frame);
2298 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2299 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2300
2301 jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
2302
2303 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
2304 X_REGISTER_SIZE))
2305 return 0;
2306
2307 *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
2308 return 1;
2309 }
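/* A worked example, assuming an OS ABI that set jb_pc to 11 with the
   default jb_elt_size of 8 (numbers chosen purely for illustration):
   the saved PC would be read from jb_addr + 88, i.e. the twelfth
   8-byte slot of the jmp_buf.  */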
2310
2311 /* Implement the "gen_return_address" gdbarch method. */
2312
2313 static void
2314 aarch64_gen_return_address (struct gdbarch *gdbarch,
2315 struct agent_expr *ax, struct axs_value *value,
2316 CORE_ADDR scope)
2317 {
2318 value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
2319 value->kind = axs_lvalue_register;
2320 value->u.reg = AARCH64_LR_REGNUM;
2321 }
2322 \f
2323
2324 /* Return the pseudo register name corresponding to register regnum. */
2325
2326 static const char *
2327 aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
2328 {
2329 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2330
2331 static const char *const q_name[] =
2332 {
2333 "q0", "q1", "q2", "q3",
2334 "q4", "q5", "q6", "q7",
2335 "q8", "q9", "q10", "q11",
2336 "q12", "q13", "q14", "q15",
2337 "q16", "q17", "q18", "q19",
2338 "q20", "q21", "q22", "q23",
2339 "q24", "q25", "q26", "q27",
2340 "q28", "q29", "q30", "q31",
2341 };
2342
2343 static const char *const d_name[] =
2344 {
2345 "d0", "d1", "d2", "d3",
2346 "d4", "d5", "d6", "d7",
2347 "d8", "d9", "d10", "d11",
2348 "d12", "d13", "d14", "d15",
2349 "d16", "d17", "d18", "d19",
2350 "d20", "d21", "d22", "d23",
2351 "d24", "d25", "d26", "d27",
2352 "d28", "d29", "d30", "d31",
2353 };
2354
2355 static const char *const s_name[] =
2356 {
2357 "s0", "s1", "s2", "s3",
2358 "s4", "s5", "s6", "s7",
2359 "s8", "s9", "s10", "s11",
2360 "s12", "s13", "s14", "s15",
2361 "s16", "s17", "s18", "s19",
2362 "s20", "s21", "s22", "s23",
2363 "s24", "s25", "s26", "s27",
2364 "s28", "s29", "s30", "s31",
2365 };
2366
2367 static const char *const h_name[] =
2368 {
2369 "h0", "h1", "h2", "h3",
2370 "h4", "h5", "h6", "h7",
2371 "h8", "h9", "h10", "h11",
2372 "h12", "h13", "h14", "h15",
2373 "h16", "h17", "h18", "h19",
2374 "h20", "h21", "h22", "h23",
2375 "h24", "h25", "h26", "h27",
2376 "h28", "h29", "h30", "h31",
2377 };
2378
2379 static const char *const b_name[] =
2380 {
2381 "b0", "b1", "b2", "b3",
2382 "b4", "b5", "b6", "b7",
2383 "b8", "b9", "b10", "b11",
2384 "b12", "b13", "b14", "b15",
2385 "b16", "b17", "b18", "b19",
2386 "b20", "b21", "b22", "b23",
2387 "b24", "b25", "b26", "b27",
2388 "b28", "b29", "b30", "b31",
2389 };
2390
2391 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
2392
2393 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
2394 return q_name[p_regnum - AARCH64_Q0_REGNUM];
2395
2396 if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
2397 return d_name[p_regnum - AARCH64_D0_REGNUM];
2398
2399 if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
2400 return s_name[p_regnum - AARCH64_S0_REGNUM];
2401
2402 if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
2403 return h_name[p_regnum - AARCH64_H0_REGNUM];
2404
2405 if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
2406 return b_name[p_regnum - AARCH64_B0_REGNUM];
2407
2408 if (tdep->has_sve ())
2409 {
2410 static const char *const sve_v_name[] =
2411 {
2412 "v0", "v1", "v2", "v3",
2413 "v4", "v5", "v6", "v7",
2414 "v8", "v9", "v10", "v11",
2415 "v12", "v13", "v14", "v15",
2416 "v16", "v17", "v18", "v19",
2417 "v20", "v21", "v22", "v23",
2418 "v24", "v25", "v26", "v27",
2419 "v28", "v29", "v30", "v31",
2420 };
2421
2422 if (p_regnum >= AARCH64_SVE_V0_REGNUM
2423 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2424 return sve_v_name[p_regnum - AARCH64_SVE_V0_REGNUM];
2425 }
2426
2427 /* RA_STATE is used for unwinding only. Do not assign it a name - this
2428 prevents it from being read by methods such as
2429 mi_cmd_trace_frame_collected. */
2430 if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
2431 return "";
2432
2433 internal_error (__FILE__, __LINE__,
2434 _("aarch64_pseudo_register_name: bad register number %d"),
2435 p_regnum);
2436 }
2437
2438 /* Implement the "pseudo_register_type" tdesc_arch_data method. */
2439
2440 static struct type *
2441 aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
2442 {
2443 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2444
2445 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
2446
2447 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
2448 return aarch64_vnq_type (gdbarch);
2449
2450 if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
2451 return aarch64_vnd_type (gdbarch);
2452
2453 if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
2454 return aarch64_vns_type (gdbarch);
2455
2456 if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
2457 return aarch64_vnh_type (gdbarch);
2458
2459 if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
2460 return aarch64_vnb_type (gdbarch);
2461
2462 if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
2463 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2464 return aarch64_vnv_type (gdbarch);
2465
2466 if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
2467 return builtin_type (gdbarch)->builtin_uint64;
2468
2469 internal_error (__FILE__, __LINE__,
2470 _("aarch64_pseudo_register_type: bad register number %d"),
2471 p_regnum);
2472 }
2473
2474 /* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2475
2476 static int
2477 aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
2478 struct reggroup *group)
2479 {
2480 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2481
2482 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
2483
2484 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
2485 return group == all_reggroup || group == vector_reggroup;
2486 else if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
2487 return (group == all_reggroup || group == vector_reggroup
2488 || group == float_reggroup);
2489 else if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
2490 return (group == all_reggroup || group == vector_reggroup
2491 || group == float_reggroup);
2492 else if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
2493 return group == all_reggroup || group == vector_reggroup;
2494 else if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
2495 return group == all_reggroup || group == vector_reggroup;
2496 else if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
2497 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2498 return group == all_reggroup || group == vector_reggroup;
2499 /* RA_STATE is used for unwinding only. Do not assign it to any groups. */
2500 if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
2501 return 0;
2502
2503 return group == all_reggroup;
2504 }
2505
2506 /* Helper for aarch64_pseudo_read_value. */
2507
2508 static struct value *
2509 aarch64_pseudo_read_value_1 (struct gdbarch *gdbarch,
2510 readable_regcache *regcache, int regnum_offset,
2511 int regsize, struct value *result_value)
2512 {
2513 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
2514
2515 /* Enough space for a full vector register. */
2516 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2517 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2518
2519 if (regcache->raw_read (v_regnum, reg_buf) != REG_VALID)
2520 mark_value_bytes_unavailable (result_value, 0,
2521 TYPE_LENGTH (value_type (result_value)));
2522 else
2523 memcpy (value_contents_raw (result_value), reg_buf, regsize);
2524
2525 return result_value;
2526 }
2527
2528 /* Implement the "pseudo_register_read_value" gdbarch method. */
2529
2530 static struct value *
2531 aarch64_pseudo_read_value (struct gdbarch *gdbarch, readable_regcache *regcache,
2532 int regnum)
2533 {
2534 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2535 struct value *result_value = allocate_value (register_type (gdbarch, regnum));
2536
2537 VALUE_LVAL (result_value) = lval_register;
2538 VALUE_REGNUM (result_value) = regnum;
2539
2540 regnum -= gdbarch_num_regs (gdbarch);
2541
2542 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2543 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2544 regnum - AARCH64_Q0_REGNUM,
2545 Q_REGISTER_SIZE, result_value);
2546
2547 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2548 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2549 regnum - AARCH64_D0_REGNUM,
2550 D_REGISTER_SIZE, result_value);
2551
2552 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2553 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2554 regnum - AARCH64_S0_REGNUM,
2555 S_REGISTER_SIZE, result_value);
2556
2557 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2558 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2559 regnum - AARCH64_H0_REGNUM,
2560 H_REGISTER_SIZE, result_value);
2561
2562 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2563 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2564 regnum - AARCH64_B0_REGNUM,
2565 B_REGISTER_SIZE, result_value);
2566
2567 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2568 && regnum < AARCH64_SVE_V0_REGNUM + 32)
2569 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2570 regnum - AARCH64_SVE_V0_REGNUM,
2571 V_REGISTER_SIZE, result_value);
2572
2573 gdb_assert_not_reached ("regnum out of bounds");
2574 }
2575
2576 /* Helper for aarch64_pseudo_write. */
2577
2578 static void
2579 aarch64_pseudo_write_1 (struct gdbarch *gdbarch, struct regcache *regcache,
2580 int regnum_offset, int regsize, const gdb_byte *buf)
2581 {
2582 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
2583
2584 /* Enough space for a full vector register. */
2585 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2586 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2587
2588 /* Ensure the register buffer is zero. We want GDB writes of the
2589 various 'scalar' pseudo registers to behave like architectural
2590 writes: register-width bytes are written and the remainder is set
2591 to zero. */
2592 memset (reg_buf, 0, register_size (gdbarch, AARCH64_V0_REGNUM));
2593
2594 memcpy (reg_buf, buf, regsize);
2595 regcache->raw_write (v_regnum, reg_buf);
2596 }
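/* Illustrative effect: "set $s0.f = 1.5" stores the 4-byte pattern
   0x3fc00000 in the low bytes of V0 and zeroes the rest, mirroring how
   an architectural write to S0 clears the upper bits of the vector
   register.  */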
2597
2598 /* Implement the "pseudo_register_write" gdbarch method. */
2599
2600 static void
2601 aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2602 int regnum, const gdb_byte *buf)
2603 {
2604 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2605 regnum -= gdbarch_num_regs (gdbarch);
2606
2607 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2608 return aarch64_pseudo_write_1 (gdbarch, regcache,
2609 regnum - AARCH64_Q0_REGNUM, Q_REGISTER_SIZE,
2610 buf);
2611
2612 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2613 return aarch64_pseudo_write_1 (gdbarch, regcache,
2614 regnum - AARCH64_D0_REGNUM, D_REGISTER_SIZE,
2615 buf);
2616
2617 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2618 return aarch64_pseudo_write_1 (gdbarch, regcache,
2619 regnum - AARCH64_S0_REGNUM, S_REGISTER_SIZE,
2620 buf);
2621
2622 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2623 return aarch64_pseudo_write_1 (gdbarch, regcache,
2624 regnum - AARCH64_H0_REGNUM, H_REGISTER_SIZE,
2625 buf);
2626
2627 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2628 return aarch64_pseudo_write_1 (gdbarch, regcache,
2629 regnum - AARCH64_B0_REGNUM, B_REGISTER_SIZE,
2630 buf);
2631
2632 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2633 && regnum < AARCH64_SVE_V0_REGNUM + 32)
2634 return aarch64_pseudo_write_1 (gdbarch, regcache,
2635 regnum - AARCH64_SVE_V0_REGNUM,
2636 V_REGISTER_SIZE, buf);
2637
2638 gdb_assert_not_reached ("regnum out of bounds");
2639 }
2640
2641 /* Callback function for user_reg_add. */
2642
2643 static struct value *
2644 value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
2645 {
2646 const int *reg_p = (const int *) baton;
2647
2648 return value_of_register (*reg_p, frame);
2649 }
2650 \f
2651
2652 /* Implement the "software_single_step" gdbarch method, needed to
2653 single step through atomic sequences on AArch64. */
2654
2655 static std::vector<CORE_ADDR>
2656 aarch64_software_single_step (struct regcache *regcache)
2657 {
2658 struct gdbarch *gdbarch = regcache->arch ();
2659 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2660 const int insn_size = 4;
2661 const int atomic_sequence_length = 16; /* Instruction sequence length. */
2662 CORE_ADDR pc = regcache_read_pc (regcache);
2663 CORE_ADDR breaks[2] = { CORE_ADDR_MAX, CORE_ADDR_MAX };
2664 CORE_ADDR loc = pc;
2665 CORE_ADDR closing_insn = 0;
2666 uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
2667 byte_order_for_code);
2668 int index;
2669 int insn_count;
2670 int bc_insn_count = 0; /* Conditional branch instruction count. */
2671 int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed). */
2672 aarch64_inst inst;
2673
2674 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2675 return {};
2676
2677 /* Look for a Load Exclusive instruction which begins the sequence. */
2678 if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
2679 return {};
2680
2681 for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
2682 {
2683 loc += insn_size;
2684 insn = read_memory_unsigned_integer (loc, insn_size,
2685 byte_order_for_code);
2686
2687 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2688 return {};
2689 /* Check if the instruction is a conditional branch. */
2690 if (inst.opcode->iclass == condbranch)
2691 {
2692 gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);
2693
2694 if (bc_insn_count >= 1)
2695 return {};
2696
2697 /* It is, so we'll try to set a breakpoint at the destination. */
2698 breaks[1] = loc + inst.operands[0].imm.value;
2699
2700 bc_insn_count++;
2701 last_breakpoint++;
2702 }
2703
2704 /* Look for the Store Exclusive which closes the atomic sequence. */
2705 if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
2706 {
2707 closing_insn = loc;
2708 break;
2709 }
2710 }
2711
2712 /* We didn't find a closing Store Exclusive instruction; fall back. */
2713 if (!closing_insn)
2714 return {};
2715
2716 /* Insert breakpoint after the end of the atomic sequence. */
2717 breaks[0] = loc + insn_size;
2718
2719 /* Check for duplicated breakpoints, and also check that the second
2720 breakpoint is not within the atomic sequence. */
2721 if (last_breakpoint
2722 && (breaks[1] == breaks[0]
2723 || (breaks[1] >= pc && breaks[1] <= closing_insn)))
2724 last_breakpoint = 0;
2725
2726 std::vector<CORE_ADDR> next_pcs;
2727
2728 /* Insert the breakpoint at the end of the sequence, and one at the
2729 destination of the conditional branch, if it exists. */
2730 for (index = 0; index <= last_breakpoint; index++)
2731 next_pcs.push_back (breaks[index]);
2732
2733 return next_pcs;
2734 }
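/* A typical sequence this code protects, shown purely as an
   illustration (a compiler-generated atomic increment):

     retry:
       ldaxr  w0, [x1]      ; load exclusive opens the sequence
       add    w0, w0, #1
       stlxr  w2, w0, [x1]  ; store exclusive closes it
       cbnz   w2, retry

   Single-stepping inside the sequence would clear the exclusive monitor
   and make the store fail forever, so the breakpoint goes just past the
   closing store exclusive, plus one at the destination of any B.cond
   found inside the sequence.  */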
2735
2736 struct aarch64_displaced_step_closure : public displaced_step_closure
2737 {
2738 /* True when a conditional instruction, such as B.COND or TBZ, is
2739 being displaced-stepped. */
2740 int cond = 0;
2741
2742 /* PC adjustment offset after displaced stepping. */
2743 int32_t pc_adjust = 0;
2744 };
2745
2746 /* Data when visiting instructions for displaced stepping. */
2747
2748 struct aarch64_displaced_step_data
2749 {
2750 struct aarch64_insn_data base;
2751
2752 /* The address at which the instruction will be executed. */
2753 CORE_ADDR new_addr;
2754 /* Buffer of instructions to be copied to NEW_ADDR to execute. */
2755 uint32_t insn_buf[DISPLACED_MODIFIED_INSNS];
2756 /* Number of instructions in INSN_BUF. */
2757 unsigned insn_count;
2758 /* Registers when doing displaced stepping. */
2759 struct regcache *regs;
2760
2761 aarch64_displaced_step_closure *dsc;
2762 };
2763
2764 /* Implementation of aarch64_insn_visitor method "b". */
2765
2766 static void
2767 aarch64_displaced_step_b (const int is_bl, const int32_t offset,
2768 struct aarch64_insn_data *data)
2769 {
2770 struct aarch64_displaced_step_data *dsd
2771 = (struct aarch64_displaced_step_data *) data;
2772 int64_t new_offset = data->insn_addr - dsd->new_addr + offset;
2773
2774 if (can_encode_int32 (new_offset, 28))
2775 {
2776 /* Emit B rather than BL, because executing BL on a new address
2777 will get the wrong address into LR. In order to avoid this,
2778 we emit B, and update LR if the instruction is BL. */
2779 emit_b (dsd->insn_buf, 0, new_offset);
2780 dsd->insn_count++;
2781 }
2782 else
2783 {
2784 /* Write NOP. */
2785 emit_nop (dsd->insn_buf);
2786 dsd->insn_count++;
2787 dsd->dsc->pc_adjust = offset;
2788 }
2789
2790 if (is_bl)
2791 {
2792 /* Update LR. */
2793 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
2794 data->insn_addr + 4);
2795 }
2796 }
2797
2798 /* Implementation of aarch64_insn_visitor method "b_cond". */
2799
2800 static void
2801 aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
2802 struct aarch64_insn_data *data)
2803 {
2804 struct aarch64_displaced_step_data *dsd
2805 = (struct aarch64_displaced_step_data *) data;
2806
2807 /* GDB has to fix up the PC after displaced stepping this instruction
2808 differently according to whether the condition is true or false.
2809 Instead of checking COND against the condition flags, we can emit
2810 the following instructions, and GDB can then tell how to fix up the
2811 PC from the resulting PC value.
2812
2813 B.COND TAKEN ; If cond is true, then jump to TAKEN.
2814 INSN1 ;
2815 TAKEN:
2816 INSN2
2817 */
2818
2819 emit_bcond (dsd->insn_buf, cond, 8);
2820 dsd->dsc->cond = 1;
2821 dsd->dsc->pc_adjust = offset;
2822 dsd->insn_count = 1;
2823 }
2824
2825 /* Construct a register operand for register number NUM. If we know the
2826 register statically, we should make it a global as above instead of using
2827 this helper function. */
2828
2829 static struct aarch64_register
2830 aarch64_register (unsigned num, int is64)
2831 {
2832 return (struct aarch64_register) { num, is64 };
2833 }
2834
2835 /* Implementation of aarch64_insn_visitor method "cb". */
2836
2837 static void
2838 aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
2839 const unsigned rn, int is64,
2840 struct aarch64_insn_data *data)
2841 {
2842 struct aarch64_displaced_step_data *dsd
2843 = (struct aarch64_displaced_step_data *) data;
2844
2845 /* The offset is out of range for a compare and branch
2846 instruction. We can use the following instructions instead:
2847
2848 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
2849 INSN1 ;
2850 TAKEN:
2851 INSN2
2852 */
2853 emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
2854 dsd->insn_count = 1;
2855 dsd->dsc->cond = 1;
2856 dsd->dsc->pc_adjust = offset;
2857 }
2858
2859 /* Implementation of aarch64_insn_visitor method "tb". */
2860
2861 static void
2862 aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
2863 const unsigned rt, unsigned bit,
2864 struct aarch64_insn_data *data)
2865 {
2866 struct aarch64_displaced_step_data *dsd
2867 = (struct aarch64_displaced_step_data *) data;
2868
2869 /* The offset is out of range for a test bit and branch
2870 instruction. We can use the following instructions instead:
2871
2872 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
2873 INSN1 ;
2874 TAKEN:
2875 INSN2
2876
2877 */
2878 emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
2879 dsd->insn_count = 1;
2880 dsd->dsc->cond = 1;
2881 dsd->dsc->pc_adjust = offset;
2882 }
2883
2884 /* Implementation of aarch64_insn_visitor method "adr". */
2885
2886 static void
2887 aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
2888 const int is_adrp, struct aarch64_insn_data *data)
2889 {
2890 struct aarch64_displaced_step_data *dsd
2891 = (struct aarch64_displaced_step_data *) data;
2892 /* We know exactly the address the ADR or ADRP instruction will
2893 compute. We can just write it to the destination register. */
2894 CORE_ADDR address = data->insn_addr + offset;
2895
2896 if (is_adrp)
2897 {
2898 /* Clear the lower 12 bits of the address to get its 4K page base. */
2899 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2900 address & ~0xfff);
2901 }
2902 else
2903 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2904 address);
2905
2906 dsd->dsc->pc_adjust = 4;
2907 emit_nop (dsd->insn_buf);
2908 dsd->insn_count = 1;
2909 }
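/* Worked example (illustrative addresses): for an ADRP at 0x400120
   whose decoded offset is 0x1000, ADDRESS is 0x401120 and the value
   written to the destination register is 0x401120 & ~0xfff == 0x401000,
   exactly what the instruction would have produced in place.  */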
2910
2911 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
2912
2913 static void
2914 aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
2915 const unsigned rt, const int is64,
2916 struct aarch64_insn_data *data)
2917 {
2918 struct aarch64_displaced_step_data *dsd
2919 = (struct aarch64_displaced_step_data *) data;
2920 CORE_ADDR address = data->insn_addr + offset;
2921 struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };
2922
2923 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
2924 address);
2925
2926 if (is_sw)
2927 dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
2928 aarch64_register (rt, 1), zero);
2929 else
2930 dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
2931 aarch64_register (rt, 1), zero);
2932
2933 dsd->dsc->pc_adjust = 4;
2934 }
2935
2936 /* Implementation of aarch64_insn_visitor method "others". */
2937
2938 static void
2939 aarch64_displaced_step_others (const uint32_t insn,
2940 struct aarch64_insn_data *data)
2941 {
2942 struct aarch64_displaced_step_data *dsd
2943 = (struct aarch64_displaced_step_data *) data;
2944
2945 aarch64_emit_insn (dsd->insn_buf, insn);
2946 dsd->insn_count = 1;
2947
2948 if ((insn & 0xfffffc1f) == 0xd65f0000)
2949 {
2950 /* RET */
2951 dsd->dsc->pc_adjust = 0;
2952 }
2953 else
2954 dsd->dsc->pc_adjust = 4;
2955 }
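/* The mask 0xfffffc1f keeps every bit of the RET encoding except the
   five Rn bits (bits 5-9), so a RET through any register matches.  RET
   computes its own new PC, hence pc_adjust is 0; everything else falls
   through and needs the usual +4 adjustment.  */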
2956
2957 static const struct aarch64_insn_visitor visitor =
2958 {
2959 aarch64_displaced_step_b,
2960 aarch64_displaced_step_b_cond,
2961 aarch64_displaced_step_cb,
2962 aarch64_displaced_step_tb,
2963 aarch64_displaced_step_adr,
2964 aarch64_displaced_step_ldr_literal,
2965 aarch64_displaced_step_others,
2966 };
2967
2968 /* Implement the "displaced_step_copy_insn" gdbarch method. */
2969
2970 struct displaced_step_closure *
2971 aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
2972 CORE_ADDR from, CORE_ADDR to,
2973 struct regcache *regs)
2974 {
2975 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2976 uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
2977 struct aarch64_displaced_step_data dsd;
2978 aarch64_inst inst;
2979
2980 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2981 return NULL;
2982
2983 /* Look for a Load Exclusive instruction which begins the sequence. */
2984 if (inst.opcode->iclass == ldstexcl && bit (insn, 22))
2985 {
2986 /* We can't displaced-step atomic sequences. */
2987 return NULL;
2988 }
2989
2990 std::unique_ptr<aarch64_displaced_step_closure> dsc
2991 (new aarch64_displaced_step_closure);
2992 dsd.base.insn_addr = from;
2993 dsd.new_addr = to;
2994 dsd.regs = regs;
2995 dsd.dsc = dsc.get ();
2996 dsd.insn_count = 0;
2997 aarch64_relocate_instruction (insn, &visitor,
2998 (struct aarch64_insn_data *) &dsd);
2999 gdb_assert (dsd.insn_count <= DISPLACED_MODIFIED_INSNS);
3000
3001 if (dsd.insn_count != 0)
3002 {
3003 int i;
3004
3005 /* Instruction can be relocated to scratch pad. Copy
3006 relocated instruction(s) there. */
3007 for (i = 0; i < dsd.insn_count; i++)
3008 {
3009 if (debug_displaced)
3010 {
3011 debug_printf ("displaced: writing insn ");
3012 debug_printf ("%.8x", dsd.insn_buf[i]);
3013 debug_printf (" at %s\n", paddress (gdbarch, to + i * 4));
3014 }
3015 write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
3016 (ULONGEST) dsd.insn_buf[i]);
3017 }
3018 }
3019 else
3020 {
3021 dsc = NULL;
3022 }
3023
3024 return dsc.release ();
3025 }
3026
3027 /* Implement the "displaced_step_fixup" gdbarch method. */
3028
3029 void
3030 aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
3031 struct displaced_step_closure *dsc_,
3032 CORE_ADDR from, CORE_ADDR to,
3033 struct regcache *regs)
3034 {
3035 aarch64_displaced_step_closure *dsc = (aarch64_displaced_step_closure *) dsc_;
3036
3037 if (dsc->cond)
3038 {
3039 ULONGEST pc;
3040
3041 regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc);
3042 if (pc - to == 8)
3043 {
3044 /* Condition is true. */
3045 }
3046 else if (pc - to == 4)
3047 {
3048 /* Condition is false. */
3049 dsc->pc_adjust = 4;
3050 }
3051 else
3052 gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
3053 }
3054
3055 if (dsc->pc_adjust != 0)
3056 {
3057 if (debug_displaced)
3058 {
3059 debug_printf ("displaced: fixup: set PC to %s:%d\n",
3060 paddress (gdbarch, from), dsc->pc_adjust);
3061 }
3062 regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
3063 from + dsc->pc_adjust);
3064 }
3065 }
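/* Example of the conditional fixup above for a displaced B.COND: the
   scratch copy is "B.COND +8" at TO, so after the step the PC is either
   TO + 8 (condition true, branch taken) or TO + 4 (condition false,
   fell through), and pc_adjust is then applied relative to FROM.  */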
3066
3067 /* Implement the "displaced_step_hw_singlestep" gdbarch method. */
3068
3069 int
3070 aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch,
3071 struct displaced_step_closure *closure)
3072 {
3073 return 1;
3074 }
3075
3076 /* Get the correct target description for the given VQ value.
3077 If VQ is zero then it is assumed SVE is not supported.
3078 (It is not possible to set VQ to zero on an SVE system). */
3079
3080 const target_desc *
3081 aarch64_read_description (uint64_t vq, bool pauth_p)
3082 {
3083 if (vq > AARCH64_MAX_SVE_VQ)
3084 error (_("VQ is %" PRIu64 ", maximum supported value is %d"), vq,
3085 AARCH64_MAX_SVE_VQ);
3086
3087 struct target_desc *tdesc = tdesc_aarch64_list[vq][pauth_p];
3088
3089 if (tdesc == NULL)
3090 {
3091 tdesc = aarch64_create_target_description (vq, pauth_p);
3092 tdesc_aarch64_list[vq][pauth_p] = tdesc;
3093 }
3094
3095 return tdesc;
3096 }
3097
3098 /* Return the VQ used when creating the target description TDESC. */
3099
3100 static uint64_t
3101 aarch64_get_tdesc_vq (const struct target_desc *tdesc)
3102 {
3103 const struct tdesc_feature *feature_sve;
3104
3105 if (!tdesc_has_registers (tdesc))
3106 return 0;
3107
3108 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
3109
3110 if (feature_sve == nullptr)
3111 return 0;
3112
3113 uint64_t vl = tdesc_register_bitsize (feature_sve,
3114 aarch64_sve_register_names[0]) / 8;
3115 return sve_vq_from_vl (vl);
3116 }
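/* Example: a tdesc reporting z0 as 2048 bits wide gives VL == 2048 / 8
   == 256 bytes, and sve_vq_from_vl then yields 256 / 16 == 16 quadword
   units, i.e. AARCH64_MAX_SVE_VQ.  */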
3117
3118 /* Add all the expected register sets into GDBARCH. */
3119
3120 static void
3121 aarch64_add_reggroups (struct gdbarch *gdbarch)
3122 {
3123 reggroup_add (gdbarch, general_reggroup);
3124 reggroup_add (gdbarch, float_reggroup);
3125 reggroup_add (gdbarch, system_reggroup);
3126 reggroup_add (gdbarch, vector_reggroup);
3127 reggroup_add (gdbarch, all_reggroup);
3128 reggroup_add (gdbarch, save_reggroup);
3129 reggroup_add (gdbarch, restore_reggroup);
3130 }
3131
3132 /* Implement the "cannot_store_register" gdbarch method. */
3133
3134 static int
3135 aarch64_cannot_store_register (struct gdbarch *gdbarch, int regnum)
3136 {
3137 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3138
3139 if (!tdep->has_pauth ())
3140 return 0;
3141
3142 /* Pointer authentication registers are read-only. */
3143 return (regnum == AARCH64_PAUTH_DMASK_REGNUM (tdep->pauth_reg_base)
3144 || regnum == AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base));
3145 }
3146
3147 /* Initialize the current architecture based on INFO. If possible,
3148 re-use an architecture from ARCHES, which is a list of
3149 architectures already created during this debugging session.
3150
3151 Called e.g. at program startup, when reading a core file, and when
3152 reading a binary file. */
3153
3154 static struct gdbarch *
3155 aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
3156 {
3157 const struct tdesc_feature *feature_core, *feature_fpu, *feature_sve;
3158 const struct tdesc_feature *feature_pauth;
3159 bool valid_p = true;
3160 int i, num_regs = 0, num_pseudo_regs = 0;
3161 int first_pauth_regnum = -1, pauth_ra_state_offset = -1;
3162
3163 /* Use the vector length passed via the target info. Here -1 is used for no
3164 SVE, and 0 is unset. If unset then use the vector length from the existing
3165 tdesc. */
3166 uint64_t vq = 0;
3167 if (info.id == (int *) -1)
3168 vq = 0;
3169 else if (info.id != 0)
3170 vq = (uint64_t) info.id;
3171 else
3172 vq = aarch64_get_tdesc_vq (info.target_desc);
3173
3174 if (vq > AARCH64_MAX_SVE_VQ)
3175 internal_error (__FILE__, __LINE__, _("VQ out of bounds: %s (max %d)"),
3176 pulongest (vq), AARCH64_MAX_SVE_VQ);
3177
3178 /* If there is already a candidate, use it. */
3179 for (gdbarch_list *best_arch = gdbarch_list_lookup_by_info (arches, &info);
3180 best_arch != nullptr;
3181 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
3182 {
3183 struct gdbarch_tdep *tdep = gdbarch_tdep (best_arch->gdbarch);
3184 if (tdep && tdep->vq == vq)
3185 return best_arch->gdbarch;
3186 }
3187
3188 /* Ensure we always have a target descriptor, and that it is for the given VQ
3189 value. */
3190 const struct target_desc *tdesc = info.target_desc;
3191 if (!tdesc_has_registers (tdesc) || vq != aarch64_get_tdesc_vq (tdesc))
3192 tdesc = aarch64_read_description (vq, false);
3193 gdb_assert (tdesc);
3194
3195 feature_core = tdesc_find_feature (tdesc,"org.gnu.gdb.aarch64.core");
3196 feature_fpu = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
3197 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
3198 feature_pauth = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.pauth");
3199
3200 if (feature_core == nullptr)
3201 return nullptr;
3202
3203 struct tdesc_arch_data *tdesc_data = tdesc_data_alloc ();
3204
3205 /* Validate the description provides the mandatory core R registers
3206 and allocate their numbers. */
3207 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
3208 valid_p &= tdesc_numbered_register (feature_core, tdesc_data,
3209 AARCH64_X0_REGNUM + i,
3210 aarch64_r_register_names[i]);
3211
3212 num_regs = AARCH64_X0_REGNUM + i;
3213
3214 /* Add the V registers. */
3215 if (feature_fpu != nullptr)
3216 {
3217 if (feature_sve != nullptr)
3218 error (_("Program contains both fpu and SVE features."));
3219
3220 /* Validate the description provides the mandatory V registers
3221 and allocate their numbers. */
3222 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
3223 valid_p &= tdesc_numbered_register (feature_fpu, tdesc_data,
3224 AARCH64_V0_REGNUM + i,
3225 aarch64_v_register_names[i]);
3226
3227 num_regs = AARCH64_V0_REGNUM + i;
3228 }
3229
3230 /* Add the SVE registers. */
3231 if (feature_sve != nullptr)
3232 {
3233 /* Validate the description provides the mandatory SVE registers
3234 and allocate their numbers. */
3235 for (i = 0; i < ARRAY_SIZE (aarch64_sve_register_names); i++)
3236 valid_p &= tdesc_numbered_register (feature_sve, tdesc_data,
3237 AARCH64_SVE_Z0_REGNUM + i,
3238 aarch64_sve_register_names[i]);
3239
3240 num_regs = AARCH64_SVE_Z0_REGNUM + i;
3241 num_pseudo_regs += 32; /* Add the Vn register pseudos. */
3242 }
3243
3244 if (feature_fpu != nullptr || feature_sve != nullptr)
3245 {
3246 num_pseudo_regs += 32; /* Add the Qn scalar register pseudos. */
3247 num_pseudo_regs += 32; /* Add the Dn scalar register pseudos. */
3248 num_pseudo_regs += 32; /* Add the Sn scalar register pseudos. */
3249 num_pseudo_regs += 32; /* Add the Hn scalar register pseudos. */
3250 num_pseudo_regs += 32; /* Add the Bn scalar register pseudos. */
3251 }
3252
3253 /* Add the pauth registers. */
3254 if (feature_pauth != NULL)
3255 {
3256 first_pauth_regnum = num_regs;
3257 pauth_ra_state_offset = num_pseudo_regs;
3258 /* Validate the description provides the mandatory PAUTH registers and
3259 allocate their numbers. */
3260 for (i = 0; i < ARRAY_SIZE (aarch64_pauth_register_names); i++)
3261 valid_p &= tdesc_numbered_register (feature_pauth, tdesc_data,
3262 first_pauth_regnum + i,
3263 aarch64_pauth_register_names[i]);
3264
3265 num_regs += i;
3266 num_pseudo_regs += 1; /* Count RA_STATE pseudo register. */
3267 }
3268
3269 if (!valid_p)
3270 {
3271 tdesc_data_cleanup (tdesc_data);
3272 return nullptr;
3273 }
3274
3275 /* AArch64 code is always little-endian. */
3276 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
3277
3278 struct gdbarch_tdep *tdep = XCNEW (struct gdbarch_tdep);
3279 struct gdbarch *gdbarch = gdbarch_alloc (&info, tdep);
3280
3281 /* This should be low enough for everything. */
3282 tdep->lowest_pc = 0x20;
3283 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
3284 tdep->jb_elt_size = 8;
3285 tdep->vq = vq;
3286 tdep->pauth_reg_base = first_pauth_regnum;
3287 tdep->pauth_ra_state_regnum = (feature_pauth == NULL) ? -1
3288 : pauth_ra_state_offset + num_regs;
3289
3290 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
3291 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
3292
3293 /* Advance PC across function entry code. */
3294 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
3295
3296 /* The stack grows downward. */
3297 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
3298
3299 /* Breakpoint manipulation. */
3300 set_gdbarch_breakpoint_kind_from_pc (gdbarch,
3301 aarch64_breakpoint::kind_from_pc);
3302 set_gdbarch_sw_breakpoint_from_kind (gdbarch,
3303 aarch64_breakpoint::bp_from_kind);
3304 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
3305 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
3306
3307 /* Information about registers, etc. */
3308 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
3309 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
3310 set_gdbarch_num_regs (gdbarch, num_regs);
3311
3312 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
3313 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
3314 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
3315 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
3316 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
3317 set_tdesc_pseudo_register_reggroup_p (gdbarch,
3318 aarch64_pseudo_register_reggroup_p);
3319 set_gdbarch_cannot_store_register (gdbarch, aarch64_cannot_store_register);
3320
3321 /* ABI */
3322 set_gdbarch_short_bit (gdbarch, 16);
3323 set_gdbarch_int_bit (gdbarch, 32);
3324 set_gdbarch_float_bit (gdbarch, 32);
3325 set_gdbarch_double_bit (gdbarch, 64);
3326 set_gdbarch_long_double_bit (gdbarch, 128);
3327 set_gdbarch_long_bit (gdbarch, 64);
3328 set_gdbarch_long_long_bit (gdbarch, 64);
3329 set_gdbarch_ptr_bit (gdbarch, 64);
3330 set_gdbarch_char_signed (gdbarch, 0);
3331 set_gdbarch_wchar_signed (gdbarch, 0);
3332 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
3333 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
3334 set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);
3335 set_gdbarch_type_align (gdbarch, aarch64_type_align);
3336
3337 /* Internal <-> external register number maps. */
3338 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
3339
3340 /* Returning results. */
3341 set_gdbarch_return_value (gdbarch, aarch64_return_value);
3342
3343 /* Disassembly. */
3344 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
3345
3346 /* Virtual tables. */
3347 set_gdbarch_vbit_in_delta (gdbarch, 1);
3348
3349 /* Register architecture. */
3350 aarch64_add_reggroups (gdbarch);
3351
3352 /* Hook in the ABI-specific overrides, if they have been registered. */
3353 info.target_desc = tdesc;
3354 info.tdesc_data = tdesc_data;
3355 gdbarch_init_osabi (info, gdbarch);
3356
3357 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
3358 /* Register DWARF CFA vendor handler. */
3359 set_gdbarch_execute_dwarf_cfa_vendor_op (gdbarch,
3360 aarch64_execute_dwarf_cfa_vendor_op);
3361
3362 /* Add some default predicates. */
3363 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
3364 dwarf2_append_unwinders (gdbarch);
3365 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
3366
3367 frame_base_set_default (gdbarch, &aarch64_normal_base);
3368
3369 /* Now we have tuned the configuration, set a few final things,
3370 based on what the OS ABI has told us. */
3371
3372 if (tdep->jb_pc >= 0)
3373 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
3374
3375 set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);
3376
3377 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
3378
3379 /* Add standard register aliases. */
3380 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
3381 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
3382 value_of_aarch64_user_reg,
3383 &aarch64_register_aliases[i].regnum);
3384
3385 register_aarch64_ravenscar_ops (gdbarch);
3386
3387 return gdbarch;
3388 }
3389
3390 static void
3391 aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
3392 {
3393 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3394
3395 if (tdep == NULL)
3396 return;
3397
3398 fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
3399 paddress (gdbarch, tdep->lowest_pc));
3400 }
3401
3402 #if GDB_SELF_TEST
3403 namespace selftests
3404 {
3405 static void aarch64_process_record_test (void);
3406 }
3407 #endif
3408
3409 void
3410 _initialize_aarch64_tdep (void)
3411 {
3412 gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
3413 aarch64_dump_tdep);
3414
3415 /* Debug this file's internals. */
3416 add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
3417 Set AArch64 debugging."), _("\
3418 Show AArch64 debugging."), _("\
3419 When on, AArch64 specific debugging is enabled."),
3420 NULL,
3421 show_aarch64_debug,
3422 &setdebuglist, &showdebuglist);
3423
3424 #if GDB_SELF_TEST
3425 selftests::register_test ("aarch64-analyze-prologue",
3426 selftests::aarch64_analyze_prologue_test);
3427 selftests::register_test ("aarch64-process-record",
3428 selftests::aarch64_process_record_test);
3429 selftests::record_xml_tdesc ("aarch64.xml",
3430 aarch64_create_target_description (0, false));
3431 #endif
3432 }
3433
3434 /* AArch64 process record-replay related structures, defines etc. */
3435
3436 #define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
3437 do \
3438 { \
3439 unsigned int reg_len = LENGTH; \
3440 if (reg_len) \
3441 { \
3442 REGS = XNEWVEC (uint32_t, reg_len); \
3443 memcpy (&REGS[0], &RECORD_BUF[0], sizeof (uint32_t) * LENGTH); \
3444 } \
3445 } \
3446 while (0)
3447
3448 #define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
3449 do \
3450 { \
3451 unsigned int mem_len = LENGTH; \
3452 if (mem_len) \
3453 { \
3454 MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
3455 memcpy (&MEMS->len, &RECORD_BUF[0], \
3456 sizeof (struct aarch64_mem_r) * LENGTH); \
3457 } \
3458 } \
3459 while (0)
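/* Typical usage, as in the record handlers below, where record_buf is a
   local scratch array and reg_rec_count the number of entries filled in:

     REG_ALLOC (aarch64_insn_r->aarch64_regs,
                aarch64_insn_r->reg_rec_count, record_buf);  */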
3460
3461 /* AArch64 record/replay structures and enumerations. */
3462
3463 struct aarch64_mem_r
3464 {
3465 uint64_t len; /* Record length. */
3466 uint64_t addr; /* Memory address. */
3467 };
3468
3469 enum aarch64_record_result
3470 {
3471 AARCH64_RECORD_SUCCESS,
3472 AARCH64_RECORD_UNSUPPORTED,
3473 AARCH64_RECORD_UNKNOWN
3474 };
3475
3476 typedef struct insn_decode_record_t
3477 {
3478 struct gdbarch *gdbarch;
3479 struct regcache *regcache;
3480 CORE_ADDR this_addr; /* Address of insn to be recorded. */
3481 uint32_t aarch64_insn; /* Insn to be recorded. */
3482 uint32_t mem_rec_count; /* Count of memory records. */
3483 uint32_t reg_rec_count; /* Count of register records. */
3484 uint32_t *aarch64_regs; /* Registers to be recorded. */
3485 struct aarch64_mem_r *aarch64_mems; /* Memory locations to be recorded. */
3486 } insn_decode_record;
3487
3488 /* Record handler for data processing - register instructions. */
3489
3490 static unsigned int
3491 aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
3492 {
3493 uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
3494 uint32_t record_buf[4];
3495
3496 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3497 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3498 insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);
3499
3500 if (!bit (aarch64_insn_r->aarch64_insn, 28))
3501 {
3502 uint8_t setflags;
3503
3504 /* Logical (shifted register). */
3505 if (insn_bits24_27 == 0x0a)
3506 setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
3507 /* Add/subtract. */
3508 else if (insn_bits24_27 == 0x0b)
3509 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3510 else
3511 return AARCH64_RECORD_UNKNOWN;
3512
3513 record_buf[0] = reg_rd;
3514 aarch64_insn_r->reg_rec_count = 1;
3515 if (setflags)
3516 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3517 }
3518 else
3519 {
3520 if (insn_bits24_27 == 0x0b)
3521 {
3522 /* Data-processing (3 source). */
3523 record_buf[0] = reg_rd;
3524 aarch64_insn_r->reg_rec_count = 1;
3525 }
3526 else if (insn_bits24_27 == 0x0a)
3527 {
3528 if (insn_bits21_23 == 0x00)
3529 {
3530 /* Add/subtract (with carry). */
3531 record_buf[0] = reg_rd;
3532 aarch64_insn_r->reg_rec_count = 1;
3533 if (bit (aarch64_insn_r->aarch64_insn, 29))
3534 {
3535 record_buf[1] = AARCH64_CPSR_REGNUM;
3536 aarch64_insn_r->reg_rec_count = 2;
3537 }
3538 }
3539 else if (insn_bits21_23 == 0x02)
3540 {
3541 /* Conditional compare (register) and conditional compare
3542 (immediate) instructions. */
3543 record_buf[0] = AARCH64_CPSR_REGNUM;
3544 aarch64_insn_r->reg_rec_count = 1;
3545 }
3546 else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
3547 {
3548 /* Conditional select. */
3549 /* Data-processing (2 source). */
3550 /* Data-processing (1 source). */
3551 record_buf[0] = reg_rd;
3552 aarch64_insn_r->reg_rec_count = 1;
3553 }
3554 else
3555 return AARCH64_RECORD_UNKNOWN;
3556 }
3557 }
3558
3559 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3560 record_buf);
3561 return AARCH64_RECORD_SUCCESS;
3562 }
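/* Concrete example (illustrative): "adds x1, x2, x3" has bits 24-27 ==
   0x0b with bit 29 set, so both the destination x1 and CPSR are
   recorded; plain "add x1, x2, x3" records only x1.  */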
3563
3564 /* Record handler for data processing - immediate instructions. */
3565
3566 static unsigned int
3567 aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
3568 {
3569 uint8_t reg_rd, insn_bit23, insn_bits24_27, setflags;
3570 uint32_t record_buf[4];
3571
3572 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3573 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3574 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3575
3576 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
3577 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
3578 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
3579 {
3580 record_buf[0] = reg_rd;
3581 aarch64_insn_r->reg_rec_count = 1;
3582 }
3583 else if (insn_bits24_27 == 0x01)
3584 {
3585 /* Add/Subtract (immediate). */
3586 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3587 record_buf[0] = reg_rd;
3588 aarch64_insn_r->reg_rec_count = 1;
3589 if (setflags)
3590 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3591 }
3592 else if (insn_bits24_27 == 0x02 && !insn_bit23)
3593 {
3594 /* Logical (immediate). */
3595 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
3596 record_buf[0] = reg_rd;
3597 aarch64_insn_r->reg_rec_count = 1;
3598 if (setflags)
3599 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3600 }
3601 else
3602 return AARCH64_RECORD_UNKNOWN;
3603
3604 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3605 record_buf);
3606 return AARCH64_RECORD_SUCCESS;
3607 }
3608
3609 /* Record handler for branch, exception generation and system instructions. */
3610
3611 static unsigned int
3612 aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
3613 {
3614 struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
3615 uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
3616 uint32_t record_buf[4];
3617
3618 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3619 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3620 insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3621
3622 if (insn_bits28_31 == 0x0d)
3623 {
3624 /* Exception generation instructions. */
3625 if (insn_bits24_27 == 0x04)
3626 {
3627 if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
3628 && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
3629 && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
3630 {
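/* SVC: under the Linux ABI the syscall number is passed in x8, so
   read it and let the OS-specific hook record the syscall's side
   effects.  */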
3631 ULONGEST svc_number;
3632
3633 regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
3634 &svc_number);
3635 return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
3636 svc_number);
3637 }
3638 else
3639 return AARCH64_RECORD_UNSUPPORTED;
3640 }
3641 /* System instructions. */
3642 else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
3643 {
3644 uint32_t reg_rt, reg_crn;
3645
3646 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3647 reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3648
3649 /* Record rt in case of sysl and mrs instructions. */
3650 if (bit (aarch64_insn_r->aarch64_insn, 21))
3651 {
3652 record_buf[0] = reg_rt;
3653 aarch64_insn_r->reg_rec_count = 1;
3654 }
3655 /* Record cpsr for hint and msr (immediate) instructions. */
3656 else if (reg_crn == 0x02 || reg_crn == 0x04)
3657 {
3658 record_buf[0] = AARCH64_CPSR_REGNUM;
3659 aarch64_insn_r->reg_rec_count = 1;
3660 }
3661 }
3662 /* Unconditional branch (register). */
3663 else if ((insn_bits24_27 & 0x0e) == 0x06)
3664 {
3665 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3666 if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
3667 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3668 }
3669 else
3670 return AARCH64_RECORD_UNKNOWN;
3671 }
3672 /* Unconditional branch (immediate). */
3673 else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
3674 {
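/* "b" (bit 31 clear) changes only the PC, while "bl" (bit 31 set)
   also writes the return address to the LR.  */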
3675 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3676 if (bit (aarch64_insn_r->aarch64_insn, 31))
3677 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3678 }
3679 else
3680 /* Compare & branch (immediate), Test & branch (immediate) and
3681 Conditional branch (immediate). */
3682 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3683
3684 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3685 record_buf);
3686 return AARCH64_RECORD_SUCCESS;
3687 }
3688
3689 /* Record handler for advanced SIMD load and store instructions. */
3690
3691 static unsigned int
3692 aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
3693 {
3694 CORE_ADDR address;
3695 uint64_t addr_offset = 0;
3696 uint32_t record_buf[24];
3697 uint64_t record_buf_mem[24];
3698 uint32_t reg_rn, reg_rt;
3699 uint32_t reg_index = 0, mem_index = 0;
3700 uint8_t opcode_bits, size_bits;
3701
3702 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3703 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3704 size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3705 opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3706 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);
3707
3708 if (record_debug)
3709 debug_printf ("Process record: Advanced SIMD load/store\n");
3710
3711 /* Load/store single structure. */
3712 if (bit (aarch64_insn_r->aarch64_insn, 24))
3713 {
3714 uint8_t sindex, scale, selem, esize, replicate = 0;
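/* Following the ARM ARM pseudocode for load/store single structure:
   scale (opcode<2:1>) is the log2 of the element size, and selem
   (opcode<0>:R plus one) is the number of structure elements
   transferred per lane.  */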
3715 scale = opcode_bits >> 2;
3716 selem = ((opcode_bits & 0x02)
3717 | bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
3718 switch (scale)
3719 {
3720 case 1:
3721 if (size_bits & 0x01)
3722 return AARCH64_RECORD_UNKNOWN;
3723 break;
3724 case 2:
3725 if ((size_bits >> 1) & 0x01)
3726 return AARCH64_RECORD_UNKNOWN;
3727 if (size_bits & 0x01)
3728 {
3729 if (!((opcode_bits >> 1) & 0x01))
3730 scale = 3;
3731 else
3732 return AARCH64_RECORD_UNKNOWN;
3733 }
3734 break;
3735 case 3:
3736 if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
3737 {
3738 scale = size_bits;
3739 replicate = 1;
3740 break;
3741 }
3742 else
3743 return AARCH64_RECORD_UNKNOWN;
3744 default:
3745 break;
3746 }
3747 esize = 8 << scale;
3748 if (replicate)
3749 for (sindex = 0; sindex < selem; sindex++)
3750 {
3751 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3752 reg_rt = (reg_rt + 1) % 32;
3753 }
3754 else
3755 {
3756 for (sindex = 0; sindex < selem; sindex++)
3757 {
3758 if (bit (aarch64_insn_r->aarch64_insn, 22))
3759 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3760 else
3761 {
3762 record_buf_mem[mem_index++] = esize / 8;
3763 record_buf_mem[mem_index++] = address + addr_offset;
3764 }
3765 addr_offset = addr_offset + (esize / 8);
3766 reg_rt = (reg_rt + 1) % 32;
3767 }
3768 }
3769 }
3770 /* Load/store multiple structure. */
3771 else
3772 {
3773 uint8_t selem, esize, rpt, elements;
3774 uint8_t eindex, rindex;
3775
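/* esize is the element size in bits; the Q bit (30) selects a
   128-bit or 64-bit vector and hence the element count per
   register.  */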
3776 esize = 8 << size_bits;
3777 if (bit (aarch64_insn_r->aarch64_insn, 30))
3778 elements = 128 / esize;
3779 else
3780 elements = 64 / esize;
3781
3782 switch (opcode_bits)
3783 {
3784 /* LD/ST4 (4 registers). */
3785 case 0:
3786 rpt = 1;
3787 selem = 4;
3788 break;
3789 /* LD/ST1 (4 registers). */
3790 case 2:
3791 rpt = 4;
3792 selem = 1;
3793 break;
3794 /* LD/ST3 (3 registers). */
3795 case 4:
3796 rpt = 1;
3797 selem = 3;
3798 break;
3799 /* LD/ST1 (3 registers). */
3800 case 6:
3801 rpt = 3;
3802 selem = 1;
3803 break;
3804 /* LD/ST1 (1 register). */
3805 case 7:
3806 rpt = 1;
3807 selem = 1;
3808 break;
3809 /* LD/ST2 (2 registers). */
3810 case 8:
3811 rpt = 1;
3812 selem = 2;
3813 break;
3814 /* LD/ST1 (2 registers). */
3815 case 10:
3816 rpt = 2;
3817 selem = 1;
3818 break;
3819 default:
3820 return AARCH64_RECORD_UNSUPPORTED;
3822 }
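/* Transfer rpt repetitions of selem structure elements; register
   numbering wraps around from v31 back to v0.  */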
3823 for (rindex = 0; rindex < rpt; rindex++)
3824 for (eindex = 0; eindex < elements; eindex++)
3825 {
3826 uint8_t reg_tt, sindex;
3827 reg_tt = (reg_rt + rindex) % 32;
3828 for (sindex = 0; sindex < selem; sindex++)
3829 {
3830 if (bit (aarch64_insn_r->aarch64_insn, 22))
3831 record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
3832 else
3833 {
3834 record_buf_mem[mem_index++] = esize / 8;
3835 record_buf_mem[mem_index++] = address + addr_offset;
3836 }
3837 addr_offset = addr_offset + (esize / 8);
3838 reg_tt = (reg_tt + 1) % 32;
3839 }
3840 }
3841 }
3842
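/* The post-indexed forms (bit 23 set) also write back the base
   register.  */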
3843 if (bit (aarch64_insn_r->aarch64_insn, 23))
3844 record_buf[reg_index++] = reg_rn;
3845
3846 aarch64_insn_r->reg_rec_count = reg_index;
3847 aarch64_insn_r->mem_rec_count = mem_index / 2;
3848 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3849 record_buf_mem);
3850 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3851 record_buf);
3852 return AARCH64_RECORD_SUCCESS;
3853 }
3854
3855 /* Record handler for load and store instructions. */
3856
3857 static unsigned int
3858 aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
3859 {
3860 uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
3861 uint8_t insn_bit23, insn_bit21;
3862 uint8_t opc, size_bits, ld_flag, vector_flag;
3863 uint32_t reg_rn, reg_rt, reg_rt2;
3864 uint64_t datasize, offset;
3865 uint32_t record_buf[8];
3866 uint64_t record_buf_mem[8];
3867 CORE_ADDR address;
3868
3869 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3870 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3871 insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
3872 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3873 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3874 ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
3875 vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
3876 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3877 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3878 reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
3879 size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);
3880
3881 /* Load/store exclusive. */
3882 if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
3883 {
3884 if (record_debug)
3885 debug_printf ("Process record: load/store exclusive\n");
3886
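/* L (bit 22) selects the load forms and bit 21 the pair variants;
   o2 (bit 23) clear marks the exclusive forms, which additionally
   write a status result to Rs.  */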
3887 if (ld_flag)
3888 {
3889 record_buf[0] = reg_rt;
3890 aarch64_insn_r->reg_rec_count = 1;
3891 if (insn_bit21)
3892 {
3893 record_buf[1] = reg_rt2;
3894 aarch64_insn_r->reg_rec_count = 2;
3895 }
3896 }
3897 else
3898 {
3899 if (insn_bit21)
3900 datasize = (8 << size_bits) * 2;
3901 else
3902 datasize = (8 << size_bits);
3903 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3904 &address);
3905 record_buf_mem[0] = datasize / 8;
3906 record_buf_mem[1] = address;
3907 aarch64_insn_r->mem_rec_count = 1;
3908 if (!insn_bit23)
3909 {
3910 /* Save register rs. */
3911 record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
3912 aarch64_insn_r->reg_rec_count = 1;
3913 }
3914 }
3915 }
3916 /* Decode load register (literal) instructions. */
3917 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
3918 {
3919 if (record_debug)
3920 debug_printf ("Process record: load register (literal)\n");
3921 if (vector_flag)
3922 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3923 else
3924 record_buf[0] = reg_rt;
3925 aarch64_insn_r->reg_rec_count = 1;
3926 }
3927 /* Decode all types of load/store pair instructions. */
3928 else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
3929 {
3930 if (record_debug)
3931 debug_printf ("Process record: load/store pair\n");
3932
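/* Bit 23 set selects the pre/post-indexed variants, which also write
   back the base register (recorded at the end of this block).  */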
3933 if (ld_flag)
3934 {
3935 if (vector_flag)
3936 {
3937 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3938 record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
3939 }
3940 else
3941 {
3942 record_buf[0] = reg_rt;
3943 record_buf[1] = reg_rt2;
3944 }
3945 aarch64_insn_r->reg_rec_count = 2;
3946 }
3947 else
3948 {
3949 uint16_t imm7_off;
3950 imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
3951 if (!vector_flag)
3952 size_bits = size_bits >> 1;
3953 datasize = 8 << (2 + size_bits);
3954 offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
3955 offset = offset << (2 + size_bits);
3956 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3957 &address);
3958 if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
3959 {
3960 if (imm7_off & 0x40)
3961 address = address - offset;
3962 else
3963 address = address + offset;
3964 }
3965
3966 record_buf_mem[0] = datasize / 8;
3967 record_buf_mem[1] = address;
3968 record_buf_mem[2] = datasize / 8;
3969 record_buf_mem[3] = address + (datasize / 8);
3970 aarch64_insn_r->mem_rec_count = 2;
3971 }
3972 if (bit (aarch64_insn_r->aarch64_insn, 23))
3973 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3974 }
3975 /* Load/store register (unsigned immediate) instructions. */
3976 else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
3977 {
3978 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3979 if (!(opc >> 1))
3980 {
3981 if (opc & 0x01)
3982 ld_flag = 0x01;
3983 else
3984 ld_flag = 0x0;
3985 }
3986 else
3987 {
3988 if (size_bits == 0x3 && vector_flag == 0x0 && opc == 0x2)
3989 {
3990 /* PRFM (immediate) */
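/* A prefetch is only a hint and changes no architectural state,
   so there is nothing to record.  */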
3991 return AARCH64_RECORD_SUCCESS;
3992 }
3993 else if (size_bits == 0x2 && vector_flag == 0x0 && opc == 0x2)
3994 {
3995 /* LDRSW (immediate) */
3996 ld_flag = 0x1;
3997 }
3998 else
3999 {
4000 if (opc & 0x01)
4001 ld_flag = 0x01;
4002 else
4003 ld_flag = 0x0;
4004 }
4005 }
4006
4007 if (record_debug)
4008 {
4009 debug_printf ("Process record: load/store (unsigned immediate):"
4010 " size %x V %d opc %x\n", size_bits, vector_flag,
4011 opc);
4012 }
4013
4014 if (!ld_flag)
4015 {
4016 offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
4017 datasize = 8 << size_bits;
4018 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4019 &address);
4020 offset = offset << size_bits;
4021 address = address + offset;
4022
4023 record_buf_mem[0] = datasize >> 3;
4024 record_buf_mem[1] = address;
4025 aarch64_insn_r->mem_rec_count = 1;
4026 }
4027 else
4028 {
4029 if (vector_flag)
4030 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4031 else
4032 record_buf[0] = reg_rt;
4033 aarch64_insn_r->reg_rec_count = 1;
4034 }
4035 }
4036 /* Load/store register (register offset) instructions. */
4037 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
4038 && insn_bits10_11 == 0x02 && insn_bit21)
4039 {
4040 if (record_debug)
4041 debug_printf ("Process record: load/store (register offset)\n");
4042 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
4043 if (!(opc >> 1))
4044 if (opc & 0x01)
4045 ld_flag = 0x01;
4046 else
4047 ld_flag = 0x0;
4048 else if (size_bits != 0x03)
4050 ld_flag = 0x01;
4051 else
4052 return AARCH64_RECORD_UNKNOWN;
4053
4054 if (!ld_flag)
4055 {
4056 ULONGEST reg_rm_val;
4057
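/* The S bit (12) scales the offset register by the transfer
   size.  */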
4058 regcache_raw_read_unsigned (aarch64_insn_r->regcache,
4059 bits (aarch64_insn_r->aarch64_insn, 16, 20), &reg_rm_val);
4060 if (bit (aarch64_insn_r->aarch64_insn, 12))
4061 offset = reg_rm_val << size_bits;
4062 else
4063 offset = reg_rm_val;
4064 datasize = 8 << size_bits;
4065 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4066 &address);
4067 address = address + offset;
4068 record_buf_mem[0] = datasize >> 3;
4069 record_buf_mem[1] = address;
4070 aarch64_insn_r->mem_rec_count = 1;
4071 }
4072 else
4073 {
4074 if (vector_flag)
4075 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4076 else
4077 record_buf[0] = reg_rt;
4078 aarch64_insn_r->reg_rec_count = 1;
4079 }
4080 }
4081 /* Load/store register (immediate and unprivileged) instructions. */
4082 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
4083 && !insn_bit21)
4084 {
4085 if (record_debug)
4086 {
4087 debug_printf ("Process record: load/store "
4088 "(immediate and unprivileged)\n");
4089 }
4090 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
4091 if (!(opc >> 1))
4092 if (opc & 0x01)
4093 ld_flag = 0x01;
4094 else
4095 ld_flag = 0x0;
4096 else if (size_bits != 0x03)
4098 ld_flag = 0x01;
4099 else
4100 return AARCH64_RECORD_UNKNOWN;
4101
4102 if (!ld_flag)
4103 {
4104 uint16_t imm9_off;
4105 imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
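/* imm9 is a signed 9-bit offset; compute its two's-complement
   magnitude here and apply the sign below.  */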
4106 offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
4107 datasize = 8 << size_bits;
4108 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4109 &address);
4110 if (insn_bits10_11 != 0x01)
4111 {
4112 if (imm9_off & 0x0100)
4113 address = address - offset;
4114 else
4115 address = address + offset;
4116 }
4117 record_buf_mem[0] = datasize >> 3;
4118 record_buf_mem[1] = address;
4119 aarch64_insn_r->mem_rec_count = 1;
4120 }
4121 else
4122 {
4123 if (vector_flag)
4124 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4125 else
4126 record_buf[0] = reg_rt;
4127 aarch64_insn_r->reg_rec_count = 1;
4128 }
4129 if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
4130 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
4131 }
4132 /* Advanced SIMD load/store instructions. */
4133 else
4134 return aarch64_record_asimd_load_store (aarch64_insn_r);
4135
4136 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
4137 record_buf_mem);
4138 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4139 record_buf);
4140 return AARCH64_RECORD_SUCCESS;
4141 }
4142
4143 /* Record handler for data processing SIMD and floating point instructions. */
4144
4145 static unsigned int
4146 aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
4147 {
4148 uint8_t insn_bit21, opcode, rmode, reg_rd;
4149 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
4150 uint8_t insn_bits11_14;
4151 uint32_t record_buf[2];
4152
4153 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
4154 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
4155 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
4156 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
4157 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
4158 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
4159 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
4160 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
4161 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
4162
4163 if (record_debug)
4164 debug_printf ("Process record: data processing SIMD/FP: ");
4165
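/* Bit 28 set and bit 30 clear, with bits 24-27 == 0xe, selects the
   scalar floating-point groups: FP/fixed-point and FP/integer
   conversions, conditional compare, data processing, immediates
   and compares.  */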
4166 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
4167 {
4168 /* Floating point - fixed point conversion instructions. */
4169 if (!insn_bit21)
4170 {
4171 if (record_debug)
4172 debug_printf ("FP - fixed point conversion");
4173
4174 if ((opcode >> 1) == 0x0 && rmode == 0x03)
4175 record_buf[0] = reg_rd;
4176 else
4177 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4178 }
4179 /* Floating point - conditional compare instructions. */
4180 else if (insn_bits10_11 == 0x01)
4181 {
4182 if (record_debug)
4183 debug_printf ("FP - conditional compare");
4184
4185 record_buf[0] = AARCH64_CPSR_REGNUM;
4186 }
4187 /* Floating point - data processing (2-source) and
4188 conditional select instructions. */
4189 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
4190 {
4191 if (record_debug)
4192 debug_printf ("FP - DP (2-source)");
4193
4194 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4195 }
4196 else if (insn_bits10_11 == 0x00)
4197 {
4198 /* Floating point - immediate instructions. */
4199 if ((insn_bits12_15 & 0x01) == 0x01
4200 || (insn_bits12_15 & 0x07) == 0x04)
4201 {
4202 if (record_debug)
4203 debug_printf ("FP - immediate");
4204 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4205 }
4206 /* Floating point - compare instructions. */
4207 else if ((insn_bits12_15 & 0x03) == 0x02)
4208 {
4209 if (record_debug)
4210 debug_printf ("FP - compare");
4211 record_buf[0] = AARCH64_CPSR_REGNUM;
4212 }
4213 /* Floating point - integer conversions instructions. */
4214 else if (insn_bits12_15 == 0x00)
4215 {
4216 /* Convert float to integer instruction. */
4217 if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
4218 {
4219 if (record_debug)
4220 debug_printf ("float to int conversion");
4221
4222 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4223 }
4224 /* Convert integer to float instruction. */
4225 else if ((opcode >> 1) == 0x01 && !rmode)
4226 {
4227 if (record_debug)
4228 debug_printf ("int to float conversion");
4229
4230 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4231 }
4232 /* Move float to integer instruction. */
4233 else if ((opcode >> 1) == 0x03)
4234 {
4235 if (record_debug)
4236 debug_printf ("move float to int");
4237
4238 if (!(opcode & 0x01))
4239 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4240 else
4241 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4242 }
4243 else
4244 return AARCH64_RECORD_UNKNOWN;
4245 }
4246 else
4247 return AARCH64_RECORD_UNKNOWN;
4248 }
4249 else
4250 return AARCH64_RECORD_UNKNOWN;
4251 }
4252 else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
4253 {
4254 if (record_debug)
4255 debug_printf ("SIMD copy");
4256
4257 /* Advanced SIMD copy instructions. */
4258 if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
4259 && !bit (aarch64_insn_r->aarch64_insn, 15)
4260 && bit (aarch64_insn_r->aarch64_insn, 10))
4261 {
4262 if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
4263 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4264 else
4265 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4266 }
4267 else
4268 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4269 }
4270 /* All remaining floating point or advanced SIMD instructions. */
4271 else
4272 {
4273 if (record_debug)
4274 debug_printf ("all remaining");
4275
4276 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4277 }
4278
4279 if (record_debug)
4280 debug_printf ("\n");
4281
4282 aarch64_insn_r->reg_rec_count++;
4283 gdb_assert (aarch64_insn_r->reg_rec_count == 1);
4284 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4285 record_buf);
4286 return AARCH64_RECORD_SUCCESS;
4287 }
4288
4289 /* Decode the instruction type and invoke its record handler. */
4290
4291 static unsigned int
4292 aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
4293 {
4294 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
4295
4296 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
4297 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
4298 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
4299 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
4300
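/* Bits 25-28 form the op0 field of the A64 top-level encoding table:
   100x is data processing (immediate), 101x is branches, exception
   generation and system instructions, x1x0 is loads and stores,
   x101 is data processing (register), and x111 is data processing
   (SIMD and floating point).  */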
4301 /* Data processing - immediate instructions. */
4302 if (!ins_bit26 && !ins_bit27 && ins_bit28)
4303 return aarch64_record_data_proc_imm (aarch64_insn_r);
4304
4305 /* Branch, exception generation and system instructions. */
4306 if (ins_bit26 && !ins_bit27 && ins_bit28)
4307 return aarch64_record_branch_except_sys (aarch64_insn_r);
4308
4309 /* Load and store instructions. */
4310 if (!ins_bit25 && ins_bit27)
4311 return aarch64_record_load_store (aarch64_insn_r);
4312
4313 /* Data processing - register instructions. */
4314 if (ins_bit25 && !ins_bit26 && ins_bit27)
4315 return aarch64_record_data_proc_reg (aarch64_insn_r);
4316
4317 /* Data processing - SIMD and floating point instructions. */
4318 if (ins_bit25 && ins_bit26 && ins_bit27)
4319 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
4320
4321 return AARCH64_RECORD_UNSUPPORTED;
4322 }
4323
4324 /* Cleans up local record registers and memory allocations. */
4325
4326 static void
4327 deallocate_reg_mem (insn_decode_record *record)
4328 {
4329 xfree (record->aarch64_regs);
4330 xfree (record->aarch64_mems);
4331 }
4332
4333 #if GDB_SELF_TEST
4334 namespace selftests {
4335
4336 static void
4337 aarch64_process_record_test (void)
4338 {
4339 struct gdbarch_info info;
4340 uint32_t ret;
4341
4342 gdbarch_info_init (&info);
4343 info.bfd_arch_info = bfd_scan_arch ("aarch64");
4344
4345 struct gdbarch *gdbarch = gdbarch_find_by_info (info);
4346 SELF_CHECK (gdbarch != NULL);
4347
4348 insn_decode_record aarch64_record;
4349
4350 memset (&aarch64_record, 0, sizeof (insn_decode_record));
4351 aarch64_record.regcache = NULL;
4352 aarch64_record.this_addr = 0;
4353 aarch64_record.gdbarch = gdbarch;
4354
4355 /* 20 00 80 f9 prfm pldl1keep, [x1] */
4356 aarch64_record.aarch64_insn = 0xf9800020;
4357 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4358 SELF_CHECK (ret == AARCH64_RECORD_SUCCESS);
4359 SELF_CHECK (aarch64_record.reg_rec_count == 0);
4360 SELF_CHECK (aarch64_record.mem_rec_count == 0);
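/* A natural follow-on check (left as a sketch here, since the decode
   state would need to be reset between instructions): "adds x0, x1, #1"
   (0xb1000420) should yield reg_rec_count == 2, for X0 and the CPSR.  */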
4361
4362 deallocate_reg_mem (&aarch64_record);
4363 }
4364
4365 } // namespace selftests
4366 #endif /* GDB_SELF_TEST */
4367
4368 /* Parse the current instruction, and record in record_arch_list the
4369 values of the registers and memory that the instruction will change.
4370 Return -1 if something is wrong. */
4371
4372 int
4373 aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
4374 CORE_ADDR insn_addr)
4375 {
4376 uint32_t rec_no = 0;
4377 uint8_t insn_size = 4;
4378 uint32_t ret = 0;
4379 gdb_byte buf[insn_size];
4380 insn_decode_record aarch64_record;
4381
4382 memset (&buf[0], 0, insn_size);
4383 memset (&aarch64_record, 0, sizeof (insn_decode_record));
4384 target_read_memory (insn_addr, &buf[0], insn_size);
4385 aarch64_record.aarch64_insn
4386 = (uint32_t) extract_unsigned_integer (&buf[0],
4387 insn_size,
4388 gdbarch_byte_order (gdbarch));
4389 aarch64_record.regcache = regcache;
4390 aarch64_record.this_addr = insn_addr;
4391 aarch64_record.gdbarch = gdbarch;
4392
4393 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4394 if (ret == AARCH64_RECORD_UNSUPPORTED)
4395 {
4396 printf_unfiltered (_("Process record does not support instruction "
4397 "0x%0x at address %s.\n"),
4398 aarch64_record.aarch64_insn,
4399 paddress (gdbarch, insn_addr));
4400 ret = -1;
4401 }
4402
4403 if (ret == 0)
4404 {
4405 /* Record registers. */
4406 record_full_arch_list_add_reg (aarch64_record.regcache,
4407 AARCH64_PC_REGNUM);
4408 /* Always record register CPSR. */
4409 record_full_arch_list_add_reg (aarch64_record.regcache,
4410 AARCH64_CPSR_REGNUM);
4411 if (aarch64_record.aarch64_regs)
4412 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
4413 if (record_full_arch_list_add_reg (aarch64_record.regcache,
4414 aarch64_record.aarch64_regs[rec_no]))
4415 ret = -1;
4416
4417 /* Record memories. */
4418 if (aarch64_record.aarch64_mems)
4419 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
4420 if (record_full_arch_list_add_mem
4421 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
4422 aarch64_record.aarch64_mems[rec_no].len))
4423 ret = -1;
4424
4425 if (record_full_arch_list_add_end ())
4426 ret = -1;
4427 }
4428
4429 deallocate_reg_mem (&aarch64_record);
4430 return ret;
4431 }