/* Common target dependent code for GDB on AArch64 systems.

   Copyright (C) 2009-2019 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"

#include "frame.h"
#include "inferior.h"
#include "gdbcmd.h"
#include "gdbcore.h"
#include "dis-asm.h"
#include "regcache.h"
#include "reggroups.h"
#include "value.h"
#include "arch-utils.h"
#include "osabi.h"
#include "frame-unwind.h"
#include "frame-base.h"
#include "trad-frame.h"
#include "objfiles.h"
#include "dwarf2.h"
#include "dwarf2-frame.h"
#include "gdbtypes.h"
#include "prologue-value.h"
#include "target-descriptions.h"
#include "user-regs.h"
#include "language.h"
#include "infcall.h"
#include "ax.h"
#include "ax-gdb.h"
#include "common/selftest.h"

#include "aarch64-tdep.h"
#include "aarch64-ravenscar-thread.h"

#include "elf-bfd.h"
#include "elf/aarch64.h"

#include "common/vec.h"

#include "record.h"
#include "record-full.h"
#include "arch/aarch64-insn.h"

#include "opcode/aarch64.h"
#include <algorithm>

#define submask(x) ((1L << ((x) + 1)) - 1)
#define bit(obj,st) (((obj) >> (st)) & 1)
#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
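
/* For example (illustrative): for the instruction word 0x910003fd
   ("mov x29, sp", an alias of "add x29, sp, #0"), bits (insn, 0, 4)
   extracts the Rd field (29) and bits (insn, 5, 9) extracts the Rn
   field (31, i.e. SP).  */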

/* A Homogeneous Floating-Point or Short-Vector Aggregate may have at most
   four members.  */
#define HA_MAX_NUM_FLDS 4

/* All possible aarch64 target descriptors.  */
struct target_desc *tdesc_aarch64_list[AARCH64_MAX_SVE_VQ + 1][2/*pauth*/];

/* The standard register names, and all the valid aliases for them.  */
static const struct
{
  const char *const name;
  int regnum;
} aarch64_register_aliases[] =
{
  /* 64-bit register names.  */
  {"fp", AARCH64_FP_REGNUM},
  {"lr", AARCH64_LR_REGNUM},
  {"sp", AARCH64_SP_REGNUM},

  /* 32-bit register names.  */
  {"w0", AARCH64_X0_REGNUM + 0},
  {"w1", AARCH64_X0_REGNUM + 1},
  {"w2", AARCH64_X0_REGNUM + 2},
  {"w3", AARCH64_X0_REGNUM + 3},
  {"w4", AARCH64_X0_REGNUM + 4},
  {"w5", AARCH64_X0_REGNUM + 5},
  {"w6", AARCH64_X0_REGNUM + 6},
  {"w7", AARCH64_X0_REGNUM + 7},
  {"w8", AARCH64_X0_REGNUM + 8},
  {"w9", AARCH64_X0_REGNUM + 9},
  {"w10", AARCH64_X0_REGNUM + 10},
  {"w11", AARCH64_X0_REGNUM + 11},
  {"w12", AARCH64_X0_REGNUM + 12},
  {"w13", AARCH64_X0_REGNUM + 13},
  {"w14", AARCH64_X0_REGNUM + 14},
  {"w15", AARCH64_X0_REGNUM + 15},
  {"w16", AARCH64_X0_REGNUM + 16},
  {"w17", AARCH64_X0_REGNUM + 17},
  {"w18", AARCH64_X0_REGNUM + 18},
  {"w19", AARCH64_X0_REGNUM + 19},
  {"w20", AARCH64_X0_REGNUM + 20},
  {"w21", AARCH64_X0_REGNUM + 21},
  {"w22", AARCH64_X0_REGNUM + 22},
  {"w23", AARCH64_X0_REGNUM + 23},
  {"w24", AARCH64_X0_REGNUM + 24},
  {"w25", AARCH64_X0_REGNUM + 25},
  {"w26", AARCH64_X0_REGNUM + 26},
  {"w27", AARCH64_X0_REGNUM + 27},
  {"w28", AARCH64_X0_REGNUM + 28},
  {"w29", AARCH64_X0_REGNUM + 29},
  {"w30", AARCH64_X0_REGNUM + 30},

  /* specials */
  {"ip0", AARCH64_X0_REGNUM + 16},
  {"ip1", AARCH64_X0_REGNUM + 17}
};
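
/* Illustrative usage: once these aliases are registered, expressions
   such as "print $fp" or "print $lr" resolve to x29 and x30
   respectively, alongside the canonical register names.  */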

/* The required core 'R' registers.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM!  */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  "pc", "cpsr"
};

/* The FP/SIMD 'V' registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM!  */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  "fpsr",
  "fpcr"
};

/* The SVE 'Z' and 'P' registers.  */
static const char *const aarch64_sve_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_SVE_Z0_REGNUM!  */
  "z0", "z1", "z2", "z3",
  "z4", "z5", "z6", "z7",
  "z8", "z9", "z10", "z11",
  "z12", "z13", "z14", "z15",
  "z16", "z17", "z18", "z19",
  "z20", "z21", "z22", "z23",
  "z24", "z25", "z26", "z27",
  "z28", "z29", "z30", "z31",
  "fpsr", "fpcr",
  "p0", "p1", "p2", "p3",
  "p4", "p5", "p6", "p7",
  "p8", "p9", "p10", "p11",
  "p12", "p13", "p14", "p15",
  "ffr", "vg"
};

static const char *const aarch64_pauth_register_names[] =
{
  /* Authentication mask for data pointer.  */
  "pauth_dmask",
  /* Authentication mask for code pointer.  */
  "pauth_cmask"
};

/* AArch64 prologue cache structure.  */
struct aarch64_prologue_cache
{
  /* The program counter at the start of the function.  It is used to
     identify this frame as a prologue frame.  */
  CORE_ADDR func;

  /* The program counter at the time this frame was created; i.e. where
     this function was called from.  It is used to identify this frame as a
     stub frame.  */
  CORE_ADDR prev_pc;

  /* The stack pointer at the time this frame was created; i.e. the
     caller's stack pointer when this function was called.  It is used
     to identify this frame.  */
  CORE_ADDR prev_sp;

  /* Is the target available to read from?  */
  int available_p;

  /* The frame base for this frame is just prev_sp - frame size.
     FRAMESIZE is the distance from the frame pointer to the
     initial stack pointer.  */
  int framesize;

  /* The register used to hold the frame pointer for this frame.  */
  int framereg;

  /* Saved register offsets.  */
  struct trad_frame_saved_reg *saved_regs;
};

static void
show_aarch64_debug (struct ui_file *file, int from_tty,
                    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
}

namespace {

/* Abstract instruction reader.  */

class abstract_instruction_reader
{
public:
  /* Read in one instruction.  */
  virtual ULONGEST read (CORE_ADDR memaddr, int len,
                         enum bfd_endian byte_order) = 0;
};

/* Instruction reader from real target.  */

class instruction_reader : public abstract_instruction_reader
{
public:
  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    override
  {
    return read_code_unsigned_integer (memaddr, len, byte_order);
  }
};

} // namespace

/* If address signing is enabled, mask off the signature bits from ADDR, using
   the register values in THIS_FRAME.  */

static CORE_ADDR
aarch64_frame_unmask_address (struct gdbarch_tdep *tdep,
                              struct frame_info *this_frame,
                              CORE_ADDR addr)
{
  if (tdep->has_pauth ()
      && frame_unwind_register_unsigned (this_frame,
                                         tdep->pauth_ra_state_regnum))
    {
      int cmask_num = AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base);
      CORE_ADDR cmask = frame_unwind_register_unsigned (this_frame, cmask_num);
      addr = addr & ~cmask;
    }

  return addr;
}
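
/* Worked example (the values are illustrative; the actual mask depends
   on the target's virtual address configuration): with cmask =
   0x007f000000000000, a signed return address 0x002a0000004005b4
   unmasks to 0x00000000004005b4.  */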

/* Analyze a prologue, looking for a recognizable stack frame
   and frame pointer.  Scan until we encounter a store that could
   clobber the stack frame unexpectedly, or an unknown instruction.  */

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
                          CORE_ADDR start, CORE_ADDR limit,
                          struct aarch64_prologue_cache *cache,
                          abstract_instruction_reader& reader)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  int i;
  /* Track X registers and D registers in prologue.  */
  pv_t regs[AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT];

  for (i = 0; i < AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT; i++)
    regs[i] = pv_register (i, 0);
  pv_area stack (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));

  for (; start < limit; start += 4)
    {
      uint32_t insn;
      aarch64_inst inst;

      insn = reader.read (start, 4, byte_order_for_code);

      if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
        break;

      if (inst.opcode->iclass == addsub_imm
          && (inst.opcode->op == OP_ADD
              || strcmp ("sub", inst.opcode->name) == 0))
        {
          unsigned rd = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].reg.regno;

          gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM);

          if (inst.opcode->op == OP_ADD)
            {
              regs[rd] = pv_add_constant (regs[rn],
                                          inst.operands[2].imm.value);
            }
          else
            {
              regs[rd] = pv_add_constant (regs[rn],
                                          -inst.operands[2].imm.value);
            }
        }
      else if (inst.opcode->iclass == pcreladdr
               && inst.operands[1].type == AARCH64_OPND_ADDR_ADRP)
        {
          gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);

          regs[inst.operands[0].reg.regno] = pv_unknown ();
        }
      else if (inst.opcode->iclass == branch_imm
               || inst.opcode->iclass == condbranch
               || inst.opcode->iclass == branch_reg
               || inst.opcode->iclass == compbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->op == OP_MOVZ)
        {
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
          regs[inst.operands[0].reg.regno] = pv_unknown ();
        }
      else if (inst.opcode->iclass == log_shift
               && strcmp (inst.opcode->name, "orr") == 0)
        {
          unsigned rd = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].reg.regno;
          unsigned rm = inst.operands[2].reg.regno;

          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT);

          if (inst.operands[2].shifter.amount == 0
              && rn == AARCH64_SP_REGNUM)
            regs[rd] = regs[rm];
          else
            {
              if (aarch64_debug)
                {
                  debug_printf ("aarch64: prologue analysis gave up "
                                "addr=%s opcode=0x%x (orr x register)\n",
                                core_addr_to_string_nz (start), insn);
                }
              break;
            }
        }
      else if (inst.opcode->op == OP_STUR)
        {
          unsigned rt = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].addr.base_regno;
          int is64
            = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);

          gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
          gdb_assert (!inst.operands[1].addr.offset.is_reg);

          stack.store (pv_add_constant (regs[rn],
                                        inst.operands[1].addr.offset.imm),
                       is64 ? 8 : 4, regs[rt]);
        }
      else if ((inst.opcode->iclass == ldstpair_off
                || (inst.opcode->iclass == ldstpair_indexed
                    && inst.operands[2].addr.preind))
               && strcmp ("stp", inst.opcode->name) == 0)
        {
          /* STP with addressing mode Pre-indexed and Base register.  */
          unsigned rt1;
          unsigned rt2;
          unsigned rn = inst.operands[2].addr.base_regno;
          int32_t imm = inst.operands[2].addr.offset.imm;

          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
                      || inst.operands[0].type == AARCH64_OPND_Ft);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2
                      || inst.operands[1].type == AARCH64_OPND_Ft2);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7);
          gdb_assert (!inst.operands[2].addr.offset.is_reg);

          /* If recording this store would invalidate the store area
             (perhaps because rn is not known) then we should abandon
             further prologue analysis.  */
          if (stack.store_would_trash (pv_add_constant (regs[rn], imm)))
            break;

          if (stack.store_would_trash (pv_add_constant (regs[rn], imm + 8)))
            break;

          rt1 = inst.operands[0].reg.regno;
          rt2 = inst.operands[1].reg.regno;
          if (inst.operands[0].type == AARCH64_OPND_Ft)
            {
              /* Only the bottom 64 bits of each V register (the D
                 register) need to be preserved.  */
              gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
              rt1 += AARCH64_X_REGISTER_COUNT;
              rt2 += AARCH64_X_REGISTER_COUNT;
            }

          stack.store (pv_add_constant (regs[rn], imm), 8,
                       regs[rt1]);
          stack.store (pv_add_constant (regs[rn], imm + 8), 8,
                       regs[rt2]);

          if (inst.operands[2].addr.writeback)
            regs[rn] = pv_add_constant (regs[rn], imm);
        }
      else if ((inst.opcode->iclass == ldst_imm9 /* Signed immediate.  */
                || (inst.opcode->iclass == ldst_pos /* Unsigned immediate.  */
                    && (inst.opcode->op == OP_STR_POS
                        || inst.opcode->op == OP_STRF_POS)))
               && inst.operands[1].addr.base_regno == AARCH64_SP_REGNUM
               && strcmp ("str", inst.opcode->name) == 0)
        {
          /* STR (immediate).  */
          unsigned int rt = inst.operands[0].reg.regno;
          int32_t imm = inst.operands[1].addr.offset.imm;
          unsigned int rn = inst.operands[1].addr.base_regno;
          bool is64
            = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
                      || inst.operands[0].type == AARCH64_OPND_Ft);

          if (inst.operands[0].type == AARCH64_OPND_Ft)
            {
              /* Only the bottom 64 bits of each V register (the D
                 register) need to be preserved.  */
              gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
              rt += AARCH64_X_REGISTER_COUNT;
            }

          stack.store (pv_add_constant (regs[rn], imm),
                       is64 ? 8 : 4, regs[rt]);
          if (inst.operands[1].addr.writeback)
            regs[rn] = pv_add_constant (regs[rn], imm);
        }
      else if (inst.opcode->iclass == testbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == ic_system)
        {
          struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
          int ra_state_val = 0;

          if (insn == 0xd503233f /* paciasp.  */
              || insn == 0xd503237f /* pacibsp.  */)
            {
              /* Return addresses are mangled.  */
              ra_state_val = 1;
            }
          else if (insn == 0xd50323bf /* autiasp.  */
                   || insn == 0xd50323ff /* autibsp.  */)
            {
              /* Return addresses are not mangled.  */
              ra_state_val = 0;
            }
          else
            {
              if (aarch64_debug)
                debug_printf ("aarch64: prologue analysis gave up addr=%s"
                              " opcode=0x%x (iclass)\n",
                              core_addr_to_string_nz (start), insn);
              break;
            }

          if (tdep->has_pauth () && cache != nullptr)
            trad_frame_set_value (cache->saved_regs,
                                  tdep->pauth_ra_state_regnum,
                                  ra_state_val);
        }
      else
        {
          if (aarch64_debug)
            {
              debug_printf ("aarch64: prologue analysis gave up addr=%s"
                            " opcode=0x%x\n",
                            core_addr_to_string_nz (start), insn);
            }
          break;
        }
    }

  if (cache == NULL)
    return start;

  if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Frame pointer is fp.  Frame size is constant.  */
      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = -regs[AARCH64_FP_REGNUM].k;
    }
  else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Try the stack pointer.  */
      cache->framesize = -regs[AARCH64_SP_REGNUM].k;
      cache->framereg = AARCH64_SP_REGNUM;
    }
  else
    {
      /* We're just out of luck.  We don't know where the frame is.  */
      cache->framereg = -1;
      cache->framesize = 0;
    }

  for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
    {
      CORE_ADDR offset;

      if (stack.find_reg (gdbarch, i, &offset))
        cache->saved_regs[i].addr = offset;
    }

  for (i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
    {
      int regnum = gdbarch_num_regs (gdbarch);
      CORE_ADDR offset;

      if (stack.find_reg (gdbarch, i + AARCH64_X_REGISTER_COUNT,
                          &offset))
        cache->saved_regs[i + regnum + AARCH64_D0_REGNUM].addr = offset;
    }

  return start;
}

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
                          CORE_ADDR start, CORE_ADDR limit,
                          struct aarch64_prologue_cache *cache)
{
  instruction_reader reader;

  return aarch64_analyze_prologue (gdbarch, start, limit, cache,
                                   reader);
}

#if GDB_SELF_TEST

namespace selftests {

/* Instruction reader from manually cooked instruction sequences.  */

class instruction_reader_test : public abstract_instruction_reader
{
public:
  template<size_t SIZE>
  explicit instruction_reader_test (const uint32_t (&insns)[SIZE])
  : m_insns (insns), m_insns_size (SIZE)
  {}

  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    override
  {
    SELF_CHECK (len == 4);
    SELF_CHECK (memaddr % 4 == 0);
    SELF_CHECK (memaddr / 4 < m_insns_size);

    return m_insns[memaddr / 4];
  }

private:
  const uint32_t *m_insns;
  size_t m_insns_size;
};

static void
aarch64_analyze_prologue_test (void)
{
  struct gdbarch_info info;

  gdbarch_info_init (&info);
  info.bfd_arch_info = bfd_scan_arch ("aarch64");

  struct gdbarch *gdbarch = gdbarch_find_by_info (info);
  SELF_CHECK (gdbarch != NULL);

  struct aarch64_prologue_cache cache;
  cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);

  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  /* Test the simple prologue in which frame pointer is used.  */
  {
    static const uint32_t insns[] = {
      0xa9af7bfd, /* stp x29, x30, [sp,#-272]! */
      0x910003fd, /* mov x29, sp */
      0x97ffffe6, /* bl 0x400580 */
    };
    instruction_reader_test reader (insns);

    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
    SELF_CHECK (end == 4 * 2);

    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 272);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
        if (i == AARCH64_FP_REGNUM)
          SELF_CHECK (cache.saved_regs[i].addr == -272);
        else if (i == AARCH64_LR_REGNUM)
          SELF_CHECK (cache.saved_regs[i].addr == -264);
        else
          SELF_CHECK (cache.saved_regs[i].addr == -1);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
        int regnum = gdbarch_num_regs (gdbarch);

        SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
                    == -1);
      }
  }

  /* Test a prologue in which STR is used and frame pointer is not
     used.  */
  {
    static const uint32_t insns[] = {
      0xf81d0ff3, /* str x19, [sp, #-48]! */
      0xb9002fe0, /* str w0, [sp, #44] */
      0xf90013e1, /* str x1, [sp, #32] */
      0xfd000fe0, /* str d0, [sp, #24] */
      0xaa0203f3, /* mov x19, x2 */
      0xf94013e0, /* ldr x0, [sp, #32] */
    };
    instruction_reader_test reader (insns);

    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    SELF_CHECK (end == 4 * 5);

    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 48);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
        if (i == 1)
          SELF_CHECK (cache.saved_regs[i].addr == -16);
        else if (i == 19)
          SELF_CHECK (cache.saved_regs[i].addr == -48);
        else
          SELF_CHECK (cache.saved_regs[i].addr == -1);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
        int regnum = gdbarch_num_regs (gdbarch);

        if (i == 0)
          SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
                      == -24);
        else
          SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
                      == -1);
      }
  }

  /* Test a prologue in which there is a return address signing
     instruction.  */
  if (tdep->has_pauth ())
    {
      static const uint32_t insns[] = {
        0xd503233f, /* paciasp */
        0xa9bd7bfd, /* stp x29, x30, [sp, #-48]! */
        0x910003fd, /* mov x29, sp */
        0xf801c3f3, /* str x19, [sp, #28] */
        0xb9401fa0, /* ldr x19, [x29, #28] */
      };
      instruction_reader_test reader (insns);

      CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache,
                                                reader);

      SELF_CHECK (end == 4 * 4);
      SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
      SELF_CHECK (cache.framesize == 48);

      for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
        {
          if (i == 19)
            SELF_CHECK (cache.saved_regs[i].addr == -20);
          else if (i == AARCH64_FP_REGNUM)
            SELF_CHECK (cache.saved_regs[i].addr == -48);
          else if (i == AARCH64_LR_REGNUM)
            SELF_CHECK (cache.saved_regs[i].addr == -40);
          else
            SELF_CHECK (cache.saved_regs[i].addr == -1);
        }

      if (tdep->has_pauth ())
        {
          SELF_CHECK (trad_frame_value_p (cache.saved_regs,
                                          tdep->pauth_ra_state_regnum));
          SELF_CHECK (cache.saved_regs[tdep->pauth_ra_state_regnum].addr == 1);
        }
    }
}
} // namespace selftests
#endif /* GDB_SELF_TEST */

/* Implement the "skip_prologue" gdbarch method.  */

static CORE_ADDR
aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  CORE_ADDR func_addr, limit_pc;

  /* See if we can determine the end of the prologue via the symbol
     table.  If so, then return either PC, or the PC after the
     prologue, whichever is greater.  */
  if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
    {
      CORE_ADDR post_prologue_pc
        = skip_prologue_using_sal (gdbarch, func_addr);

      if (post_prologue_pc != 0)
        return std::max (pc, post_prologue_pc);
    }

  /* Can't determine prologue from the symbol table, need to examine
     instructions.  */

  /* Find an upper limit on the function prologue using the debug
     information.  If the debug information could not be used to
     provide that bound, then use an arbitrary large number as the
     upper bound.  */
  limit_pc = skip_prologue_using_sal (gdbarch, pc);
  if (limit_pc == 0)
    limit_pc = pc + 128; /* Magic.  */

  /* Try disassembling prologue.  */
  return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
}
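
/* For example (illustrative): for a function that begins with
     stp x29, x30, [sp, #-32]!
     mov x29, sp
   and has no usable line information, the analysis above stops at the
   first unrecognized body instruction, so this method returns the
   entry PC plus 8.  */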

/* Scan the function prologue for THIS_FRAME and populate the prologue
   cache CACHE.  */

static void
aarch64_scan_prologue (struct frame_info *this_frame,
                       struct aarch64_prologue_cache *cache)
{
  CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
  CORE_ADDR prologue_start;
  CORE_ADDR prologue_end;
  CORE_ADDR prev_pc = get_frame_pc (this_frame);
  struct gdbarch *gdbarch = get_frame_arch (this_frame);

  cache->prev_pc = prev_pc;

  /* Assume we do not find a frame.  */
  cache->framereg = -1;
  cache->framesize = 0;

  if (find_pc_partial_function (block_addr, NULL, &prologue_start,
                                &prologue_end))
    {
      struct symtab_and_line sal = find_pc_line (prologue_start, 0);

      if (sal.line == 0)
        {
          /* No line info so use the current PC.  */
          prologue_end = prev_pc;
        }
      else if (sal.end < prologue_end)
        {
          /* The next line begins after the function end.  */
          prologue_end = sal.end;
        }

      prologue_end = std::min (prologue_end, prev_pc);
      aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
    }
  else
    {
      CORE_ADDR frame_loc;

      frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
      if (frame_loc == 0)
        return;

      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = 16;
      cache->saved_regs[29].addr = 0;
      cache->saved_regs[30].addr = 8;
    }
}

/* Fill in *CACHE with information about the prologue of *THIS_FRAME.  This
   function may throw an exception if the inferior's registers or memory is
   not available.  */

static void
aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
                               struct aarch64_prologue_cache *cache)
{
  CORE_ADDR unwound_fp;
  int reg;

  aarch64_scan_prologue (this_frame, cache);

  if (cache->framereg == -1)
    return;

  unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
  if (unwound_fp == 0)
    return;

  cache->prev_sp = unwound_fp + cache->framesize;

  /* Calculate actual addresses of saved registers using offsets
     determined by aarch64_analyze_prologue.  */
  for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
    if (trad_frame_addr_p (cache->saved_regs, reg))
      cache->saved_regs[reg].addr += cache->prev_sp;

  cache->func = get_frame_func (this_frame);

  cache->available_p = 1;
}

/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.  Return
   a pointer to the current aarch64_prologue_cache in *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  TRY
    {
      aarch64_make_prologue_cache_1 (this_frame, cache);
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
        throw_exception (ex);
    }
  END_CATCH

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
                                           void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  /* Halt the backtrace at "_start".  */
  if (cache->prev_pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
    return UNWIND_OUTERMOST;

  /* We've hit a wall, stop.  */
  if (cache->prev_sp == 0)
    return UNWIND_OUTERMOST;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a normal frame is the current function's starting
   PC and the caller's SP when we were called.  */

static void
aarch64_prologue_this_id (struct frame_info *this_frame,
                          void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    *this_id = frame_id_build_unavailable_stack (cache->func);
  else
    *this_id = frame_id_build (cache->prev_sp, cache->func);
}

/* Implement the "prev_register" frame_unwind method.  */

static struct value *
aarch64_prologue_prev_register (struct frame_info *this_frame,
                                void **this_cache, int prev_regnum)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  /* If we are asked to unwind the PC, then we need to return the LR
     instead.  The prologue may save PC, but it will point into this
     frame's prologue, not the next frame's resume location.  */
  if (prev_regnum == AARCH64_PC_REGNUM)
    {
      CORE_ADDR lr;
      struct gdbarch *gdbarch = get_frame_arch (this_frame);
      struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);

      if (tdep->has_pauth ()
          && trad_frame_value_p (cache->saved_regs,
                                 tdep->pauth_ra_state_regnum))
        lr = aarch64_frame_unmask_address (tdep, this_frame, lr);

      return frame_unwind_got_constant (this_frame, prev_regnum, lr);
    }

  /* SP is generally not saved to the stack, but this frame is
     identified by the next frame's stack pointer at the time of the
     call.  The value was already reconstructed into PREV_SP.  */
  /*
         +----------+  ^
         | saved lr |  |
      +->| saved fp |--+
      |  |          |
      |  |          | <- Previous SP
      |  +----------+
      |  | saved lr |
      +--| saved fp |<- FP
         |          |
         |          |<- SP
         +----------+  */
  if (prev_regnum == AARCH64_SP_REGNUM)
    return frame_unwind_got_constant (this_frame, prev_regnum,
                                      cache->prev_sp);

  return trad_frame_get_prev_register (this_frame, cache->saved_regs,
                                       prev_regnum);
}

/* AArch64 prologue unwinder.  */
struct frame_unwind aarch64_prologue_unwind =
{
  NORMAL_FRAME,
  aarch64_prologue_frame_unwind_stop_reason,
  aarch64_prologue_this_id,
  aarch64_prologue_prev_register,
  NULL,
  default_frame_sniffer
};

/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.  Return
   a pointer to the current aarch64_prologue_cache in *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  TRY
    {
      cache->prev_sp = get_frame_register_unsigned (this_frame,
                                                    AARCH64_SP_REGNUM);
      cache->prev_pc = get_frame_pc (this_frame);
      cache->available_p = 1;
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
        throw_exception (ex);
    }
  END_CATCH

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame,
                                       void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a stub frame is the current SP and LR.  */

static void
aarch64_stub_this_id (struct frame_info *this_frame,
                      void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (cache->available_p)
    *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
  else
    *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
}

/* Implement the "sniffer" frame_unwind method.  */

static int
aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
                             struct frame_info *this_frame,
                             void **this_prologue_cache)
{
  CORE_ADDR addr_in_block;
  gdb_byte dummy[4];

  addr_in_block = get_frame_address_in_block (this_frame);
  if (in_plt_section (addr_in_block)
      /* We also use the stub unwinder if the target memory is unreadable,
         to avoid having the prologue unwinder try to read it.  */
      || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
    return 1;

  return 0;
}

/* AArch64 stub unwinder.  */
struct frame_unwind aarch64_stub_unwind =
{
  NORMAL_FRAME,
  aarch64_stub_frame_unwind_stop_reason,
  aarch64_stub_this_id,
  aarch64_prologue_prev_register,
  NULL,
  aarch64_stub_unwind_sniffer
};

/* Return the frame base address of *THIS_FRAME.  */

static CORE_ADDR
aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  return cache->prev_sp - cache->framesize;
}

/* AArch64 default frame base information.  */
struct frame_base aarch64_normal_base =
{
  &aarch64_prologue_unwind,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base
};

/* Return the value of the REGNUM register in the previous frame of
   *THIS_FRAME.  */

static struct value *
aarch64_dwarf2_prev_register (struct frame_info *this_frame,
                              void **this_cache, int regnum)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (get_frame_arch (this_frame));
  CORE_ADDR lr;

  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      lr = aarch64_frame_unmask_address (tdep, this_frame, lr);
      return frame_unwind_got_constant (this_frame, regnum, lr);

    default:
      internal_error (__FILE__, __LINE__,
                      _("Unexpected register %d"), regnum);
    }
}

static const unsigned char op_lit0 = DW_OP_lit0;
static const unsigned char op_lit1 = DW_OP_lit1;

/* Implement the "init_reg" dwarf2_frame_ops method.  */

static void
aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
                               struct dwarf2_frame_state_reg *reg,
                               struct frame_info *this_frame)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      reg->how = DWARF2_FRAME_REG_FN;
      reg->loc.fn = aarch64_dwarf2_prev_register;
      return;

    case AARCH64_SP_REGNUM:
      reg->how = DWARF2_FRAME_REG_CFA;
      return;
    }

  /* Init pauth registers.  */
  if (tdep->has_pauth ())
    {
      if (regnum == tdep->pauth_ra_state_regnum)
        {
          /* Initialize RA_STATE to zero.  */
          reg->how = DWARF2_FRAME_REG_SAVED_VAL_EXP;
          reg->loc.exp.start = &op_lit0;
          reg->loc.exp.len = 1;
          return;
        }
      else if (regnum == AARCH64_PAUTH_DMASK_REGNUM (tdep->pauth_reg_base)
               || regnum == AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base))
        {
          reg->how = DWARF2_FRAME_REG_SAME_VALUE;
          return;
        }
    }
}

/* Implement the execute_dwarf_cfa_vendor_op method.  */

static bool
aarch64_execute_dwarf_cfa_vendor_op (struct gdbarch *gdbarch, gdb_byte op,
                                     struct dwarf2_frame_state *fs)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  struct dwarf2_frame_state_reg *ra_state;

  if (tdep->has_pauth () && op == DW_CFA_AARCH64_negate_ra_state)
    {
      /* Allocate RA_STATE column if it's not allocated yet.  */
      fs->regs.alloc_regs (AARCH64_DWARF_PAUTH_RA_STATE + 1);

      /* Toggle the status of RA_STATE between 0 and 1.  */
      ra_state = &(fs->regs.reg[AARCH64_DWARF_PAUTH_RA_STATE]);
      ra_state->how = DWARF2_FRAME_REG_SAVED_VAL_EXP;

      if (ra_state->loc.exp.start == nullptr
          || ra_state->loc.exp.start == &op_lit0)
        ra_state->loc.exp.start = &op_lit1;
      else
        ra_state->loc.exp.start = &op_lit0;

      ra_state->loc.exp.len = 1;

      return true;
    }

  return false;
}
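
/* For example (how compilers typically emit this): a function that
   signs its return address carries DW_CFA_AARCH64_negate_ra_state in
   its CFI just after the "paciasp" instruction and again after
   "autiasp", so RA_STATE unwinds as 1 while the return address is
   signed and as 0 once the signature has been stripped.  */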

/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

typedef struct
{
  /* Value to pass on stack.  It can be NULL if this item is for stack
     padding.  */
  const gdb_byte *data;

  /* Size in bytes of value to pass on stack.  */
  int len;
} stack_item_t;

DEF_VEC_O (stack_item_t);
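
/* For example (illustrative): if an 8-byte argument A is pushed onto
   this stack and then an 8-byte argument B, the drain loop in
   aarch64_push_dummy_call pops B first, so the final memory layout is
   A at the new SP and B at SP + 8, which is the order the AAPCS64
   expects.  */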

/* Return the alignment (in bytes) of the given type.  */

static int
aarch64_type_align (struct type *t)
{
  int n;
  int align;
  int falign;

  t = check_typedef (t);
  switch (TYPE_CODE (t))
    {
    default:
      /* Should never happen.  */
      internal_error (__FILE__, __LINE__, _("unknown type alignment"));
      return 4;

    case TYPE_CODE_PTR:
    case TYPE_CODE_ENUM:
    case TYPE_CODE_INT:
    case TYPE_CODE_FLT:
    case TYPE_CODE_SET:
    case TYPE_CODE_RANGE:
    case TYPE_CODE_BITSTRING:
    case TYPE_CODE_REF:
    case TYPE_CODE_RVALUE_REF:
    case TYPE_CODE_CHAR:
    case TYPE_CODE_BOOL:
      return TYPE_LENGTH (t);

    case TYPE_CODE_ARRAY:
      if (TYPE_VECTOR (t))
        {
          /* Use the natural alignment for vector types (the same as for
             scalar types), but the maximum alignment is 128 bits.  */
          if (TYPE_LENGTH (t) > 16)
            return 16;
          else
            return TYPE_LENGTH (t);
        }
      else
        return aarch64_type_align (TYPE_TARGET_TYPE (t));
    case TYPE_CODE_COMPLEX:
      return aarch64_type_align (TYPE_TARGET_TYPE (t));

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      align = 1;
      for (n = 0; n < TYPE_NFIELDS (t); n++)
        {
          falign = aarch64_type_align (TYPE_FIELD_TYPE (t, n));
          if (falign > align)
            align = falign;
        }
      return align;
    }
}

/* Worker function for aapcs_is_vfp_call_or_return_candidate.

   Return the number of registers required, or -1 on failure.

   When encountering a base element, if FUNDAMENTAL_TYPE is not set then set it
   to the element, else fail if the type of this element does not match the
   existing value.  */

static int
aapcs_is_vfp_call_or_return_candidate_1 (struct type *type,
                                         struct type **fundamental_type)
{
  if (type == nullptr)
    return -1;

  switch (TYPE_CODE (type))
    {
    case TYPE_CODE_FLT:
      if (TYPE_LENGTH (type) > 16)
        return -1;

      if (*fundamental_type == nullptr)
        *fundamental_type = type;
      else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
               || TYPE_CODE (type) != TYPE_CODE (*fundamental_type))
        return -1;

      return 1;

    case TYPE_CODE_COMPLEX:
      {
        struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
        if (TYPE_LENGTH (target_type) > 16)
          return -1;

        if (*fundamental_type == nullptr)
          *fundamental_type = target_type;
        else if (TYPE_LENGTH (target_type) != TYPE_LENGTH (*fundamental_type)
                 || TYPE_CODE (target_type) != TYPE_CODE (*fundamental_type))
          return -1;

        return 2;
      }

    case TYPE_CODE_ARRAY:
      {
        if (TYPE_VECTOR (type))
          {
            if (TYPE_LENGTH (type) != 8 && TYPE_LENGTH (type) != 16)
              return -1;

            if (*fundamental_type == nullptr)
              *fundamental_type = type;
            else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
                     || TYPE_CODE (type) != TYPE_CODE (*fundamental_type))
              return -1;

            return 1;
          }
        else
          {
            struct type *target_type = TYPE_TARGET_TYPE (type);
            int count = aapcs_is_vfp_call_or_return_candidate_1
                          (target_type, fundamental_type);

            if (count == -1)
              return count;

            count *= (TYPE_LENGTH (type) / TYPE_LENGTH (target_type));
            return count;
          }
      }

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      {
        int count = 0;

        for (int i = 0; i < TYPE_NFIELDS (type); i++)
          {
            /* Ignore any static fields.  */
            if (field_is_static (&TYPE_FIELD (type, i)))
              continue;

            struct type *member = check_typedef (TYPE_FIELD_TYPE (type, i));

            int sub_count = aapcs_is_vfp_call_or_return_candidate_1
                              (member, fundamental_type);
            if (sub_count == -1)
              return -1;
            count += sub_count;
          }

        /* Ensure there is no padding between the fields (allowing for empty
           zero-length structs).  */
        int ftype_length = (*fundamental_type == nullptr)
                           ? 0 : TYPE_LENGTH (*fundamental_type);
        if (count * ftype_length != TYPE_LENGTH (type))
          return -1;

        return count;
      }

    default:
      break;
    }

  return -1;
}

/* Return true if an argument, whose type is described by TYPE, can be passed
   or returned in simd/fp registers, provided enough parameter passing
   registers are available.  This is as described in the AAPCS64.

   Upon successful return, *COUNT returns the number of needed registers and
   *FUNDAMENTAL_TYPE contains the type of those registers.

   A candidate, as per AAPCS64 5.4.2.C, is one of:
   - A float.
   - A short vector.
   - An HFA (Homogeneous Floating-point Aggregate, 4.3.5.1): a composite type
     where all of the members are floats, with at most 4 members.
   - An HVA (Homogeneous Short-vector Aggregate, 4.3.5.2): a composite type
     where all of the members are short vectors, with at most 4 members.
   - A complex type (7.1.1).

   Note that HFAs and HVAs can include nested structures and arrays.  */

static bool
aapcs_is_vfp_call_or_return_candidate (struct type *type, int *count,
                                       struct type **fundamental_type)
{
  if (type == nullptr)
    return false;

  *fundamental_type = nullptr;

  int ag_count = aapcs_is_vfp_call_or_return_candidate_1 (type,
                                                          fundamental_type);

  if (ag_count > 0 && ag_count <= HA_MAX_NUM_FLDS)
    {
      *count = ag_count;
      return true;
    }
  else
    return false;
}
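
/* For example (illustrative): "struct { float x, y, z; }" is an HFA;
   the check above succeeds with *COUNT = 3 and *FUNDAMENTAL_TYPE the
   4-byte float type, so the value travels in three consecutive V
   registers.  A struct mixing a float and a double is rejected
   because the base types differ.  */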

/* AArch64 function call information structure.  */
struct aarch64_call_info
{
  /* The current argument number.  */
  unsigned argnum;

  /* The next general purpose register number, equivalent to NGRN as
     described in the AArch64 Procedure Call Standard.  */
  unsigned ngrn;

  /* The next SIMD and floating point register number, equivalent to
     NSRN as described in the AArch64 Procedure Call Standard.  */
  unsigned nsrn;

  /* The next stacked argument address, equivalent to NSAA as
     described in the AArch64 Procedure Call Standard.  */
  unsigned nsaa;

  /* Stack item vector.  */
  VEC(stack_item_t) *si;
};

/* Pass a value in a sequence of consecutive X registers.  The caller
   is responsible for ensuring sufficient registers are available.  */

static void
pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
           struct aarch64_call_info *info, struct type *type,
           struct value *arg)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int len = TYPE_LENGTH (type);
  enum type_code typecode = TYPE_CODE (type);
  int regnum = AARCH64_X0_REGNUM + info->ngrn;
  const bfd_byte *buf = value_contents (arg);

  info->argnum++;

  while (len > 0)
    {
      int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
      CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
                                                   byte_order);

      /* Adjust sub-word struct/union args when big-endian.  */
      if (byte_order == BFD_ENDIAN_BIG
          && partial_len < X_REGISTER_SIZE
          && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
        regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);

      if (aarch64_debug)
        {
          debug_printf ("arg %d in %s = 0x%s\n", info->argnum,
                        gdbarch_register_name (gdbarch, regnum),
                        phex (regval, X_REGISTER_SIZE));
        }
      regcache_cooked_write_unsigned (regcache, regnum, regval);
      len -= partial_len;
      buf += partial_len;
      regnum++;
    }
}
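
/* For example (illustrative): a 16-byte struct passed by value with
   NGRN = 0 goes through the loop above twice and lands in x0 and x1;
   on a big-endian target, a trailing sub-word chunk of a struct is
   shifted into the most significant bytes of its register.  */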

/* Attempt to marshall a value in a V register.  Return 1 if
   successful, or 0 if insufficient registers are available.  This
   function, unlike the equivalent pass_in_x () function, does not
   handle arguments spread across multiple registers.  */

static int
pass_in_v (struct gdbarch *gdbarch,
           struct regcache *regcache,
           struct aarch64_call_info *info,
           int len, const bfd_byte *buf)
{
  if (info->nsrn < 8)
    {
      int regnum = AARCH64_V0_REGNUM + info->nsrn;
      /* Enough space for a full vector register.  */
      gdb_byte reg[register_size (gdbarch, regnum)];
      gdb_assert (len <= sizeof (reg));

      info->argnum++;
      info->nsrn++;

      memset (reg, 0, sizeof (reg));
      /* PCS C.1, the argument is allocated to the least significant
         bits of the V register.  */
      memcpy (reg, buf, len);
      regcache->cooked_write (regnum, reg);

      if (aarch64_debug)
        {
          debug_printf ("arg %d in %s\n", info->argnum,
                        gdbarch_register_name (gdbarch, regnum));
        }
      return 1;
    }
  info->nsrn = 8;
  return 0;
}

/* Marshall an argument onto the stack.  */

static void
pass_on_stack (struct aarch64_call_info *info, struct type *type,
               struct value *arg)
{
  const bfd_byte *buf = value_contents (arg);
  int len = TYPE_LENGTH (type);
  int align;
  stack_item_t item;

  info->argnum++;

  align = aarch64_type_align (type);

  /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
     natural alignment of the argument's type.  */
  align = align_up (align, 8);

  /* The AArch64 PCS requires at most doubleword alignment.  */
  if (align > 16)
    align = 16;

  if (aarch64_debug)
    {
      debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
                    info->nsaa);
    }

  item.len = len;
  item.data = buf;
  VEC_safe_push (stack_item_t, info->si, &item);

  info->nsaa += len;
  if (info->nsaa & (align - 1))
    {
      /* Push stack alignment padding.  */
      int pad = align - (info->nsaa & (align - 1));

      item.len = pad;
      item.data = NULL;

      VEC_safe_push (stack_item_t, info->si, &item);
      info->nsaa += pad;
    }
}
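
/* For example (illustrative): pushing a 12-byte struct with 8-byte
   alignment when NSAA is 0 records a 12-byte data item followed by a
   4-byte padding item, leaving NSAA at 16.  */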

/* Marshall an argument into a sequence of one or more consecutive X
   registers or, if insufficient X registers are available, onto
   the stack.  */

static void
pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
                    struct aarch64_call_info *info, struct type *type,
                    struct value *arg)
{
  int len = TYPE_LENGTH (type);
  int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;

  /* PCS C.13 - Pass in registers if we have enough spare.  */
  if (info->ngrn + nregs <= 8)
    {
      pass_in_x (gdbarch, regcache, info, type, arg);
      info->ngrn += nregs;
    }
  else
    {
      info->ngrn = 8;
      pass_on_stack (info, type, arg);
    }
}

/* Pass a value, which is of type arg_type, in a V register.  Assumes the
   value is an aapcs_is_vfp_call_or_return_candidate and there are enough
   spare V registers.  A return value of false is an error state, as the
   value will have been partially passed to the stack.  */
static bool
pass_in_v_vfp_candidate (struct gdbarch *gdbarch, struct regcache *regcache,
                         struct aarch64_call_info *info, struct type *arg_type,
                         struct value *arg)
{
  switch (TYPE_CODE (arg_type))
    {
    case TYPE_CODE_FLT:
      return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type),
                        value_contents (arg));
      break;

    case TYPE_CODE_COMPLEX:
      {
        const bfd_byte *buf = value_contents (arg);
        struct type *target_type = check_typedef (TYPE_TARGET_TYPE (arg_type));

        if (!pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type),
                        buf))
          return false;

        return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type),
                          buf + TYPE_LENGTH (target_type));
      }

    case TYPE_CODE_ARRAY:
      if (TYPE_VECTOR (arg_type))
        return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type),
                          value_contents (arg));
      /* fall through.  */

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      for (int i = 0; i < TYPE_NFIELDS (arg_type); i++)
        {
          /* Don't include static fields.  */
          if (field_is_static (&TYPE_FIELD (arg_type, i)))
            continue;

          struct value *field = value_primitive_field (arg, 0, i, arg_type);
          struct type *field_type = check_typedef (value_type (field));

          if (!pass_in_v_vfp_candidate (gdbarch, regcache, info, field_type,
                                        field))
            return false;
        }
      return true;

    default:
      return false;
    }
}

/* Implement the "push_dummy_call" gdbarch method.  */

static CORE_ADDR
aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
                         struct regcache *regcache, CORE_ADDR bp_addr,
                         int nargs,
                         struct value **args, CORE_ADDR sp,
                         function_call_return_method return_method,
                         CORE_ADDR struct_addr)
{
  int argnum;
  struct aarch64_call_info info;

  memset (&info, 0, sizeof (info));

  /* We need to know what the type of the called function is in order
     to determine the number of named/anonymous arguments for the
     actual argument placement, and the return type in order to handle
     return value correctly.

     The generic code above us views the decision of return in memory
     or return in registers as a two stage process.  The language
     handler is consulted first and may decide to return in memory (e.g.
     a class with a copy constructor returned by value); this will cause
     the generic code to allocate space AND insert an initial leading
     argument.

     If the language code does not decide to pass in memory then the
     target code is consulted.

     If the language code decides to pass in memory we want to move
     the pointer inserted as the initial argument from the argument
     list and into X8, the conventional AArch64 struct return pointer
     register.  */

  /* Set the return address.  For the AArch64, the return breakpoint
     is always at BP_ADDR.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);

  /* If we were given an initial argument for the return slot, lose it.  */
  if (return_method == return_method_hidden_param)
    {
      args++;
      nargs--;
    }

  /* The struct_return pointer occupies X8.  */
  if (return_method != return_method_normal)
    {
      if (aarch64_debug)
        {
          debug_printf ("struct return in %s = 0x%s\n",
                        gdbarch_register_name (gdbarch,
                                               AARCH64_STRUCT_RETURN_REGNUM),
                        paddress (gdbarch, struct_addr));
        }
      regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
                                      struct_addr);
    }

  for (argnum = 0; argnum < nargs; argnum++)
    {
      struct value *arg = args[argnum];
      struct type *arg_type, *fundamental_type;
      int len, elements;

      arg_type = check_typedef (value_type (arg));
      len = TYPE_LENGTH (arg_type);

      /* If arg can be passed in v registers as per the AAPCS64, then do so
         if there are enough spare registers.  */
      if (aapcs_is_vfp_call_or_return_candidate (arg_type, &elements,
                                                 &fundamental_type))
        {
          if (info.nsrn + elements <= 8)
            {
              /* We know that we have sufficient registers available,
                 therefore this will never need to fall back to the
                 stack.  */
              if (!pass_in_v_vfp_candidate (gdbarch, regcache, &info, arg_type,
                                            arg))
                gdb_assert_not_reached ("Failed to push args");
            }
          else
            {
              info.nsrn = 8;
              pass_on_stack (&info, arg_type, arg);
            }
          continue;
        }

      switch (TYPE_CODE (arg_type))
        {
        case TYPE_CODE_INT:
        case TYPE_CODE_BOOL:
        case TYPE_CODE_CHAR:
        case TYPE_CODE_RANGE:
        case TYPE_CODE_ENUM:
          if (len < 4)
            {
              /* Promote to 32 bit integer.  */
              if (TYPE_UNSIGNED (arg_type))
                arg_type = builtin_type (gdbarch)->builtin_uint32;
              else
                arg_type = builtin_type (gdbarch)->builtin_int32;
              arg = value_cast (arg_type, arg);
            }
          pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;

        case TYPE_CODE_STRUCT:
        case TYPE_CODE_ARRAY:
        case TYPE_CODE_UNION:
          if (len > 16)
            {
              /* PCS B.7 Aggregates larger than 16 bytes are passed by
                 invisible reference.  */

              /* Allocate aligned storage.  */
              sp = align_down (sp - len, 16);

              /* Write the real data into the stack.  */
              write_memory (sp, value_contents (arg), len);

              /* Construct the indirection.  */
              arg_type = lookup_pointer_type (arg_type);
              arg = value_from_pointer (arg_type, sp);
              pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
            }
          else
            /* PCS C.15 / C.18: pass the aggregate in registers or on
               the stack.  */
            pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;

        default:
          pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;
        }
    }

  /* Make sure stack retains 16 byte alignment.  */
  if (info.nsaa & 15)
    sp -= 16 - (info.nsaa & 15);

  while (!VEC_empty (stack_item_t, info.si))
    {
      stack_item_t *si = VEC_last (stack_item_t, info.si);

      sp -= si->len;
      if (si->data != NULL)
        write_memory (sp, si->data, si->len);
      VEC_pop (stack_item_t, info.si);
    }

  VEC_free (stack_item_t, info.si);

  /* Finally, update the SP register.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);

  return sp;
}

/* Implement the "frame_align" gdbarch method.  */

static CORE_ADDR
aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
{
  /* Align the stack to sixteen bytes.  */
  return sp & ~(CORE_ADDR) 15;
}
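
/* For example: aarch64_frame_align (gdbarch, 0x7ffffffff8e9) yields
   0x7ffffffff8e0, keeping the inferior-call stack 16-byte aligned.  */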

/* Return the type for an AdvSIMD Q register.  */

static struct type *
aarch64_vnq_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnq_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint128;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int128;
      append_composite_type_field (t, "s", elem);

      tdep->vnq_type = t;
    }

  return tdep->vnq_type;
}

/* Return the type for an AdvSIMD D register.  */

static struct type *
aarch64_vnd_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnd_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_double;
      append_composite_type_field (t, "f", elem);

      elem = builtin_type (gdbarch)->builtin_uint64;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int64;
      append_composite_type_field (t, "s", elem);

      tdep->vnd_type = t;
    }

  return tdep->vnd_type;
}

/* Return the type for an AdvSIMD S register.  */

static struct type *
aarch64_vns_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vns_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_float;
      append_composite_type_field (t, "f", elem);

      elem = builtin_type (gdbarch)->builtin_uint32;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int32;
      append_composite_type_field (t, "s", elem);

      tdep->vns_type = t;
    }

  return tdep->vns_type;
}

/* Return the type for an AdvSIMD H register.  */

static struct type *
aarch64_vnh_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnh_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint16;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int16;
      append_composite_type_field (t, "s", elem);

      tdep->vnh_type = t;
    }

  return tdep->vnh_type;
}

/* Return the type for an AdvSIMD B register.  */

static struct type *
aarch64_vnb_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnb_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint8;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int8;
      append_composite_type_field (t, "s", elem);

      tdep->vnb_type = t;
    }

  return tdep->vnb_type;
}

/* Return the type for an AdvSIMD V register.  */

static struct type *
aarch64_vnv_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnv_type == NULL)
    {
      struct type *t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnv",
                                            TYPE_CODE_UNION);

      append_composite_type_field (t, "d", aarch64_vnd_type (gdbarch));
      append_composite_type_field (t, "s", aarch64_vns_type (gdbarch));
      append_composite_type_field (t, "h", aarch64_vnh_type (gdbarch));
      append_composite_type_field (t, "b", aarch64_vnb_type (gdbarch));
      append_composite_type_field (t, "q", aarch64_vnq_type (gdbarch));

      tdep->vnv_type = t;
    }

  return tdep->vnv_type;
}
1993
1994 /* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
1995
1996 static int
1997 aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
1998 {
1999 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2000
2001 if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
2002 return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;
2003
2004 if (reg == AARCH64_DWARF_SP)
2005 return AARCH64_SP_REGNUM;
2006
2007 if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
2008 return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;
2009
2010 if (reg == AARCH64_DWARF_SVE_VG)
2011 return AARCH64_SVE_VG_REGNUM;
2012
2013 if (reg == AARCH64_DWARF_SVE_FFR)
2014 return AARCH64_SVE_FFR_REGNUM;
2015
2016 if (reg >= AARCH64_DWARF_SVE_P0 && reg <= AARCH64_DWARF_SVE_P0 + 15)
2017 return AARCH64_SVE_P0_REGNUM + reg - AARCH64_DWARF_SVE_P0;
2018
2019 if (reg >= AARCH64_DWARF_SVE_Z0 && reg <= AARCH64_DWARF_SVE_Z0 + 15)
2020 return AARCH64_SVE_Z0_REGNUM + reg - AARCH64_DWARF_SVE_Z0;
2021
2022 if (tdep->has_pauth ())
2023 {
2024 if (reg >= AARCH64_DWARF_PAUTH_DMASK && reg <= AARCH64_DWARF_PAUTH_CMASK)
2025 return tdep->pauth_reg_base + reg - AARCH64_DWARF_PAUTH_DMASK;
2026
2027 if (reg == AARCH64_DWARF_PAUTH_RA_STATE)
2028 return tdep->pauth_ra_state_regnum;
2029 }
2030
2031 return -1;
2032 }
2033
2034 /* Implement the "print_insn" gdbarch method. */
2035
2036 static int
2037 aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
2038 {
2039 info->symbols = NULL;
2040 return default_print_insn (memaddr, info);
2041 }
2042
2043 /* AArch64 BRK software debug mode instruction.
2044 Note that AArch64 code is always little-endian.
2045 1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000. */
2046 constexpr gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
2047
2048 typedef BP_MANIPULATION (aarch64_default_breakpoint) aarch64_breakpoint;
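
/* Illustrative note: the A64 encoding of BRK #imm16 is
   1101.0100.001i.iiii.iiii.iiii.iii0.0000, so the constant above is
   BRK #0; stored little-endian it appears in memory as the bytes
   00 00 20 d4.  */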
2049
2050 /* Extract from an array REGS containing the (raw) register state a
2051 function return value of type TYPE, and copy that, in virtual
2052 format, into VALBUF. */
2053
2054 static void
2055 aarch64_extract_return_value (struct type *type, struct regcache *regs,
2056 gdb_byte *valbuf)
2057 {
2058 struct gdbarch *gdbarch = regs->arch ();
2059 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2060 int elements;
2061 struct type *fundamental_type;
2062
2063 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2064 &fundamental_type))
2065 {
2066 int len = TYPE_LENGTH (fundamental_type);
2067
2068 for (int i = 0; i < elements; i++)
2069 {
2070 int regno = AARCH64_V0_REGNUM + i;
2071 /* Enough space for a full vector register. */
2072 gdb_byte buf[register_size (gdbarch, regno)];
2073 gdb_assert (len <= sizeof (buf));
2074
2075 if (aarch64_debug)
2076 {
2077 debug_printf ("read HFA or HVA return value element %d from %s\n",
2078 i + 1,
2079 gdbarch_register_name (gdbarch, regno));
2080 }
2081 regs->cooked_read (regno, buf);
2082
2083 memcpy (valbuf, buf, len);
2084 valbuf += len;
2085 }
2086 }
2087 else if (TYPE_CODE (type) == TYPE_CODE_INT
2088 || TYPE_CODE (type) == TYPE_CODE_CHAR
2089 || TYPE_CODE (type) == TYPE_CODE_BOOL
2090 || TYPE_CODE (type) == TYPE_CODE_PTR
2091 || TYPE_IS_REFERENCE (type)
2092 || TYPE_CODE (type) == TYPE_CODE_ENUM)
2093 {
2094	      /* If the type is a plain integer, then the access is
2095		 straightforward.  Otherwise we have to play around a bit
2096		 more.  */
2097 int len = TYPE_LENGTH (type);
2098 int regno = AARCH64_X0_REGNUM;
2099 ULONGEST tmp;
2100
2101 while (len > 0)
2102 {
2103 /* By using store_unsigned_integer we avoid having to do
2104 anything special for small big-endian values. */
2105 regcache_cooked_read_unsigned (regs, regno++, &tmp);
2106 store_unsigned_integer (valbuf,
2107 (len > X_REGISTER_SIZE
2108 ? X_REGISTER_SIZE : len), byte_order, tmp);
2109 len -= X_REGISTER_SIZE;
2110 valbuf += X_REGISTER_SIZE;
2111 }
2112 }
2113 else
2114 {
2115 /* For a structure or union the behaviour is as if the value had
2116 been stored to word-aligned memory and then loaded into
2117 registers with 64-bit load instruction(s). */
2118 int len = TYPE_LENGTH (type);
2119 int regno = AARCH64_X0_REGNUM;
2120 bfd_byte buf[X_REGISTER_SIZE];
2121
2122 while (len > 0)
2123 {
2124 regs->cooked_read (regno++, buf);
2125 memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2126 len -= X_REGISTER_SIZE;
2127 valbuf += X_REGISTER_SIZE;
2128 }
2129 }
2130 }
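
/* Worked example (illustrative, per AAPCS64): struct { float a, b; }
   is a two-element HFA with fundamental type float, so the loop above
   copies 4 bytes from each of V0 and V1 into VALBUF and never touches
   the X registers.  */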
2131
2132
2133 /* Will a function return an aggregate type in memory or in a
2134 register? Return 0 if an aggregate type can be returned in a
2135 register, 1 if it must be returned in memory. */
2136
2137 static int
2138 aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
2139 {
2140 type = check_typedef (type);
2141 int elements;
2142 struct type *fundamental_type;
2143
2144 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2145 &fundamental_type))
2146 {
2147 /* v0-v7 are used to return values and one register is allocated
2148 for one member. However, HFA or HVA has at most four members. */
2149 return 0;
2150 }
2151
2152 if (TYPE_LENGTH (type) > 16)
2153 {
2154 /* PCS B.6 Aggregates larger than 16 bytes are passed by
2155 invisible reference. */
2156
2157 return 1;
2158 }
2159
2160 return 0;
2161 }
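
/* For example, a 24-byte struct exceeds the 16-byte limit above and is
   returned in memory; per AAPCS64 the caller supplies the result
   buffer's address in X8, the indirect result location register.  */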
2162
2163 /* Write into appropriate registers a function return value of type
2164 TYPE, given in virtual format. */
2165
2166 static void
2167 aarch64_store_return_value (struct type *type, struct regcache *regs,
2168 const gdb_byte *valbuf)
2169 {
2170 struct gdbarch *gdbarch = regs->arch ();
2171 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2172 int elements;
2173 struct type *fundamental_type;
2174
2175 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2176 &fundamental_type))
2177 {
2178 int len = TYPE_LENGTH (fundamental_type);
2179
2180 for (int i = 0; i < elements; i++)
2181 {
2182 int regno = AARCH64_V0_REGNUM + i;
2183 /* Enough space for a full vector register. */
2184 gdb_byte tmpbuf[register_size (gdbarch, regno)];
2185 gdb_assert (len <= sizeof (tmpbuf));
2186
2187 if (aarch64_debug)
2188 {
2189 debug_printf ("write HFA or HVA return value element %d to %s\n",
2190 i + 1,
2191 gdbarch_register_name (gdbarch, regno));
2192 }
2193
2194 memcpy (tmpbuf, valbuf,
2195 len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
2196 regs->cooked_write (regno, tmpbuf);
2197 valbuf += len;
2198 }
2199 }
2200 else if (TYPE_CODE (type) == TYPE_CODE_INT
2201 || TYPE_CODE (type) == TYPE_CODE_CHAR
2202 || TYPE_CODE (type) == TYPE_CODE_BOOL
2203 || TYPE_CODE (type) == TYPE_CODE_PTR
2204 || TYPE_IS_REFERENCE (type)
2205 || TYPE_CODE (type) == TYPE_CODE_ENUM)
2206 {
2207 if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
2208 {
2209	      /* Values of one word or less are zero/sign-extended and
2210		 returned in X0.  */
2211 bfd_byte tmpbuf[X_REGISTER_SIZE];
2212 LONGEST val = unpack_long (type, valbuf);
2213
2214 store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
2215 regs->cooked_write (AARCH64_X0_REGNUM, tmpbuf);
2216 }
2217 else
2218 {
2219	      /* Integral values greater than one word are stored in
2220		 consecutive registers starting with X0.  This will always
2221		 be a multiple of the register size.  */
2222 int len = TYPE_LENGTH (type);
2223 int regno = AARCH64_X0_REGNUM;
2224
2225 while (len > 0)
2226 {
2227 regs->cooked_write (regno++, valbuf);
2228 len -= X_REGISTER_SIZE;
2229 valbuf += X_REGISTER_SIZE;
2230 }
2231 }
2232 }
2233 else
2234 {
2235 /* For a structure or union the behaviour is as if the value had
2236 been stored to word-aligned memory and then loaded into
2237 registers with 64-bit load instruction(s). */
2238 int len = TYPE_LENGTH (type);
2239 int regno = AARCH64_X0_REGNUM;
2240 bfd_byte tmpbuf[X_REGISTER_SIZE];
2241
2242 while (len > 0)
2243 {
2244 memcpy (tmpbuf, valbuf,
2245 len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2246 regs->cooked_write (regno++, tmpbuf);
2247 len -= X_REGISTER_SIZE;
2248 valbuf += X_REGISTER_SIZE;
2249 }
2250 }
2251 }
2252
2253 /* Implement the "return_value" gdbarch method. */
2254
2255 static enum return_value_convention
2256 aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
2257 struct type *valtype, struct regcache *regcache,
2258 gdb_byte *readbuf, const gdb_byte *writebuf)
2259 {
2260
2261 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
2262 || TYPE_CODE (valtype) == TYPE_CODE_UNION
2263 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
2264 {
2265 if (aarch64_return_in_memory (gdbarch, valtype))
2266 {
2267 if (aarch64_debug)
2268 debug_printf ("return value in memory\n");
2269 return RETURN_VALUE_STRUCT_CONVENTION;
2270 }
2271 }
2272
2273 if (writebuf)
2274 aarch64_store_return_value (valtype, regcache, writebuf);
2275
2276 if (readbuf)
2277 aarch64_extract_return_value (valtype, regcache, readbuf);
2278
2279 if (aarch64_debug)
2280 debug_printf ("return value in registers\n");
2281
2282 return RETURN_VALUE_REGISTER_CONVENTION;
2283 }
2284
2285 /* Implement the "get_longjmp_target" gdbarch method. */
2286
2287 static int
2288 aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2289 {
2290 CORE_ADDR jb_addr;
2291 gdb_byte buf[X_REGISTER_SIZE];
2292 struct gdbarch *gdbarch = get_frame_arch (frame);
2293 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2294 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2295
2296 jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
2297
2298 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
2299 X_REGISTER_SIZE))
2300 return 0;
2301
2302 *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
2303 return 1;
2304 }
2305
2306 /* Implement the "gen_return_address" gdbarch method. */
2307
2308 static void
2309 aarch64_gen_return_address (struct gdbarch *gdbarch,
2310 struct agent_expr *ax, struct axs_value *value,
2311 CORE_ADDR scope)
2312 {
2313 value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
2314 value->kind = axs_lvalue_register;
2315 value->u.reg = AARCH64_LR_REGNUM;
2316 }
2317 \f
2318
2319 /* Return the pseudo register name corresponding to register regnum. */
2320
2321 static const char *
2322 aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
2323 {
2324 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2325
2326 static const char *const q_name[] =
2327 {
2328 "q0", "q1", "q2", "q3",
2329 "q4", "q5", "q6", "q7",
2330 "q8", "q9", "q10", "q11",
2331 "q12", "q13", "q14", "q15",
2332 "q16", "q17", "q18", "q19",
2333 "q20", "q21", "q22", "q23",
2334 "q24", "q25", "q26", "q27",
2335 "q28", "q29", "q30", "q31",
2336 };
2337
2338 static const char *const d_name[] =
2339 {
2340 "d0", "d1", "d2", "d3",
2341 "d4", "d5", "d6", "d7",
2342 "d8", "d9", "d10", "d11",
2343 "d12", "d13", "d14", "d15",
2344 "d16", "d17", "d18", "d19",
2345 "d20", "d21", "d22", "d23",
2346 "d24", "d25", "d26", "d27",
2347 "d28", "d29", "d30", "d31",
2348 };
2349
2350 static const char *const s_name[] =
2351 {
2352 "s0", "s1", "s2", "s3",
2353 "s4", "s5", "s6", "s7",
2354 "s8", "s9", "s10", "s11",
2355 "s12", "s13", "s14", "s15",
2356 "s16", "s17", "s18", "s19",
2357 "s20", "s21", "s22", "s23",
2358 "s24", "s25", "s26", "s27",
2359 "s28", "s29", "s30", "s31",
2360 };
2361
2362 static const char *const h_name[] =
2363 {
2364 "h0", "h1", "h2", "h3",
2365 "h4", "h5", "h6", "h7",
2366 "h8", "h9", "h10", "h11",
2367 "h12", "h13", "h14", "h15",
2368 "h16", "h17", "h18", "h19",
2369 "h20", "h21", "h22", "h23",
2370 "h24", "h25", "h26", "h27",
2371 "h28", "h29", "h30", "h31",
2372 };
2373
2374 static const char *const b_name[] =
2375 {
2376 "b0", "b1", "b2", "b3",
2377 "b4", "b5", "b6", "b7",
2378 "b8", "b9", "b10", "b11",
2379 "b12", "b13", "b14", "b15",
2380 "b16", "b17", "b18", "b19",
2381 "b20", "b21", "b22", "b23",
2382 "b24", "b25", "b26", "b27",
2383 "b28", "b29", "b30", "b31",
2384 };
2385
2386 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
2387
2388 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
2389 return q_name[p_regnum - AARCH64_Q0_REGNUM];
2390
2391 if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
2392 return d_name[p_regnum - AARCH64_D0_REGNUM];
2393
2394 if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
2395 return s_name[p_regnum - AARCH64_S0_REGNUM];
2396
2397 if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
2398 return h_name[p_regnum - AARCH64_H0_REGNUM];
2399
2400 if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
2401 return b_name[p_regnum - AARCH64_B0_REGNUM];
2402
2403 if (tdep->has_sve ())
2404 {
2405 static const char *const sve_v_name[] =
2406 {
2407 "v0", "v1", "v2", "v3",
2408 "v4", "v5", "v6", "v7",
2409 "v8", "v9", "v10", "v11",
2410 "v12", "v13", "v14", "v15",
2411 "v16", "v17", "v18", "v19",
2412 "v20", "v21", "v22", "v23",
2413 "v24", "v25", "v26", "v27",
2414 "v28", "v29", "v30", "v31",
2415 };
2416
2417 if (p_regnum >= AARCH64_SVE_V0_REGNUM
2418 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2419 return sve_v_name[p_regnum - AARCH64_SVE_V0_REGNUM];
2420 }
2421
2422 /* RA_STATE is used for unwinding only. Do not assign it a name - this
2423 prevents it from being read by methods such as
2424 mi_cmd_trace_frame_collected. */
2425 if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
2426 return "";
2427
2428 internal_error (__FILE__, __LINE__,
2429 _("aarch64_pseudo_register_name: bad register number %d"),
2430 p_regnum);
2431 }
2432
2433 /* Implement the "pseudo_register_type" tdesc_arch_data method. */
2434
2435 static struct type *
2436 aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
2437 {
2438 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2439
2440 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
2441
2442 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
2443 return aarch64_vnq_type (gdbarch);
2444
2445 if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
2446 return aarch64_vnd_type (gdbarch);
2447
2448 if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
2449 return aarch64_vns_type (gdbarch);
2450
2451 if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
2452 return aarch64_vnh_type (gdbarch);
2453
2454 if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
2455 return aarch64_vnb_type (gdbarch);
2456
2457 if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
2458 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2459 return aarch64_vnv_type (gdbarch);
2460
2461 if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
2462 return builtin_type (gdbarch)->builtin_uint64;
2463
2464 internal_error (__FILE__, __LINE__,
2465 _("aarch64_pseudo_register_type: bad register number %d"),
2466 p_regnum);
2467 }
2468
2469 /* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2470
2471 static int
2472 aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
2473 struct reggroup *group)
2474 {
2475 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2476
2477 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
2478
2479 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
2480 return group == all_reggroup || group == vector_reggroup;
2481 else if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
2482 return (group == all_reggroup || group == vector_reggroup
2483 || group == float_reggroup);
2484 else if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
2485 return (group == all_reggroup || group == vector_reggroup
2486 || group == float_reggroup);
2487 else if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
2488 return group == all_reggroup || group == vector_reggroup;
2489 else if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
2490 return group == all_reggroup || group == vector_reggroup;
2491 else if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
2492 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2493 return group == all_reggroup || group == vector_reggroup;
2494 /* RA_STATE is used for unwinding only. Do not assign it to any groups. */
2495 if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
2496 return 0;
2497
2498 return group == all_reggroup;
2499 }
2500
2501 /* Helper for aarch64_pseudo_read_value. */
2502
2503 static struct value *
2504 aarch64_pseudo_read_value_1 (struct gdbarch *gdbarch,
2505 readable_regcache *regcache, int regnum_offset,
2506 int regsize, struct value *result_value)
2507 {
2508 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
2509
2510 /* Enough space for a full vector register. */
2511 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2512 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2513
2514 if (regcache->raw_read (v_regnum, reg_buf) != REG_VALID)
2515 mark_value_bytes_unavailable (result_value, 0,
2516 TYPE_LENGTH (value_type (result_value)));
2517 else
2518 memcpy (value_contents_raw (result_value), reg_buf, regsize);
2519
2520 return result_value;
2521 }
2522
2523 /* Implement the "pseudo_register_read_value" gdbarch method. */
2524
2525 static struct value *
2526 aarch64_pseudo_read_value (struct gdbarch *gdbarch, readable_regcache *regcache,
2527 int regnum)
2528 {
2529 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2530 struct value *result_value = allocate_value (register_type (gdbarch, regnum));
2531
2532 VALUE_LVAL (result_value) = lval_register;
2533 VALUE_REGNUM (result_value) = regnum;
2534
2535 regnum -= gdbarch_num_regs (gdbarch);
2536
2537 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2538 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2539 regnum - AARCH64_Q0_REGNUM,
2540 Q_REGISTER_SIZE, result_value);
2541
2542 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2543 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2544 regnum - AARCH64_D0_REGNUM,
2545 D_REGISTER_SIZE, result_value);
2546
2547 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2548 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2549 regnum - AARCH64_S0_REGNUM,
2550 S_REGISTER_SIZE, result_value);
2551
2552 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2553 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2554 regnum - AARCH64_H0_REGNUM,
2555 H_REGISTER_SIZE, result_value);
2556
2557 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2558 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2559 regnum - AARCH64_B0_REGNUM,
2560 B_REGISTER_SIZE, result_value);
2561
2562 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2563 && regnum < AARCH64_SVE_V0_REGNUM + 32)
2564 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2565 regnum - AARCH64_SVE_V0_REGNUM,
2566 V_REGISTER_SIZE, result_value);
2567
2568	  gdb_assert_not_reached ("regnum out of bounds");
2569 }
2570
2571 /* Helper for aarch64_pseudo_write. */
2572
2573 static void
2574 aarch64_pseudo_write_1 (struct gdbarch *gdbarch, struct regcache *regcache,
2575 int regnum_offset, int regsize, const gdb_byte *buf)
2576 {
2577 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
2578
2579 /* Enough space for a full vector register. */
2580 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2581 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2582
2583	  /* Ensure the register buffer is zero.  We want GDB writes of the
2584	     various 'scalar' pseudo registers to behave like architectural
2585	     writes: register-width bytes are written and the remainder is
2586	     set to zero.  */
2587 memset (reg_buf, 0, register_size (gdbarch, AARCH64_V0_REGNUM));
2588
2589 memcpy (reg_buf, buf, regsize);
2590 regcache->raw_write (v_regnum, reg_buf);
2591 }
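
/* Illustrative example: writing the 4 bytes 0x3f800000 (1.0f) to the
   pseudo register S5 lands in the low 4 bytes of V5, and the memset
   above clears the remaining bytes, mirroring the architectural effect
   of a scalar write such as FMOV S5, #1.0.  */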
2592
2593 /* Implement the "pseudo_register_write" gdbarch method. */
2594
2595 static void
2596 aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2597 int regnum, const gdb_byte *buf)
2598 {
2599 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2600 regnum -= gdbarch_num_regs (gdbarch);
2601
2602 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2603 return aarch64_pseudo_write_1 (gdbarch, regcache,
2604 regnum - AARCH64_Q0_REGNUM, Q_REGISTER_SIZE,
2605 buf);
2606
2607 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2608 return aarch64_pseudo_write_1 (gdbarch, regcache,
2609 regnum - AARCH64_D0_REGNUM, D_REGISTER_SIZE,
2610 buf);
2611
2612 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2613 return aarch64_pseudo_write_1 (gdbarch, regcache,
2614 regnum - AARCH64_S0_REGNUM, S_REGISTER_SIZE,
2615 buf);
2616
2617 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2618 return aarch64_pseudo_write_1 (gdbarch, regcache,
2619 regnum - AARCH64_H0_REGNUM, H_REGISTER_SIZE,
2620 buf);
2621
2622 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2623 return aarch64_pseudo_write_1 (gdbarch, regcache,
2624 regnum - AARCH64_B0_REGNUM, B_REGISTER_SIZE,
2625 buf);
2626
2627 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2628 && regnum < AARCH64_SVE_V0_REGNUM + 32)
2629 return aarch64_pseudo_write_1 (gdbarch, regcache,
2630 regnum - AARCH64_SVE_V0_REGNUM,
2631 V_REGISTER_SIZE, buf);
2632
2633	  gdb_assert_not_reached ("regnum out of bounds");
2634 }
2635
2636 /* Callback function for user_reg_add. */
2637
2638 static struct value *
2639 value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
2640 {
2641 const int *reg_p = (const int *) baton;
2642
2643 return value_of_register (*reg_p, frame);
2644 }
2645 \f
2646
2647 /* Implement the "software_single_step" gdbarch method, needed to
2648 single step through atomic sequences on AArch64. */
2649
2650 static std::vector<CORE_ADDR>
2651 aarch64_software_single_step (struct regcache *regcache)
2652 {
2653 struct gdbarch *gdbarch = regcache->arch ();
2654 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2655 const int insn_size = 4;
2656 const int atomic_sequence_length = 16; /* Instruction sequence length. */
2657 CORE_ADDR pc = regcache_read_pc (regcache);
2658 CORE_ADDR breaks[2] = { CORE_ADDR_MAX, CORE_ADDR_MAX };
2659 CORE_ADDR loc = pc;
2660 CORE_ADDR closing_insn = 0;
2661 uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
2662 byte_order_for_code);
2663 int index;
2664 int insn_count;
2665 int bc_insn_count = 0; /* Conditional branch instruction count. */
2666 int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed). */
2667 aarch64_inst inst;
2668
2669 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2670 return {};
2671
2672 /* Look for a Load Exclusive instruction which begins the sequence. */
2673 if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
2674 return {};
2675
2676 for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
2677 {
2678 loc += insn_size;
2679 insn = read_memory_unsigned_integer (loc, insn_size,
2680 byte_order_for_code);
2681
2682 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2683 return {};
2684 /* Check if the instruction is a conditional branch. */
2685 if (inst.opcode->iclass == condbranch)
2686 {
2687 gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);
2688
2689 if (bc_insn_count >= 1)
2690 return {};
2691
2692 /* It is, so we'll try to set a breakpoint at the destination. */
2693 breaks[1] = loc + inst.operands[0].imm.value;
2694
2695 bc_insn_count++;
2696 last_breakpoint++;
2697 }
2698
2699 /* Look for the Store Exclusive which closes the atomic sequence. */
2700 if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
2701 {
2702 closing_insn = loc;
2703 break;
2704 }
2705 }
2706
2707	  /* We didn't find a closing Store Exclusive instruction; fall back.  */
2708 if (!closing_insn)
2709 return {};
2710
2711 /* Insert breakpoint after the end of the atomic sequence. */
2712 breaks[0] = loc + insn_size;
2713
2714 /* Check for duplicated breakpoints, and also check that the second
2715 breakpoint is not within the atomic sequence. */
2716 if (last_breakpoint
2717 && (breaks[1] == breaks[0]
2718 || (breaks[1] >= pc && breaks[1] <= closing_insn)))
2719 last_breakpoint = 0;
2720
2721 std::vector<CORE_ADDR> next_pcs;
2722
2723 /* Insert the breakpoint at the end of the sequence, and one at the
2724 destination of the conditional branch, if it exists. */
2725 for (index = 0; index <= last_breakpoint; index++)
2726 next_pcs.push_back (breaks[index]);
2727
2728 return next_pcs;
2729 }
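
/* Illustrative walk-through over a hypothetical atomic sequence:

     loop:  ldaxr  x0, [x1]        ; load exclusive opens the sequence
            add    x0, x0, #1
            stlxr  w2, x0, [x1]    ; store exclusive closes it
            cbnz   w2, loop        ; retry on failure

   The function above places the step breakpoint on the instruction
   immediately after the STLXR (here the CBNZ), so the sequence itself
   is always executed without a breakpoint inside it.  */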
2730
2731 struct aarch64_displaced_step_closure : public displaced_step_closure
2732 {
2733	  /* True if a conditional instruction, such as B.COND, TBZ, etc.,
2734	     is being displaced stepped.  */
2735 int cond = 0;
2736
2737 /* PC adjustment offset after displaced stepping. */
2738 int32_t pc_adjust = 0;
2739 };
2740
2741 /* Data when visiting instructions for displaced stepping. */
2742
2743 struct aarch64_displaced_step_data
2744 {
2745 struct aarch64_insn_data base;
2746
2747	  /* The address at which the instruction will be executed.  */
2748 CORE_ADDR new_addr;
2749 /* Buffer of instructions to be copied to NEW_ADDR to execute. */
2750 uint32_t insn_buf[DISPLACED_MODIFIED_INSNS];
2751 /* Number of instructions in INSN_BUF. */
2752 unsigned insn_count;
2753 /* Registers when doing displaced stepping. */
2754 struct regcache *regs;
2755
2756 aarch64_displaced_step_closure *dsc;
2757 };
2758
2759 /* Implementation of aarch64_insn_visitor method "b". */
2760
2761 static void
2762 aarch64_displaced_step_b (const int is_bl, const int32_t offset,
2763 struct aarch64_insn_data *data)
2764 {
2765 struct aarch64_displaced_step_data *dsd
2766 = (struct aarch64_displaced_step_data *) data;
2767 int64_t new_offset = data->insn_addr - dsd->new_addr + offset;
2768
2769 if (can_encode_int32 (new_offset, 28))
2770 {
2771 /* Emit B rather than BL, because executing BL on a new address
2772 will get the wrong address into LR. In order to avoid this,
2773 we emit B, and update LR if the instruction is BL. */
2774 emit_b (dsd->insn_buf, 0, new_offset);
2775 dsd->insn_count++;
2776 }
2777 else
2778 {
2779 /* Write NOP. */
2780 emit_nop (dsd->insn_buf);
2781 dsd->insn_count++;
2782 dsd->dsc->pc_adjust = offset;
2783 }
2784
2785 if (is_bl)
2786 {
2787 /* Update LR. */
2788 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
2789 data->insn_addr + 4);
2790 }
2791 }
2792
2793 /* Implementation of aarch64_insn_visitor method "b_cond". */
2794
2795 static void
2796 aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
2797 struct aarch64_insn_data *data)
2798 {
2799 struct aarch64_displaced_step_data *dsd
2800 = (struct aarch64_displaced_step_data *) data;
2801
2802	  /* GDB has to fix up the PC after displaced stepping this
2803	     instruction differently, according to whether the condition is
2804	     true or false.  Instead of checking COND against the condition
2805	     flags, we can emit the following instructions, and then GDB can
2806	     tell how to fix up the PC from the resulting PC value.
2807
2808 B.COND TAKEN ; If cond is true, then jump to TAKEN.
2809 INSN1 ;
2810 TAKEN:
2811 INSN2
2812 */
2813
2814 emit_bcond (dsd->insn_buf, cond, 8);
2815 dsd->dsc->cond = 1;
2816 dsd->dsc->pc_adjust = offset;
2817 dsd->insn_count = 1;
2818 }
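
/* Sketch of the resulting fixup (see aarch64_displaced_step_fixup
   below): after the scratch copy runs, PC == TO + 8 means the B.COND
   was taken and the PC becomes FROM + OFFSET; PC == TO + 4 means it
   fell through, PC_ADJUST is rewritten to 4, and the PC becomes
   FROM + 4.  */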
2819
2820	/* Build an aarch64_register operand dynamically.  If the register
2821	   were known statically, a global constant would be preferable to
2822	   this helper function.  */
2823
2824 static struct aarch64_register
2825 aarch64_register (unsigned num, int is64)
2826 {
2827 return (struct aarch64_register) { num, is64 };
2828 }
2829
2830 /* Implementation of aarch64_insn_visitor method "cb". */
2831
2832 static void
2833 aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
2834 const unsigned rn, int is64,
2835 struct aarch64_insn_data *data)
2836 {
2837 struct aarch64_displaced_step_data *dsd
2838 = (struct aarch64_displaced_step_data *) data;
2839
2840 /* The offset is out of range for a compare and branch
2841 instruction. We can use the following instructions instead:
2842
2843 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
2844 INSN1 ;
2845 TAKEN:
2846 INSN2
2847 */
2848 emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
2849 dsd->insn_count = 1;
2850 dsd->dsc->cond = 1;
2851 dsd->dsc->pc_adjust = offset;
2852 }
2853
2854 /* Implementation of aarch64_insn_visitor method "tb". */
2855
2856 static void
2857 aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
2858 const unsigned rt, unsigned bit,
2859 struct aarch64_insn_data *data)
2860 {
2861 struct aarch64_displaced_step_data *dsd
2862 = (struct aarch64_displaced_step_data *) data;
2863
2864	  /* The offset is out of range for a test bit and branch
2865	     instruction.  We can use the following instructions instead:
2866
2867 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
2868 INSN1 ;
2869 TAKEN:
2870 INSN2
2871
2872 */
2873 emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
2874 dsd->insn_count = 1;
2875 dsd->dsc->cond = 1;
2876 dsd->dsc->pc_adjust = offset;
2877 }
2878
2879 /* Implementation of aarch64_insn_visitor method "adr". */
2880
2881 static void
2882 aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
2883 const int is_adrp, struct aarch64_insn_data *data)
2884 {
2885 struct aarch64_displaced_step_data *dsd
2886 = (struct aarch64_displaced_step_data *) data;
2887 /* We know exactly the address the ADR{P,} instruction will compute.
2888 We can just write it to the destination register. */
2889 CORE_ADDR address = data->insn_addr + offset;
2890
2891 if (is_adrp)
2892 {
2893 /* Clear the lower 12 bits of the offset to get the 4K page. */
2894 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2895 address & ~0xfff);
2896 }
2897 else
2898 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2899 address);
2900
2901 dsd->dsc->pc_adjust = 4;
2902 emit_nop (dsd->insn_buf);
2903 dsd->insn_count = 1;
2904 }
2905
2906 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
2907
2908 static void
2909 aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
2910 const unsigned rt, const int is64,
2911 struct aarch64_insn_data *data)
2912 {
2913 struct aarch64_displaced_step_data *dsd
2914 = (struct aarch64_displaced_step_data *) data;
2915 CORE_ADDR address = data->insn_addr + offset;
2916 struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };
2917
2918 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
2919 address);
2920
2921 if (is_sw)
2922 dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
2923 aarch64_register (rt, 1), zero);
2924 else
2925 dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
2926 aarch64_register (rt, 1), zero);
2927
2928 dsd->dsc->pc_adjust = 4;
2929 }
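
/* For example (illustrative): to displace-step LDR X0, <label>, the
   code above first writes the label's absolute address, computed from
   the original PC, into X0, then emits LDR X0, [X0], which produces
   the same result when executed from the scratch address.  */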
2930
2931 /* Implementation of aarch64_insn_visitor method "others". */
2932
2933 static void
2934 aarch64_displaced_step_others (const uint32_t insn,
2935 struct aarch64_insn_data *data)
2936 {
2937 struct aarch64_displaced_step_data *dsd
2938 = (struct aarch64_displaced_step_data *) data;
2939
2940 aarch64_emit_insn (dsd->insn_buf, insn);
2941 dsd->insn_count = 1;
2942
2943 if ((insn & 0xfffffc1f) == 0xd65f0000)
2944 {
2945 /* RET */
2946 dsd->dsc->pc_adjust = 0;
2947 }
2948 else
2949 dsd->dsc->pc_adjust = 4;
2950 }
2951
2952 static const struct aarch64_insn_visitor visitor =
2953 {
2954 aarch64_displaced_step_b,
2955 aarch64_displaced_step_b_cond,
2956 aarch64_displaced_step_cb,
2957 aarch64_displaced_step_tb,
2958 aarch64_displaced_step_adr,
2959 aarch64_displaced_step_ldr_literal,
2960 aarch64_displaced_step_others,
2961 };
2962
2963 /* Implement the "displaced_step_copy_insn" gdbarch method. */
2964
2965 struct displaced_step_closure *
2966 aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
2967 CORE_ADDR from, CORE_ADDR to,
2968 struct regcache *regs)
2969 {
2970 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2971 uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
2972 struct aarch64_displaced_step_data dsd;
2973 aarch64_inst inst;
2974
2975 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2976 return NULL;
2977
2978 /* Look for a Load Exclusive instruction which begins the sequence. */
2979 if (inst.opcode->iclass == ldstexcl && bit (insn, 22))
2980 {
2981	      /* We can't displaced-step atomic sequences.  */
2982 return NULL;
2983 }
2984
2985 std::unique_ptr<aarch64_displaced_step_closure> dsc
2986 (new aarch64_displaced_step_closure);
2987 dsd.base.insn_addr = from;
2988 dsd.new_addr = to;
2989 dsd.regs = regs;
2990 dsd.dsc = dsc.get ();
2991 dsd.insn_count = 0;
2992 aarch64_relocate_instruction (insn, &visitor,
2993 (struct aarch64_insn_data *) &dsd);
2994 gdb_assert (dsd.insn_count <= DISPLACED_MODIFIED_INSNS);
2995
2996 if (dsd.insn_count != 0)
2997 {
2998 int i;
2999
3000	      /* The instruction can be relocated to the scratch pad.  Copy
3001		 the relocated instruction(s) there.  */
3002 for (i = 0; i < dsd.insn_count; i++)
3003 {
3004 if (debug_displaced)
3005 {
3006 debug_printf ("displaced: writing insn ");
3007 debug_printf ("%.8x", dsd.insn_buf[i]);
3008 debug_printf (" at %s\n", paddress (gdbarch, to + i * 4));
3009 }
3010 write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
3011 (ULONGEST) dsd.insn_buf[i]);
3012 }
3013 }
3014 else
3015 {
3016 dsc = NULL;
3017 }
3018
3019 return dsc.release ();
3020 }
3021
3022 /* Implement the "displaced_step_fixup" gdbarch method. */
3023
3024 void
3025 aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
3026 struct displaced_step_closure *dsc_,
3027 CORE_ADDR from, CORE_ADDR to,
3028 struct regcache *regs)
3029 {
3030 aarch64_displaced_step_closure *dsc = (aarch64_displaced_step_closure *) dsc_;
3031
3032 if (dsc->cond)
3033 {
3034 ULONGEST pc;
3035
3036 regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc);
3037 if (pc - to == 8)
3038 {
3039 /* Condition is true. */
3040 }
3041 else if (pc - to == 4)
3042 {
3043 /* Condition is false. */
3044 dsc->pc_adjust = 4;
3045 }
3046 else
3047 gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
3048 }
3049
3050 if (dsc->pc_adjust != 0)
3051 {
3052 if (debug_displaced)
3053 {
3054 debug_printf ("displaced: fixup: set PC to %s:%d\n",
3055 paddress (gdbarch, from), dsc->pc_adjust);
3056 }
3057 regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
3058 from + dsc->pc_adjust);
3059 }
3060 }
3061
3062 /* Implement the "displaced_step_hw_singlestep" gdbarch method. */
3063
3064 int
3065 aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch,
3066 struct displaced_step_closure *closure)
3067 {
3068 return 1;
3069 }
3070
3071 /* Get the correct target description for the given VQ value.
3072 If VQ is zero then it is assumed SVE is not supported.
3073 (It is not possible to set VQ to zero on an SVE system). */
3074
3075 const target_desc *
3076 aarch64_read_description (uint64_t vq, bool pauth_p)
3077 {
3078 if (vq > AARCH64_MAX_SVE_VQ)
3079 error (_("VQ is %" PRIu64 ", maximum supported value is %d"), vq,
3080 AARCH64_MAX_SVE_VQ);
3081
3082 struct target_desc *tdesc = tdesc_aarch64_list[vq][pauth_p];
3083
3084 if (tdesc == NULL)
3085 {
3086 tdesc = aarch64_create_target_description (vq, pauth_p);
3087 tdesc_aarch64_list[vq][pauth_p] = tdesc;
3088 }
3089
3090 return tdesc;
3091 }
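
/* Usage sketch: aarch64_read_description (0, false) returns the base
   description with neither SVE nor pauth, while
   aarch64_read_description (2, false) returns one whose Z registers
   describe 256-bit (2 x 128-bit) SVE vectors; repeated calls with the
   same arguments return the cached descriptor.  */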
3092
3093 /* Return the VQ used when creating the target description TDESC. */
3094
3095 static uint64_t
3096 aarch64_get_tdesc_vq (const struct target_desc *tdesc)
3097 {
3098 const struct tdesc_feature *feature_sve;
3099
3100 if (!tdesc_has_registers (tdesc))
3101 return 0;
3102
3103 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
3104
3105 if (feature_sve == nullptr)
3106 return 0;
3107
3108 uint64_t vl = tdesc_register_bitsize (feature_sve,
3109 aarch64_sve_register_names[0]) / 8;
3110 return sve_vq_from_vl (vl);
3111 }
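
/* For example, a description reporting Z0 as 512 bits wide gives
   VL = 64 bytes, and sve_vq_from_vl yields VQ = 4 (VL / 16).  */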
3112
3113 /* Add all the expected register sets into GDBARCH. */
3114
3115 static void
3116 aarch64_add_reggroups (struct gdbarch *gdbarch)
3117 {
3118 reggroup_add (gdbarch, general_reggroup);
3119 reggroup_add (gdbarch, float_reggroup);
3120 reggroup_add (gdbarch, system_reggroup);
3121 reggroup_add (gdbarch, vector_reggroup);
3122 reggroup_add (gdbarch, all_reggroup);
3123 reggroup_add (gdbarch, save_reggroup);
3124 reggroup_add (gdbarch, restore_reggroup);
3125 }
3126
3127 /* Implement the "cannot_store_register" gdbarch method. */
3128
3129 static int
3130 aarch64_cannot_store_register (struct gdbarch *gdbarch, int regnum)
3131 {
3132 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3133
3134 if (!tdep->has_pauth ())
3135 return 0;
3136
3137 /* Pointer authentication registers are read-only. */
3138 return (regnum == AARCH64_PAUTH_DMASK_REGNUM (tdep->pauth_reg_base)
3139 || regnum == AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base));
3140 }
3141
3142 /* Initialize the current architecture based on INFO. If possible,
3143 re-use an architecture from ARCHES, which is a list of
3144 architectures already created during this debugging session.
3145
3146 Called e.g. at program startup, when reading a core file, and when
3147 reading a binary file. */
3148
3149 static struct gdbarch *
3150 aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
3151 {
3152 struct gdbarch_tdep *tdep;
3153 struct gdbarch *gdbarch;
3154 struct gdbarch_list *best_arch;
3155 struct tdesc_arch_data *tdesc_data = NULL;
3156 const struct target_desc *tdesc = info.target_desc;
3157 int i;
3158 int valid_p = 1;
3159 const struct tdesc_feature *feature_core;
3160 const struct tdesc_feature *feature_fpu;
3161 const struct tdesc_feature *feature_sve;
3162 const struct tdesc_feature *feature_pauth;
3163 int num_regs = 0;
3164 int num_pseudo_regs = 0;
3165 int first_pauth_regnum = -1;
3166 int pauth_ra_state_offset = -1;
3167
3168 /* Ensure we always have a target description. */
3169 if (!tdesc_has_registers (tdesc))
3170 tdesc = aarch64_read_description (0, false);
3171 gdb_assert (tdesc);
3172
3173 feature_core = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");
3174 feature_fpu = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
3175 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
3176 feature_pauth = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.pauth");
3177
3178 if (feature_core == NULL)
3179 return NULL;
3180
3181 tdesc_data = tdesc_data_alloc ();
3182
3183 /* Validate the description provides the mandatory core R registers
3184 and allocate their numbers. */
3185 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
3186 valid_p &= tdesc_numbered_register (feature_core, tdesc_data,
3187 AARCH64_X0_REGNUM + i,
3188 aarch64_r_register_names[i]);
3189
3190 num_regs = AARCH64_X0_REGNUM + i;
3191
3192 /* Add the V registers. */
3193 if (feature_fpu != NULL)
3194 {
3195 if (feature_sve != NULL)
3196 error (_("Program contains both fpu and SVE features."));
3197
3198 /* Validate the description provides the mandatory V registers
3199 and allocate their numbers. */
3200 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
3201 valid_p &= tdesc_numbered_register (feature_fpu, tdesc_data,
3202 AARCH64_V0_REGNUM + i,
3203 aarch64_v_register_names[i]);
3204
3205 num_regs = AARCH64_V0_REGNUM + i;
3206 }
3207
3208 /* Add the SVE registers. */
3209 if (feature_sve != NULL)
3210 {
3211 /* Validate the description provides the mandatory SVE registers
3212 and allocate their numbers. */
3213 for (i = 0; i < ARRAY_SIZE (aarch64_sve_register_names); i++)
3214 valid_p &= tdesc_numbered_register (feature_sve, tdesc_data,
3215 AARCH64_SVE_Z0_REGNUM + i,
3216 aarch64_sve_register_names[i]);
3217
3218 num_regs = AARCH64_SVE_Z0_REGNUM + i;
3219 num_pseudo_regs += 32; /* add the Vn register pseudos. */
3220 }
3221
3222 if (feature_fpu != NULL || feature_sve != NULL)
3223 {
3224 num_pseudo_regs += 32; /* add the Qn scalar register pseudos */
3225 num_pseudo_regs += 32; /* add the Dn scalar register pseudos */
3226 num_pseudo_regs += 32; /* add the Sn scalar register pseudos */
3227 num_pseudo_regs += 32; /* add the Hn scalar register pseudos */
3228 num_pseudo_regs += 32; /* add the Bn scalar register pseudos */
3229 }
3230
3231 /* Add the pauth registers. */
3232 if (feature_pauth != NULL)
3233 {
3234 first_pauth_regnum = num_regs;
3235 pauth_ra_state_offset = num_pseudo_regs;
3236	      /* Validate the description provides the mandatory PAUTH registers
3237		 and allocate their numbers.  */
3238 for (i = 0; i < ARRAY_SIZE (aarch64_pauth_register_names); i++)
3239 valid_p &= tdesc_numbered_register (feature_pauth, tdesc_data,
3240 first_pauth_regnum + i,
3241 aarch64_pauth_register_names[i]);
3242
3243 num_regs += i;
3244 num_pseudo_regs += 1; /* Count RA_STATE pseudo register. */
3245 }
3246
3247 if (!valid_p)
3248 {
3249 tdesc_data_cleanup (tdesc_data);
3250 return NULL;
3251 }
3252
3253 /* AArch64 code is always little-endian. */
3254 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
3255
3256 /* If there is already a candidate, use it. */
3257 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
3258 best_arch != NULL;
3259 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
3260 {
3261 /* Found a match. */
3262 break;
3263 }
3264
3265 if (best_arch != NULL)
3266 {
3267 if (tdesc_data != NULL)
3268 tdesc_data_cleanup (tdesc_data);
3269 return best_arch->gdbarch;
3270 }
3271
3272 tdep = XCNEW (struct gdbarch_tdep);
3273 gdbarch = gdbarch_alloc (&info, tdep);
3274
3275 /* This should be low enough for everything. */
3276 tdep->lowest_pc = 0x20;
3277 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
3278 tdep->jb_elt_size = 8;
3279 tdep->vq = aarch64_get_tdesc_vq (tdesc);
3280 tdep->pauth_reg_base = first_pauth_regnum;
3281 tdep->pauth_ra_state_regnum = (feature_pauth == NULL) ? -1
3282 : pauth_ra_state_offset + num_regs;
3283
3284
3285 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
3286 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
3287
3288 /* Advance PC across function entry code. */
3289 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
3290
3291 /* The stack grows downward. */
3292 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
3293
3294 /* Breakpoint manipulation. */
3295 set_gdbarch_breakpoint_kind_from_pc (gdbarch,
3296 aarch64_breakpoint::kind_from_pc);
3297 set_gdbarch_sw_breakpoint_from_kind (gdbarch,
3298 aarch64_breakpoint::bp_from_kind);
3299 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
3300 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
3301
3302 /* Information about registers, etc. */
3303 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
3304 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
3305 set_gdbarch_num_regs (gdbarch, num_regs);
3306
3307 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
3308 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
3309 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
3310 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
3311 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
3312 set_tdesc_pseudo_register_reggroup_p (gdbarch,
3313 aarch64_pseudo_register_reggroup_p);
3314 set_gdbarch_cannot_store_register (gdbarch, aarch64_cannot_store_register);
3315
3316 /* ABI */
3317 set_gdbarch_short_bit (gdbarch, 16);
3318 set_gdbarch_int_bit (gdbarch, 32);
3319 set_gdbarch_float_bit (gdbarch, 32);
3320 set_gdbarch_double_bit (gdbarch, 64);
3321 set_gdbarch_long_double_bit (gdbarch, 128);
3322 set_gdbarch_long_bit (gdbarch, 64);
3323 set_gdbarch_long_long_bit (gdbarch, 64);
3324 set_gdbarch_ptr_bit (gdbarch, 64);
3325 set_gdbarch_char_signed (gdbarch, 0);
3326 set_gdbarch_wchar_signed (gdbarch, 0);
3327 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
3328 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
3329 set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);
3330
3331 /* Internal <-> external register number maps. */
3332 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
3333
3334 /* Returning results. */
3335 set_gdbarch_return_value (gdbarch, aarch64_return_value);
3336
3337 /* Disassembly. */
3338 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
3339
3340 /* Virtual tables. */
3341 set_gdbarch_vbit_in_delta (gdbarch, 1);
3342
3343 /* Register architecture. */
3344 aarch64_add_reggroups (gdbarch);
3345
3346 /* Hook in the ABI-specific overrides, if they have been registered. */
3347 info.target_desc = tdesc;
3348 info.tdesc_data = tdesc_data;
3349 gdbarch_init_osabi (info, gdbarch);
3350
3351 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
3352 /* Register DWARF CFA vendor handler. */
3353 set_gdbarch_execute_dwarf_cfa_vendor_op (gdbarch,
3354 aarch64_execute_dwarf_cfa_vendor_op);
3355
3356 /* Add some default predicates. */
3357 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
3358 dwarf2_append_unwinders (gdbarch);
3359 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
3360
3361 frame_base_set_default (gdbarch, &aarch64_normal_base);
3362
3363	  /* Now that we have tuned the configuration, set a few final things
3364	     based on what the OS ABI has told us.  */
3365
3366 if (tdep->jb_pc >= 0)
3367 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
3368
3369 set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);
3370
3371 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
3372
3373 /* Add standard register aliases. */
3374 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
3375 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
3376 value_of_aarch64_user_reg,
3377 &aarch64_register_aliases[i].regnum);
3378
3379 register_aarch64_ravenscar_ops (gdbarch);
3380
3381 return gdbarch;
3382 }
3383
3384 static void
3385 aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
3386 {
3387 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3388
3389 if (tdep == NULL)
3390 return;
3391
3392 fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
3393 paddress (gdbarch, tdep->lowest_pc));
3394 }
3395
3396 #if GDB_SELF_TEST
3397 namespace selftests
3398 {
3399 static void aarch64_process_record_test (void);
3400 }
3401 #endif
3402
3403 void
3404 _initialize_aarch64_tdep (void)
3405 {
3406 gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
3407 aarch64_dump_tdep);
3408
3409 /* Debug this file's internals. */
3410 add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
3411 Set AArch64 debugging."), _("\
3412 Show AArch64 debugging."), _("\
3413 When on, AArch64 specific debugging is enabled."),
3414 NULL,
3415 show_aarch64_debug,
3416 &setdebuglist, &showdebuglist);
3417
3418 #if GDB_SELF_TEST
3419 selftests::register_test ("aarch64-analyze-prologue",
3420 selftests::aarch64_analyze_prologue_test);
3421 selftests::register_test ("aarch64-process-record",
3422 selftests::aarch64_process_record_test);
3423 selftests::record_xml_tdesc ("aarch64.xml",
3424 aarch64_create_target_description (0, false));
3425 #endif
3426 }
3427
3428 /* AArch64 process record-replay related structures, defines etc. */
3429
3430 #define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
3431 do \
3432 { \
3433 unsigned int reg_len = LENGTH; \
3434 if (reg_len) \
3435 { \
3436 REGS = XNEWVEC (uint32_t, reg_len); \
3437 memcpy(&REGS[0], &RECORD_BUF[0], sizeof(uint32_t)*LENGTH); \
3438 } \
3439 } \
3440 while (0)
3441
3442 #define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
3443 do \
3444 { \
3445 unsigned int mem_len = LENGTH; \
3446 if (mem_len) \
3447 { \
3448 MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
3449 memcpy(&MEMS->len, &RECORD_BUF[0], \
3450 sizeof(struct aarch64_mem_r) * LENGTH); \
3451 } \
3452 } \
3453 while (0)
3454
3455 /* AArch64 record/replay structures and enumerations. */
3456
3457 struct aarch64_mem_r
3458 {
3459 uint64_t len; /* Record length. */
3460 uint64_t addr; /* Memory address. */
3461 };
3462
3463 enum aarch64_record_result
3464 {
3465 AARCH64_RECORD_SUCCESS,
3466 AARCH64_RECORD_UNSUPPORTED,
3467 AARCH64_RECORD_UNKNOWN
3468 };
3469
3470 typedef struct insn_decode_record_t
3471 {
3472 struct gdbarch *gdbarch;
3473 struct regcache *regcache;
3474 CORE_ADDR this_addr; /* Address of insn to be recorded. */
3475 uint32_t aarch64_insn; /* Insn to be recorded. */
3476 uint32_t mem_rec_count; /* Count of memory records. */
3477 uint32_t reg_rec_count; /* Count of register records. */
3478 uint32_t *aarch64_regs; /* Registers to be recorded. */
3479 struct aarch64_mem_r *aarch64_mems; /* Memory locations to be recorded. */
3480 } insn_decode_record;
3481
3482 /* Record handler for data processing - register instructions. */
3483
3484 static unsigned int
3485 aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
3486 {
3487 uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
3488 uint32_t record_buf[4];
3489
3490 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3491 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3492 insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);
3493
3494 if (!bit (aarch64_insn_r->aarch64_insn, 28))
3495 {
3496 uint8_t setflags;
3497
3498 /* Logical (shifted register). */
3499 if (insn_bits24_27 == 0x0a)
3500 setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
3501 /* Add/subtract. */
3502 else if (insn_bits24_27 == 0x0b)
3503 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3504 else
3505 return AARCH64_RECORD_UNKNOWN;
3506
3507 record_buf[0] = reg_rd;
3508 aarch64_insn_r->reg_rec_count = 1;
3509 if (setflags)
3510 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3511 }
3512 else
3513 {
3514 if (insn_bits24_27 == 0x0b)
3515 {
3516 /* Data-processing (3 source). */
3517 record_buf[0] = reg_rd;
3518 aarch64_insn_r->reg_rec_count = 1;
3519 }
3520 else if (insn_bits24_27 == 0x0a)
3521 {
3522 if (insn_bits21_23 == 0x00)
3523 {
3524 /* Add/subtract (with carry). */
3525 record_buf[0] = reg_rd;
3526 aarch64_insn_r->reg_rec_count = 1;
3527 if (bit (aarch64_insn_r->aarch64_insn, 29))
3528 {
3529 record_buf[1] = AARCH64_CPSR_REGNUM;
3530 aarch64_insn_r->reg_rec_count = 2;
3531 }
3532 }
3533 else if (insn_bits21_23 == 0x02)
3534 {
3535 /* Conditional compare (register) and conditional compare
3536 (immediate) instructions. */
3537 record_buf[0] = AARCH64_CPSR_REGNUM;
3538 aarch64_insn_r->reg_rec_count = 1;
3539 }
3540 else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
3541 {
3542	      /* Conditional select.  */
3543 /* Data-processing (2 source). */
3544 /* Data-processing (1 source). */
3545 record_buf[0] = reg_rd;
3546 aarch64_insn_r->reg_rec_count = 1;
3547 }
3548 else
3549 return AARCH64_RECORD_UNKNOWN;
3550 }
3551 }
3552
3553 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3554 record_buf);
3555 return AARCH64_RECORD_SUCCESS;
3556 }
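
/* Worked example (illustrative encoding): ADDS X0, X1, X2 is
   0xab020020; bit 28 is clear, bits 24-27 are 0x0b (add/subtract) and
   the S bit (29) is set, so the function records X0 and the CPSR.  */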
3557
3558 /* Record handler for data processing - immediate instructions. */
3559
3560 static unsigned int
3561 aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
3562 {
3563 uint8_t reg_rd, insn_bit23, insn_bits24_27, setflags;
3564 uint32_t record_buf[4];
3565
3566 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3567 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3568 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3569
3570 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
3571 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
3572 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
3573 {
3574 record_buf[0] = reg_rd;
3575 aarch64_insn_r->reg_rec_count = 1;
3576 }
3577 else if (insn_bits24_27 == 0x01)
3578 {
3579 /* Add/Subtract (immediate). */
3580 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3581 record_buf[0] = reg_rd;
3582 aarch64_insn_r->reg_rec_count = 1;
3583 if (setflags)
3584 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3585 }
3586 else if (insn_bits24_27 == 0x02 && !insn_bit23)
3587 {
3588 /* Logical (immediate). */
3589 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
3590 record_buf[0] = reg_rd;
3591 aarch64_insn_r->reg_rec_count = 1;
3592 if (setflags)
3593 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3594 }
3595 else
3596 return AARCH64_RECORD_UNKNOWN;
3597
3598 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3599 record_buf);
3600 return AARCH64_RECORD_SUCCESS;
3601 }
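
/* Worked example (illustrative encoding): ADD X1, X2, #16 is
   0x91004041; bits 24-27 are 0x01 (add/subtract immediate) and the
   S bit (29) is clear, so only X1 is recorded.  */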
3602
3603 /* Record handler for branch, exception generation and system instructions. */
3604
3605 static unsigned int
3606 aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
3607 {
3608 struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
3609 uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
3610 uint32_t record_buf[4];
3611
3612 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3613 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3614 insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3615
3616 if (insn_bits28_31 == 0x0d)
3617 {
3618 /* Exception generation instructions. */
3619 if (insn_bits24_27 == 0x04)
3620 {
3621 if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
3622 && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
3623 && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
3624 {
3625 ULONGEST svc_number;
3626
3627 regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
3628 &svc_number);
3629 return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
3630 svc_number);
3631 }
3632 else
3633 return AARCH64_RECORD_UNSUPPORTED;
3634 }
3635 /* System instructions. */
3636 else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
3637 {
3638 uint32_t reg_rt, reg_crn;
3639
3640 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3641 reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3642
3643 /* Record rt in case of sysl and mrs instructions. */
3644 if (bit (aarch64_insn_r->aarch64_insn, 21))
3645 {
3646 record_buf[0] = reg_rt;
3647 aarch64_insn_r->reg_rec_count = 1;
3648 }
3649 /* Record cpsr for hint and msr(immediate) instructions. */
3650 else if (reg_crn == 0x02 || reg_crn == 0x04)
3651 {
3652 record_buf[0] = AARCH64_CPSR_REGNUM;
3653 aarch64_insn_r->reg_rec_count = 1;
3654 }
3655 }
3656 /* Unconditional branch (register). */
3657	  else if ((insn_bits24_27 & 0x0e) == 0x06)
3658 {
3659 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3660 if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
3661 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3662 }
3663 else
3664 return AARCH64_RECORD_UNKNOWN;
3665 }
3666 /* Unconditional branch (immediate). */
3667 else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
3668 {
3669 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3670 if (bit (aarch64_insn_r->aarch64_insn, 31))
3671 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3672 }
3673 else
3674 /* Compare & branch (immediate), Test & branch (immediate) and
3675 Conditional branch (immediate). */
3676 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3677
3678 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3679 record_buf);
3680 return AARCH64_RECORD_SUCCESS;
3681 }
3682
3683 /* Record handler for advanced SIMD load and store instructions. */
3684
3685 static unsigned int
3686 aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
3687 {
3688 CORE_ADDR address;
3689 uint64_t addr_offset = 0;
3690 uint32_t record_buf[24];
3691 uint64_t record_buf_mem[24];
3692 uint32_t reg_rn, reg_rt;
3693 uint32_t reg_index = 0, mem_index = 0;
3694 uint8_t opcode_bits, size_bits;
3695
3696 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3697 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3698 size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3699 opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3700 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);
3701
3702 if (record_debug)
3703 debug_printf ("Process record: Advanced SIMD load/store\n");
3704
3705 /* Load/store single structure. */
3706 if (bit (aarch64_insn_r->aarch64_insn, 24))
3707 {
3708 uint8_t sindex, scale, selem, esize, replicate = 0;
3709 scale = opcode_bits >> 2;
3710 selem = ((opcode_bits & 0x02)
3711 | bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
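/* scale (opcode bits 2-3) selects the element size; selem (1 to 4)
   is the number of structure elements, built from opcode bit 1 and
   instruction bit 21. */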
3712 switch (scale)
3713 {
3714 case 1:
3715 if (size_bits & 0x01)
3716 return AARCH64_RECORD_UNKNOWN;
3717 break;
3718 case 2:
3719 if ((size_bits >> 1) & 0x01)
3720 return AARCH64_RECORD_UNKNOWN;
3721 if (size_bits & 0x01)
3722 {
3723 if (!((opcode_bits >> 1) & 0x01))
3724 scale = 3;
3725 else
3726 return AARCH64_RECORD_UNKNOWN;
3727 }
3728 break;
3729 case 3:
3730 if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
3731 {
3732 scale = size_bits;
3733 replicate = 1;
3734 break;
3735 }
3736 else
3737 return AARCH64_RECORD_UNKNOWN;
3738 default:
3739 break;
3740 }
3741 esize = 8 << scale;
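/* esize is the element size in bits. The replicate forms (LD1R etc.)
   only change whole SIMD registers, so no memory needs recording. */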
3742 if (replicate)
3743 for (sindex = 0; sindex < selem; sindex++)
3744 {
3745 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3746 reg_rt = (reg_rt + 1) % 32;
3747 }
3748 else
3749 {
3750 for (sindex = 0; sindex < selem; sindex++)
3751 {
3752 if (bit (aarch64_insn_r->aarch64_insn, 22))
3753 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3754 else
3755 {
3756 record_buf_mem[mem_index++] = esize / 8;
3757 record_buf_mem[mem_index++] = address + addr_offset;
3758 }
3759 addr_offset = addr_offset + (esize / 8);
3760 reg_rt = (reg_rt + 1) % 32;
3761 }
3762 }
3763 }
3764 /* Load/store multiple structure. */
3765 else
3766 {
3767 uint8_t selem, esize, rpt, elements;
3768 uint8_t eindex, rindex;
3769
3770 esize = 8 << size_bits;
3771 if (bit (aarch64_insn_r->aarch64_insn, 30))
3772 elements = 128 / esize;
3773 else
3774 elements = 64 / esize;
3775
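/* opcode selects the LD/ST1-LD/ST4 variant: rpt is the number of
   consecutive register transfers, selem the number of structure
   elements per transfer. */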
3776 switch (opcode_bits)
3777 {
3778 /* LD/ST4 (4 Registers). */
3779 case 0:
3780 rpt = 1;
3781 selem = 4;
3782 break;
3783 /* LD/ST1 (4 Registers). */
3784 case 2:
3785 rpt = 4;
3786 selem = 1;
3787 break;
3788 /* LD/ST3 (3 Registers). */
3789 case 4:
3790 rpt = 1;
3791 selem = 3;
3792 break;
3793 /* LD/ST1 (3 Registers). */
3794 case 6:
3795 rpt = 3;
3796 selem = 1;
3797 break;
3798 /* LD/ST1 (1 Register). */
3799 case 7:
3800 rpt = 1;
3801 selem = 1;
3802 break;
3803 /* LD/ST2 (2 Registers). */
3804 case 8:
3805 rpt = 1;
3806 selem = 2;
3807 break;
3808 /* LD/ST1 (2 Registers). */
3809 case 10:
3810 rpt = 2;
3811 selem = 1;
3812 break;
3813 default:
3814 return AARCH64_RECORD_UNSUPPORTED;
3815 break;
3816 }
3817 for (rindex = 0; rindex < rpt; rindex++)
3818 for (eindex = 0; eindex < elements; eindex++)
3819 {
3820 uint8_t reg_tt, sindex;
3821 reg_tt = (reg_rt + rindex) % 32;
3822 for (sindex = 0; sindex < selem; sindex++)
3823 {
3824 if (bit (aarch64_insn_r->aarch64_insn, 22))
3825 record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
3826 else
3827 {
3828 record_buf_mem[mem_index++] = esize / 8;
3829 record_buf_mem[mem_index++] = address + addr_offset;
3830 }
3831 addr_offset = addr_offset + (esize / 8);
3832 reg_tt = (reg_tt + 1) % 32;
3833 }
3834 }
3835 }
3836
3837 if (bit (aarch64_insn_r->aarch64_insn, 23))
3838 record_buf[reg_index++] = reg_rn;
3839
3840 aarch64_insn_r->reg_rec_count = reg_index;
3841 aarch64_insn_r->mem_rec_count = mem_index / 2;
3842 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3843 record_buf_mem);
3844 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3845 record_buf);
3846 return AARCH64_RECORD_SUCCESS;
3847 }
3848
3849 /* Record handler for load and store instructions. */
3850
3851 static unsigned int
3852 aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
3853 {
3854 uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
3855 uint8_t insn_bit23, insn_bit21;
3856 uint8_t opc, size_bits, ld_flag, vector_flag;
3857 uint32_t reg_rn, reg_rt, reg_rt2;
3858 uint64_t datasize, offset;
3859 uint32_t record_buf[8];
3860 uint64_t record_buf_mem[8];
3861 CORE_ADDR address;
3862
3863 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3864 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3865 insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
3866 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3867 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3868 ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
3869 vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
3870 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3871 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3872 reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
3873 size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);
3874
3875 /* Load/store exclusive. */
3876 if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
3877 {
3878 if (record_debug)
3879 debug_printf ("Process record: load/store exclusive\n");
3880
3881 if (ld_flag)
3882 {
3883 record_buf[0] = reg_rt;
3884 aarch64_insn_r->reg_rec_count = 1;
3885 if (insn_bit21)
3886 {
3887 record_buf[1] = reg_rt2;
3888 aarch64_insn_r->reg_rec_count = 2;
3889 }
3890 }
3891 else
3892 {
3893 if (insn_bit21)
3894 datasize = (8 << size_bits) * 2;
3895 else
3896 datasize = (8 << size_bits);
3897 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3898 &address);
3899 record_buf_mem[0] = datasize / 8;
3900 record_buf_mem[1] = address;
3901 aarch64_insn_r->mem_rec_count = 1;
3902 if (!insn_bit23)
3903 {
3904 /* Save register rs. */
3905 record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
3906 aarch64_insn_r->reg_rec_count = 1;
3907 }
3908 }
3909 }
3910 /* Decode load register (literal) instructions. */
3911 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
3912 {
3913 if (record_debug)
3914 debug_printf ("Process record: load register (literal)\n");
3915 if (vector_flag)
3916 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3917 else
3918 record_buf[0] = reg_rt;
3919 aarch64_insn_r->reg_rec_count = 1;
3920 }
3921 /* Decode all types of load/store pair instructions. */
3922 else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
3923 {
3924 if (record_debug)
3925 debug_printf ("Process record: load/store pair\n");
3926
3927 if (ld_flag)
3928 {
3929 if (vector_flag)
3930 {
3931 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3932 record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
3933 }
3934 else
3935 {
3936 record_buf[0] = reg_rt;
3937 record_buf[1] = reg_rt2;
3938 }
3939 aarch64_insn_r->reg_rec_count = 2;
3940 }
3941 else
3942 {
3943 uint16_t imm7_off;
3944 imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
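/* imm7 is a signed 7-bit offset, scaled below by the access size;
   compute its magnitude here and apply the sign after reading the
   base register. */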
3945 if (!vector_flag)
3946 size_bits = size_bits >> 1;
3947 datasize = 8 << (2 + size_bits);
3948 offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
3949 offset = offset << (2 + size_bits);
3950 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3951 &address);
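/* The post-indexed variant accesses memory at the unmodified base
   address, so only apply the offset for the offset and pre-indexed
   forms. */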
3952 if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
3953 {
3954 if (imm7_off & 0x40)
3955 address = address - offset;
3956 else
3957 address = address + offset;
3958 }
3959
3960 record_buf_mem[0] = datasize / 8;
3961 record_buf_mem[1] = address;
3962 record_buf_mem[2] = datasize / 8;
3963 record_buf_mem[3] = address + (datasize / 8);
3964 aarch64_insn_r->mem_rec_count = 2;
3965 }
3966 if (bit (aarch64_insn_r->aarch64_insn, 23))
3967 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3968 }
3969 /* Load/store register (unsigned immediate) instructions. */
3970 else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
3971 {
3972 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3973 if (!(opc >> 1))
3974 {
3975 if (opc & 0x01)
3976 ld_flag = 0x01;
3977 else
3978 ld_flag = 0x0;
3979 }
3980 else
3981 {
3982 if (size_bits == 0x3 && vector_flag == 0x0 && opc == 0x2)
3983 {
3984 /* PRFM (immediate) */
3985 return AARCH64_RECORD_SUCCESS;
3986 }
3987 else if (size_bits == 0x2 && vector_flag == 0x0 && opc == 0x2)
3988 {
3989 /* LDRSW (immediate) */
3990 ld_flag = 0x1;
3991 }
3992 else
3993 {
3994 if (opc & 0x01)
3995 ld_flag = 0x01;
3996 else
3997 ld_flag = 0x0;
3998 }
3999 }
4000
4001 if (record_debug)
4002 {
4003 debug_printf ("Process record: load/store (unsigned immediate):"
4004 " size %x V %d opc %x\n", size_bits, vector_flag,
4005 opc);
4006 }
4007
4008 if (!ld_flag)
4009 {
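/* The unsigned 12-bit immediate (bits 10-21) is scaled by the
   transfer size. */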
4010 offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
4011 datasize = 8 << size_bits;
4012 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4013 &address);
4014 offset = offset << size_bits;
4015 address = address + offset;
4016
4017 record_buf_mem[0] = datasize >> 3;
4018 record_buf_mem[1] = address;
4019 aarch64_insn_r->mem_rec_count = 1;
4020 }
4021 else
4022 {
4023 if (vector_flag)
4024 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4025 else
4026 record_buf[0] = reg_rt;
4027 aarch64_insn_r->reg_rec_count = 1;
4028 }
4029 }
4030 /* Load/store register (register offset) instructions. */
4031 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
4032 && insn_bits10_11 == 0x02 && insn_bit21)
4033 {
4034 if (record_debug)
4035 debug_printf ("Process record: load/store (register offset)\n");
4036 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
4037 if (!(opc >> 1))
4038 if (opc & 0x01)
4039 ld_flag = 0x01;
4040 else
4041 ld_flag = 0x0;
4042 else
4043 if (size_bits != 0x03)
4044 ld_flag = 0x01;
4045 else
4046 return AARCH64_RECORD_UNKNOWN;
4047
4048 if (!ld_flag)
4049 {
4050 ULONGEST reg_rm_val;
4051
4052 regcache_raw_read_unsigned (aarch64_insn_r->regcache,
4053 bits (aarch64_insn_r->aarch64_insn, 16, 20), &reg_rm_val);
4054 if (bit (aarch64_insn_r->aarch64_insn, 12))
4055 offset = reg_rm_val << size_bits;
4056 else
4057 offset = reg_rm_val;
4058 datasize = 8 << size_bits;
4059 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4060 &address);
4061 address = address + offset;
4062 record_buf_mem[0] = datasize >> 3;
4063 record_buf_mem[1] = address;
4064 aarch64_insn_r->mem_rec_count = 1;
4065 }
4066 else
4067 {
4068 if (vector_flag)
4069 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4070 else
4071 record_buf[0] = reg_rt;
4072 aarch64_insn_r->reg_rec_count = 1;
4073 }
4074 }
4075 /* Load/store register (immediate and unprivileged) instructions. */
4076 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
4077 && !insn_bit21)
4078 {
4079 if (record_debug)
4080 {
4081 debug_printf ("Process record: load/store "
4082 "(immediate and unprivileged)\n");
4083 }
4084 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
4085 if (!(opc >> 1))
4086 if (opc & 0x01)
4087 ld_flag = 0x01;
4088 else
4089 ld_flag = 0x0;
4090 else
4091 if (size_bits != 0x03)
4092 ld_flag = 0x01;
4093 else
4094 return AARCH64_RECORD_UNKNOWN;
4095
4096 if (!ld_flag)
4097 {
4098 uint16_t imm9_off;
4099 imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
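/* imm9 is a signed 9-bit byte offset; compute its magnitude here and
   apply the sign below, except for the post-indexed form (bits 10-11
   == 0x01), which accesses the unmodified base address. */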
4100 offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
4101 datasize = 8 << size_bits;
4102 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4103 &address);
4104 if (insn_bits10_11 != 0x01)
4105 {
4106 if (imm9_off & 0x0100)
4107 address = address - offset;
4108 else
4109 address = address + offset;
4110 }
4111 record_buf_mem[0] = datasize >> 3;
4112 record_buf_mem[1] = address;
4113 aarch64_insn_r->mem_rec_count = 1;
4114 }
4115 else
4116 {
4117 if (vector_flag)
4118 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4119 else
4120 record_buf[0] = reg_rt;
4121 aarch64_insn_r->reg_rec_count = 1;
4122 }
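/* The pre- and post-indexed forms write the updated address back to
   the base register. */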
4123 if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
4124 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
4125 }
4126 /* Advanced SIMD load/store instructions. */
4127 else
4128 return aarch64_record_asimd_load_store (aarch64_insn_r);
4129
4130 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
4131 record_buf_mem);
4132 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4133 record_buf);
4134 return AARCH64_RECORD_SUCCESS;
4135 }
4136
4137 /* Record handler for data processing SIMD and floating point instructions. */
4138
4139 static unsigned int
4140 aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
4141 {
4142 uint8_t insn_bit21, opcode, rmode, reg_rd;
4143 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
4144 uint8_t insn_bits11_14;
4145 uint32_t record_buf[2];
4146
4147 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
4148 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
4149 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
4150 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
4151 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
4152 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
4153 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
4154 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
4155 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
4156
4157 if (record_debug)
4158 debug_printf ("Process record: data processing SIMD/FP: ");
4159
4160 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
4161 {
4162 /* Floating point - fixed point conversion instructions. */
4163 if (!insn_bit21)
4164 {
4165 if (record_debug)
4166 debug_printf ("FP - fixed point conversion");
4167
4168 if ((opcode >> 1) == 0x0 && rmode == 0x03)
4169 record_buf[0] = reg_rd;
4170 else
4171 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4172 }
4173 /* Floating point - conditional compare instructions. */
4174 else if (insn_bits10_11 == 0x01)
4175 {
4176 if (record_debug)
4177 debug_printf ("FP - conditional compare");
4178
4179 record_buf[0] = AARCH64_CPSR_REGNUM;
4180 }
4181 /* Floating point - data processing (2-source) and
4182 conditional select instructions. */
4183 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
4184 {
4185 if (record_debug)
4186 debug_printf ("FP - DP (2-source)");
4187
4188 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4189 }
4190 else if (insn_bits10_11 == 0x00)
4191 {
4192 /* Floating point - immediate instructions. */
4193 if ((insn_bits12_15 & 0x01) == 0x01
4194 || (insn_bits12_15 & 0x07) == 0x04)
4195 {
4196 if (record_debug)
4197 debug_printf ("FP - immediate");
4198 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4199 }
4200 /* Floating point - compare instructions. */
4201 else if ((insn_bits12_15 & 0x03) == 0x02)
4202 {
4203 if (record_debug)
4204 debug_printf ("FP - compare");
4205 record_buf[0] = AARCH64_CPSR_REGNUM;
4206 }
4207 /* Floating point - integer conversions instructions. */
4208 else if (insn_bits12_15 == 0x00)
4209 {
4210 /* Convert float to integer instruction. */
4211 if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
4212 {
4213 if (record_debug)
4214 debug_printf ("float to int conversion");
4215
4216 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4217 }
4218 /* Convert integer to float instruction. */
4219 else if ((opcode >> 1) == 0x01 && !rmode)
4220 {
4221 if (record_debug)
4222 debug_printf ("int to float conversion");
4223
4224 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4225 }
4226 /* Move float to integer instruction. */
4227 else if ((opcode >> 1) == 0x03)
4228 {
4229 if (record_debug)
4230 debug_printf ("move float to int");
4231
4232 if (!(opcode & 0x01))
4233 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4234 else
4235 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4236 }
4237 else
4238 return AARCH64_RECORD_UNKNOWN;
4239 }
4240 else
4241 return AARCH64_RECORD_UNKNOWN;
4242 }
4243 else
4244 return AARCH64_RECORD_UNKNOWN;
4245 }
4246 else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
4247 {
4248 if (record_debug)
4249 debug_printf ("SIMD copy");
4250
4251 /* Advanced SIMD copy instructions. */
4252 if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
4253 && !bit (aarch64_insn_r->aarch64_insn, 15)
4254 && bit (aarch64_insn_r->aarch64_insn, 10))
4255 {
4256 if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
4257 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4258 else
4259 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4260 }
4261 else
4262 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4263 }
4264 /* All remaining floating point or advanced SIMD instructions. */
4265 else
4266 {
4267 if (record_debug)
4268 debug_printf ("all remaining SIMD/FP");
4269
4270 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4271 }
4272
4273 if (record_debug)
4274 debug_printf ("\n");
4275
4276 aarch64_insn_r->reg_rec_count++;
4277 gdb_assert (aarch64_insn_r->reg_rec_count == 1);
4278 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4279 record_buf);
4280 return AARCH64_RECORD_SUCCESS;
4281 }
4282
4283 /* Decode the instruction type and invoke the matching record handler. */
4284
4285 static unsigned int
4286 aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
4287 {
4288 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
4289
4290 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
4291 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
4292 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
4293 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
4294
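/* The top-level instruction classes are distinguished by bits 25-28;
   compare the A64 instruction-class encoding tables in the Arm
   Architecture Reference Manual. */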
4295 /* Data processing - immediate instructions. */
4296 if (!ins_bit26 && !ins_bit27 && ins_bit28)
4297 return aarch64_record_data_proc_imm (aarch64_insn_r);
4298
4299 /* Branch, exception generation and system instructions. */
4300 if (ins_bit26 && !ins_bit27 && ins_bit28)
4301 return aarch64_record_branch_except_sys (aarch64_insn_r);
4302
4303 /* Load and store instructions. */
4304 if (!ins_bit25 && ins_bit27)
4305 return aarch64_record_load_store (aarch64_insn_r);
4306
4307 /* Data processing - register instructions. */
4308 if (ins_bit25 && !ins_bit26 && ins_bit27)
4309 return aarch64_record_data_proc_reg (aarch64_insn_r);
4310
4311 /* Data processing - SIMD and floating point instructions. */
4312 if (ins_bit25 && ins_bit26 && ins_bit27)
4313 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
4314
4315 return AARCH64_RECORD_UNSUPPORTED;
4316 }
4317
4318 /* Cleans up local record registers and memory allocations. */
4319
4320 static void
4321 deallocate_reg_mem (insn_decode_record *record)
4322 {
4323 xfree (record->aarch64_regs);
4324 xfree (record->aarch64_mems);
4325 }
4326
4327 #if GDB_SELF_TEST
4328 namespace selftests {
4329
4330 static void
4331 aarch64_process_record_test (void)
4332 {
4333 struct gdbarch_info info;
4334 uint32_t ret;
4335
4336 gdbarch_info_init (&info);
4337 info.bfd_arch_info = bfd_scan_arch ("aarch64");
4338
4339 struct gdbarch *gdbarch = gdbarch_find_by_info (info);
4340 SELF_CHECK (gdbarch != NULL);
4341
4342 insn_decode_record aarch64_record;
4343
4344 memset (&aarch64_record, 0, sizeof (insn_decode_record));
4345 aarch64_record.regcache = NULL;
4346 aarch64_record.this_addr = 0;
4347 aarch64_record.gdbarch = gdbarch;
4348
4349 /* 20 00 80 f9 prfm pldl1keep, [x1] */
4350 aarch64_record.aarch64_insn = 0xf9800020;
4351 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4352 SELF_CHECK (ret == AARCH64_RECORD_SUCCESS);
4353 SELF_CHECK (aarch64_record.reg_rec_count == 0);
4354 SELF_CHECK (aarch64_record.mem_rec_count == 0);
4355
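/* A sketch of an additional check, by analogy with the PRFM test
   above (not part of the original test): BRK is an
   exception-generation encoding the recorder does not handle, so it
   should be reported as unsupported. */
/* 00 00 20 d4 brk #0x0 */
aarch64_record.aarch64_insn = 0xd4200000;
ret = aarch64_record_decode_insn_handler (&aarch64_record);
SELF_CHECK (ret == AARCH64_RECORD_UNSUPPORTED);
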
4356 deallocate_reg_mem (&aarch64_record);
4357 }
4358
4359 } // namespace selftests
4360 #endif /* GDB_SELF_TEST */
4361
4362 /* Parse the current instruction, and record the values of the registers
4363 and memory that the instruction will change, in record_arch_list.
4364 Return -1 if something goes wrong. */
4365
4366 int
4367 aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
4368 CORE_ADDR insn_addr)
4369 {
4370 uint32_t rec_no = 0;
4371 uint8_t insn_size = 4;
4372 uint32_t ret = 0;
4373 gdb_byte buf[insn_size];
4374 insn_decode_record aarch64_record;
4375
4376 memset (&buf[0], 0, insn_size);
4377 memset (&aarch64_record, 0, sizeof (insn_decode_record));
4378 target_read_memory (insn_addr, &buf[0], insn_size);
4379 aarch64_record.aarch64_insn
4380 = (uint32_t) extract_unsigned_integer (&buf[0],
4381 insn_size,
4382 gdbarch_byte_order (gdbarch));
4383 aarch64_record.regcache = regcache;
4384 aarch64_record.this_addr = insn_addr;
4385 aarch64_record.gdbarch = gdbarch;
4386
4387 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4388 if (ret == AARCH64_RECORD_UNSUPPORTED)
4389 {
4390 printf_unfiltered (_("Process record does not support instruction "
4391 "0x%0x at address %s.\n"),
4392 aarch64_record.aarch64_insn,
4393 paddress (gdbarch, insn_addr));
4394 ret = -1;
4395 }
4396
4397 if (0 == ret)
4398 {
4399 /* Record registers. */
4400 record_full_arch_list_add_reg (aarch64_record.regcache,
4401 AARCH64_PC_REGNUM);
4402 /* Always record register CPSR. */
4403 record_full_arch_list_add_reg (aarch64_record.regcache,
4404 AARCH64_CPSR_REGNUM);
4405 if (aarch64_record.aarch64_regs)
4406 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
4407 if (record_full_arch_list_add_reg (aarch64_record.regcache,
4408 aarch64_record.aarch64_regs[rec_no]))
4409 ret = -1;
4410
4411 /* Record memories. */
4412 if (aarch64_record.aarch64_mems)
4413 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
4414 if (record_full_arch_list_add_mem
4415 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
4416 aarch64_record.aarch64_mems[rec_no].len))
4417 ret = -1;
4418
4419 if (record_full_arch_list_add_end ())
4420 ret = -1;
4421 }
4422
4423 deallocate_reg_mem (&aarch64_record);
4424 return ret;
4425 }