/* Common target dependent code for GDB on AArch64 systems.

   Copyright (C) 2009-2019 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"

#include "frame.h"
#include "inferior.h"
#include "gdbcmd.h"
#include "gdbcore.h"
#include "dis-asm.h"
#include "regcache.h"
#include "reggroups.h"
#include "value.h"
#include "arch-utils.h"
#include "osabi.h"
#include "frame-unwind.h"
#include "frame-base.h"
#include "trad-frame.h"
#include "objfiles.h"
#include "dwarf2.h"
#include "dwarf2-frame.h"
#include "gdbtypes.h"
#include "prologue-value.h"
#include "target-descriptions.h"
#include "user-regs.h"
#include "language.h"
#include "infcall.h"
#include "ax.h"
#include "ax-gdb.h"
#include "gdbsupport/selftest.h"

#include "aarch64-tdep.h"
#include "aarch64-ravenscar-thread.h"

#include "elf-bfd.h"
#include "elf/aarch64.h"

#include "record.h"
#include "record-full.h"
#include "arch/aarch64-insn.h"
#include "gdbarch.h"

#include "opcode/aarch64.h"
#include <algorithm>
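
/* Bit-field extraction helpers: submask (X) is a mask covering bits
   0..X inclusive, bit (OBJ, ST) extracts single bit ST from OBJ, and
   bits (OBJ, ST, FN) extracts the bit field ST..FN inclusive.  */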
#define submask(x) ((1L << ((x) + 1)) - 1)
#define bit(obj,st) (((obj) >> (st)) & 1)
#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))

/* A Homogeneous Floating-Point or Short-Vector Aggregate may have at most
   four members.  */
#define HA_MAX_NUM_FLDS 4

/* All possible aarch64 target descriptors.  */
struct target_desc *tdesc_aarch64_list[AARCH64_MAX_SVE_VQ + 1][2/*pauth*/];

/* The standard register names, and all the valid aliases for them.  */
static const struct
{
  const char *const name;
  int regnum;
} aarch64_register_aliases[] =
{
  /* 64-bit register names.  */
  {"fp", AARCH64_FP_REGNUM},
  {"lr", AARCH64_LR_REGNUM},
  {"sp", AARCH64_SP_REGNUM},

  /* 32-bit register names.  */
  {"w0", AARCH64_X0_REGNUM + 0},
  {"w1", AARCH64_X0_REGNUM + 1},
  {"w2", AARCH64_X0_REGNUM + 2},
  {"w3", AARCH64_X0_REGNUM + 3},
  {"w4", AARCH64_X0_REGNUM + 4},
  {"w5", AARCH64_X0_REGNUM + 5},
  {"w6", AARCH64_X0_REGNUM + 6},
  {"w7", AARCH64_X0_REGNUM + 7},
  {"w8", AARCH64_X0_REGNUM + 8},
  {"w9", AARCH64_X0_REGNUM + 9},
  {"w10", AARCH64_X0_REGNUM + 10},
  {"w11", AARCH64_X0_REGNUM + 11},
  {"w12", AARCH64_X0_REGNUM + 12},
  {"w13", AARCH64_X0_REGNUM + 13},
  {"w14", AARCH64_X0_REGNUM + 14},
  {"w15", AARCH64_X0_REGNUM + 15},
  {"w16", AARCH64_X0_REGNUM + 16},
  {"w17", AARCH64_X0_REGNUM + 17},
  {"w18", AARCH64_X0_REGNUM + 18},
  {"w19", AARCH64_X0_REGNUM + 19},
  {"w20", AARCH64_X0_REGNUM + 20},
  {"w21", AARCH64_X0_REGNUM + 21},
  {"w22", AARCH64_X0_REGNUM + 22},
  {"w23", AARCH64_X0_REGNUM + 23},
  {"w24", AARCH64_X0_REGNUM + 24},
  {"w25", AARCH64_X0_REGNUM + 25},
  {"w26", AARCH64_X0_REGNUM + 26},
  {"w27", AARCH64_X0_REGNUM + 27},
  {"w28", AARCH64_X0_REGNUM + 28},
  {"w29", AARCH64_X0_REGNUM + 29},
  {"w30", AARCH64_X0_REGNUM + 30},

  /* specials */
  {"ip0", AARCH64_X0_REGNUM + 16},
  {"ip1", AARCH64_X0_REGNUM + 17}
};

/* The required core 'R' registers.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM!  */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  "pc", "cpsr"
};

/* The FP/SIMD 'V' registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM!  */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  "fpsr",
  "fpcr"
};

/* The SVE 'Z' and 'P' registers.  */
static const char *const aarch64_sve_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_SVE_Z0_REGNUM!  */
  "z0", "z1", "z2", "z3",
  "z4", "z5", "z6", "z7",
  "z8", "z9", "z10", "z11",
  "z12", "z13", "z14", "z15",
  "z16", "z17", "z18", "z19",
  "z20", "z21", "z22", "z23",
  "z24", "z25", "z26", "z27",
  "z28", "z29", "z30", "z31",
  "fpsr", "fpcr",
  "p0", "p1", "p2", "p3",
  "p4", "p5", "p6", "p7",
  "p8", "p9", "p10", "p11",
  "p12", "p13", "p14", "p15",
  "ffr", "vg"
};

static const char *const aarch64_pauth_register_names[] =
{
  /* Authentication mask for data pointer.  */
  "pauth_dmask",
  /* Authentication mask for code pointer.  */
  "pauth_cmask"
};

/* AArch64 prologue cache structure.  */
struct aarch64_prologue_cache
{
  /* The program counter at the start of the function.  It is used to
     identify this frame as a prologue frame.  */
  CORE_ADDR func;

  /* The program counter at the time this frame was created; i.e. where
     this function was called from.  It is used to identify this frame as a
     stub frame.  */
  CORE_ADDR prev_pc;

  /* The stack pointer at the time this frame was created; i.e. the
     caller's stack pointer when this function was called.  It is used
     to identify this frame.  */
  CORE_ADDR prev_sp;

  /* Is the target available to read from?  */
  int available_p;

  /* The frame base for this frame is just prev_sp - frame size.
     FRAMESIZE is the distance from the frame pointer to the
     initial stack pointer.  */
  int framesize;

  /* The register used to hold the frame pointer for this frame.  */
  int framereg;

  /* Saved register offsets.  */
  struct trad_frame_saved_reg *saved_regs;
};
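
/* Print the current value of the "debug aarch64" setting.  */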
static void
show_aarch64_debug (struct ui_file *file, int from_tty,
		    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
}

namespace {

/* Abstract instruction reader.  */

class abstract_instruction_reader
{
public:
  /* Read in one instruction.  */
  virtual ULONGEST read (CORE_ADDR memaddr, int len,
			 enum bfd_endian byte_order) = 0;
};

/* Instruction reader from real target.  */

class instruction_reader : public abstract_instruction_reader
{
public:
  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    override
  {
    return read_code_unsigned_integer (memaddr, len, byte_order);
  }
};

} // namespace

/* If address signing is enabled, mask off the signature bits from the link
   register, which is passed by value in ADDR, using the register values in
   THIS_FRAME.  */

static CORE_ADDR
aarch64_frame_unmask_lr (struct gdbarch_tdep *tdep,
			 struct frame_info *this_frame, CORE_ADDR addr)
{
  if (tdep->has_pauth ()
      && frame_unwind_register_unsigned (this_frame,
					 tdep->pauth_ra_state_regnum))
    {
      int cmask_num = AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base);
      CORE_ADDR cmask = frame_unwind_register_unsigned (this_frame, cmask_num);
      addr = addr & ~cmask;

      /* Record in the frame that the link register required unmasking.  */
      set_frame_previous_pc_masked (this_frame);
    }

  return addr;
}

/* Implement the "get_pc_address_flags" gdbarch method.  */

static std::string
aarch64_get_pc_address_flags (frame_info *frame, CORE_ADDR pc)
{
  if (pc != 0 && get_frame_pc_masked (frame))
    return "PAC";

  return "";
}

/* Analyze a prologue, looking for a recognizable stack frame
   and frame pointer.  Scan until we encounter a store that could
   clobber the stack frame unexpectedly, or an unknown instruction.  */
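
/* For example, given a typical GCC prologue such as

     stp x29, x30, [sp, #-272]!
     mov x29, sp

   the analysis records where x29 and x30 were stored and that the
   frame pointer then tracks the stack pointer; the self tests below
   exercise these cases.  */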

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
			  CORE_ADDR start, CORE_ADDR limit,
			  struct aarch64_prologue_cache *cache,
			  abstract_instruction_reader& reader)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  int i;
  /* Track X registers and D registers in prologue.  */
  pv_t regs[AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT];

  for (i = 0; i < AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT; i++)
    regs[i] = pv_register (i, 0);
  pv_area stack (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));

  for (; start < limit; start += 4)
    {
      uint32_t insn;
      aarch64_inst inst;

      insn = reader.read (start, 4, byte_order_for_code);

      if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
	break;

      if (inst.opcode->iclass == addsub_imm
	  && (inst.opcode->op == OP_ADD
	      || strcmp ("sub", inst.opcode->name) == 0))
	{
	  unsigned rd = inst.operands[0].reg.regno;
	  unsigned rn = inst.operands[1].reg.regno;

	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
	  gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM);

	  if (inst.opcode->op == OP_ADD)
	    {
	      regs[rd] = pv_add_constant (regs[rn],
					  inst.operands[2].imm.value);
	    }
	  else
	    {
	      regs[rd] = pv_add_constant (regs[rn],
					  -inst.operands[2].imm.value);
	    }
	}
      else if (inst.opcode->iclass == pcreladdr
	       && inst.operands[1].type == AARCH64_OPND_ADDR_ADRP)
	{
	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);

	  regs[inst.operands[0].reg.regno] = pv_unknown ();
	}
      else if (inst.opcode->iclass == branch_imm)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->iclass == condbranch)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->iclass == branch_reg)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->iclass == compbranch)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->op == OP_MOVZ)
	{
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
	  regs[inst.operands[0].reg.regno] = pv_unknown ();
	}
      else if (inst.opcode->iclass == log_shift
	       && strcmp (inst.opcode->name, "orr") == 0)
	{
	  unsigned rd = inst.operands[0].reg.regno;
	  unsigned rn = inst.operands[1].reg.regno;
	  unsigned rm = inst.operands[2].reg.regno;

	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn);
	  gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT);

	  if (inst.operands[2].shifter.amount == 0
	      && rn == AARCH64_SP_REGNUM)
	    regs[rd] = regs[rm];
	  else
	    {
	      if (aarch64_debug)
		{
		  debug_printf ("aarch64: prologue analysis gave up "
				"addr=%s opcode=0x%x (orr x register)\n",
				core_addr_to_string_nz (start), insn);
		}
	      break;
	    }
	}
      else if (inst.opcode->op == OP_STUR)
	{
	  unsigned rt = inst.operands[0].reg.regno;
	  unsigned rn = inst.operands[1].addr.base_regno;
	  int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);

	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
	  gdb_assert (!inst.operands[1].addr.offset.is_reg);

	  stack.store
	    (pv_add_constant (regs[rn], inst.operands[1].addr.offset.imm),
	     size, regs[rt]);
	}
      else if ((inst.opcode->iclass == ldstpair_off
		|| (inst.opcode->iclass == ldstpair_indexed
		    && inst.operands[2].addr.preind))
	       && strcmp ("stp", inst.opcode->name) == 0)
	{
	  /* STP with addressing mode Pre-indexed and Base register.  */
	  unsigned rt1;
	  unsigned rt2;
	  unsigned rn = inst.operands[2].addr.base_regno;
	  int32_t imm = inst.operands[2].addr.offset.imm;
	  int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);

	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
		      || inst.operands[0].type == AARCH64_OPND_Ft);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2
		      || inst.operands[1].type == AARCH64_OPND_Ft2);
	  gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7);
	  gdb_assert (!inst.operands[2].addr.offset.is_reg);

	  /* If recording this store would invalidate the store area
	     (perhaps because rn is not known) then we should abandon
	     further prologue analysis.  */
	  if (stack.store_would_trash (pv_add_constant (regs[rn], imm)))
	    break;

	  if (stack.store_would_trash (pv_add_constant (regs[rn], imm + 8)))
	    break;

	  rt1 = inst.operands[0].reg.regno;
	  rt2 = inst.operands[1].reg.regno;
	  if (inst.operands[0].type == AARCH64_OPND_Ft)
	    {
	      rt1 += AARCH64_X_REGISTER_COUNT;
	      rt2 += AARCH64_X_REGISTER_COUNT;
	    }

	  stack.store (pv_add_constant (regs[rn], imm), size, regs[rt1]);
	  stack.store (pv_add_constant (regs[rn], imm + size), size, regs[rt2]);

	  if (inst.operands[2].addr.writeback)
	    regs[rn] = pv_add_constant (regs[rn], imm);

	}
      else if ((inst.opcode->iclass == ldst_imm9 /* Signed immediate.  */
		|| (inst.opcode->iclass == ldst_pos /* Unsigned immediate.  */
		    && (inst.opcode->op == OP_STR_POS
			|| inst.opcode->op == OP_STRF_POS)))
	       && inst.operands[1].addr.base_regno == AARCH64_SP_REGNUM
	       && strcmp ("str", inst.opcode->name) == 0)
	{
	  /* STR (immediate) */
	  unsigned int rt = inst.operands[0].reg.regno;
	  int32_t imm = inst.operands[1].addr.offset.imm;
	  unsigned int rn = inst.operands[1].addr.base_regno;
	  int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
		      || inst.operands[0].type == AARCH64_OPND_Ft);

	  if (inst.operands[0].type == AARCH64_OPND_Ft)
	    rt += AARCH64_X_REGISTER_COUNT;

	  stack.store (pv_add_constant (regs[rn], imm), size, regs[rt]);
	  if (inst.operands[1].addr.writeback)
	    regs[rn] = pv_add_constant (regs[rn], imm);
	}
      else if (inst.opcode->iclass == testbranch)
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (inst.opcode->iclass == ic_system)
	{
	  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
	  int ra_state_val = 0;

	  if (insn == 0xd503233f /* paciasp.  */
	      || insn == 0xd503237f /* pacibsp.  */)
	    {
	      /* Return addresses are mangled.  */
	      ra_state_val = 1;
	    }
	  else if (insn == 0xd50323bf /* autiasp.  */
		   || insn == 0xd50323ff /* autibsp.  */)
	    {
	      /* Return addresses are not mangled.  */
	      ra_state_val = 0;
	    }
	  else
	    {
	      if (aarch64_debug)
		debug_printf ("aarch64: prologue analysis gave up addr=%s"
			      " opcode=0x%x (iclass)\n",
			      core_addr_to_string_nz (start), insn);
	      break;
	    }

	  if (tdep->has_pauth () && cache != nullptr)
	    trad_frame_set_value (cache->saved_regs,
				  tdep->pauth_ra_state_regnum,
				  ra_state_val);
	}
      else
	{
	  if (aarch64_debug)
	    {
	      debug_printf ("aarch64: prologue analysis gave up addr=%s"
			    " opcode=0x%x\n",
			    core_addr_to_string_nz (start), insn);
	    }
	  break;
	}
    }

  if (cache == NULL)
    return start;

  if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Frame pointer is fp.  Frame size is constant.  */
      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = -regs[AARCH64_FP_REGNUM].k;
    }
  else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Try the stack pointer.  */
      cache->framesize = -regs[AARCH64_SP_REGNUM].k;
      cache->framereg = AARCH64_SP_REGNUM;
    }
  else
    {
      /* We're just out of luck.  We don't know where the frame is.  */
      cache->framereg = -1;
      cache->framesize = 0;
    }

  for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
    {
      CORE_ADDR offset;

      if (stack.find_reg (gdbarch, i, &offset))
	cache->saved_regs[i].addr = offset;
    }

  for (i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
    {
      int regnum = gdbarch_num_regs (gdbarch);
      CORE_ADDR offset;

      if (stack.find_reg (gdbarch, i + AARCH64_X_REGISTER_COUNT,
			  &offset))
	cache->saved_regs[i + regnum + AARCH64_D0_REGNUM].addr = offset;
    }

  return start;
}

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
			  CORE_ADDR start, CORE_ADDR limit,
			  struct aarch64_prologue_cache *cache)
{
  instruction_reader reader;

  return aarch64_analyze_prologue (gdbarch, start, limit, cache,
				   reader);
}

#if GDB_SELF_TEST

namespace selftests {

/* Instruction reader from manually cooked instruction sequences.  */

class instruction_reader_test : public abstract_instruction_reader
{
public:
  template<size_t SIZE>
  explicit instruction_reader_test (const uint32_t (&insns)[SIZE])
    : m_insns (insns), m_insns_size (SIZE)
  {}

  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    override
  {
    SELF_CHECK (len == 4);
    SELF_CHECK (memaddr % 4 == 0);
    SELF_CHECK (memaddr / 4 < m_insns_size);

    return m_insns[memaddr / 4];
  }

private:
  const uint32_t *m_insns;
  size_t m_insns_size;
};

static void
aarch64_analyze_prologue_test (void)
{
  struct gdbarch_info info;

  gdbarch_info_init (&info);
  info.bfd_arch_info = bfd_scan_arch ("aarch64");

  struct gdbarch *gdbarch = gdbarch_find_by_info (info);
  SELF_CHECK (gdbarch != NULL);

  struct aarch64_prologue_cache cache;
  cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);

  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  /* Test the simple prologue in which frame pointer is used.  */
  {
    static const uint32_t insns[] = {
      0xa9af7bfd, /* stp x29, x30, [sp,#-272]! */
      0x910003fd, /* mov x29, sp */
      0x97ffffe6, /* bl 0x400580 */
    };
    instruction_reader_test reader (insns);

    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
    SELF_CHECK (end == 4 * 2);

    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 272);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
	if (i == AARCH64_FP_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr == -272);
	else if (i == AARCH64_LR_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr == -264);
	else
	  SELF_CHECK (cache.saved_regs[i].addr == -1);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
	int regnum = gdbarch_num_regs (gdbarch);

	SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
		    == -1);
      }
  }

  /* Test a prologue in which STR is used and frame pointer is not
     used.  */
  {
    static const uint32_t insns[] = {
      0xf81d0ff3, /* str x19, [sp, #-48]! */
      0xb9002fe0, /* str w0, [sp, #44] */
      0xf90013e1, /* str x1, [sp, #32] */
      0xfd000fe0, /* str d0, [sp, #24] */
      0xaa0203f3, /* mov x19, x2 */
      0xf94013e0, /* ldr x0, [sp, #32] */
    };
    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    SELF_CHECK (end == 4 * 5);

    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 48);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
	if (i == 1)
	  SELF_CHECK (cache.saved_regs[i].addr == -16);
	else if (i == 19)
	  SELF_CHECK (cache.saved_regs[i].addr == -48);
	else
	  SELF_CHECK (cache.saved_regs[i].addr == -1);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
	int regnum = gdbarch_num_regs (gdbarch);

	if (i == 0)
	  SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
		      == -24);
	else
	  SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
		      == -1);
      }
  }

  /* Test a prologue in which there is a return address signing instruction.  */
  if (tdep->has_pauth ())
    {
      static const uint32_t insns[] = {
	0xd503233f, /* paciasp */
	0xa9bd7bfd, /* stp x29, x30, [sp, #-48]! */
	0x910003fd, /* mov x29, sp */
	0xf801c3f3, /* str x19, [sp, #28] */
	0xb9401fa0, /* ldr x19, [x29, #28] */
      };
      instruction_reader_test reader (insns);

      trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
      CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache,
						reader);

      SELF_CHECK (end == 4 * 4);
      SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
      SELF_CHECK (cache.framesize == 48);

      for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
	{
	  if (i == 19)
	    SELF_CHECK (cache.saved_regs[i].addr == -20);
	  else if (i == AARCH64_FP_REGNUM)
	    SELF_CHECK (cache.saved_regs[i].addr == -48);
	  else if (i == AARCH64_LR_REGNUM)
	    SELF_CHECK (cache.saved_regs[i].addr == -40);
	  else
	    SELF_CHECK (cache.saved_regs[i].addr == -1);
	}

      if (tdep->has_pauth ())
	{
	  SELF_CHECK (trad_frame_value_p (cache.saved_regs,
					  tdep->pauth_ra_state_regnum));
	  SELF_CHECK (cache.saved_regs[tdep->pauth_ra_state_regnum].addr == 1);
	}
    }
}
} // namespace selftests
#endif /* GDB_SELF_TEST */

/* Implement the "skip_prologue" gdbarch method.  */

static CORE_ADDR
aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  CORE_ADDR func_addr, limit_pc;

  /* See if we can determine the end of the prologue via the symbol
     table.  If so, then return either PC, or the PC after the
     prologue, whichever is greater.  */
  if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
    {
      CORE_ADDR post_prologue_pc
	= skip_prologue_using_sal (gdbarch, func_addr);

      if (post_prologue_pc != 0)
	return std::max (pc, post_prologue_pc);
    }

  /* Can't determine prologue from the symbol table, need to examine
     instructions.  */

  /* Find an upper limit on the function prologue using the debug
     information.  If the debug information could not be used to
     provide that bound, then use an arbitrary large number as the
     upper bound.  */
  limit_pc = skip_prologue_using_sal (gdbarch, pc);
  if (limit_pc == 0)
    limit_pc = pc + 128;	/* Magic.  */

  /* Try disassembling prologue.  */
  return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
}

/* Scan the function prologue for THIS_FRAME and populate the prologue
   cache CACHE.  */

static void
aarch64_scan_prologue (struct frame_info *this_frame,
		       struct aarch64_prologue_cache *cache)
{
  CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
  CORE_ADDR prologue_start;
  CORE_ADDR prologue_end;
  CORE_ADDR prev_pc = get_frame_pc (this_frame);
  struct gdbarch *gdbarch = get_frame_arch (this_frame);

  cache->prev_pc = prev_pc;

  /* Assume we do not find a frame.  */
  cache->framereg = -1;
  cache->framesize = 0;

  if (find_pc_partial_function (block_addr, NULL, &prologue_start,
				&prologue_end))
    {
      struct symtab_and_line sal = find_pc_line (prologue_start, 0);

      if (sal.line == 0)
	{
	  /* No line info so use the current PC.  */
	  prologue_end = prev_pc;
	}
      else if (sal.end < prologue_end)
	{
	  /* The next line begins after the function end.  */
	  prologue_end = sal.end;
	}

      prologue_end = std::min (prologue_end, prev_pc);
      aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
    }
  else
    {
      CORE_ADDR frame_loc;

      frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
      if (frame_loc == 0)
	return;

      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = 16;
      cache->saved_regs[29].addr = 0;
      cache->saved_regs[30].addr = 8;
    }
}

/* Fill in *CACHE with information about the prologue of *THIS_FRAME.  This
   function may throw an exception if the inferior's registers or memory is
   not available.  */

static void
aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
			       struct aarch64_prologue_cache *cache)
{
  CORE_ADDR unwound_fp;
  int reg;

  aarch64_scan_prologue (this_frame, cache);

  if (cache->framereg == -1)
    return;

  unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
  if (unwound_fp == 0)
    return;

  cache->prev_sp = unwound_fp + cache->framesize;

  /* Calculate actual addresses of saved registers using offsets
     determined by aarch64_analyze_prologue.  */
  for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
    if (trad_frame_addr_p (cache->saved_regs, reg))
      cache->saved_regs[reg].addr += cache->prev_sp;

  cache->func = get_frame_func (this_frame);

  cache->available_p = 1;
}
/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  try
    {
      aarch64_make_prologue_cache_1 (this_frame, cache);
    }
  catch (const gdb_exception_error &ex)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
	throw;
    }

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
					   void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  /* Halt the backtrace at "_start".  */
  if (cache->prev_pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
    return UNWIND_OUTERMOST;

  /* We've hit a wall, stop.  */
  if (cache->prev_sp == 0)
    return UNWIND_OUTERMOST;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a normal frame is the current function's starting
   PC and the caller's SP when we were called.  */

static void
aarch64_prologue_this_id (struct frame_info *this_frame,
			  void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    *this_id = frame_id_build_unavailable_stack (cache->func);
  else
    *this_id = frame_id_build (cache->prev_sp, cache->func);
}

/* Implement the "prev_register" frame_unwind method.  */

static struct value *
aarch64_prologue_prev_register (struct frame_info *this_frame,
				void **this_cache, int prev_regnum)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  /* If we are asked to unwind the PC, then we need to return the LR
     instead.  The prologue may save PC, but it will point into this
     frame's prologue, not the next frame's resume location.  */
  if (prev_regnum == AARCH64_PC_REGNUM)
    {
      CORE_ADDR lr;
      struct gdbarch *gdbarch = get_frame_arch (this_frame);
      struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);

      if (tdep->has_pauth ()
	  && trad_frame_value_p (cache->saved_regs,
				 tdep->pauth_ra_state_regnum))
	lr = aarch64_frame_unmask_lr (tdep, this_frame, lr);

      return frame_unwind_got_constant (this_frame, prev_regnum, lr);
    }

  /* SP is generally not saved to the stack, but this frame is
     identified by the next frame's stack pointer at the time of the
     call.  The value was already reconstructed into PREV_SP.  */
  /*
      +----------+  ^
      | saved lr |  |
   +->| saved fp |--+
   |  |          |
   |  |          |     <- Previous SP
   |  +----------+
   |  | saved lr |
   +--| saved fp |<- FP
      |          |
      |          |<- SP
      +----------+  */
  if (prev_regnum == AARCH64_SP_REGNUM)
    return frame_unwind_got_constant (this_frame, prev_regnum,
				      cache->prev_sp);

  return trad_frame_get_prev_register (this_frame, cache->saved_regs,
				       prev_regnum);
}

/* AArch64 prologue unwinder.  */
struct frame_unwind aarch64_prologue_unwind =
{
  NORMAL_FRAME,
  aarch64_prologue_frame_unwind_stop_reason,
  aarch64_prologue_this_id,
  aarch64_prologue_prev_register,
  NULL,
  default_frame_sniffer
};
/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  try
    {
      cache->prev_sp = get_frame_register_unsigned (this_frame,
						    AARCH64_SP_REGNUM);
      cache->prev_pc = get_frame_pc (this_frame);
      cache->available_p = 1;
    }
  catch (const gdb_exception_error &ex)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
	throw;
    }

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame,
				       void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a stub frame is the current SP and LR.  */

static void
aarch64_stub_this_id (struct frame_info *this_frame,
		      void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (cache->available_p)
    *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
  else
    *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
}

/* Implement the "sniffer" frame_unwind method.  */

static int
aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
			     struct frame_info *this_frame,
			     void **this_prologue_cache)
{
  CORE_ADDR addr_in_block;
  gdb_byte dummy[4];

  addr_in_block = get_frame_address_in_block (this_frame);
  if (in_plt_section (addr_in_block)
      /* We also use the stub unwinder if the target memory is unreadable
	 to avoid having the prologue unwinder trying to read it.  */
      || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
    return 1;

  return 0;
}

/* AArch64 stub unwinder.  */
struct frame_unwind aarch64_stub_unwind =
{
  NORMAL_FRAME,
  aarch64_stub_frame_unwind_stop_reason,
  aarch64_stub_this_id,
  aarch64_prologue_prev_register,
  NULL,
  aarch64_stub_unwind_sniffer
};

/* Return the frame base address of *THIS_FRAME.  */

static CORE_ADDR
aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  return cache->prev_sp - cache->framesize;
}

/* AArch64 default frame base information.  */
struct frame_base aarch64_normal_base =
{
  &aarch64_prologue_unwind,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base
};

/* Return the value of the REGNUM register in the previous frame of
   *THIS_FRAME.  */

static struct value *
aarch64_dwarf2_prev_register (struct frame_info *this_frame,
			      void **this_cache, int regnum)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (get_frame_arch (this_frame));
  CORE_ADDR lr;

  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      lr = aarch64_frame_unmask_lr (tdep, this_frame, lr);
      return frame_unwind_got_constant (this_frame, regnum, lr);

    default:
      internal_error (__FILE__, __LINE__,
		      _("Unexpected register %d"), regnum);
    }
}
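
/* One-byte DWARF expressions that evaluate to the constants 0 and 1,
   used below as saved-value expressions for the return-address signing
   state pseudo register.  */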
static const unsigned char op_lit0 = DW_OP_lit0;
static const unsigned char op_lit1 = DW_OP_lit1;

/* Implement the "init_reg" dwarf2_frame_ops method.  */

static void
aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
			       struct dwarf2_frame_state_reg *reg,
			       struct frame_info *this_frame)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      reg->how = DWARF2_FRAME_REG_FN;
      reg->loc.fn = aarch64_dwarf2_prev_register;
      return;

    case AARCH64_SP_REGNUM:
      reg->how = DWARF2_FRAME_REG_CFA;
      return;
    }

  /* Init pauth registers.  */
  if (tdep->has_pauth ())
    {
      if (regnum == tdep->pauth_ra_state_regnum)
	{
	  /* Initialize RA_STATE to zero.  */
	  reg->how = DWARF2_FRAME_REG_SAVED_VAL_EXP;
	  reg->loc.exp.start = &op_lit0;
	  reg->loc.exp.len = 1;
	  return;
	}
      else if (regnum == AARCH64_PAUTH_DMASK_REGNUM (tdep->pauth_reg_base)
	       || regnum == AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base))
	{
	  reg->how = DWARF2_FRAME_REG_SAME_VALUE;
	  return;
	}
    }
}

/* Implement the execute_dwarf_cfa_vendor_op method.  */

static bool
aarch64_execute_dwarf_cfa_vendor_op (struct gdbarch *gdbarch, gdb_byte op,
				     struct dwarf2_frame_state *fs)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  struct dwarf2_frame_state_reg *ra_state;

  if (op == DW_CFA_AARCH64_negate_ra_state)
    {
      /* On systems without pauth, treat as a nop.  */
      if (!tdep->has_pauth ())
	return true;

      /* Allocate RA_STATE column if it's not allocated yet.  */
      fs->regs.alloc_regs (AARCH64_DWARF_PAUTH_RA_STATE + 1);

      /* Toggle the status of RA_STATE between 0 and 1.  */
      ra_state = &(fs->regs.reg[AARCH64_DWARF_PAUTH_RA_STATE]);
      ra_state->how = DWARF2_FRAME_REG_SAVED_VAL_EXP;

      if (ra_state->loc.exp.start == nullptr
	  || ra_state->loc.exp.start == &op_lit0)
	ra_state->loc.exp.start = &op_lit1;
      else
	ra_state->loc.exp.start = &op_lit0;

      ra_state->loc.exp.len = 1;

      return true;
    }

  return false;
}

/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

struct stack_item_t
{
  /* Value to pass on stack.  It can be NULL if this item is for stack
     padding.  */
  const gdb_byte *data;

  /* Size in bytes of value to pass on stack.  */
  int len;
};

/* Implement the gdbarch type alignment method, overrides the generic
   alignment algorithm for anything that is aarch64 specific.  */

static ULONGEST
aarch64_type_align (gdbarch *gdbarch, struct type *t)
{
  t = check_typedef (t);
  if (TYPE_CODE (t) == TYPE_CODE_ARRAY && TYPE_VECTOR (t))
    {
      /* Use the natural alignment for vector types (the same as for
	 scalar types), but the maximum alignment is 128 bits.  */
      if (TYPE_LENGTH (t) > 16)
	return 16;
      else
	return TYPE_LENGTH (t);
    }

  /* Allow the common code to calculate the alignment.  */
  return 0;
}

/* Worker function for aapcs_is_vfp_call_or_return_candidate.

   Return the number of registers required, or -1 on failure.

   When encountering a base element, if FUNDAMENTAL_TYPE is not set then set it
   to the element, else fail if the type of this element does not match the
   existing value.  */

static int
aapcs_is_vfp_call_or_return_candidate_1 (struct type *type,
					 struct type **fundamental_type)
{
  if (type == nullptr)
    return -1;

  switch (TYPE_CODE (type))
    {
    case TYPE_CODE_FLT:
      if (TYPE_LENGTH (type) > 16)
	return -1;

      if (*fundamental_type == nullptr)
	*fundamental_type = type;
      else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
	       || TYPE_CODE (type) != TYPE_CODE (*fundamental_type))
	return -1;

      return 1;

    case TYPE_CODE_COMPLEX:
      {
	struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
	if (TYPE_LENGTH (target_type) > 16)
	  return -1;

	if (*fundamental_type == nullptr)
	  *fundamental_type = target_type;
	else if (TYPE_LENGTH (target_type) != TYPE_LENGTH (*fundamental_type)
		 || TYPE_CODE (target_type) != TYPE_CODE (*fundamental_type))
	  return -1;

	return 2;
      }

    case TYPE_CODE_ARRAY:
      {
	if (TYPE_VECTOR (type))
	  {
	    if (TYPE_LENGTH (type) != 8 && TYPE_LENGTH (type) != 16)
	      return -1;

	    if (*fundamental_type == nullptr)
	      *fundamental_type = type;
	    else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
		     || TYPE_CODE (type) != TYPE_CODE (*fundamental_type))
	      return -1;

	    return 1;
	  }
	else
	  {
	    struct type *target_type = TYPE_TARGET_TYPE (type);
	    int count = aapcs_is_vfp_call_or_return_candidate_1
			  (target_type, fundamental_type);

	    if (count == -1)
	      return count;

	    count *= (TYPE_LENGTH (type) / TYPE_LENGTH (target_type));
	    return count;
	  }
      }

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      {
	int count = 0;

	for (int i = 0; i < TYPE_NFIELDS (type); i++)
	  {
	    /* Ignore any static fields.  */
	    if (field_is_static (&TYPE_FIELD (type, i)))
	      continue;

	    struct type *member = check_typedef (TYPE_FIELD_TYPE (type, i));

	    int sub_count = aapcs_is_vfp_call_or_return_candidate_1
			      (member, fundamental_type);
	    if (sub_count == -1)
	      return -1;
	    count += sub_count;
	  }

	/* Ensure there is no padding between the fields (allowing for empty
	   zero length structs).  */
	int ftype_length = (*fundamental_type == nullptr)
			   ? 0 : TYPE_LENGTH (*fundamental_type);
	if (count * ftype_length != TYPE_LENGTH (type))
	  return -1;

	return count;
      }

    default:
      break;
    }

  return -1;
}

/* Return true if an argument, whose type is described by TYPE, can be passed
   or returned in simd/fp registers, providing enough parameter passing
   registers are available.  This is as described in the AAPCS64.

   Upon successful return, *COUNT returns the number of needed registers,
   and *FUNDAMENTAL_TYPE contains the type of those registers.

   A candidate as per the AAPCS64 5.4.2.C is either a:
   - float.
   - short-vector.
   - HFA (Homogeneous Floating-point Aggregate, 4.3.5.1).  A Composite type
     where all the members are floats and which has at most 4 members.
   - HVA (Homogeneous Short-vector Aggregate, 4.3.5.2).  A Composite type
     where all the members are short vectors and which has at most 4 members.
   - Complex (7.1.1)

   Note that HFAs and HVAs can include nested structures and arrays.  */
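
/* For example, "struct { float x, y, z; }" is an HFA of three floats,
   whereas "struct { float f; double d; }" is not a candidate because its
   members do not share one fundamental type.  */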

static bool
aapcs_is_vfp_call_or_return_candidate (struct type *type, int *count,
				       struct type **fundamental_type)
{
  if (type == nullptr)
    return false;

  *fundamental_type = nullptr;

  int ag_count = aapcs_is_vfp_call_or_return_candidate_1 (type,
							  fundamental_type);

  if (ag_count > 0 && ag_count <= HA_MAX_NUM_FLDS)
    {
      *count = ag_count;
      return true;
    }
  else
    return false;
}

/* AArch64 function call information structure.  */
struct aarch64_call_info
{
  /* The current argument number.  */
  unsigned argnum = 0;

  /* The next general purpose register number, equivalent to NGRN as
     described in the AArch64 Procedure Call Standard.  */
  unsigned ngrn = 0;

  /* The next SIMD and floating point register number, equivalent to
     NSRN as described in the AArch64 Procedure Call Standard.  */
  unsigned nsrn = 0;

  /* The next stacked argument address, equivalent to NSAA as
     described in the AArch64 Procedure Call Standard.  */
  unsigned nsaa = 0;

  /* Stack item vector.  */
  std::vector<stack_item_t> si;
};

/* Pass a value in a sequence of consecutive X registers.  The caller
   is responsible for ensuring sufficient registers are available.  */

static void
pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
	   struct aarch64_call_info *info, struct type *type,
	   struct value *arg)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int len = TYPE_LENGTH (type);
  enum type_code typecode = TYPE_CODE (type);
  int regnum = AARCH64_X0_REGNUM + info->ngrn;
  const bfd_byte *buf = value_contents (arg);

  info->argnum++;

  while (len > 0)
    {
      int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
      CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
						   byte_order);


      /* Adjust sub-word struct/union args when big-endian.  */
      if (byte_order == BFD_ENDIAN_BIG
	  && partial_len < X_REGISTER_SIZE
	  && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
	regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);

      if (aarch64_debug)
	{
	  debug_printf ("arg %d in %s = 0x%s\n", info->argnum,
			gdbarch_register_name (gdbarch, regnum),
			phex (regval, X_REGISTER_SIZE));
	}
      regcache_cooked_write_unsigned (regcache, regnum, regval);
      len -= partial_len;
      buf += partial_len;
      regnum++;
    }
}

/* Attempt to marshall a value in a V register.  Return 1 if
   successful, or 0 if insufficient registers are available.  This
   function, unlike the equivalent pass_in_x() function does not
   handle arguments spread across multiple registers.  */

static int
pass_in_v (struct gdbarch *gdbarch,
	   struct regcache *regcache,
	   struct aarch64_call_info *info,
	   int len, const bfd_byte *buf)
{
  if (info->nsrn < 8)
    {
      int regnum = AARCH64_V0_REGNUM + info->nsrn;
      /* Enough space for a full vector register.  */
      gdb_byte reg[register_size (gdbarch, regnum)];
      gdb_assert (len <= sizeof (reg));

      info->argnum++;
      info->nsrn++;

      memset (reg, 0, sizeof (reg));
      /* PCS C.1, the argument is allocated to the least significant
	 bits of the V register.  */
      memcpy (reg, buf, len);
      regcache->cooked_write (regnum, reg);

      if (aarch64_debug)
	{
	  debug_printf ("arg %d in %s\n", info->argnum,
			gdbarch_register_name (gdbarch, regnum));
	}
      return 1;
    }
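  /* The V registers are exhausted; normalize NSRN to 8 so subsequent
     SIMD/FP candidates are passed on the stack as well.  */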
  info->nsrn = 8;
  return 0;
}

/* Marshall an argument onto the stack.  */

static void
pass_on_stack (struct aarch64_call_info *info, struct type *type,
	       struct value *arg)
{
  const bfd_byte *buf = value_contents (arg);
  int len = TYPE_LENGTH (type);
  int align;
  stack_item_t item;

  info->argnum++;

  align = type_align (type);
  /* PCS C.17: the stack should be aligned to the larger of 8 bytes or
     the natural alignment of the argument's type.  */
  align = align_up (align, 8);

  /* The AArch64 PCS requires at most doubleword alignment.  */
  if (align > 16)
    align = 16;

  if (aarch64_debug)
    {
      debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
		    info->nsaa);
    }

  item.len = len;
  item.data = buf;
  info->si.push_back (item);

  info->nsaa += len;
  if (info->nsaa & (align - 1))
    {
      /* Push stack alignment padding.  */
      int pad = align - (info->nsaa & (align - 1));

      item.len = pad;
      item.data = NULL;

      info->si.push_back (item);
      info->nsaa += pad;
    }
}

/* Marshall an argument into a sequence of one or more consecutive X
   registers or, if insufficient X registers are available then onto
   the stack.  */

static void
pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
		    struct aarch64_call_info *info, struct type *type,
		    struct value *arg)
{
  int len = TYPE_LENGTH (type);
  int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;

  /* PCS C.13 - Pass in registers if we have enough spare */
  if (info->ngrn + nregs <= 8)
    {
      pass_in_x (gdbarch, regcache, info, type, arg);
      info->ngrn += nregs;
    }
  else
    {
      info->ngrn = 8;
      pass_on_stack (info, type, arg);
    }
}
/* Pass a value, which is of type ARG_TYPE, in a V register.  Assumes the
   value is an aapcs_is_vfp_call_or_return_candidate and there are enough
   spare V registers.  A return value of false is an error state, as the
   value will have been partially passed to the stack.  */
static bool
pass_in_v_vfp_candidate (struct gdbarch *gdbarch, struct regcache *regcache,
			 struct aarch64_call_info *info, struct type *arg_type,
			 struct value *arg)
{
  switch (TYPE_CODE (arg_type))
    {
    case TYPE_CODE_FLT:
      return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type),
			value_contents (arg));
      break;

    case TYPE_CODE_COMPLEX:
      {
	const bfd_byte *buf = value_contents (arg);
	struct type *target_type = check_typedef (TYPE_TARGET_TYPE (arg_type));

	if (!pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type),
			buf))
	  return false;

	return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type),
			  buf + TYPE_LENGTH (target_type));
      }

    case TYPE_CODE_ARRAY:
      if (TYPE_VECTOR (arg_type))
	return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type),
			  value_contents (arg));
      /* fall through.  */

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      for (int i = 0; i < TYPE_NFIELDS (arg_type); i++)
	{
	  /* Don't include static fields.  */
	  if (field_is_static (&TYPE_FIELD (arg_type, i)))
	    continue;

	  struct value *field = value_primitive_field (arg, 0, i, arg_type);
	  struct type *field_type = check_typedef (value_type (field));

	  if (!pass_in_v_vfp_candidate (gdbarch, regcache, info, field_type,
					field))
	    return false;
	}
      return true;

    default:
      return false;
    }
}

/* Implement the "push_dummy_call" gdbarch method.  */

static CORE_ADDR
aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
			 struct regcache *regcache, CORE_ADDR bp_addr,
			 int nargs,
			 struct value **args, CORE_ADDR sp,
			 function_call_return_method return_method,
			 CORE_ADDR struct_addr)
{
  int argnum;
  struct aarch64_call_info info;
  /* We need to know what the type of the called function is in order
     to determine the number of named/anonymous arguments for the
     actual argument placement, and the return type in order to handle
     return value correctly.

     The generic code above us views the decision of return in memory
     or return in registers as a two stage process.  The language
     handler is consulted first and may decide to return in memory (e.g.
     a class with a copy constructor returned by value), this will cause
     the generic code to allocate space AND insert an initial leading
     argument.

     If the language code does not decide to pass in memory then the
     target code is consulted.

     If the language code decides to pass in memory we want to move
     the pointer inserted as the initial argument from the argument
     list and into X8, the conventional AArch64 struct return pointer
     register.  */

  /* Set the return address.  For the AArch64, the return breakpoint
     is always at BP_ADDR.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);

  /* If we were given an initial argument for the return slot, lose it.  */
  if (return_method == return_method_hidden_param)
    {
      args++;
      nargs--;
    }

  /* The struct_return pointer occupies X8.  */
  if (return_method != return_method_normal)
    {
      if (aarch64_debug)
	{
	  debug_printf ("struct return in %s = 0x%s\n",
			gdbarch_register_name (gdbarch,
					       AARCH64_STRUCT_RETURN_REGNUM),
			paddress (gdbarch, struct_addr));
	}
      regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
				      struct_addr);
    }

  for (argnum = 0; argnum < nargs; argnum++)
    {
      struct value *arg = args[argnum];
      struct type *arg_type, *fundamental_type;
      int len, elements;

      arg_type = check_typedef (value_type (arg));
      len = TYPE_LENGTH (arg_type);
      /* If arg can be passed in v registers as per the AAPCS64, then do so
	 if there are enough spare registers.  */
1694 if (aapcs_is_vfp_call_or_return_candidate (arg_type, &elements,
1695 &fundamental_type))
1696 {
1697 if (info.nsrn + elements <= 8)
1698 {
1699 /* We know that we have sufficient registers available therefore
1700 this will never need to fallback to the stack. */
1701 if (!pass_in_v_vfp_candidate (gdbarch, regcache, &info, arg_type,
1702 arg))
1703 gdb_assert_not_reached ("Failed to push args");
1704 }
1705 else
1706 {
1707 info.nsrn = 8;
1708 pass_on_stack (&info, arg_type, arg);
1709 }
1710 continue;
1711 }
1712
1713 switch (TYPE_CODE (arg_type))
1714 {
1715 case TYPE_CODE_INT:
1716 case TYPE_CODE_BOOL:
1717 case TYPE_CODE_CHAR:
1718 case TYPE_CODE_RANGE:
1719 case TYPE_CODE_ENUM:
1720 if (len < 4)
1721 {
1722 /* Promote to 32 bit integer. */
1723 if (TYPE_UNSIGNED (arg_type))
1724 arg_type = builtin_type (gdbarch)->builtin_uint32;
1725 else
1726 arg_type = builtin_type (gdbarch)->builtin_int32;
1727 arg = value_cast (arg_type, arg);
1728 }
1729 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1730 break;
1731
1732 case TYPE_CODE_STRUCT:
1733 case TYPE_CODE_ARRAY:
1734 case TYPE_CODE_UNION:
1735 if (len > 16)
1736 {
1737 /* PCS B.7 Aggregates larger than 16 bytes are passed by
1738 invisible reference. */
1739
1740 /* Allocate aligned storage. */
1741 sp = align_down (sp - len, 16);
1742
1743 /* Write the real data into the stack. */
1744 write_memory (sp, value_contents (arg), len);
1745
1746 /* Construct the indirection. */
1747 arg_type = lookup_pointer_type (arg_type);
1748 arg = value_from_pointer (arg_type, sp);
1749 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1750 }
1751 else
1752 /* PCS C.15 / C.18 multiple values pass. */
1753 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1754 break;
1755
1756 default:
1757 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1758 break;
1759 }
1760 }
1761
1762 /* Make sure stack retains 16 byte alignment. */
1763 if (info.nsaa & 15)
1764 sp -= 16 - (info.nsaa & 15);
1765
1766 while (!info.si.empty ())
1767 {
1768 const stack_item_t &si = info.si.back ();
1769
1770 sp -= si.len;
1771 if (si.data != NULL)
1772 write_memory (sp, si.data, si.len);
1773 info.si.pop_back ();
1774 }
1775
1776 /* Finally, update the SP register. */
1777 regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);
1778
1779 return sp;
1780 }
1781
1782 /* Implement the "frame_align" gdbarch method. */
1783
1784 static CORE_ADDR
1785 aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
1786 {
1787 /* Align the stack to sixteen bytes. */
1788 return sp & ~(CORE_ADDR) 15;
1789 }
1790
1791 /* Return the type for an AdvSISD Q register. */
1792
1793 static struct type *
1794 aarch64_vnq_type (struct gdbarch *gdbarch)
1795 {
1796 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1797
1798 if (tdep->vnq_type == NULL)
1799 {
1800 struct type *t;
1801 struct type *elem;
1802
1803 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
1804 TYPE_CODE_UNION);
1805
1806 elem = builtin_type (gdbarch)->builtin_uint128;
1807 append_composite_type_field (t, "u", elem);
1808
1809 elem = builtin_type (gdbarch)->builtin_int128;
1810 append_composite_type_field (t, "s", elem);
1811
1812 tdep->vnq_type = t;
1813 }
1814
1815 return tdep->vnq_type;
1816 }
1817
1818 /* Return the type for an AdvSISD D register. */
1819
1820 static struct type *
1821 aarch64_vnd_type (struct gdbarch *gdbarch)
1822 {
1823 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1824
1825 if (tdep->vnd_type == NULL)
1826 {
1827 struct type *t;
1828 struct type *elem;
1829
1830 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
1831 TYPE_CODE_UNION);
1832
1833 elem = builtin_type (gdbarch)->builtin_double;
1834 append_composite_type_field (t, "f", elem);
1835
1836 elem = builtin_type (gdbarch)->builtin_uint64;
1837 append_composite_type_field (t, "u", elem);
1838
1839 elem = builtin_type (gdbarch)->builtin_int64;
1840 append_composite_type_field (t, "s", elem);
1841
1842 tdep->vnd_type = t;
1843 }
1844
1845 return tdep->vnd_type;
1846 }
1847
1848 /* Return the type for an AdvSISD S register. */
1849
1850 static struct type *
1851 aarch64_vns_type (struct gdbarch *gdbarch)
1852 {
1853 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1854
1855 if (tdep->vns_type == NULL)
1856 {
1857 struct type *t;
1858 struct type *elem;
1859
1860 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
1861 TYPE_CODE_UNION);
1862
1863 elem = builtin_type (gdbarch)->builtin_float;
1864 append_composite_type_field (t, "f", elem);
1865
1866 elem = builtin_type (gdbarch)->builtin_uint32;
1867 append_composite_type_field (t, "u", elem);
1868
1869 elem = builtin_type (gdbarch)->builtin_int32;
1870 append_composite_type_field (t, "s", elem);
1871
1872 tdep->vns_type = t;
1873 }
1874
1875 return tdep->vns_type;
1876 }
1877
1878 /* Return the type for an AdvSISD H register. */
1879
1880 static struct type *
1881 aarch64_vnh_type (struct gdbarch *gdbarch)
1882 {
1883 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1884
1885 if (tdep->vnh_type == NULL)
1886 {
1887 struct type *t;
1888 struct type *elem;
1889
1890 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
1891 TYPE_CODE_UNION);
1892
1893 elem = builtin_type (gdbarch)->builtin_half;
1894 append_composite_type_field (t, "f", elem);
1895
1896 elem = builtin_type (gdbarch)->builtin_uint16;
1897 append_composite_type_field (t, "u", elem);
1898
1899 elem = builtin_type (gdbarch)->builtin_int16;
1900 append_composite_type_field (t, "s", elem);
1901
1902 tdep->vnh_type = t;
1903 }
1904
1905 return tdep->vnh_type;
1906 }
1907
1908 /* Return the type for an AdvSISD B register. */
1909
1910 static struct type *
1911 aarch64_vnb_type (struct gdbarch *gdbarch)
1912 {
1913 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1914
1915 if (tdep->vnb_type == NULL)
1916 {
1917 struct type *t;
1918 struct type *elem;
1919
1920 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
1921 TYPE_CODE_UNION);
1922
1923 elem = builtin_type (gdbarch)->builtin_uint8;
1924 append_composite_type_field (t, "u", elem);
1925
1926 elem = builtin_type (gdbarch)->builtin_int8;
1927 append_composite_type_field (t, "s", elem);
1928
1929 tdep->vnb_type = t;
1930 }
1931
1932 return tdep->vnb_type;
1933 }
1934
1935 /* Return the type for an AdvSIMD V register.  */
1936
1937 static struct type *
1938 aarch64_vnv_type (struct gdbarch *gdbarch)
1939 {
1940 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1941
1942 if (tdep->vnv_type == NULL)
1943 {
1944 /* The other AArch64 pseudo registers (Q, D, H, S, B) refer to a single
1945 value slice of the non-pseudo vector registers.  However, NEON V
1946 registers are always vector registers, and need constructing as such.  */
1947 const struct builtin_type *bt = builtin_type (gdbarch);
1948
1949 struct type *t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnv",
1950 TYPE_CODE_UNION);
1951
1952 struct type *sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
1953 TYPE_CODE_UNION);
1954 append_composite_type_field (sub, "f",
1955 init_vector_type (bt->builtin_double, 2));
1956 append_composite_type_field (sub, "u",
1957 init_vector_type (bt->builtin_uint64, 2));
1958 append_composite_type_field (sub, "s",
1959 init_vector_type (bt->builtin_int64, 2));
1960 append_composite_type_field (t, "d", sub);
1961
1962 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
1963 TYPE_CODE_UNION);
1964 append_composite_type_field (sub, "f",
1965 init_vector_type (bt->builtin_float, 4));
1966 append_composite_type_field (sub, "u",
1967 init_vector_type (bt->builtin_uint32, 4));
1968 append_composite_type_field (sub, "s",
1969 init_vector_type (bt->builtin_int32, 4));
1970 append_composite_type_field (t, "s", sub);
1971
1972 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
1973 TYPE_CODE_UNION);
1974 append_composite_type_field (sub, "f",
1975 init_vector_type (bt->builtin_half, 8));
1976 append_composite_type_field (sub, "u",
1977 init_vector_type (bt->builtin_uint16, 8));
1978 append_composite_type_field (sub, "s",
1979 init_vector_type (bt->builtin_int16, 8));
1980 append_composite_type_field (t, "h", sub);
1981
1982 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
1983 TYPE_CODE_UNION);
1984 append_composite_type_field (sub, "u",
1985 init_vector_type (bt->builtin_uint8, 16));
1986 append_composite_type_field (sub, "s",
1987 init_vector_type (bt->builtin_int8, 16));
1988 append_composite_type_field (t, "b", sub);
1989
1990 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
1991 TYPE_CODE_UNION);
1992 append_composite_type_field (sub, "u",
1993 init_vector_type (bt->builtin_uint128, 1));
1994 append_composite_type_field (sub, "s",
1995 init_vector_type (bt->builtin_int128, 1));
1996 append_composite_type_field (t, "q", sub);
1997
1998 tdep->vnv_type = t;
1999 }
2000
2001 return tdep->vnv_type;
2002 }
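
/* For illustration (hypothetical session, on an SVE target whose V
   pseudo registers use this type): unlike the scalar pseudo types
   above, each member here is itself a vector, so it can be indexed:

       (gdb) print $v0.s.u[2]

   reads the third 32-bit lane of V0.  */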
2003
2004 /* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
2005
2006 static int
2007 aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
2008 {
2009 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2010
2011 if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
2012 return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;
2013
2014 if (reg == AARCH64_DWARF_SP)
2015 return AARCH64_SP_REGNUM;
2016
2017 if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
2018 return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;
2019
2020 if (reg == AARCH64_DWARF_SVE_VG)
2021 return AARCH64_SVE_VG_REGNUM;
2022
2023 if (reg == AARCH64_DWARF_SVE_FFR)
2024 return AARCH64_SVE_FFR_REGNUM;
2025
2026 if (reg >= AARCH64_DWARF_SVE_P0 && reg <= AARCH64_DWARF_SVE_P0 + 15)
2027 return AARCH64_SVE_P0_REGNUM + reg - AARCH64_DWARF_SVE_P0;
2028
2029 if (reg >= AARCH64_DWARF_SVE_Z0 && reg <= AARCH64_DWARF_SVE_Z0 + 15)
2030 return AARCH64_SVE_Z0_REGNUM + reg - AARCH64_DWARF_SVE_Z0;
2031
2032 if (tdep->has_pauth ())
2033 {
2034 if (reg >= AARCH64_DWARF_PAUTH_DMASK && reg <= AARCH64_DWARF_PAUTH_CMASK)
2035 return tdep->pauth_reg_base + reg - AARCH64_DWARF_PAUTH_DMASK;
2036
2037 if (reg == AARCH64_DWARF_PAUTH_RA_STATE)
2038 return tdep->pauth_ra_state_regnum;
2039 }
2040
2041 return -1;
2042 }
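
/* For example, assuming the numbering from the AArch64 DWARF ABI
   (AARCH64_DWARF_X0 == 0, AARCH64_DWARF_V0 == 64): a DWARF expression
   naming register 1 resolves to AARCH64_X0_REGNUM + 1 (X1), and one
   naming register 64 resolves to AARCH64_V0_REGNUM (V0).  Anything
   unrecognised yields -1 and is rejected by the caller.  */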
2043
2044 /* Implement the "print_insn" gdbarch method. */
2045
2046 static int
2047 aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
2048 {
2049 info->symbols = NULL;
2050 return default_print_insn (memaddr, info);
2051 }
2052
2053 /* AArch64 BRK software debug mode instruction.
2054 Note that AArch64 code is always little-endian.
2055 1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000. */
2056 constexpr gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
2057
2058 typedef BP_MANIPULATION (aarch64_default_breakpoint) aarch64_breakpoint;
2059
2060 /* Extract from REGS, a regcache holding the (raw) register state, a
2061 function return value of type TYPE, and copy that, in virtual
2062 format, into VALBUF. */
2063
2064 static void
2065 aarch64_extract_return_value (struct type *type, struct regcache *regs,
2066 gdb_byte *valbuf)
2067 {
2068 struct gdbarch *gdbarch = regs->arch ();
2069 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2070 int elements;
2071 struct type *fundamental_type;
2072
2073 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2074 &fundamental_type))
2075 {
2076 int len = TYPE_LENGTH (fundamental_type);
2077
2078 for (int i = 0; i < elements; i++)
2079 {
2080 int regno = AARCH64_V0_REGNUM + i;
2081 /* Enough space for a full vector register. */
2082 gdb_byte buf[register_size (gdbarch, regno)];
2083 gdb_assert (len <= sizeof (buf));
2084
2085 if (aarch64_debug)
2086 {
2087 debug_printf ("read HFA or HVA return value element %d from %s\n",
2088 i + 1,
2089 gdbarch_register_name (gdbarch, regno));
2090 }
2091 regs->cooked_read (regno, buf);
2092
2093 memcpy (valbuf, buf, len);
2094 valbuf += len;
2095 }
2096 }
2097 else if (TYPE_CODE (type) == TYPE_CODE_INT
2098 || TYPE_CODE (type) == TYPE_CODE_CHAR
2099 || TYPE_CODE (type) == TYPE_CODE_BOOL
2100 || TYPE_CODE (type) == TYPE_CODE_PTR
2101 || TYPE_IS_REFERENCE (type)
2102 || TYPE_CODE (type) == TYPE_CODE_ENUM)
2103 {
2104 /* If the type is a plain integer, then the access is
2105 straightforward. Otherwise we have to play around a bit
2106 more. */
2107 int len = TYPE_LENGTH (type);
2108 int regno = AARCH64_X0_REGNUM;
2109 ULONGEST tmp;
2110
2111 while (len > 0)
2112 {
2113 /* By using store_unsigned_integer we avoid having to do
2114 anything special for small big-endian values. */
2115 regcache_cooked_read_unsigned (regs, regno++, &tmp);
2116 store_unsigned_integer (valbuf,
2117 (len > X_REGISTER_SIZE
2118 ? X_REGISTER_SIZE : len), byte_order, tmp);
2119 len -= X_REGISTER_SIZE;
2120 valbuf += X_REGISTER_SIZE;
2121 }
2122 }
2123 else
2124 {
2125 /* For a structure or union the behaviour is as if the value had
2126 been stored to word-aligned memory and then loaded into
2127 registers with 64-bit load instruction(s). */
2128 int len = TYPE_LENGTH (type);
2129 int regno = AARCH64_X0_REGNUM;
2130 bfd_byte buf[X_REGISTER_SIZE];
2131
2132 while (len > 0)
2133 {
2134 regs->cooked_read (regno++, buf);
2135 memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2136 len -= X_REGISTER_SIZE;
2137 valbuf += X_REGISTER_SIZE;
2138 }
2139 }
2140 }
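
/* For example (per AAPCS64): a C type

       struct hfa { double a; double b; };

   is an HFA of two doubles, so the code above reads element 0 from
   V0 and element 1 from V1, copying 8 bytes from each into VALBUF.
   A 16-byte integer struct would instead be read from X0 and X1,
   8 bytes at a time.  */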
2141
2142
2143 /* Will a function return an aggregate type in memory or in a
2144 register? Return 0 if an aggregate type can be returned in a
2145 register, 1 if it must be returned in memory. */
2146
2147 static int
2148 aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
2149 {
2150 type = check_typedef (type);
2151 int elements;
2152 struct type *fundamental_type;
2153
2154 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2155 &fundamental_type))
2156 {
2157 /* v0-v7 are used to return values and one register is allocated
2158 for one member. However, HFA or HVA has at most four members. */
2159 return 0;
2160 }
2161
2162 if (TYPE_LENGTH (type) > 16)
2163 {
2164 /* PCS rule B.6: aggregates larger than 16 bytes are passed by
2165 invisible reference. */
2166
2167 return 1;
2168 }
2169
2170 return 0;
2171 }
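
/* For example, under AAPCS64: struct { long a; long b; } is 16 bytes,
   so it comes back in X0/X1 (result 0), while a 24-byte
   struct { long a; long b; long c; } is returned via memory addressed
   by X8 (result 1).  An HFA such as struct { double d[4]; } stays in
   V0-V3 despite being 32 bytes, because the VFP-candidate check above
   returns first.  */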
2172
2173 /* Write into appropriate registers a function return value of type
2174 TYPE, given in virtual format. */
2175
2176 static void
2177 aarch64_store_return_value (struct type *type, struct regcache *regs,
2178 const gdb_byte *valbuf)
2179 {
2180 struct gdbarch *gdbarch = regs->arch ();
2181 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2182 int elements;
2183 struct type *fundamental_type;
2184
2185 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2186 &fundamental_type))
2187 {
2188 int len = TYPE_LENGTH (fundamental_type);
2189
2190 for (int i = 0; i < elements; i++)
2191 {
2192 int regno = AARCH64_V0_REGNUM + i;
2193 /* Enough space for a full vector register. */
2194 gdb_byte tmpbuf[register_size (gdbarch, regno)];
2195 gdb_assert (len <= sizeof (tmpbuf));
2196
2197 if (aarch64_debug)
2198 {
2199 debug_printf ("write HFA or HVA return value element %d to %s\n",
2200 i + 1,
2201 gdbarch_register_name (gdbarch, regno));
2202 }
2203
2204 memcpy (tmpbuf, valbuf,
2205 len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
2206 regs->cooked_write (regno, tmpbuf);
2207 valbuf += len;
2208 }
2209 }
2210 else if (TYPE_CODE (type) == TYPE_CODE_INT
2211 || TYPE_CODE (type) == TYPE_CODE_CHAR
2212 || TYPE_CODE (type) == TYPE_CODE_BOOL
2213 || TYPE_CODE (type) == TYPE_CODE_PTR
2214 || TYPE_IS_REFERENCE (type)
2215 || TYPE_CODE (type) == TYPE_CODE_ENUM)
2216 {
2217 if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
2218 {
2219 /* Values of one word or less are zero/sign-extended and
2220 returned in X0. */
2221 bfd_byte tmpbuf[X_REGISTER_SIZE];
2222 LONGEST val = unpack_long (type, valbuf);
2223
2224 store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
2225 regs->cooked_write (AARCH64_X0_REGNUM, tmpbuf);
2226 }
2227 else
2228 {
2229 /* Integral values greater than one word are stored in
2230 consecutive registers starting with X0. This will always
2231 be a multiple of the register size. */
2232 int len = TYPE_LENGTH (type);
2233 int regno = AARCH64_X0_REGNUM;
2234
2235 while (len > 0)
2236 {
2237 regs->cooked_write (regno++, valbuf);
2238 len -= X_REGISTER_SIZE;
2239 valbuf += X_REGISTER_SIZE;
2240 }
2241 }
2242 }
2243 else
2244 {
2245 /* For a structure or union the behaviour is as if the value had
2246 been stored to word-aligned memory and then loaded into
2247 registers with 64-bit load instruction(s). */
2248 int len = TYPE_LENGTH (type);
2249 int regno = AARCH64_X0_REGNUM;
2250 bfd_byte tmpbuf[X_REGISTER_SIZE];
2251
2252 while (len > 0)
2253 {
2254 memcpy (tmpbuf, valbuf,
2255 len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2256 regs->cooked_write (regno++, tmpbuf);
2257 len -= X_REGISTER_SIZE;
2258 valbuf += X_REGISTER_SIZE;
2259 }
2260 }
2261 }
2262
2263 /* Implement the "return_value" gdbarch method. */
2264
2265 static enum return_value_convention
2266 aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
2267 struct type *valtype, struct regcache *regcache,
2268 gdb_byte *readbuf, const gdb_byte *writebuf)
2269 {
2270
2271 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
2272 || TYPE_CODE (valtype) == TYPE_CODE_UNION
2273 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
2274 {
2275 if (aarch64_return_in_memory (gdbarch, valtype))
2276 {
2277 if (aarch64_debug)
2278 debug_printf ("return value in memory\n");
2279 return RETURN_VALUE_STRUCT_CONVENTION;
2280 }
2281 }
2282
2283 if (writebuf)
2284 aarch64_store_return_value (valtype, regcache, writebuf);
2285
2286 if (readbuf)
2287 aarch64_extract_return_value (valtype, regcache, readbuf);
2288
2289 if (aarch64_debug)
2290 debug_printf ("return value in registers\n");
2291
2292 return RETURN_VALUE_REGISTER_CONVENTION;
2293 }
2294
2295 /* Implement the "get_longjmp_target" gdbarch method. */
2296
2297 static int
2298 aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2299 {
2300 CORE_ADDR jb_addr;
2301 gdb_byte buf[X_REGISTER_SIZE];
2302 struct gdbarch *gdbarch = get_frame_arch (frame);
2303 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2304 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2305
2306 jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
2307
2308 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
2309 X_REGISTER_SIZE))
2310 return 0;
2311
2312 *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
2313 return 1;
2314 }
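
/* For example, with the jb_elt_size of 8 set at gdbarch init time and
   a hypothetical jb_pc of 11 supplied by an OS ABI, the saved PC is
   fetched from jb_addr + 88, where jb_addr is the jmp_buf pointer
   taken from X0.  */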
2315
2316 /* Implement the "gen_return_address" gdbarch method. */
2317
2318 static void
2319 aarch64_gen_return_address (struct gdbarch *gdbarch,
2320 struct agent_expr *ax, struct axs_value *value,
2321 CORE_ADDR scope)
2322 {
2323 value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
2324 value->kind = axs_lvalue_register;
2325 value->u.reg = AARCH64_LR_REGNUM;
2326 }
2327 \f
2328
2329 /* Return the pseudo register name corresponding to register REGNUM. */
2330
2331 static const char *
2332 aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
2333 {
2334 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2335
2336 static const char *const q_name[] =
2337 {
2338 "q0", "q1", "q2", "q3",
2339 "q4", "q5", "q6", "q7",
2340 "q8", "q9", "q10", "q11",
2341 "q12", "q13", "q14", "q15",
2342 "q16", "q17", "q18", "q19",
2343 "q20", "q21", "q22", "q23",
2344 "q24", "q25", "q26", "q27",
2345 "q28", "q29", "q30", "q31",
2346 };
2347
2348 static const char *const d_name[] =
2349 {
2350 "d0", "d1", "d2", "d3",
2351 "d4", "d5", "d6", "d7",
2352 "d8", "d9", "d10", "d11",
2353 "d12", "d13", "d14", "d15",
2354 "d16", "d17", "d18", "d19",
2355 "d20", "d21", "d22", "d23",
2356 "d24", "d25", "d26", "d27",
2357 "d28", "d29", "d30", "d31",
2358 };
2359
2360 static const char *const s_name[] =
2361 {
2362 "s0", "s1", "s2", "s3",
2363 "s4", "s5", "s6", "s7",
2364 "s8", "s9", "s10", "s11",
2365 "s12", "s13", "s14", "s15",
2366 "s16", "s17", "s18", "s19",
2367 "s20", "s21", "s22", "s23",
2368 "s24", "s25", "s26", "s27",
2369 "s28", "s29", "s30", "s31",
2370 };
2371
2372 static const char *const h_name[] =
2373 {
2374 "h0", "h1", "h2", "h3",
2375 "h4", "h5", "h6", "h7",
2376 "h8", "h9", "h10", "h11",
2377 "h12", "h13", "h14", "h15",
2378 "h16", "h17", "h18", "h19",
2379 "h20", "h21", "h22", "h23",
2380 "h24", "h25", "h26", "h27",
2381 "h28", "h29", "h30", "h31",
2382 };
2383
2384 static const char *const b_name[] =
2385 {
2386 "b0", "b1", "b2", "b3",
2387 "b4", "b5", "b6", "b7",
2388 "b8", "b9", "b10", "b11",
2389 "b12", "b13", "b14", "b15",
2390 "b16", "b17", "b18", "b19",
2391 "b20", "b21", "b22", "b23",
2392 "b24", "b25", "b26", "b27",
2393 "b28", "b29", "b30", "b31",
2394 };
2395
2396 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
2397
2398 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
2399 return q_name[p_regnum - AARCH64_Q0_REGNUM];
2400
2401 if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
2402 return d_name[p_regnum - AARCH64_D0_REGNUM];
2403
2404 if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
2405 return s_name[p_regnum - AARCH64_S0_REGNUM];
2406
2407 if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
2408 return h_name[p_regnum - AARCH64_H0_REGNUM];
2409
2410 if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
2411 return b_name[p_regnum - AARCH64_B0_REGNUM];
2412
2413 if (tdep->has_sve ())
2414 {
2415 static const char *const sve_v_name[] =
2416 {
2417 "v0", "v1", "v2", "v3",
2418 "v4", "v5", "v6", "v7",
2419 "v8", "v9", "v10", "v11",
2420 "v12", "v13", "v14", "v15",
2421 "v16", "v17", "v18", "v19",
2422 "v20", "v21", "v22", "v23",
2423 "v24", "v25", "v26", "v27",
2424 "v28", "v29", "v30", "v31",
2425 };
2426
2427 if (p_regnum >= AARCH64_SVE_V0_REGNUM
2428 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2429 return sve_v_name[p_regnum - AARCH64_SVE_V0_REGNUM];
2430 }
2431
2432 /* RA_STATE is used for unwinding only. Do not assign it a name; this
2433 prevents it from being read by methods such as
2434 mi_cmd_trace_frame_collected. */
2435 if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
2436 return "";
2437
2438 internal_error (__FILE__, __LINE__,
2439 _("aarch64_pseudo_register_name: bad register number %d"),
2440 p_regnum);
2441 }
2442
2443 /* Implement the "pseudo_register_type" tdesc_arch_data method. */
2444
2445 static struct type *
2446 aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
2447 {
2448 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2449
2450 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
2451
2452 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
2453 return aarch64_vnq_type (gdbarch);
2454
2455 if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
2456 return aarch64_vnd_type (gdbarch);
2457
2458 if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
2459 return aarch64_vns_type (gdbarch);
2460
2461 if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
2462 return aarch64_vnh_type (gdbarch);
2463
2464 if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
2465 return aarch64_vnb_type (gdbarch);
2466
2467 if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
2468 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2469 return aarch64_vnv_type (gdbarch);
2470
2471 if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
2472 return builtin_type (gdbarch)->builtin_uint64;
2473
2474 internal_error (__FILE__, __LINE__,
2475 _("aarch64_pseudo_register_type: bad register number %d"),
2476 p_regnum);
2477 }
2478
2479 /* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2480
2481 static int
2482 aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
2483 struct reggroup *group)
2484 {
2485 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2486
2487 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
2488
2489 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
2490 return group == all_reggroup || group == vector_reggroup;
2491 else if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
2492 return (group == all_reggroup || group == vector_reggroup
2493 || group == float_reggroup);
2494 else if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
2495 return (group == all_reggroup || group == vector_reggroup
2496 || group == float_reggroup);
2497 else if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
2498 return group == all_reggroup || group == vector_reggroup;
2499 else if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
2500 return group == all_reggroup || group == vector_reggroup;
2501 else if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
2502 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2503 return group == all_reggroup || group == vector_reggroup;
2504 /* RA_STATE is used for unwinding only. Do not assign it to any groups. */
2505 if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
2506 return 0;
2507
2508 return group == all_reggroup;
2509 }
2510
2511 /* Helper for aarch64_pseudo_read_value. */
2512
2513 static struct value *
2514 aarch64_pseudo_read_value_1 (struct gdbarch *gdbarch,
2515 readable_regcache *regcache, int regnum_offset,
2516 int regsize, struct value *result_value)
2517 {
2518 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
2519
2520 /* Enough space for a full vector register. */
2521 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2522 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2523
2524 if (regcache->raw_read (v_regnum, reg_buf) != REG_VALID)
2525 mark_value_bytes_unavailable (result_value, 0,
2526 TYPE_LENGTH (value_type (result_value)));
2527 else
2528 memcpy (value_contents_raw (result_value), reg_buf, regsize);
2529
2530 return result_value;
2531 }
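
/* For example, reading the pseudo register S3 goes through the helper
   above with REGNUM_OFFSET 3 and REGSIZE 4: the full V3 (or Z3) raw
   register is fetched, and only its low four bytes are copied into
   the result value.  */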
2532
2533 /* Implement the "pseudo_register_read_value" gdbarch method. */
2534
2535 static struct value *
2536 aarch64_pseudo_read_value (struct gdbarch *gdbarch, readable_regcache *regcache,
2537 int regnum)
2538 {
2539 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2540 struct value *result_value = allocate_value (register_type (gdbarch, regnum));
2541
2542 VALUE_LVAL (result_value) = lval_register;
2543 VALUE_REGNUM (result_value) = regnum;
2544
2545 regnum -= gdbarch_num_regs (gdbarch);
2546
2547 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2548 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2549 regnum - AARCH64_Q0_REGNUM,
2550 Q_REGISTER_SIZE, result_value);
2551
2552 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2553 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2554 regnum - AARCH64_D0_REGNUM,
2555 D_REGISTER_SIZE, result_value);
2556
2557 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2558 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2559 regnum - AARCH64_S0_REGNUM,
2560 S_REGISTER_SIZE, result_value);
2561
2562 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2563 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2564 regnum - AARCH64_H0_REGNUM,
2565 H_REGISTER_SIZE, result_value);
2566
2567 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2568 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2569 regnum - AARCH64_B0_REGNUM,
2570 B_REGISTER_SIZE, result_value);
2571
2572 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2573 && regnum < AARCH64_SVE_V0_REGNUM + 32)
2574 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2575 regnum - AARCH64_SVE_V0_REGNUM,
2576 V_REGISTER_SIZE, result_value);
2577
2578 gdb_assert_not_reached ("regnum out of bounds");
2579 }
2580
2581 /* Helper for aarch64_pseudo_write. */
2582
2583 static void
2584 aarch64_pseudo_write_1 (struct gdbarch *gdbarch, struct regcache *regcache,
2585 int regnum_offset, int regsize, const gdb_byte *buf)
2586 {
2587 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
2588
2589 /* Enough space for a full vector register. */
2590 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2591 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2592
2593 /* Ensure the register buffer is zero. We want GDB writes to the
2594 various 'scalar' pseudo registers to behave like architectural
2595 writes: register-width bytes are written and the remainder is set
2596 to zero. */
2597 memset (reg_buf, 0, register_size (gdbarch, AARCH64_V0_REGNUM));
2598
2599 memcpy (reg_buf, buf, regsize);
2600 regcache->raw_write (v_regnum, reg_buf);
2601 }
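
/* For example (hypothetical session): after

       (gdb) set $s0 = 1.5

   the low four bytes of V0 hold the float 1.5 and bytes 4..15 read
   back as zero, matching what an architectural write to S0 does.  */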
2602
2603 /* Implement the "pseudo_register_write" gdbarch method. */
2604
2605 static void
2606 aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2607 int regnum, const gdb_byte *buf)
2608 {
2609 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2610 regnum -= gdbarch_num_regs (gdbarch);
2611
2612 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2613 return aarch64_pseudo_write_1 (gdbarch, regcache,
2614 regnum - AARCH64_Q0_REGNUM, Q_REGISTER_SIZE,
2615 buf);
2616
2617 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2618 return aarch64_pseudo_write_1 (gdbarch, regcache,
2619 regnum - AARCH64_D0_REGNUM, D_REGISTER_SIZE,
2620 buf);
2621
2622 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2623 return aarch64_pseudo_write_1 (gdbarch, regcache,
2624 regnum - AARCH64_S0_REGNUM, S_REGISTER_SIZE,
2625 buf);
2626
2627 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2628 return aarch64_pseudo_write_1 (gdbarch, regcache,
2629 regnum - AARCH64_H0_REGNUM, H_REGISTER_SIZE,
2630 buf);
2631
2632 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2633 return aarch64_pseudo_write_1 (gdbarch, regcache,
2634 regnum - AARCH64_B0_REGNUM, B_REGISTER_SIZE,
2635 buf);
2636
2637 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2638 && regnum < AARCH64_SVE_V0_REGNUM + 32)
2639 return aarch64_pseudo_write_1 (gdbarch, regcache,
2640 regnum - AARCH64_SVE_V0_REGNUM,
2641 V_REGISTER_SIZE, buf);
2642
2643 gdb_assert_not_reached ("regnum out of bounds");
2644 }
2645
2646 /* Callback function for user_reg_add. */
2647
2648 static struct value *
2649 value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
2650 {
2651 const int *reg_p = (const int *) baton;
2652
2653 return value_of_register (*reg_p, frame);
2654 }
2655 \f
2656
2657 /* Implement the "software_single_step" gdbarch method, needed to
2658 single step through atomic sequences on AArch64. */
2659
2660 static std::vector<CORE_ADDR>
2661 aarch64_software_single_step (struct regcache *regcache)
2662 {
2663 struct gdbarch *gdbarch = regcache->arch ();
2664 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2665 const int insn_size = 4;
2666 const int atomic_sequence_length = 16; /* Instruction sequence length. */
2667 CORE_ADDR pc = regcache_read_pc (regcache);
2668 CORE_ADDR breaks[2] = { CORE_ADDR_MAX, CORE_ADDR_MAX };
2669 CORE_ADDR loc = pc;
2670 CORE_ADDR closing_insn = 0;
2671 uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
2672 byte_order_for_code);
2673 int index;
2674 int insn_count;
2675 int bc_insn_count = 0; /* Conditional branch instruction count. */
2676 int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed). */
2677 aarch64_inst inst;
2678
2679 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2680 return {};
2681
2682 /* Look for a Load Exclusive instruction which begins the sequence. */
2683 if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
2684 return {};
2685
2686 for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
2687 {
2688 loc += insn_size;
2689 insn = read_memory_unsigned_integer (loc, insn_size,
2690 byte_order_for_code);
2691
2692 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2693 return {};
2694 /* Check if the instruction is a conditional branch. */
2695 if (inst.opcode->iclass == condbranch)
2696 {
2697 gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);
2698
2699 if (bc_insn_count >= 1)
2700 return {};
2701
2702 /* It is, so we'll try to set a breakpoint at the destination. */
2703 breaks[1] = loc + inst.operands[0].imm.value;
2704
2705 bc_insn_count++;
2706 last_breakpoint++;
2707 }
2708
2709 /* Look for the Store Exclusive which closes the atomic sequence. */
2710 if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
2711 {
2712 closing_insn = loc;
2713 break;
2714 }
2715 }
2716
2717 /* We didn't find a closing Store Exclusive instruction; fall back. */
2718 if (!closing_insn)
2719 return {};
2720
2721 /* Insert breakpoint after the end of the atomic sequence. */
2722 breaks[0] = loc + insn_size;
2723
2724 /* Check for duplicated breakpoints, and also check that the second
2725 breakpoint is not within the atomic sequence. */
2726 if (last_breakpoint
2727 && (breaks[1] == breaks[0]
2728 || (breaks[1] >= pc && breaks[1] <= closing_insn)))
2729 last_breakpoint = 0;
2730
2731 std::vector<CORE_ADDR> next_pcs;
2732
2733 /* Insert the breakpoint at the end of the sequence, and one at the
2734 destination of the conditional branch, if it exists. */
2735 for (index = 0; index <= last_breakpoint; index++)
2736 next_pcs.push_back (breaks[index]);
2737
2738 return next_pcs;
2739 }
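
/* For example, a compare-and-swap loop such as (sequence illustrative)

       again:  ldaxr   w1, [x0]       ; load exclusive opens the sequence
               cmp     w1, w2
               b.ne    out            ; conditional branch -> breaks[1]
               stlxr   w3, w4, [x0]   ; store exclusive closes it
               cbnz    w3, again
       out:

   gets one breakpoint immediately after the STLXR (breaks[0], on the
   CBNZ) and one at the B.NE destination, so the inferior is never
   stopped between the exclusive load and store.  */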
2740
2741 struct aarch64_displaced_step_closure : public displaced_step_closure
2742 {
2743 /* True when a conditional instruction, such as B.COND or TBZ, is
2744 being displaced stepped. */
2745 int cond = 0;
2746
2747 /* PC adjustment offset after displaced stepping. */
2748 int32_t pc_adjust = 0;
2749 };
2750
2751 /* Data when visiting instructions for displaced stepping. */
2752
2753 struct aarch64_displaced_step_data
2754 {
2755 struct aarch64_insn_data base;
2756
2757 /* The address at which the instruction will be executed. */
2758 CORE_ADDR new_addr;
2759 /* Buffer of instructions to be copied to NEW_ADDR to execute. */
2760 uint32_t insn_buf[AARCH64_DISPLACED_MODIFIED_INSNS];
2761 /* Number of instructions in INSN_BUF. */
2762 unsigned insn_count;
2763 /* Registers when doing displaced stepping. */
2764 struct regcache *regs;
2765
2766 aarch64_displaced_step_closure *dsc;
2767 };
2768
2769 /* Implementation of aarch64_insn_visitor method "b". */
2770
2771 static void
2772 aarch64_displaced_step_b (const int is_bl, const int32_t offset,
2773 struct aarch64_insn_data *data)
2774 {
2775 struct aarch64_displaced_step_data *dsd
2776 = (struct aarch64_displaced_step_data *) data;
2777 int64_t new_offset = data->insn_addr - dsd->new_addr + offset;
2778
2779 if (can_encode_int32 (new_offset, 28))
2780 {
2781 /* Emit B rather than BL, because executing BL on a new address
2782 will get the wrong address into LR. In order to avoid this,
2783 we emit B, and update LR if the instruction is BL. */
2784 emit_b (dsd->insn_buf, 0, new_offset);
2785 dsd->insn_count++;
2786 }
2787 else
2788 {
2789 /* Write NOP. */
2790 emit_nop (dsd->insn_buf);
2791 dsd->insn_count++;
2792 dsd->dsc->pc_adjust = offset;
2793 }
2794
2795 if (is_bl)
2796 {
2797 /* Update LR. */
2798 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
2799 data->insn_addr + 4);
2800 }
2801 }
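
/* For example (addresses hypothetical): a BL at 0x400000 targeting
   0x400100, copied to a scratch pad at 0x500000, needs

       new_offset = 0x400000 - 0x500000 + 0x100 = -0xfff00

   which fits in 28 bits, so a plain B with that offset is emitted
   and LR is set to 0x400004 by hand.  */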
2802
2803 /* Implementation of aarch64_insn_visitor method "b_cond". */
2804
2805 static void
2806 aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
2807 struct aarch64_insn_data *data)
2808 {
2809 struct aarch64_displaced_step_data *dsd
2810 = (struct aarch64_displaced_step_data *) data;
2811
2812 /* GDB has to fix up the PC differently after displaced stepping
2813 this instruction, according to whether the condition is true or
2814 false. Instead of checking COND against the condition flags, we
2815 emit the following instruction sequence, and GDB can tell how to
2816 fix up the PC from the resulting PC value.
2817
2818 B.COND TAKEN ; If cond is true, then jump to TAKEN.
2819 INSN1 ;
2820 TAKEN:
2821 INSN2
2822 */
2823
2824 emit_bcond (dsd->insn_buf, cond, 8);
2825 dsd->dsc->cond = 1;
2826 dsd->dsc->pc_adjust = offset;
2827 dsd->insn_count = 1;
2828 }
2829
2830 /* Construct an aarch64_register operand. If we know the register
2831 statically, we should make it a global as above instead of using this
2832 helper function. */
2833
2834 static struct aarch64_register
2835 aarch64_register (unsigned num, int is64)
2836 {
2837 return (struct aarch64_register) { num, is64 };
2838 }
2839
2840 /* Implementation of aarch64_insn_visitor method "cb". */
2841
2842 static void
2843 aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
2844 const unsigned rn, int is64,
2845 struct aarch64_insn_data *data)
2846 {
2847 struct aarch64_displaced_step_data *dsd
2848 = (struct aarch64_displaced_step_data *) data;
2849
2850 /* The offset is out of range for a compare and branch
2851 instruction. We can use the following instructions instead:
2852
2853 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
2854 INSN1 ;
2855 TAKEN:
2856 INSN2
2857 */
2858 emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
2859 dsd->insn_count = 1;
2860 dsd->dsc->cond = 1;
2861 dsd->dsc->pc_adjust = offset;
2862 }
2863
2864 /* Implementation of aarch64_insn_visitor method "tb". */
2865
2866 static void
2867 aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
2868 const unsigned rt, unsigned bit,
2869 struct aarch64_insn_data *data)
2870 {
2871 struct aarch64_displaced_step_data *dsd
2872 = (struct aarch64_displaced_step_data *) data;
2873
2874 /* The offset is out of range for a test bit and branch
2875 instruction. We can use the following instructions instead:
2876
2877 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
2878 INSN1 ;
2879 TAKEN:
2880 INSN2
2881
2882 */
2883 emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
2884 dsd->insn_count = 1;
2885 dsd->dsc->cond = 1;
2886 dsd->dsc->pc_adjust = offset;
2887 }
2888
2889 /* Implementation of aarch64_insn_visitor method "adr". */
2890
2891 static void
2892 aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
2893 const int is_adrp, struct aarch64_insn_data *data)
2894 {
2895 struct aarch64_displaced_step_data *dsd
2896 = (struct aarch64_displaced_step_data *) data;
2897 /* We know exactly the address the ADR{P,} instruction will compute.
2898 We can just write it to the destination register. */
2899 CORE_ADDR address = data->insn_addr + offset;
2900
2901 if (is_adrp)
2902 {
2903 /* Clear the lower 12 bits of the address to get its 4 KiB page. */
2904 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2905 address & ~0xfff);
2906 }
2907 else
2908 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2909 address);
2910
2911 dsd->dsc->pc_adjust = 4;
2912 emit_nop (dsd->insn_buf);
2913 dsd->insn_count = 1;
2914 }
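
/* For example (addresses hypothetical): an ADRP at 0x400abc whose
   operand lands in the page containing 0x412345 gets 0x412000, that
   page's base, written straight into its destination register; a NOP
   is emitted in its place, and the PC is simply advanced by 4, so no
   PC-relative computation ever runs out of place.  */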
2915
2916 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
2917
2918 static void
2919 aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
2920 const unsigned rt, const int is64,
2921 struct aarch64_insn_data *data)
2922 {
2923 struct aarch64_displaced_step_data *dsd
2924 = (struct aarch64_displaced_step_data *) data;
2925 CORE_ADDR address = data->insn_addr + offset;
2926 struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };
2927
2928 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
2929 address);
2930
2931 if (is_sw)
2932 dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
2933 aarch64_register (rt, 1), zero);
2934 else
2935 dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
2936 aarch64_register (rt, 1), zero);
2937
2938 dsd->dsc->pc_adjust = 4;
2939 }
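
/* For example: "LDR x5, <literal>" is relocated by first writing the
   literal's absolute address into X5 and then emitting
   "LDR x5, [x5, #0]", so the load no longer depends on where the
   scratch pad happens to be.  */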
2940
2941 /* Implementation of aarch64_insn_visitor method "others". */
2942
2943 static void
2944 aarch64_displaced_step_others (const uint32_t insn,
2945 struct aarch64_insn_data *data)
2946 {
2947 struct aarch64_displaced_step_data *dsd
2948 = (struct aarch64_displaced_step_data *) data;
2949
2950 aarch64_emit_insn (dsd->insn_buf, insn);
2951 dsd->insn_count = 1;
2952
2953 if ((insn & 0xfffffc1f) == 0xd65f0000)
2954 {
2955 /* RET */
2956 dsd->dsc->pc_adjust = 0;
2957 }
2958 else
2959 dsd->dsc->pc_adjust = 4;
2960 }
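
/* The RET test above masks with 0xfffffc1f, which leaves bits 5..9
   (Rn) unconstrained, so it matches a return through any register:
   e.g. the common "RET" (an alias of "RET X30", encoding 0xd65f03c0)
   satisfies (0xd65f03c0 & 0xfffffc1f) == 0xd65f0000.  */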
2961
2962 static const struct aarch64_insn_visitor visitor =
2963 {
2964 aarch64_displaced_step_b,
2965 aarch64_displaced_step_b_cond,
2966 aarch64_displaced_step_cb,
2967 aarch64_displaced_step_tb,
2968 aarch64_displaced_step_adr,
2969 aarch64_displaced_step_ldr_literal,
2970 aarch64_displaced_step_others,
2971 };
2972
2973 /* Implement the "displaced_step_copy_insn" gdbarch method. */
2974
2975 struct displaced_step_closure *
2976 aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
2977 CORE_ADDR from, CORE_ADDR to,
2978 struct regcache *regs)
2979 {
2980 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2981 uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
2982 struct aarch64_displaced_step_data dsd;
2983 aarch64_inst inst;
2984
2985 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2986 return NULL;
2987
2988 /* Look for a Load Exclusive instruction which begins the sequence. */
2989 if (inst.opcode->iclass == ldstexcl && bit (insn, 22))
2990 {
2991 /* We can't displaced step atomic sequences. */
2992 return NULL;
2993 }
2994
2995 std::unique_ptr<aarch64_displaced_step_closure> dsc
2996 (new aarch64_displaced_step_closure);
2997 dsd.base.insn_addr = from;
2998 dsd.new_addr = to;
2999 dsd.regs = regs;
3000 dsd.dsc = dsc.get ();
3001 dsd.insn_count = 0;
3002 aarch64_relocate_instruction (insn, &visitor,
3003 (struct aarch64_insn_data *) &dsd);
3004 gdb_assert (dsd.insn_count <= AARCH64_DISPLACED_MODIFIED_INSNS);
3005
3006 if (dsd.insn_count != 0)
3007 {
3008 int i;
3009
3010 /* The instruction could be relocated to the scratch pad. Copy the
3011 relocated instruction(s) there. */
3012 for (i = 0; i < dsd.insn_count; i++)
3013 {
3014 if (debug_displaced)
3015 {
3016 debug_printf ("displaced: writing insn ");
3017 debug_printf ("%.8x", dsd.insn_buf[i]);
3018 debug_printf (" at %s\n", paddress (gdbarch, to + i * 4));
3019 }
3020 write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
3021 (ULONGEST) dsd.insn_buf[i]);
3022 }
3023 }
3024 else
3025 {
3026 dsc = NULL;
3027 }
3028
3029 return dsc.release ();
3030 }
3031
3032 /* Implement the "displaced_step_fixup" gdbarch method. */
3033
3034 void
3035 aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
3036 struct displaced_step_closure *dsc_,
3037 CORE_ADDR from, CORE_ADDR to,
3038 struct regcache *regs)
3039 {
3040 aarch64_displaced_step_closure *dsc = (aarch64_displaced_step_closure *) dsc_;
3041
3042 if (dsc->cond)
3043 {
3044 ULONGEST pc;
3045
3046 regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc);
3047 if (pc - to == 8)
3048 {
3049 /* Condition is true. */
3050 }
3051 else if (pc - to == 4)
3052 {
3053 /* Condition is false. */
3054 dsc->pc_adjust = 4;
3055 }
3056 else
3057 gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
3058 }
3059
3060 if (dsc->pc_adjust != 0)
3061 {
3062 if (debug_displaced)
3063 {
3064 debug_printf ("displaced: fixup: set PC to %s:%d\n",
3065 paddress (gdbarch, from), dsc->pc_adjust);
3066 }
3067 regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
3068 from + dsc->pc_adjust);
3069 }
3070 }
3071
3072 /* Implement the "displaced_step_hw_singlestep" gdbarch method. */
3073
3074 int
3075 aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch,
3076 struct displaced_step_closure *closure)
3077 {
3078 return 1;
3079 }
3080
3081 /* Get the correct target description for the given VQ value.
3082 If VQ is zero then it is assumed SVE is not supported.
3083 (It is not possible to set VQ to zero on an SVE system). */
3084
3085 const target_desc *
3086 aarch64_read_description (uint64_t vq, bool pauth_p)
3087 {
3088 if (vq > AARCH64_MAX_SVE_VQ)
3089 error (_("VQ is %" PRIu64 ", maximum supported value is %d"), vq,
3090 AARCH64_MAX_SVE_VQ);
3091
3092 struct target_desc *tdesc = tdesc_aarch64_list[vq][pauth_p];
3093
3094 if (tdesc == NULL)
3095 {
3096 tdesc = aarch64_create_target_description (vq, pauth_p);
3097 tdesc_aarch64_list[vq][pauth_p] = tdesc;
3098 }
3099
3100 return tdesc;
3101 }
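
/* For example, aarch64_read_description (2, false) returns (and, on
   first use, creates and caches) the descriptor for an SVE target
   with VQ 2, i.e. 256-bit / 32-byte vector registers, without pointer
   authentication.  */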
3102
3103 /* Return the VQ used when creating the target description TDESC. */
3104
3105 static uint64_t
3106 aarch64_get_tdesc_vq (const struct target_desc *tdesc)
3107 {
3108 const struct tdesc_feature *feature_sve;
3109
3110 if (!tdesc_has_registers (tdesc))
3111 return 0;
3112
3113 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
3114
3115 if (feature_sve == nullptr)
3116 return 0;
3117
3118 uint64_t vl = tdesc_register_bitsize (feature_sve,
3119 aarch64_sve_register_names[0]) / 8;
3120 return sve_vq_from_vl (vl);
3121 }
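
/* For example, a description whose SVE feature declares Z0 as a
   512-bit register gives vl = 512 / 8 = 64 bytes, hence VQ 4
   (sve_vq_from_vl divides by the 16-byte quadword size).  */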
3122
3123 /* Add all the expected register sets into GDBARCH. */
3124
3125 static void
3126 aarch64_add_reggroups (struct gdbarch *gdbarch)
3127 {
3128 reggroup_add (gdbarch, general_reggroup);
3129 reggroup_add (gdbarch, float_reggroup);
3130 reggroup_add (gdbarch, system_reggroup);
3131 reggroup_add (gdbarch, vector_reggroup);
3132 reggroup_add (gdbarch, all_reggroup);
3133 reggroup_add (gdbarch, save_reggroup);
3134 reggroup_add (gdbarch, restore_reggroup);
3135 }
3136
3137 /* Implement the "cannot_store_register" gdbarch method. */
3138
3139 static int
3140 aarch64_cannot_store_register (struct gdbarch *gdbarch, int regnum)
3141 {
3142 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3143
3144 if (!tdep->has_pauth ())
3145 return 0;
3146
3147 /* Pointer authentication registers are read-only. */
3148 return (regnum == AARCH64_PAUTH_DMASK_REGNUM (tdep->pauth_reg_base)
3149 || regnum == AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base));
3150 }
3151
3152 /* Initialize the current architecture based on INFO. If possible,
3153 re-use an architecture from ARCHES, which is a list of
3154 architectures already created during this debugging session.
3155
3156 Called e.g. at program startup, when reading a core file, and when
3157 reading a binary file. */
3158
3159 static struct gdbarch *
3160 aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
3161 {
3162 const struct tdesc_feature *feature_core, *feature_fpu, *feature_sve;
3163 const struct tdesc_feature *feature_pauth;
3164 bool valid_p = true;
3165 int i, num_regs = 0, num_pseudo_regs = 0;
3166 int first_pauth_regnum = -1, pauth_ra_state_offset = -1;
3167
3168 /* Use the vector length passed via the target info. Here -1 means no
3169 SVE, and 0 means unset. If unset, use the vector length from the
3170 existing tdesc. */
3171 uint64_t vq = 0;
3172 if (info.id == (int *) -1)
3173 vq = 0;
3174 else if (info.id != 0)
3175 vq = (uint64_t) info.id;
3176 else
3177 vq = aarch64_get_tdesc_vq (info.target_desc);
3178
3179 if (vq > AARCH64_MAX_SVE_VQ)
3180 internal_error (__FILE__, __LINE__, _("VQ out of bounds: %s (max %d)"),
3181 pulongest (vq), AARCH64_MAX_SVE_VQ);
3182
3183 /* If there is already a candidate, use it. */
3184 for (gdbarch_list *best_arch = gdbarch_list_lookup_by_info (arches, &info);
3185 best_arch != nullptr;
3186 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
3187 {
3188 struct gdbarch_tdep *tdep = gdbarch_tdep (best_arch->gdbarch);
3189 if (tdep && tdep->vq == vq)
3190 return best_arch->gdbarch;
3191 }
3192
3193 /* Ensure we always have a target descriptor, and that it is for the given VQ
3194 value. */
3195 const struct target_desc *tdesc = info.target_desc;
3196 if (!tdesc_has_registers (tdesc) || vq != aarch64_get_tdesc_vq (tdesc))
3197 tdesc = aarch64_read_description (vq, false);
3198 gdb_assert (tdesc);
3199
3200 feature_core = tdesc_find_feature (tdesc,"org.gnu.gdb.aarch64.core");
3201 feature_fpu = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
3202 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
3203 feature_pauth = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.pauth");
3204
3205 if (feature_core == nullptr)
3206 return nullptr;
3207
3208 struct tdesc_arch_data *tdesc_data = tdesc_data_alloc ();
3209
3210 /* Validate the description provides the mandatory core R registers
3211 and allocate their numbers. */
3212 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
3213 valid_p &= tdesc_numbered_register (feature_core, tdesc_data,
3214 AARCH64_X0_REGNUM + i,
3215 aarch64_r_register_names[i]);
3216
3217 num_regs = AARCH64_X0_REGNUM + i;
3218
3219 /* Add the V registers. */
3220 if (feature_fpu != nullptr)
3221 {
3222 if (feature_sve != nullptr)
3223 error (_("Program contains both fpu and SVE features."));
3224
3225 /* Validate the description provides the mandatory V registers
3226 and allocate their numbers. */
3227 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
3228 valid_p &= tdesc_numbered_register (feature_fpu, tdesc_data,
3229 AARCH64_V0_REGNUM + i,
3230 aarch64_v_register_names[i]);
3231
3232 num_regs = AARCH64_V0_REGNUM + i;
3233 }
3234
3235 /* Add the SVE registers. */
3236 if (feature_sve != nullptr)
3237 {
3238 /* Validate the description provides the mandatory SVE registers
3239 and allocate their numbers. */
3240 for (i = 0; i < ARRAY_SIZE (aarch64_sve_register_names); i++)
3241 valid_p &= tdesc_numbered_register (feature_sve, tdesc_data,
3242 AARCH64_SVE_Z0_REGNUM + i,
3243 aarch64_sve_register_names[i]);
3244
3245 num_regs = AARCH64_SVE_Z0_REGNUM + i;
3246 num_pseudo_regs += 32; /* Add the Vn register pseudos. */
3247 }
3248
3249 if (feature_fpu != nullptr || feature_sve != nullptr)
3250 {
3251 num_pseudo_regs += 32; /* Add the Qn scalar register pseudos. */
3252 num_pseudo_regs += 32; /* Add the Dn scalar register pseudos. */
3253 num_pseudo_regs += 32; /* Add the Sn scalar register pseudos. */
3254 num_pseudo_regs += 32; /* Add the Hn scalar register pseudos. */
3255 num_pseudo_regs += 32; /* Add the Bn scalar register pseudos. */
3256 }
3257
3258 /* Add the pauth registers. */
3259 if (feature_pauth != NULL)
3260 {
3261 first_pauth_regnum = num_regs;
3262 pauth_ra_state_offset = num_pseudo_regs;
3263 /* Validate the descriptor provides the mandatory PAUTH registers and
3264 allocate their numbers. */
3265 for (i = 0; i < ARRAY_SIZE (aarch64_pauth_register_names); i++)
3266 valid_p &= tdesc_numbered_register (feature_pauth, tdesc_data,
3267 first_pauth_regnum + i,
3268 aarch64_pauth_register_names[i]);
3269
3270 num_regs += i;
3271 num_pseudo_regs += 1; /* Count RA_STATE pseudo register. */
3272 }
3273
3274 if (!valid_p)
3275 {
3276 tdesc_data_cleanup (tdesc_data);
3277 return nullptr;
3278 }
3279
3280 /* AArch64 code is always little-endian. */
3281 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
3282
3283 struct gdbarch_tdep *tdep = XCNEW (struct gdbarch_tdep);
3284 struct gdbarch *gdbarch = gdbarch_alloc (&info, tdep);
3285
3286 /* This should be low enough for everything. */
3287 tdep->lowest_pc = 0x20;
3288 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
3289 tdep->jb_elt_size = 8;
3290 tdep->vq = vq;
3291 tdep->pauth_reg_base = first_pauth_regnum;
3292 tdep->pauth_ra_state_regnum = (feature_pauth == NULL) ? -1
3293 : pauth_ra_state_offset + num_regs;
3294
3295 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
3296 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
3297
3298 /* Advance PC across function entry code. */
3299 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
3300
3301 /* The stack grows downward. */
3302 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
3303
3304 /* Breakpoint manipulation. */
3305 set_gdbarch_breakpoint_kind_from_pc (gdbarch,
3306 aarch64_breakpoint::kind_from_pc);
3307 set_gdbarch_sw_breakpoint_from_kind (gdbarch,
3308 aarch64_breakpoint::bp_from_kind);
3309 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
3310 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
3311
3312 /* Information about registers, etc. */
3313 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
3314 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
3315 set_gdbarch_num_regs (gdbarch, num_regs);
3316
3317 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
3318 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
3319 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
3320 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
3321 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
3322 set_tdesc_pseudo_register_reggroup_p (gdbarch,
3323 aarch64_pseudo_register_reggroup_p);
3324 set_gdbarch_cannot_store_register (gdbarch, aarch64_cannot_store_register);
3325
3326 /* ABI */
3327 set_gdbarch_short_bit (gdbarch, 16);
3328 set_gdbarch_int_bit (gdbarch, 32);
3329 set_gdbarch_float_bit (gdbarch, 32);
3330 set_gdbarch_double_bit (gdbarch, 64);
3331 set_gdbarch_long_double_bit (gdbarch, 128);
3332 set_gdbarch_long_bit (gdbarch, 64);
3333 set_gdbarch_long_long_bit (gdbarch, 64);
3334 set_gdbarch_ptr_bit (gdbarch, 64);
3335 set_gdbarch_char_signed (gdbarch, 0);
3336 set_gdbarch_wchar_signed (gdbarch, 0);
3337 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
3338 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
3339 set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);
3340 set_gdbarch_type_align (gdbarch, aarch64_type_align);
3341
3342 /* Internal <-> external register number maps. */
3343 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
3344
3345 /* Returning results. */
3346 set_gdbarch_return_value (gdbarch, aarch64_return_value);
3347
3348 /* Disassembly. */
3349 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
3350
3351 /* Virtual tables. */
3352 set_gdbarch_vbit_in_delta (gdbarch, 1);
3353
3354 /* Register architecture. */
3355 aarch64_add_reggroups (gdbarch);
3356
3357 /* Hook in the ABI-specific overrides, if they have been registered. */
3358 info.target_desc = tdesc;
3359 info.tdesc_data = tdesc_data;
3360 gdbarch_init_osabi (info, gdbarch);
3361
3362 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
3363 /* Register DWARF CFA vendor handler. */
3364 set_gdbarch_execute_dwarf_cfa_vendor_op (gdbarch,
3365 aarch64_execute_dwarf_cfa_vendor_op);
3366
3367 /* Add some default predicates. */
3368 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
3369 dwarf2_append_unwinders (gdbarch);
3370 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
3371
3372 frame_base_set_default (gdbarch, &aarch64_normal_base);
3373
3374 /* Now we have tuned the configuration, set a few final things,
3375 based on what the OS ABI has told us. */
3376
3377 if (tdep->jb_pc >= 0)
3378 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
3379
3380 set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);
3381
3382 set_gdbarch_get_pc_address_flags (gdbarch, aarch64_get_pc_address_flags);
3383
3384 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
3385
3386 /* Add standard register aliases. */
3387 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
3388 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
3389 value_of_aarch64_user_reg,
3390 &aarch64_register_aliases[i].regnum);
3391
3392 register_aarch64_ravenscar_ops (gdbarch);
3393
3394 return gdbarch;
3395 }
3396
3397 static void
3398 aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
3399 {
3400 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3401
3402 if (tdep == NULL)
3403 return;
3404
3405 fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
3406 paddress (gdbarch, tdep->lowest_pc));
3407 }
3408
3409 #if GDB_SELF_TEST
3410 namespace selftests
3411 {
3412 static void aarch64_process_record_test (void);
3413 }
3414 #endif
3415
3416 void
3417 _initialize_aarch64_tdep (void)
3418 {
3419 gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
3420 aarch64_dump_tdep);
3421
3422 /* Debug this file's internals. */
3423 add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
3424 Set AArch64 debugging."), _("\
3425 Show AArch64 debugging."), _("\
3426 When on, AArch64 specific debugging is enabled."),
3427 NULL,
3428 show_aarch64_debug,
3429 &setdebuglist, &showdebuglist);
3430
3431 #if GDB_SELF_TEST
3432 selftests::register_test ("aarch64-analyze-prologue",
3433 selftests::aarch64_analyze_prologue_test);
3434 selftests::register_test ("aarch64-process-record",
3435 selftests::aarch64_process_record_test);
3436 #endif
3437 }
3438
3439 /* AArch64 process record-replay related structures, defines etc. */
3440
3441 #define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
3442 do \
3443 { \
3444 unsigned int reg_len = LENGTH; \
3445 if (reg_len) \
3446 { \
3447 REGS = XNEWVEC (uint32_t, reg_len); \
3448 memcpy (&REGS[0], &RECORD_BUF[0], sizeof (uint32_t) * LENGTH); \
3449 } \
3450 } \
3451 while (0)
3452
3453 #define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
3454 do \
3455 { \
3456 unsigned int mem_len = LENGTH; \
3457 if (mem_len) \
3458 { \
3459 MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
3460 memcpy (&MEMS->len, &RECORD_BUF[0], \
3461 sizeof (struct aarch64_mem_r) * LENGTH); \
3462 } \
3463 } \
3464 while (0)
3465
3466 /* AArch64 record/replay structures and enumerations. */
3467
3468 struct aarch64_mem_r
3469 {
3470 uint64_t len; /* Record length. */
3471 uint64_t addr; /* Memory address. */
3472 };
3473
3474 enum aarch64_record_result
3475 {
3476 AARCH64_RECORD_SUCCESS,
3477 AARCH64_RECORD_UNSUPPORTED,
3478 AARCH64_RECORD_UNKNOWN
3479 };
3480
3481 typedef struct insn_decode_record_t
3482 {
3483 struct gdbarch *gdbarch;
3484 struct regcache *regcache;
3485 CORE_ADDR this_addr; /* Address of insn to be recorded. */
3486 uint32_t aarch64_insn; /* Insn to be recorded. */
3487 uint32_t mem_rec_count; /* Count of memory records. */
3488 uint32_t reg_rec_count; /* Count of register records. */
3489 uint32_t *aarch64_regs; /* Registers to be recorded. */
3490 struct aarch64_mem_r *aarch64_mems; /* Memory locations to be recorded. */
3491 } insn_decode_record;
3492
3493 /* Record handler for data processing - register instructions. */
3494
3495 static unsigned int
3496 aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
3497 {
3498 uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
3499 uint32_t record_buf[4];
3500
3501 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3502 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3503 insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);
3504
3505 if (!bit (aarch64_insn_r->aarch64_insn, 28))
3506 {
3507 uint8_t setflags;
3508
3509 /* Logical (shifted register). */
3510 if (insn_bits24_27 == 0x0a)
3511 setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
3512 /* Add/subtract. */
3513 else if (insn_bits24_27 == 0x0b)
3514 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3515 else
3516 return AARCH64_RECORD_UNKNOWN;
3517
3518 record_buf[0] = reg_rd;
3519 aarch64_insn_r->reg_rec_count = 1;
3520 if (setflags)
3521 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3522 }
3523 else
3524 {
3525 if (insn_bits24_27 == 0x0b)
3526 {
3527 /* Data-processing (3 source). */
3528 record_buf[0] = reg_rd;
3529 aarch64_insn_r->reg_rec_count = 1;
3530 }
3531 else if (insn_bits24_27 == 0x0a)
3532 {
3533 if (insn_bits21_23 == 0x00)
3534 {
3535 /* Add/subtract (with carry). */
3536 record_buf[0] = reg_rd;
3537 aarch64_insn_r->reg_rec_count = 1;
3538 if (bit (aarch64_insn_r->aarch64_insn, 29))
3539 {
3540 record_buf[1] = AARCH64_CPSR_REGNUM;
3541 aarch64_insn_r->reg_rec_count = 2;
3542 }
3543 }
3544 else if (insn_bits21_23 == 0x02)
3545 {
3546 /* Conditional compare (register) and conditional compare
3547 (immediate) instructions. */
3548 record_buf[0] = AARCH64_CPSR_REGNUM;
3549 aarch64_insn_r->reg_rec_count = 1;
3550 }
3551 else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
3552 {
3553 /* Conditional select. */
3554 /* Data-processing (2 source). */
3555 /* Data-processing (1 source). */
3556 record_buf[0] = reg_rd;
3557 aarch64_insn_r->reg_rec_count = 1;
3558 }
3559 else
3560 return AARCH64_RECORD_UNKNOWN;
3561 }
3562 }
3563
3564 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3565 record_buf);
3566 return AARCH64_RECORD_SUCCESS;
3567 }
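
/* For example, "ADDS x0, x1, x2" (bit 28 clear, bits 24..27 == 0x0b,
   bit 29 set) records two entries: X0, the destination, and CPSR,
   because the instruction sets the flags.  Its non-flag-setting
   sibling "ADD x0, x1, x2" records X0 only.  */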
3568
3569 /* Record handler for data processing - immediate instructions. */
3570
3571 static unsigned int
3572 aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
3573 {
3574 uint8_t reg_rd, insn_bit23, insn_bits24_27, setflags;
3575 uint32_t record_buf[4];
3576
3577 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3578 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3579 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3580
3581 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
3582 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
3583 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
3584 {
3585 record_buf[0] = reg_rd;
3586 aarch64_insn_r->reg_rec_count = 1;
3587 }
3588 else if (insn_bits24_27 == 0x01)
3589 {
3590 /* Add/Subtract (immediate). */
3591 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3592 record_buf[0] = reg_rd;
3593 aarch64_insn_r->reg_rec_count = 1;
3594 if (setflags)
3595 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3596 }
3597 else if (insn_bits24_27 == 0x02 && !insn_bit23)
3598 {
3599 /* Logical (immediate). */
3600 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
3601 record_buf[0] = reg_rd;
3602 aarch64_insn_r->reg_rec_count = 1;
3603 if (setflags)
3604 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3605 }
3606 else
3607 return AARCH64_RECORD_UNKNOWN;
3608
3609 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3610 record_buf);
3611 return AARCH64_RECORD_SUCCESS;
3612 }
3613
3614 /* Record handler for branch, exception generation and system instructions. */
3615
3616 static unsigned int
3617 aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
3618 {
3619 struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
3620 uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
3621 uint32_t record_buf[4];
3622
3623 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3624 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3625 insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3626
3627 if (insn_bits28_31 == 0x0d)
3628 {
3629 /* Exception generation instructions. */
3630 if (insn_bits24_27 == 0x04)
3631 {
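/* SVC is identified by opc (bits 21-23) and op2 (bits 2-4) being zero
   with the LL field (bits 0-1) equal to 01. */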
3632 if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
3633 && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
3634 && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
3635 {
3636 ULONGEST svc_number;
3637
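/* On AArch64 GNU/Linux the system call number is passed in x8; the
   OS-specific hook performs the actual recording. */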
3638 regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
3639 &svc_number);
3640 return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
3641 svc_number);
3642 }
3643 else
3644 return AARCH64_RECORD_UNSUPPORTED;
3645 }
3646 /* System instructions. */
3647 else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
3648 {
3649 uint32_t reg_rt, reg_crn;
3650
3651 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3652 reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3653
3654 /* Record Rt in case of SYSL and MRS instructions. */
3655 if (bit (aarch64_insn_r->aarch64_insn, 21))
3656 {
3657 record_buf[0] = reg_rt;
3658 aarch64_insn_r->reg_rec_count = 1;
3659 }
3660 /* Record CPSR for HINT and MSR (immediate) instructions. */
3661 else if (reg_crn == 0x02 || reg_crn == 0x04)
3662 {
3663 record_buf[0] = AARCH64_CPSR_REGNUM;
3664 aarch64_insn_r->reg_rec_count = 1;
3665 }
3666 }
3667 /* Unconditional branch (register). */
3668 else if ((insn_bits24_27 & 0x0e) == 0x06)
3669 {
3670 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3671 if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
3672 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3673 }
3674 else
3675 return AARCH64_RECORD_UNKNOWN;
3676 }
3677 /* Unconditional branch (immediate). */
3678 else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
3679 {
3680 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3681 if (bit (aarch64_insn_r->aarch64_insn, 31))
3682 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3683 }
3684 else
3685 /* Compare & branch (immediate), Test & branch (immediate) and
3686 Conditional branch (immediate). */
3687 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3688
3689 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3690 record_buf);
3691 return AARCH64_RECORD_SUCCESS;
3692 }
3693
3694 /* Record handler for advanced SIMD load and store instructions. */
3695
3696 static unsigned int
3697 aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
3698 {
3699 CORE_ADDR address;
3700 uint64_t addr_offset = 0;
3701 uint32_t record_buf[24];
3702 uint64_t record_buf_mem[24];
3703 uint32_t reg_rn, reg_rt;
3704 uint32_t reg_index = 0, mem_index = 0;
3705 uint8_t opcode_bits, size_bits;
3706
3707 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3708 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3709 size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3710 opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3711 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);
3712
3713 if (record_debug)
3714 debug_printf ("Process record: Advanced SIMD load/store\n");
3715
3716 /* Load/store single structure. */
3717 if (bit (aarch64_insn_r->aarch64_insn, 24))
3718 {
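/* scale determines the element size used below; selem is the number of
   consecutive registers in the structure (1 to 4). */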
3719 uint8_t sindex, scale, selem, esize, replicate = 0;
3720 scale = opcode_bits >> 2;
3721 selem = ((opcode_bits & 0x02)
3722 | bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
3723 switch (scale)
3724 {
3725 case 1:
3726 if (size_bits & 0x01)
3727 return AARCH64_RECORD_UNKNOWN;
3728 break;
3729 case 2:
3730 if ((size_bits >> 1) & 0x01)
3731 return AARCH64_RECORD_UNKNOWN;
3732 if (size_bits & 0x01)
3733 {
3734 if (!((opcode_bits >> 1) & 0x01))
3735 scale = 3;
3736 else
3737 return AARCH64_RECORD_UNKNOWN;
3738 }
3739 break;
3740 case 3:
3741 if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
3742 {
3743 scale = size_bits;
3744 replicate = 1;
3745 break;
3746 }
3747 else
3748 return AARCH64_RECORD_UNKNOWN;
3749 default:
3750 break;
3751 }
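/* Element size in bits: 8, 16, 32 or 64. */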
3752 esize = 8 << scale;
3753 if (replicate)
3754 for (sindex = 0; sindex < selem; sindex++)
3755 {
3756 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3757 reg_rt = (reg_rt + 1) % 32;
3758 }
3759 else
3760 {
3761 for (sindex = 0; sindex < selem; sindex++)
3762 {
3763 if (bit (aarch64_insn_r->aarch64_insn, 22))
3764 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3765 else
3766 {
3767 record_buf_mem[mem_index++] = esize / 8;
3768 record_buf_mem[mem_index++] = address + addr_offset;
3769 }
3770 addr_offset = addr_offset + (esize / 8);
3771 reg_rt = (reg_rt + 1) % 32;
3772 }
3773 }
3774 }
3775 /* Load/store multiple structure. */
3776 else
3777 {
3778 uint8_t selem, esize, rpt, elements;
3779 uint8_t eindex, rindex;
3780
3781 esize = 8 << size_bits;
3782 if (bit (aarch64_insn_r->aarch64_insn, 30))
3783 elements = 128 / esize;
3784 else
3785 elements = 64 / esize;
3786
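/* Each opcode transfers rpt repetitions of selem consecutive registers,
   i.e. rpt * selem registers in total. */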
3787 switch (opcode_bits)
3788 {
3789 /* LD/ST4 (4 registers). */
3790 case 0:
3791 rpt = 1;
3792 selem = 4;
3793 break;
3794 /* LD/ST1 (4 registers). */
3795 case 2:
3796 rpt = 4;
3797 selem = 1;
3798 break;
3799 /* LD/ST3 (3 registers). */
3800 case 4:
3801 rpt = 1;
3802 selem = 3;
3803 break;
3804 /* LD/ST1 (3 registers). */
3805 case 6:
3806 rpt = 3;
3807 selem = 1;
3808 break;
3809 /* LD/ST1 (1 register). */
3810 case 7:
3811 rpt = 1;
3812 selem = 1;
3813 break;
3814 /* LD/ST2 (2 registers). */
3815 case 8:
3816 rpt = 1;
3817 selem = 2;
3818 break;
3819 /* LD/ST1 (2 registers). */
3820 case 10:
3821 rpt = 2;
3822 selem = 1;
3823 break;
3824 default:
3825 return AARCH64_RECORD_UNSUPPORTED;
3827 }
3828 for (rindex = 0; rindex < rpt; rindex++)
3829 for (eindex = 0; eindex < elements; eindex++)
3830 {
3831 uint8_t reg_tt, sindex;
3832 reg_tt = (reg_rt + rindex) % 32;
3833 for (sindex = 0; sindex < selem; sindex++)
3834 {
3835 if (bit (aarch64_insn_r->aarch64_insn, 22))
3836 record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
3837 else
3838 {
3839 record_buf_mem[mem_index++] = esize / 8;
3840 record_buf_mem[mem_index++] = address + addr_offset;
3841 }
3842 addr_offset = addr_offset + (esize / 8);
3843 reg_tt = (reg_tt + 1) % 32;
3844 }
3845 }
3846 }
3847
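/* Bit 23 set selects the post-indexed variants, which also write back
   to the base register. */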
3848 if (bit (aarch64_insn_r->aarch64_insn, 23))
3849 record_buf[reg_index++] = reg_rn;
3850
3851 aarch64_insn_r->reg_rec_count = reg_index;
3852 aarch64_insn_r->mem_rec_count = mem_index / 2;
3853 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3854 record_buf_mem);
3855 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3856 record_buf);
3857 return AARCH64_RECORD_SUCCESS;
3858 }
3859
3860 /* Record handler for load and store instructions. */
3861
3862 static unsigned int
3863 aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
3864 {
3865 uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
3866 uint8_t insn_bit23, insn_bit21;
3867 uint8_t opc, size_bits, ld_flag, vector_flag;
3868 uint32_t reg_rn, reg_rt, reg_rt2;
3869 uint64_t datasize, offset;
3870 uint32_t record_buf[8];
3871 uint64_t record_buf_mem[8];
3872 CORE_ADDR address;
3873
3874 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3875 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3876 insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
3877 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3878 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3879 ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
3880 vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
3881 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3882 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3883 reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
3884 size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);
3885
3886 /* Load/store exclusive. */
3887 if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
3888 {
3889 if (record_debug)
3890 debug_printf ("Process record: load/store exclusive\n");
3891
3892 if (ld_flag)
3893 {
3894 record_buf[0] = reg_rt;
3895 aarch64_insn_r->reg_rec_count = 1;
3896 if (insn_bit21)
3897 {
3898 record_buf[1] = reg_rt2;
3899 aarch64_insn_r->reg_rec_count = 2;
3900 }
3901 }
3902 else
3903 {
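/* Bit 21 set selects the pair forms (e.g. STXP/STLXP), which store two
   registers' worth of data. */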
3904 if (insn_bit21)
3905 datasize = (8 << size_bits) * 2;
3906 else
3907 datasize = (8 << size_bits);
3908 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3909 &address);
3910 record_buf_mem[0] = datasize / 8;
3911 record_buf_mem[1] = address;
3912 aarch64_insn_r->mem_rec_count = 1;
3913 if (!insn_bit23)
3914 {
3915 /* Save register rs. */
3916 record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
3917 aarch64_insn_r->reg_rec_count = 1;
3918 }
3919 }
3920 }
3921 /* Decode load register (literal) instructions. */
3922 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
3923 {
3924 if (record_debug)
3925 debug_printf ("Process record: load register (literal)\n");
3926 if (vector_flag)
3927 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3928 else
3929 record_buf[0] = reg_rt;
3930 aarch64_insn_r->reg_rec_count = 1;
3931 }
3932 /* Decode all types of load/store pair instructions. */
3933 else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
3934 {
3935 if (record_debug)
3936 debug_printf ("Process record: load/store pair\n");
3937
3938 if (ld_flag)
3939 {
3940 if (vector_flag)
3941 {
3942 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3943 record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
3944 }
3945 else
3946 {
3947 record_buf[0] = reg_rt;
3948 record_buf[1] = reg_rt2;
3949 }
3950 aarch64_insn_r->reg_rec_count = 2;
3951 }
3952 else
3953 {
3954 uint16_t imm7_off;
3955 imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
3956 if (!vector_flag)
3957 size_bits = size_bits >> 1;
3958 datasize = 8 << (2 + size_bits);
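/* The 7-bit immediate is signed; compute its magnitude here and apply
   the sign below. */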
3959 offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
3960 offset = offset << (2 + size_bits);
3961 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3962 &address);
3963 if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
3964 {
3965 if (imm7_off & 0x40)
3966 address = address - offset;
3967 else
3968 address = address + offset;
3969 }
3970
3971 record_buf_mem[0] = datasize / 8;
3972 record_buf_mem[1] = address;
3973 record_buf_mem[2] = datasize / 8;
3974 record_buf_mem[3] = address + (datasize / 8);
3975 aarch64_insn_r->mem_rec_count = 2;
3976 }
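/* Bit 23 set selects the writeback (pre/post-indexed) forms, which also
   update the base register. */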
3977 if (bit (aarch64_insn_r->aarch64_insn, 23))
3978 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3979 }
3980 /* Load/store register (unsigned immediate) instructions. */
3981 else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
3982 {
3983 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3984 if (!(opc >> 1))
3985 {
3986 if (opc & 0x01)
3987 ld_flag = 0x01;
3988 else
3989 ld_flag = 0x0;
3990 }
3991 else
3992 {
3993 if (size_bits == 0x3 && vector_flag == 0x0 && opc == 0x2)
3994 {
3995 /* PRFM (immediate) */
3996 return AARCH64_RECORD_SUCCESS;
3997 }
3998 else if (size_bits == 0x2 && vector_flag == 0x0 && opc == 0x2)
3999 {
4000 /* LDRSW (immediate) */
4001 ld_flag = 0x1;
4002 }
4003 else
4004 {
4005 if (opc & 0x01)
4006 ld_flag = 0x01;
4007 else
4008 ld_flag = 0x0;
4009 }
4010 }
4011
4012 if (record_debug)
4013 {
4014 debug_printf ("Process record: load/store (unsigned immediate):"
4015 " size %x V %d opc %x\n", size_bits, vector_flag,
4016 opc);
4017 }
4018
4019 if (!ld_flag)
4020 {
4021 offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
4022 datasize = 8 << size_bits;
4023 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4024 &address);
4025 offset = offset << size_bits;
4026 address = address + offset;
4027
4028 record_buf_mem[0] = datasize >> 3;
4029 record_buf_mem[1] = address;
4030 aarch64_insn_r->mem_rec_count = 1;
4031 }
4032 else
4033 {
4034 if (vector_flag)
4035 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4036 else
4037 record_buf[0] = reg_rt;
4038 aarch64_insn_r->reg_rec_count = 1;
4039 }
4040 }
4041 /* Load/store register (register offset) instructions. */
4042 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
4043 && insn_bits10_11 == 0x02 && insn_bit21)
4044 {
4045 if (record_debug)
4046 debug_printf ("Process record: load/store (register offset)\n");
4047 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
4048 if (!(opc >> 1))
4049 if (opc & 0x01)
4050 ld_flag = 0x01;
4051 else
4052 ld_flag = 0x0;
4053 else
4054 if (size_bits != 0x03)
4055 ld_flag = 0x01;
4056 else
4057 return AARCH64_RECORD_UNKNOWN;
4058
4059 if (!ld_flag)
4060 {
4061 ULONGEST reg_rm_val;
4062
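/* Bit 12 is the S bit: when set, the register offset is scaled by the
   transfer size. */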
4063 regcache_raw_read_unsigned (aarch64_insn_r->regcache,
4064 bits (aarch64_insn_r->aarch64_insn, 16, 20), &reg_rm_val);
4065 if (bit (aarch64_insn_r->aarch64_insn, 12))
4066 offset = reg_rm_val << size_bits;
4067 else
4068 offset = reg_rm_val;
4069 datasize = 8 << size_bits;
4070 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4071 &address);
4072 address = address + offset;
4073 record_buf_mem[0] = datasize >> 3;
4074 record_buf_mem[1] = address;
4075 aarch64_insn_r->mem_rec_count = 1;
4076 }
4077 else
4078 {
4079 if (vector_flag)
4080 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4081 else
4082 record_buf[0] = reg_rt;
4083 aarch64_insn_r->reg_rec_count = 1;
4084 }
4085 }
4086 /* Load/store register (immediate and unprivileged) instructions. */
4087 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
4088 && !insn_bit21)
4089 {
4090 if (record_debug)
4091 {
4092 debug_printf ("Process record: load/store "
4093 "(immediate and unprivileged)\n");
4094 }
4095 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
4096 if (!(opc >> 1))
4097 if (opc & 0x01)
4098 ld_flag = 0x01;
4099 else
4100 ld_flag = 0x0;
4101 else
4102 if (size_bits != 0x03)
4103 ld_flag = 0x01;
4104 else
4105 return AARCH64_RECORD_UNKNOWN;
4106
4107 if (!ld_flag)
4108 {
4109 uint16_t imm9_off;
4110 imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
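/* The 9-bit immediate is signed (bit 8 is the sign); its magnitude is
   computed here and the sign applied below. For the post-indexed form
   (bits 10-11 == 01) the base address is used unmodified. */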
4111 offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
4112 datasize = 8 << size_bits;
4113 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4114 &address);
4115 if (insn_bits10_11 != 0x01)
4116 {
4117 if (imm9_off & 0x0100)
4118 address = address - offset;
4119 else
4120 address = address + offset;
4121 }
4122 record_buf_mem[0] = datasize >> 3;
4123 record_buf_mem[1] = address;
4124 aarch64_insn_r->mem_rec_count = 1;
4125 }
4126 else
4127 {
4128 if (vector_flag)
4129 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4130 else
4131 record_buf[0] = reg_rt;
4132 aarch64_insn_r->reg_rec_count = 1;
4133 }
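/* Post-indexed (01) and pre-indexed (11) addressing modes write back to
   the base register. */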
4134 if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
4135 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
4136 }
4137 /* Advanced SIMD load/store instructions. */
4138 else
4139 return aarch64_record_asimd_load_store (aarch64_insn_r);
4140
4141 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
4142 record_buf_mem);
4143 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4144 record_buf);
4145 return AARCH64_RECORD_SUCCESS;
4146 }
4147
4148 /* Record handler for data processing SIMD and floating point instructions. */
4149
4150 static unsigned int
4151 aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
4152 {
4153 uint8_t insn_bit21, opcode, rmode, reg_rd;
4154 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
4155 uint8_t insn_bits11_14;
4156 uint32_t record_buf[2];
4157
4158 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
4159 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
4160 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
4161 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
4162 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
4163 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
4164 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
4165 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
4166 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
4167
4168 if (record_debug)
4169 debug_printf ("Process record: data processing SIMD/FP: ");
4170
4171 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
4172 {
4173 /* Floating point - fixed point conversion instructions. */
4174 if (!insn_bit21)
4175 {
4176 if (record_debug)
4177 debug_printf ("FP - fixed point conversion");
4178
4179 if ((opcode >> 1) == 0x0 && rmode == 0x03)
4180 record_buf[0] = reg_rd;
4181 else
4182 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4183 }
4184 /* Floating point - conditional compare instructions. */
4185 else if (insn_bits10_11 == 0x01)
4186 {
4187 if (record_debug)
4188 debug_printf ("FP - conditional compare");
4189
4190 record_buf[0] = AARCH64_CPSR_REGNUM;
4191 }
4192 /* Floating point - data processing (2-source) and
4193 conditional select instructions. */
4194 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
4195 {
4196 if (record_debug)
4197 debug_printf ("FP - DP (2-source)");
4198
4199 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4200 }
4201 else if (insn_bits10_11 == 0x00)
4202 {
4203 /* Floating point - immediate instructions. */
4204 if ((insn_bits12_15 & 0x01) == 0x01
4205 || (insn_bits12_15 & 0x07) == 0x04)
4206 {
4207 if (record_debug)
4208 debug_printf ("FP - immediate");
4209 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4210 }
4211 /* Floating point - compare instructions. */
4212 else if ((insn_bits12_15 & 0x03) == 0x02)
4213 {
4214 if (record_debug)
4215 debug_printf ("FP - compare");
4216 record_buf[0] = AARCH64_CPSR_REGNUM;
4217 }
4218 /* Floating point - integer conversions instructions. */
4219 else if (insn_bits12_15 == 0x00)
4220 {
4221 /* Convert float to integer instruction. */
4222 if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
4223 {
4224 if (record_debug)
4225 debug_printf ("float to int conversion");
4226
4227 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4228 }
4229 /* Convert integer to float instruction. */
4230 else if ((opcode >> 1) == 0x01 && !rmode)
4231 {
4232 if (record_debug)
4233 debug_printf ("int to float conversion");
4234
4235 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4236 }
4237 /* Move float to integer instruction. */
4238 else if ((opcode >> 1) == 0x03)
4239 {
4240 if (record_debug)
4241 debug_printf ("move float to int");
4242
4243 if (!(opcode & 0x01))
4244 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4245 else
4246 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4247 }
4248 else
4249 return AARCH64_RECORD_UNKNOWN;
4250 }
4251 else
4252 return AARCH64_RECORD_UNKNOWN;
4253 }
4254 else
4255 return AARCH64_RECORD_UNKNOWN;
4256 }
4257 else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
4258 {
4259 if (record_debug)
4260 debug_printf ("SIMD copy");
4261
4262 /* Advanced SIMD copy instructions. */
4263 if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
4264 && !bit (aarch64_insn_r->aarch64_insn, 15)
4265 && bit (aarch64_insn_r->aarch64_insn, 10))
4266 {
4267 if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
4268 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4269 else
4270 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4271 }
4272 else
4273 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4274 }
4275 /* All remaining floating point or advanced SIMD instructions. */
4276 else
4277 {
4278 if (record_debug)
4279 debug_printf ("all remaining");
4280
4281 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4282 }
4283
4284 if (record_debug)
4285 debug_printf ("\n");
4286
4287 aarch64_insn_r->reg_rec_count++;
4288 gdb_assert (aarch64_insn_r->reg_rec_count == 1);
4289 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4290 record_buf);
4291 return AARCH64_RECORD_SUCCESS;
4292 }
4293
4294 /* Decode an instruction's type and invoke its record handler. */
4295
4296 static unsigned int
4297 aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
4298 {
4299 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
4300
4301 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
4302 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
4303 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
4304 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
4305
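/* The top-level instruction groups are selected by bits 25-28, following
   the ARMv8-A encoding overview:
   100x - data processing (immediate)
   101x - branch, exception generation and system
   x1x0 - loads and stores
   x101 - data processing (register)
   x111 - data processing (SIMD and floating point). */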
4306 /* Data processing - immediate instructions. */
4307 if (!ins_bit26 && !ins_bit27 && ins_bit28)
4308 return aarch64_record_data_proc_imm (aarch64_insn_r);
4309
4310 /* Branch, exception generation and system instructions. */
4311 if (ins_bit26 && !ins_bit27 && ins_bit28)
4312 return aarch64_record_branch_except_sys (aarch64_insn_r);
4313
4314 /* Load and store instructions. */
4315 if (!ins_bit25 && ins_bit27)
4316 return aarch64_record_load_store (aarch64_insn_r);
4317
4318 /* Data processing - register instructions. */
4319 if (ins_bit25 && !ins_bit26 && ins_bit27)
4320 return aarch64_record_data_proc_reg (aarch64_insn_r);
4321
4322 /* Data processing - SIMD and floating point instructions. */
4323 if (ins_bit25 && ins_bit26 && ins_bit27)
4324 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
4325
4326 return AARCH64_RECORD_UNSUPPORTED;
4327 }
4328
4329 /* Free the register and memory record buffers held by RECORD. */
4330
4331 static void
4332 deallocate_reg_mem (insn_decode_record *record)
4333 {
4334 xfree (record->aarch64_regs);
4335 xfree (record->aarch64_mems);
4336 }
4337
4338 #if GDB_SELF_TEST
4339 namespace selftests {
4340
4341 static void
4342 aarch64_process_record_test (void)
4343 {
4344 struct gdbarch_info info;
4345 uint32_t ret;
4346
4347 gdbarch_info_init (&info);
4348 info.bfd_arch_info = bfd_scan_arch ("aarch64");
4349
4350 struct gdbarch *gdbarch = gdbarch_find_by_info (info);
4351 SELF_CHECK (gdbarch != NULL);
4352
4353 insn_decode_record aarch64_record;
4354
4355 memset (&aarch64_record, 0, sizeof (insn_decode_record));
4356 aarch64_record.regcache = NULL;
4357 aarch64_record.this_addr = 0;
4358 aarch64_record.gdbarch = gdbarch;
4359
4360 /* 20 00 80 f9 prfm pldl1keep, [x1] */
4361 aarch64_record.aarch64_insn = 0xf9800020;
4362 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4363 SELF_CHECK (ret == AARCH64_RECORD_SUCCESS);
4364 SELF_CHECK (aarch64_record.reg_rec_count == 0);
4365 SELF_CHECK (aarch64_record.mem_rec_count == 0);
4366
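/* An extra, illustrative check (not part of the original test): an
   unconditional branch, b . (encoded as 0x14000000), should record
   only the PC. */
aarch64_record.aarch64_insn = 0x14000000;
ret = aarch64_record_decode_insn_handler (&aarch64_record);
SELF_CHECK (ret == AARCH64_RECORD_SUCCESS);
SELF_CHECK (aarch64_record.reg_rec_count == 1);
SELF_CHECK (aarch64_record.mem_rec_count == 0);
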
4367 deallocate_reg_mem (&aarch64_record);
4368 }
4369
4370 } // namespace selftests
4371 #endif /* GDB_SELF_TEST */
4372
4373 /* Parse the current instruction and record the values of the registers
4374 and memory that will be changed by it to record_arch_list. Return -1
4375 if something is wrong. */
4376
4377 int
4378 aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
4379 CORE_ADDR insn_addr)
4380 {
4381 uint32_t rec_no = 0;
4382 uint8_t insn_size = 4;
4383 uint32_t ret = 0;
4384 gdb_byte buf[insn_size];
4385 insn_decode_record aarch64_record;
4386
4387 memset (&buf[0], 0, insn_size);
4388 memset (&aarch64_record, 0, sizeof (insn_decode_record));
4389 target_read_memory (insn_addr, &buf[0], insn_size);
4390 aarch64_record.aarch64_insn
4391 = (uint32_t) extract_unsigned_integer (&buf[0],
4392 insn_size,
4393 gdbarch_byte_order (gdbarch));
4394 aarch64_record.regcache = regcache;
4395 aarch64_record.this_addr = insn_addr;
4396 aarch64_record.gdbarch = gdbarch;
4397
4398 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4399 if (ret == AARCH64_RECORD_UNSUPPORTED)
4400 {
4401 printf_unfiltered (_("Process record does not support instruction "
4402 "0x%x at address %s.\n"),
4403 aarch64_record.aarch64_insn,
4404 paddress (gdbarch, insn_addr));
4405 ret = -1;
4406 }
4407
4408 if (0 == ret)
4409 {
4410 /* Record registers. */
4411 record_full_arch_list_add_reg (aarch64_record.regcache,
4412 AARCH64_PC_REGNUM);
4413 /* Always record register CPSR. */
4414 record_full_arch_list_add_reg (aarch64_record.regcache,
4415 AARCH64_CPSR_REGNUM);
4416 if (aarch64_record.aarch64_regs)
4417 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
4418 if (record_full_arch_list_add_reg (aarch64_record.regcache,
4419 aarch64_record.aarch64_regs[rec_no]))
4420 ret = -1;
4421
4422 /* Record memories. */
4423 if (aarch64_record.aarch64_mems)
4424 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
4425 if (record_full_arch_list_add_mem
4426 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
4427 aarch64_record.aarch64_mems[rec_no].len))
4428 ret = -1;
4429
4430 if (record_full_arch_list_add_end ())
4431 ret = -1;
4432 }
4433
4434 deallocate_reg_mem (&aarch64_record);
4435 return ret;
4436 }