/* Common target dependent code for GDB on AArch64 systems.

   Copyright (C) 2009-2019 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"

#include "frame.h"
#include "inferior.h"
#include "gdbcmd.h"
#include "gdbcore.h"
#include "dis-asm.h"
#include "regcache.h"
#include "reggroups.h"
#include "value.h"
#include "arch-utils.h"
#include "osabi.h"
#include "frame-unwind.h"
#include "frame-base.h"
#include "trad-frame.h"
#include "objfiles.h"
#include "dwarf2.h"
#include "dwarf2-frame.h"
#include "gdbtypes.h"
#include "prologue-value.h"
#include "target-descriptions.h"
#include "user-regs.h"
#include "language.h"
#include "infcall.h"
#include "ax.h"
#include "ax-gdb.h"
#include "gdbsupport/selftest.h"

#include "aarch64-tdep.h"
#include "aarch64-ravenscar-thread.h"

#include "elf-bfd.h"
#include "elf/aarch64.h"

#include "gdbsupport/vec.h"

#include "record.h"
#include "record-full.h"
#include "arch/aarch64-insn.h"
#include "gdbarch.h"

#include "opcode/aarch64.h"
#include <algorithm>

#define submask(x) ((1L << ((x) + 1)) - 1)
#define bit(obj,st) (((obj) >> (st)) & 1)
#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
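
/* Illustrative example (not exercised by the code itself): for the
   instruction word 0x910003fd, which encodes "mov x29, sp" (an alias of
   "add x29, sp, #0"), submask (4) is 0x1f, so bits (0x910003fd, 0, 4)
   extracts the Rd field (29, i.e. x29) and bits (0x910003fd, 5, 9)
   extracts the Rn field (31, i.e. sp).  */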

/* A Homogeneous Floating-Point or Short-Vector Aggregate may have at most
   four members.  */
#define HA_MAX_NUM_FLDS 4

/* All possible aarch64 target descriptors.  */
struct target_desc *tdesc_aarch64_list[AARCH64_MAX_SVE_VQ + 1][2/*pauth*/];

/* The standard register names, and all the valid aliases for them.  */
static const struct
{
  const char *const name;
  int regnum;
} aarch64_register_aliases[] =
{
  /* 64-bit register names.  */
  {"fp", AARCH64_FP_REGNUM},
  {"lr", AARCH64_LR_REGNUM},
  {"sp", AARCH64_SP_REGNUM},

  /* 32-bit register names.  */
  {"w0", AARCH64_X0_REGNUM + 0},
  {"w1", AARCH64_X0_REGNUM + 1},
  {"w2", AARCH64_X0_REGNUM + 2},
  {"w3", AARCH64_X0_REGNUM + 3},
  {"w4", AARCH64_X0_REGNUM + 4},
  {"w5", AARCH64_X0_REGNUM + 5},
  {"w6", AARCH64_X0_REGNUM + 6},
  {"w7", AARCH64_X0_REGNUM + 7},
  {"w8", AARCH64_X0_REGNUM + 8},
  {"w9", AARCH64_X0_REGNUM + 9},
  {"w10", AARCH64_X0_REGNUM + 10},
  {"w11", AARCH64_X0_REGNUM + 11},
  {"w12", AARCH64_X0_REGNUM + 12},
  {"w13", AARCH64_X0_REGNUM + 13},
  {"w14", AARCH64_X0_REGNUM + 14},
  {"w15", AARCH64_X0_REGNUM + 15},
  {"w16", AARCH64_X0_REGNUM + 16},
  {"w17", AARCH64_X0_REGNUM + 17},
  {"w18", AARCH64_X0_REGNUM + 18},
  {"w19", AARCH64_X0_REGNUM + 19},
  {"w20", AARCH64_X0_REGNUM + 20},
  {"w21", AARCH64_X0_REGNUM + 21},
  {"w22", AARCH64_X0_REGNUM + 22},
  {"w23", AARCH64_X0_REGNUM + 23},
  {"w24", AARCH64_X0_REGNUM + 24},
  {"w25", AARCH64_X0_REGNUM + 25},
  {"w26", AARCH64_X0_REGNUM + 26},
  {"w27", AARCH64_X0_REGNUM + 27},
  {"w28", AARCH64_X0_REGNUM + 28},
  {"w29", AARCH64_X0_REGNUM + 29},
  {"w30", AARCH64_X0_REGNUM + 30},

  /* specials */
  {"ip0", AARCH64_X0_REGNUM + 16},
  {"ip1", AARCH64_X0_REGNUM + 17}
};

/* The required core 'R' registers.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM!  */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  "pc", "cpsr"
};

/* The FP/SIMD 'V' registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM!  */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  "fpsr",
  "fpcr"
};

/* The SVE 'Z' and 'P' registers.  */
static const char *const aarch64_sve_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_SVE_Z0_REGNUM!  */
  "z0", "z1", "z2", "z3",
  "z4", "z5", "z6", "z7",
  "z8", "z9", "z10", "z11",
  "z12", "z13", "z14", "z15",
  "z16", "z17", "z18", "z19",
  "z20", "z21", "z22", "z23",
  "z24", "z25", "z26", "z27",
  "z28", "z29", "z30", "z31",
  "fpsr", "fpcr",
  "p0", "p1", "p2", "p3",
  "p4", "p5", "p6", "p7",
  "p8", "p9", "p10", "p11",
  "p12", "p13", "p14", "p15",
  "ffr", "vg"
};

static const char *const aarch64_pauth_register_names[] =
{
  /* Authentication mask for data pointer.  */
  "pauth_dmask",
  /* Authentication mask for code pointer.  */
  "pauth_cmask"
};

/* AArch64 prologue cache structure.  */
struct aarch64_prologue_cache
{
  /* The program counter at the start of the function.  It is used to
     identify this frame as a prologue frame.  */
  CORE_ADDR func;

  /* The program counter at the time this frame was created; i.e. where
     this function was called from.  It is used to identify this frame as a
     stub frame.  */
  CORE_ADDR prev_pc;

  /* The stack pointer at the time this frame was created; i.e. the
     caller's stack pointer when this function was called.  It is used
     to identify this frame.  */
  CORE_ADDR prev_sp;

  /* Is the target available to read from?  */
  int available_p;

  /* The frame base for this frame is just prev_sp - frame size.
     FRAMESIZE is the distance from the frame pointer to the
     initial stack pointer.  */
  int framesize;

  /* The register used to hold the frame pointer for this frame.  */
  int framereg;

  /* Saved register offsets.  */
  struct trad_frame_saved_reg *saved_regs;
};

static void
show_aarch64_debug (struct ui_file *file, int from_tty,
                    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
}

namespace {

/* Abstract instruction reader.  */

class abstract_instruction_reader
{
public:
  /* Read in one instruction.  */
  virtual ULONGEST read (CORE_ADDR memaddr, int len,
                         enum bfd_endian byte_order) = 0;
};

/* Instruction reader from real target.  */

class instruction_reader : public abstract_instruction_reader
{
public:
  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    override
  {
    return read_code_unsigned_integer (memaddr, len, byte_order);
  }
};

} // namespace

/* If address signing is enabled, mask off the signature bits from the link
   register, which is passed by value in ADDR, using the register values in
   THIS_FRAME.  */
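
/* Worked example (with assumed, merely plausible mask values): if
   pauth_cmask reads as 0x007f000000000000 (PAC bits 48-54 on a 48-bit
   VA system) and the signed LR value is 0x0020000000400564, then the
   "addr & ~cmask" operation below yields the usable return address
   0x0000000000400564.  */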

static CORE_ADDR
aarch64_frame_unmask_lr (struct gdbarch_tdep *tdep,
                         struct frame_info *this_frame, CORE_ADDR addr)
{
  if (tdep->has_pauth ()
      && frame_unwind_register_unsigned (this_frame,
                                         tdep->pauth_ra_state_regnum))
    {
      int cmask_num = AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base);
      CORE_ADDR cmask = frame_unwind_register_unsigned (this_frame, cmask_num);
      addr = addr & ~cmask;

      /* Record in the frame that the link register required unmasking.  */
      set_frame_previous_pc_masked (this_frame);
    }

  return addr;
}

/* Implement the "get_pc_address_flags" gdbarch method.  */

static std::string
aarch64_get_pc_address_flags (frame_info *frame, CORE_ADDR pc)
{
  if (pc != 0 && get_frame_pc_masked (frame))
    return "PAC";

  return "";
}

/* Analyze a prologue, looking for a recognizable stack frame
   and frame pointer.  Scan until we encounter a store that could
   clobber the stack frame unexpectedly, or an unknown instruction.  */
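
/* As a sketch of what the scan below recognizes, a typical
   compiler-generated prologue looks like:

     stp x29, x30, [sp, #-32]!  ; allocate frame, save FP and LR
     mov x29, sp                ; establish the frame pointer
     str x19, [sp, #16]         ; save a callee-saved register

   from which the analysis infers FRAMEREG = x29, FRAMESIZE = 32, and the
   stack offsets at which x29, x30 and x19 were saved.  The self tests
   further below walk very similar instruction sequences.  */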

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
                          CORE_ADDR start, CORE_ADDR limit,
                          struct aarch64_prologue_cache *cache,
                          abstract_instruction_reader& reader)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  int i;
  /* Track X registers and D registers in prologue.  */
  pv_t regs[AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT];

  for (i = 0; i < AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT; i++)
    regs[i] = pv_register (i, 0);
  pv_area stack (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));

  for (; start < limit; start += 4)
    {
      uint32_t insn;
      aarch64_inst inst;

      insn = reader.read (start, 4, byte_order_for_code);

      if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
        break;

      if (inst.opcode->iclass == addsub_imm
          && (inst.opcode->op == OP_ADD
              || strcmp ("sub", inst.opcode->name) == 0))
        {
          unsigned rd = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].reg.regno;

          gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM);

          if (inst.opcode->op == OP_ADD)
            {
              regs[rd] = pv_add_constant (regs[rn],
                                          inst.operands[2].imm.value);
            }
          else
            {
              regs[rd] = pv_add_constant (regs[rn],
                                          -inst.operands[2].imm.value);
            }
        }
      else if (inst.opcode->iclass == pcreladdr
               && inst.operands[1].type == AARCH64_OPND_ADDR_ADRP)
        {
          gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);

          regs[inst.operands[0].reg.regno] = pv_unknown ();
        }
      else if (inst.opcode->iclass == branch_imm)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == condbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == branch_reg)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == compbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->op == OP_MOVZ)
        {
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
          regs[inst.operands[0].reg.regno] = pv_unknown ();
        }
      else if (inst.opcode->iclass == log_shift
               && strcmp (inst.opcode->name, "orr") == 0)
        {
          unsigned rd = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].reg.regno;
          unsigned rm = inst.operands[2].reg.regno;

          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT);

          if (inst.operands[2].shifter.amount == 0
              && rn == AARCH64_SP_REGNUM)
            regs[rd] = regs[rm];
          else
            {
              if (aarch64_debug)
                {
                  debug_printf ("aarch64: prologue analysis gave up "
                                "addr=%s opcode=0x%x (orr x register)\n",
                                core_addr_to_string_nz (start), insn);
                }
              break;
            }
        }
      else if (inst.opcode->op == OP_STUR)
        {
          unsigned rt = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].addr.base_regno;
          int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);

          gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
          gdb_assert (!inst.operands[1].addr.offset.is_reg);

          stack.store
            (pv_add_constant (regs[rn], inst.operands[1].addr.offset.imm),
             size, regs[rt]);
        }
      else if ((inst.opcode->iclass == ldstpair_off
                || (inst.opcode->iclass == ldstpair_indexed
                    && inst.operands[2].addr.preind))
               && strcmp ("stp", inst.opcode->name) == 0)
        {
          /* STP with addressing mode Pre-indexed and Base register.  */
          unsigned rt1;
          unsigned rt2;
          unsigned rn = inst.operands[2].addr.base_regno;
          int32_t imm = inst.operands[2].addr.offset.imm;
          int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);

          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
                      || inst.operands[0].type == AARCH64_OPND_Ft);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2
                      || inst.operands[1].type == AARCH64_OPND_Ft2);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7);
          gdb_assert (!inst.operands[2].addr.offset.is_reg);

          /* If recording this store would invalidate the store area
             (perhaps because rn is not known) then we should abandon
             further prologue analysis.  */
          if (stack.store_would_trash (pv_add_constant (regs[rn], imm)))
            break;

          if (stack.store_would_trash (pv_add_constant (regs[rn], imm + 8)))
            break;

          rt1 = inst.operands[0].reg.regno;
          rt2 = inst.operands[1].reg.regno;
          if (inst.operands[0].type == AARCH64_OPND_Ft)
            {
              rt1 += AARCH64_X_REGISTER_COUNT;
              rt2 += AARCH64_X_REGISTER_COUNT;
            }

          stack.store (pv_add_constant (regs[rn], imm), size, regs[rt1]);
          stack.store (pv_add_constant (regs[rn], imm + size), size, regs[rt2]);

          if (inst.operands[2].addr.writeback)
            regs[rn] = pv_add_constant (regs[rn], imm);
        }
      else if ((inst.opcode->iclass == ldst_imm9 /* Signed immediate.  */
                || (inst.opcode->iclass == ldst_pos /* Unsigned immediate.  */
                    && (inst.opcode->op == OP_STR_POS
                        || inst.opcode->op == OP_STRF_POS)))
               && inst.operands[1].addr.base_regno == AARCH64_SP_REGNUM
               && strcmp ("str", inst.opcode->name) == 0)
        {
          /* STR (immediate) */
          unsigned int rt = inst.operands[0].reg.regno;
          int32_t imm = inst.operands[1].addr.offset.imm;
          unsigned int rn = inst.operands[1].addr.base_regno;
          int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
                      || inst.operands[0].type == AARCH64_OPND_Ft);

          if (inst.operands[0].type == AARCH64_OPND_Ft)
            rt += AARCH64_X_REGISTER_COUNT;

          stack.store (pv_add_constant (regs[rn], imm), size, regs[rt]);
          if (inst.operands[1].addr.writeback)
            regs[rn] = pv_add_constant (regs[rn], imm);
        }
      else if (inst.opcode->iclass == testbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == ic_system)
        {
          struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
          int ra_state_val = 0;

          if (insn == 0xd503233f /* paciasp.  */
              || insn == 0xd503237f /* pacibsp.  */)
            {
              /* Return addresses are mangled.  */
              ra_state_val = 1;
            }
          else if (insn == 0xd50323bf /* autiasp.  */
                   || insn == 0xd50323ff /* autibsp.  */)
            {
              /* Return addresses are not mangled.  */
              ra_state_val = 0;
            }
          else
            {
              if (aarch64_debug)
                debug_printf ("aarch64: prologue analysis gave up addr=%s"
                              " opcode=0x%x (iclass)\n",
                              core_addr_to_string_nz (start), insn);
              break;
            }

          if (tdep->has_pauth () && cache != nullptr)
            trad_frame_set_value (cache->saved_regs,
                                  tdep->pauth_ra_state_regnum,
                                  ra_state_val);
        }
      else
        {
          if (aarch64_debug)
            {
              debug_printf ("aarch64: prologue analysis gave up addr=%s"
                            " opcode=0x%x\n",
                            core_addr_to_string_nz (start), insn);
            }
          break;
        }
    }

  if (cache == NULL)
    return start;

  if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Frame pointer is fp.  Frame size is constant.  */
      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = -regs[AARCH64_FP_REGNUM].k;
    }
  else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Try the stack pointer.  */
      cache->framesize = -regs[AARCH64_SP_REGNUM].k;
      cache->framereg = AARCH64_SP_REGNUM;
    }
  else
    {
      /* We're just out of luck.  We don't know where the frame is.  */
      cache->framereg = -1;
      cache->framesize = 0;
    }

  for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
    {
      CORE_ADDR offset;

      if (stack.find_reg (gdbarch, i, &offset))
        cache->saved_regs[i].addr = offset;
    }

  for (i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
    {
      int regnum = gdbarch_num_regs (gdbarch);
      CORE_ADDR offset;

      if (stack.find_reg (gdbarch, i + AARCH64_X_REGISTER_COUNT,
                          &offset))
        cache->saved_regs[i + regnum + AARCH64_D0_REGNUM].addr = offset;
    }

  return start;
}

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
                          CORE_ADDR start, CORE_ADDR limit,
                          struct aarch64_prologue_cache *cache)
{
  instruction_reader reader;

  return aarch64_analyze_prologue (gdbarch, start, limit, cache,
                                   reader);
}

#if GDB_SELF_TEST

namespace selftests {

/* Instruction reader from manually cooked instruction sequences.  */

class instruction_reader_test : public abstract_instruction_reader
{
public:
  template<size_t SIZE>
  explicit instruction_reader_test (const uint32_t (&insns)[SIZE])
  : m_insns (insns), m_insns_size (SIZE)
  {}

  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    override
  {
    SELF_CHECK (len == 4);
    SELF_CHECK (memaddr % 4 == 0);
    SELF_CHECK (memaddr / 4 < m_insns_size);

    return m_insns[memaddr / 4];
  }

private:
  const uint32_t *m_insns;
  size_t m_insns_size;
};

static void
aarch64_analyze_prologue_test (void)
{
  struct gdbarch_info info;

  gdbarch_info_init (&info);
  info.bfd_arch_info = bfd_scan_arch ("aarch64");

  struct gdbarch *gdbarch = gdbarch_find_by_info (info);
  SELF_CHECK (gdbarch != NULL);

  struct aarch64_prologue_cache cache;
  cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);

  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  /* Test the simple prologue in which frame pointer is used.  */
  {
    static const uint32_t insns[] = {
      0xa9af7bfd, /* stp x29, x30, [sp,#-272]! */
      0x910003fd, /* mov x29, sp */
      0x97ffffe6, /* bl 0x400580 */
    };
    instruction_reader_test reader (insns);

    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
    SELF_CHECK (end == 4 * 2);

    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 272);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
        if (i == AARCH64_FP_REGNUM)
          SELF_CHECK (cache.saved_regs[i].addr == -272);
        else if (i == AARCH64_LR_REGNUM)
          SELF_CHECK (cache.saved_regs[i].addr == -264);
        else
          SELF_CHECK (cache.saved_regs[i].addr == -1);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
        int regnum = gdbarch_num_regs (gdbarch);

        SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
                    == -1);
      }
  }

  /* Test a prologue in which STR is used and frame pointer is not
     used.  */
  {
    static const uint32_t insns[] = {
      0xf81d0ff3, /* str x19, [sp, #-48]! */
      0xb9002fe0, /* str w0, [sp, #44] */
      0xf90013e1, /* str x1, [sp, #32] */
      0xfd000fe0, /* str d0, [sp, #24] */
      0xaa0203f3, /* mov x19, x2 */
      0xf94013e0, /* ldr x0, [sp, #32] */
    };
    instruction_reader_test reader (insns);

    trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    SELF_CHECK (end == 4 * 5);

    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 48);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
        if (i == 1)
          SELF_CHECK (cache.saved_regs[i].addr == -16);
        else if (i == 19)
          SELF_CHECK (cache.saved_regs[i].addr == -48);
        else
          SELF_CHECK (cache.saved_regs[i].addr == -1);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
        int regnum = gdbarch_num_regs (gdbarch);

        if (i == 0)
          SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
                      == -24);
        else
          SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
                      == -1);
      }
  }

  /* Test a prologue in which there is a return address signing instruction.  */
  if (tdep->has_pauth ())
    {
      static const uint32_t insns[] = {
        0xd503233f, /* paciasp */
        0xa9bd7bfd, /* stp x29, x30, [sp, #-48]! */
        0x910003fd, /* mov x29, sp */
        0xf801c3f3, /* str x19, [sp, #28] */
        0xb9401fa0, /* ldr x19, [x29, #28] */
      };
      instruction_reader_test reader (insns);

      trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
      CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache,
                                                reader);

      SELF_CHECK (end == 4 * 4);
      SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
      SELF_CHECK (cache.framesize == 48);

      for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
        {
          if (i == 19)
            SELF_CHECK (cache.saved_regs[i].addr == -20);
          else if (i == AARCH64_FP_REGNUM)
            SELF_CHECK (cache.saved_regs[i].addr == -48);
          else if (i == AARCH64_LR_REGNUM)
            SELF_CHECK (cache.saved_regs[i].addr == -40);
          else
            SELF_CHECK (cache.saved_regs[i].addr == -1);
        }

      if (tdep->has_pauth ())
        {
          SELF_CHECK (trad_frame_value_p (cache.saved_regs,
                                          tdep->pauth_ra_state_regnum));
          SELF_CHECK (cache.saved_regs[tdep->pauth_ra_state_regnum].addr == 1);
        }
    }
}
} // namespace selftests
#endif /* GDB_SELF_TEST */

/* Implement the "skip_prologue" gdbarch method.  */

static CORE_ADDR
aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  CORE_ADDR func_addr, limit_pc;

  /* See if we can determine the end of the prologue via the symbol
     table.  If so, then return either PC, or the PC after the
     prologue, whichever is greater.  */
  if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
    {
      CORE_ADDR post_prologue_pc
        = skip_prologue_using_sal (gdbarch, func_addr);

      if (post_prologue_pc != 0)
        return std::max (pc, post_prologue_pc);
    }

  /* Can't determine prologue from the symbol table, need to examine
     instructions.  */

  /* Find an upper limit on the function prologue using the debug
     information.  If the debug information could not be used to
     provide that bound, then use an arbitrary large number as the
     upper bound.  */
  limit_pc = skip_prologue_using_sal (gdbarch, pc);
  if (limit_pc == 0)
    limit_pc = pc + 128;  /* Magic.  */

  /* Try disassembling prologue.  */
  return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
}

/* Scan the function prologue for THIS_FRAME and populate the prologue
   cache CACHE.  */

static void
aarch64_scan_prologue (struct frame_info *this_frame,
                       struct aarch64_prologue_cache *cache)
{
  CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
  CORE_ADDR prologue_start;
  CORE_ADDR prologue_end;
  CORE_ADDR prev_pc = get_frame_pc (this_frame);
  struct gdbarch *gdbarch = get_frame_arch (this_frame);

  cache->prev_pc = prev_pc;

  /* Assume we do not find a frame.  */
  cache->framereg = -1;
  cache->framesize = 0;

  if (find_pc_partial_function (block_addr, NULL, &prologue_start,
                                &prologue_end))
    {
      struct symtab_and_line sal = find_pc_line (prologue_start, 0);

      if (sal.line == 0)
        {
          /* No line info so use the current PC.  */
          prologue_end = prev_pc;
        }
      else if (sal.end < prologue_end)
        {
          /* The next line begins after the function end.  */
          prologue_end = sal.end;
        }

      prologue_end = std::min (prologue_end, prev_pc);
      aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
    }
  else
    {
      CORE_ADDR frame_loc;

      frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
      if (frame_loc == 0)
        return;

      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = 16;
      cache->saved_regs[29].addr = 0;
      cache->saved_regs[30].addr = 8;
    }
}

/* Fill in *CACHE with information about the prologue of *THIS_FRAME.  This
   function may throw an exception if the inferior's registers or memory is
   not available.  */

static void
aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
                               struct aarch64_prologue_cache *cache)
{
  CORE_ADDR unwound_fp;
  int reg;

  aarch64_scan_prologue (this_frame, cache);

  if (cache->framereg == -1)
    return;

  unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
  if (unwound_fp == 0)
    return;

  cache->prev_sp = unwound_fp + cache->framesize;

  /* Calculate actual addresses of saved registers using offsets
     determined by aarch64_analyze_prologue.  */
  for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
    if (trad_frame_addr_p (cache->saved_regs, reg))
      cache->saved_regs[reg].addr += cache->prev_sp;

  cache->func = get_frame_func (this_frame);

  cache->available_p = 1;
}

/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.  Return
   a pointer to the current aarch64_prologue_cache in *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  try
    {
      aarch64_make_prologue_cache_1 (this_frame, cache);
    }
  catch (const gdb_exception_error &ex)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
        throw;
    }

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
                                           void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  /* Halt the backtrace at "_start".  */
  if (cache->prev_pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
    return UNWIND_OUTERMOST;

  /* We've hit a wall, stop.  */
  if (cache->prev_sp == 0)
    return UNWIND_OUTERMOST;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a normal frame is the current function's starting
   PC and the caller's SP when we were called.  */

static void
aarch64_prologue_this_id (struct frame_info *this_frame,
                          void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    *this_id = frame_id_build_unavailable_stack (cache->func);
  else
    *this_id = frame_id_build (cache->prev_sp, cache->func);
}

/* Implement the "prev_register" frame_unwind method.  */

static struct value *
aarch64_prologue_prev_register (struct frame_info *this_frame,
                                void **this_cache, int prev_regnum)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  /* If we are asked to unwind the PC, then we need to return the LR
     instead.  The prologue may save PC, but it will point into this
     frame's prologue, not the next frame's resume location.  */
  if (prev_regnum == AARCH64_PC_REGNUM)
    {
      CORE_ADDR lr;
      struct gdbarch *gdbarch = get_frame_arch (this_frame);
      struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);

      if (tdep->has_pauth ()
          && trad_frame_value_p (cache->saved_regs,
                                 tdep->pauth_ra_state_regnum))
        lr = aarch64_frame_unmask_lr (tdep, this_frame, lr);

      return frame_unwind_got_constant (this_frame, prev_regnum, lr);
    }

  /* SP is generally not saved to the stack, but this frame is
     identified by the next frame's stack pointer at the time of the
     call.  The value was already reconstructed into PREV_SP.  */
  /*
         +----------+  ^
         | saved lr |  |
      +->| saved fp |--+
      |  |          |
      |  |          | <- Previous SP
      |  +----------+
      |  | saved lr |
      +--| saved fp |<- FP
         |          |
         |          |<- SP
         +----------+  */
  if (prev_regnum == AARCH64_SP_REGNUM)
    return frame_unwind_got_constant (this_frame, prev_regnum,
                                      cache->prev_sp);

  return trad_frame_get_prev_register (this_frame, cache->saved_regs,
                                       prev_regnum);
}

/* AArch64 prologue unwinder.  */
struct frame_unwind aarch64_prologue_unwind =
{
  NORMAL_FRAME,
  aarch64_prologue_frame_unwind_stop_reason,
  aarch64_prologue_this_id,
  aarch64_prologue_prev_register,
  NULL,
  default_frame_sniffer
};

/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.  Return
   a pointer to the current aarch64_prologue_cache in *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  try
    {
      cache->prev_sp = get_frame_register_unsigned (this_frame,
                                                    AARCH64_SP_REGNUM);
      cache->prev_pc = get_frame_pc (this_frame);
      cache->available_p = 1;
    }
  catch (const gdb_exception_error &ex)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
        throw;
    }

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame,
                                       void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a stub frame is the current SP and LR.  */

static void
aarch64_stub_this_id (struct frame_info *this_frame,
                      void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (cache->available_p)
    *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
  else
    *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
}

/* Implement the "sniffer" frame_unwind method.  */

static int
aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
                             struct frame_info *this_frame,
                             void **this_prologue_cache)
{
  CORE_ADDR addr_in_block;
  gdb_byte dummy[4];

  addr_in_block = get_frame_address_in_block (this_frame);
  if (in_plt_section (addr_in_block)
      /* We also use the stub unwinder if the target memory is unreadable
         to avoid having the prologue unwinder trying to read it.  */
      || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
    return 1;

  return 0;
}

/* AArch64 stub unwinder.  */
struct frame_unwind aarch64_stub_unwind =
{
  NORMAL_FRAME,
  aarch64_stub_frame_unwind_stop_reason,
  aarch64_stub_this_id,
  aarch64_prologue_prev_register,
  NULL,
  aarch64_stub_unwind_sniffer
};

/* Return the frame base address of *THIS_FRAME.  */

static CORE_ADDR
aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  return cache->prev_sp - cache->framesize;
}

/* AArch64 default frame base information.  */
struct frame_base aarch64_normal_base =
{
  &aarch64_prologue_unwind,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base
};

/* Return the value of the REGNUM register in the previous frame of
   *THIS_FRAME.  */

static struct value *
aarch64_dwarf2_prev_register (struct frame_info *this_frame,
                              void **this_cache, int regnum)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (get_frame_arch (this_frame));
  CORE_ADDR lr;

  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      lr = aarch64_frame_unmask_lr (tdep, this_frame, lr);
      return frame_unwind_got_constant (this_frame, regnum, lr);

    default:
      internal_error (__FILE__, __LINE__,
                      _("Unexpected register %d"), regnum);
    }
}

static const unsigned char op_lit0 = DW_OP_lit0;
static const unsigned char op_lit1 = DW_OP_lit1;

/* Implement the "init_reg" dwarf2_frame_ops method.  */

static void
aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
                               struct dwarf2_frame_state_reg *reg,
                               struct frame_info *this_frame)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      reg->how = DWARF2_FRAME_REG_FN;
      reg->loc.fn = aarch64_dwarf2_prev_register;
      return;

    case AARCH64_SP_REGNUM:
      reg->how = DWARF2_FRAME_REG_CFA;
      return;
    }

  /* Init pauth registers.  */
  if (tdep->has_pauth ())
    {
      if (regnum == tdep->pauth_ra_state_regnum)
        {
          /* Initialize RA_STATE to zero.  */
          reg->how = DWARF2_FRAME_REG_SAVED_VAL_EXP;
          reg->loc.exp.start = &op_lit0;
          reg->loc.exp.len = 1;
          return;
        }
      else if (regnum == AARCH64_PAUTH_DMASK_REGNUM (tdep->pauth_reg_base)
               || regnum == AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base))
        {
          reg->how = DWARF2_FRAME_REG_SAME_VALUE;
          return;
        }
    }
}

/* Implement the execute_dwarf_cfa_vendor_op method.  */
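
/* For example, a compiler that signs return addresses (such as GCC with
   -mbranch-protection=pac-ret) typically emits
   DW_CFA_AARCH64_negate_ra_state in the CFI immediately after the
   "paciasp" instruction, flipping RA_STATE from 0 to 1 (return address
   mangled), and again immediately after "autiasp", flipping it back
   to 0.  */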

static bool
aarch64_execute_dwarf_cfa_vendor_op (struct gdbarch *gdbarch, gdb_byte op,
                                     struct dwarf2_frame_state *fs)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  struct dwarf2_frame_state_reg *ra_state;

  if (op == DW_CFA_AARCH64_negate_ra_state)
    {
      /* On systems without pauth, treat as a nop.  */
      if (!tdep->has_pauth ())
        return true;

      /* Allocate RA_STATE column if it's not allocated yet.  */
      fs->regs.alloc_regs (AARCH64_DWARF_PAUTH_RA_STATE + 1);

      /* Toggle the status of RA_STATE between 0 and 1.  */
      ra_state = &(fs->regs.reg[AARCH64_DWARF_PAUTH_RA_STATE]);
      ra_state->how = DWARF2_FRAME_REG_SAVED_VAL_EXP;

      if (ra_state->loc.exp.start == nullptr
          || ra_state->loc.exp.start == &op_lit0)
        ra_state->loc.exp.start = &op_lit1;
      else
        ra_state->loc.exp.start = &op_lit0;

      ra_state->loc.exp.len = 1;

      return true;
    }

  return false;
}

/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

struct stack_item_t
{
  /* Value to pass on stack.  It can be NULL if this item is for stack
     padding.  */
  const gdb_byte *data;

  /* Size in bytes of value to pass on stack.  */
  int len;
};

/* Implement the gdbarch type alignment method, overrides the generic
   alignment algorithm for anything that is aarch64 specific.  */
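
/* For instance, under the rule below an 8- or 16-byte short vector gets
   its natural (size) alignment, an over-sized 32-byte vector is capped
   at 16-byte alignment, and every non-vector type returns 0 so that the
   generic alignment code decides.  */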

static ULONGEST
aarch64_type_align (gdbarch *gdbarch, struct type *t)
{
  t = check_typedef (t);
  if (TYPE_CODE (t) == TYPE_CODE_ARRAY && TYPE_VECTOR (t))
    {
      /* Use the natural alignment for vector types (the same for
         scalar type), but the maximum alignment is 128-bit.  */
      if (TYPE_LENGTH (t) > 16)
        return 16;
      else
        return TYPE_LENGTH (t);
    }

  /* Allow the common code to calculate the alignment.  */
  return 0;
}

/* Worker function for aapcs_is_vfp_call_or_return_candidate.

   Return the number of registers required, or -1 on failure.

   When encountering a base element, if FUNDAMENTAL_TYPE is not set then set it
   to the element, else fail if the type of this element does not match the
   existing value.  */

static int
aapcs_is_vfp_call_or_return_candidate_1 (struct type *type,
                                         struct type **fundamental_type)
{
  if (type == nullptr)
    return -1;

  switch (TYPE_CODE (type))
    {
    case TYPE_CODE_FLT:
      if (TYPE_LENGTH (type) > 16)
        return -1;

      if (*fundamental_type == nullptr)
        *fundamental_type = type;
      else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
               || TYPE_CODE (type) != TYPE_CODE (*fundamental_type))
        return -1;

      return 1;

    case TYPE_CODE_COMPLEX:
      {
        struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
        if (TYPE_LENGTH (target_type) > 16)
          return -1;

        if (*fundamental_type == nullptr)
          *fundamental_type = target_type;
        else if (TYPE_LENGTH (target_type) != TYPE_LENGTH (*fundamental_type)
                 || TYPE_CODE (target_type) != TYPE_CODE (*fundamental_type))
          return -1;

        return 2;
      }

    case TYPE_CODE_ARRAY:
      {
        if (TYPE_VECTOR (type))
          {
            if (TYPE_LENGTH (type) != 8 && TYPE_LENGTH (type) != 16)
              return -1;

            if (*fundamental_type == nullptr)
              *fundamental_type = type;
            else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
                     || TYPE_CODE (type) != TYPE_CODE (*fundamental_type))
              return -1;

            return 1;
          }
        else
          {
            struct type *target_type = TYPE_TARGET_TYPE (type);
            int count = aapcs_is_vfp_call_or_return_candidate_1
                          (target_type, fundamental_type);

            if (count == -1)
              return count;

            count *= (TYPE_LENGTH (type) / TYPE_LENGTH (target_type));
            return count;
          }
      }

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      {
        int count = 0;

        for (int i = 0; i < TYPE_NFIELDS (type); i++)
          {
            /* Ignore any static fields.  */
            if (field_is_static (&TYPE_FIELD (type, i)))
              continue;

            struct type *member = check_typedef (TYPE_FIELD_TYPE (type, i));

            int sub_count = aapcs_is_vfp_call_or_return_candidate_1
                              (member, fundamental_type);
            if (sub_count == -1)
              return -1;
            count += sub_count;
          }

        /* Ensure there is no padding between the fields (allowing for empty
           zero length structs).  */
        int ftype_length = (*fundamental_type == nullptr)
                           ? 0 : TYPE_LENGTH (*fundamental_type);
        if (count * ftype_length != TYPE_LENGTH (type))
          return -1;

        return count;
      }

    default:
      break;
    }

  return -1;
}

/* Return true if an argument, whose type is described by TYPE, can be passed or
   returned in simd/fp registers, providing enough parameter passing registers
   are available.  This is as described in the AAPCS64.

   Upon successful return, *COUNT returns the number of needed registers,
   *FUNDAMENTAL_TYPE contains the type of those registers.

   Candidate as per the AAPCS64 5.4.2.C is either a:
   - float.
   - short-vector.
   - HFA (Homogeneous Floating-point Aggregate, 4.3.5.1).  A Composite type
     where all the members are floats and has at most 4 members.
   - HVA (Homogeneous Short-vector Aggregate, 4.3.5.2).  A Composite type
     where all the members are short vectors and has at most 4 members.
   - Complex (7.1.1)

   Note that HFAs and HVAs can include nested structures and arrays.  */
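
/* Illustrative candidates, assuming the usual C layout rules: a
   "struct { float a, b, c; }" is an HFA with count 3 and fundamental
   type float (so three S registers); a "float _Complex" gives count 2;
   a "struct { double d; int i; }" mixes member types and is
   rejected.  */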

static bool
aapcs_is_vfp_call_or_return_candidate (struct type *type, int *count,
                                       struct type **fundamental_type)
{
  if (type == nullptr)
    return false;

  *fundamental_type = nullptr;

  int ag_count = aapcs_is_vfp_call_or_return_candidate_1 (type,
                                                          fundamental_type);

  if (ag_count > 0 && ag_count <= HA_MAX_NUM_FLDS)
    {
      *count = ag_count;
      return true;
    }
  else
    return false;
}

/* AArch64 function call information structure.  */
struct aarch64_call_info
{
  /* The current argument number.  */
  unsigned argnum = 0;

  /* The next general purpose register number, equivalent to NGRN as
     described in the AArch64 Procedure Call Standard.  */
  unsigned ngrn = 0;

  /* The next SIMD and floating point register number, equivalent to
     NSRN as described in the AArch64 Procedure Call Standard.  */
  unsigned nsrn = 0;

  /* The next stacked argument address, equivalent to NSAA as
     described in the AArch64 Procedure Call Standard.  */
  unsigned nsaa = 0;

  /* Stack item vector.  */
  std::vector<stack_item_t> si;
};

/* Pass a value in a sequence of consecutive X registers.  The caller
   is responsible for ensuring sufficient registers are available.  */
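
/* For example, a 16-byte struct starting at NGRN = 0 is split below
   across x0 (first 8 bytes) and x1 (remaining 8 bytes); on a big-endian
   target a trailing sub-word chunk of a struct or union is additionally
   shifted into the most significant bytes of its register.  */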

static void
pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
           struct aarch64_call_info *info, struct type *type,
           struct value *arg)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int len = TYPE_LENGTH (type);
  enum type_code typecode = TYPE_CODE (type);
  int regnum = AARCH64_X0_REGNUM + info->ngrn;
  const bfd_byte *buf = value_contents (arg);

  info->argnum++;

  while (len > 0)
    {
      int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
      CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
                                                   byte_order);

      /* Adjust sub-word struct/union args when big-endian.  */
      if (byte_order == BFD_ENDIAN_BIG
          && partial_len < X_REGISTER_SIZE
          && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
        regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);

      if (aarch64_debug)
        {
          debug_printf ("arg %d in %s = 0x%s\n", info->argnum,
                        gdbarch_register_name (gdbarch, regnum),
                        phex (regval, X_REGISTER_SIZE));
        }
      regcache_cooked_write_unsigned (regcache, regnum, regval);
      len -= partial_len;
      buf += partial_len;
      regnum++;
    }
}

/* Attempt to marshall a value in a V register.  Return 1 if
   successful, or 0 if insufficient registers are available.  This
   function, unlike the equivalent pass_in_x() function, does not
   handle arguments spread across multiple registers.  */

static int
pass_in_v (struct gdbarch *gdbarch,
           struct regcache *regcache,
           struct aarch64_call_info *info,
           int len, const bfd_byte *buf)
{
  if (info->nsrn < 8)
    {
      int regnum = AARCH64_V0_REGNUM + info->nsrn;
      /* Enough space for a full vector register.  */
      gdb_byte reg[register_size (gdbarch, regnum)];
      gdb_assert (len <= sizeof (reg));

      info->argnum++;
      info->nsrn++;

      memset (reg, 0, sizeof (reg));
      /* PCS C.1, the argument is allocated to the least significant
         bits of V register.  */
      memcpy (reg, buf, len);
      regcache->cooked_write (regnum, reg);

      if (aarch64_debug)
        {
          debug_printf ("arg %d in %s\n", info->argnum,
                        gdbarch_register_name (gdbarch, regnum));
        }
      return 1;
    }
  info->nsrn = 8;
  return 0;
}

/* Marshall an argument onto the stack.  */
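
/* For example, pushing a 12-byte argument whose type has 8-byte (or
   smaller) alignment at NSAA = 0 appends a 12-byte data item plus a
   4-byte padding item, leaving NSAA = 16.  */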

static void
pass_on_stack (struct aarch64_call_info *info, struct type *type,
               struct value *arg)
{
  const bfd_byte *buf = value_contents (arg);
  int len = TYPE_LENGTH (type);
  int align;
  stack_item_t item;

  info->argnum++;

  align = type_align (type);

  /* PCS C.17 The stack should be aligned to the larger of 8 bytes or the
     natural alignment of the argument's type.  */
  align = align_up (align, 8);

  /* The AArch64 PCS requires at most doubleword alignment.  */
  if (align > 16)
    align = 16;

  if (aarch64_debug)
    {
      debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
                    info->nsaa);
    }

  item.len = len;
  item.data = buf;
  info->si.push_back (item);

  info->nsaa += len;
  if (info->nsaa & (align - 1))
    {
      /* Push stack alignment padding.  */
      int pad = align - (info->nsaa & (align - 1));

      item.len = pad;
      item.data = NULL;

      info->si.push_back (item);
      info->nsaa += pad;
    }
}

/* Marshall an argument into a sequence of one or more consecutive X
   registers or, if insufficient X registers are available, then onto
   the stack.  */
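
/* For example, a 16-byte value needs two X registers; with NGRN = 7
   only one remains, so the whole value goes onto the stack and NGRN is
   set to 8, ensuring no later argument is backfilled into x7
   (PCS C.13).  */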

static void
pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
                    struct aarch64_call_info *info, struct type *type,
                    struct value *arg)
{
  int len = TYPE_LENGTH (type);
  int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;

  /* PCS C.13 - Pass in registers if we have enough spare */
  if (info->ngrn + nregs <= 8)
    {
      pass_in_x (gdbarch, regcache, info, type, arg);
      info->ngrn += nregs;
    }
  else
    {
      info->ngrn = 8;
      pass_on_stack (info, type, arg);
    }
}

/* Pass a value, which is of type arg_type, in a V register.  Assumes value is
   an aapcs_is_vfp_call_or_return_candidate and there are enough spare V
   registers.  A return value of false is an error state as the value will
   have been partially passed to the stack.  */
static bool
pass_in_v_vfp_candidate (struct gdbarch *gdbarch, struct regcache *regcache,
                         struct aarch64_call_info *info, struct type *arg_type,
                         struct value *arg)
{
  switch (TYPE_CODE (arg_type))
    {
    case TYPE_CODE_FLT:
      return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type),
                        value_contents (arg));
      break;

    case TYPE_CODE_COMPLEX:
      {
        const bfd_byte *buf = value_contents (arg);
        struct type *target_type = check_typedef (TYPE_TARGET_TYPE (arg_type));

        if (!pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type),
                        buf))
          return false;

        return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type),
                          buf + TYPE_LENGTH (target_type));
      }

    case TYPE_CODE_ARRAY:
      if (TYPE_VECTOR (arg_type))
        return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type),
                          value_contents (arg));
      /* fall through.  */

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      for (int i = 0; i < TYPE_NFIELDS (arg_type); i++)
        {
          /* Don't include static fields.  */
          if (field_is_static (&TYPE_FIELD (arg_type, i)))
            continue;

          struct value *field = value_primitive_field (arg, 0, i, arg_type);
          struct type *field_type = check_typedef (value_type (field));

          if (!pass_in_v_vfp_candidate (gdbarch, regcache, info, field_type,
                                        field))
            return false;
        }
      return true;

    default:
      return false;
    }
}

/* Implement the "push_dummy_call" gdbarch method.  */

static CORE_ADDR
aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
                         struct regcache *regcache, CORE_ADDR bp_addr,
                         int nargs,
                         struct value **args, CORE_ADDR sp,
                         function_call_return_method return_method,
                         CORE_ADDR struct_addr)
{
  int argnum;
  struct aarch64_call_info info;

  /* We need to know what the type of the called function is in order
     to determine the number of named/anonymous arguments for the
     actual argument placement, and the return type in order to handle
     return value correctly.

1647 handler is consulted first and may decide to return in memory (eg
1648 class with copy constructor returned by value), this will cause
1649 the generic code to allocate space AND insert an initial leading
1650 argument.
1651
1652 If the language code does not decide to pass in memory then the
1653 target code is consulted.
1654
1655 If the language code decides to pass in memory we want to move
1656 the pointer inserted as the initial argument from the argument
1657 list and into X8, the conventional AArch64 struct return pointer
1658 register. */
1659
1660 /* Set the return address. For the AArch64, the return breakpoint
1661 is always at BP_ADDR. */
1662 regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);
1663
1664 /* If we were given an initial argument for the return slot, lose it. */
1665 if (return_method == return_method_hidden_param)
1666 {
1667 args++;
1668 nargs--;
1669 }
1670
1671 /* The struct_return pointer occupies X8. */
1672 if (return_method != return_method_normal)
1673 {
1674 if (aarch64_debug)
1675 {
1676 debug_printf ("struct return in %s = 0x%s\n",
1677 gdbarch_register_name (gdbarch,
1678 AARCH64_STRUCT_RETURN_REGNUM),
1679 paddress (gdbarch, struct_addr));
1680 }
1681 regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
1682 struct_addr);
1683 }
1684
1685 for (argnum = 0; argnum < nargs; argnum++)
1686 {
1687 struct value *arg = args[argnum];
1688 struct type *arg_type, *fundamental_type;
1689 int len, elements;
1690
1691 arg_type = check_typedef (value_type (arg));
1692 len = TYPE_LENGTH (arg_type);
1693
      /* If arg can be passed in V registers as per the AAPCS64, then do so
         if there are enough spare registers.  */
      if (aapcs_is_vfp_call_or_return_candidate (arg_type, &elements,
                                                 &fundamental_type))
        {
          if (info.nsrn + elements <= 8)
            {
              /* We know that we have sufficient registers available,
                 therefore this will never need to fall back to the
                 stack.  */
              if (!pass_in_v_vfp_candidate (gdbarch, regcache, &info, arg_type,
                                            arg))
                gdb_assert_not_reached ("Failed to push args");
            }
          else
            {
              info.nsrn = 8;
              pass_on_stack (&info, arg_type, arg);
            }
          continue;
        }

      switch (TYPE_CODE (arg_type))
        {
        case TYPE_CODE_INT:
        case TYPE_CODE_BOOL:
        case TYPE_CODE_CHAR:
        case TYPE_CODE_RANGE:
        case TYPE_CODE_ENUM:
          if (len < 4)
            {
              /* Promote to 32 bit integer.  */
              if (TYPE_UNSIGNED (arg_type))
                arg_type = builtin_type (gdbarch)->builtin_uint32;
              else
                arg_type = builtin_type (gdbarch)->builtin_int32;
              arg = value_cast (arg_type, arg);
            }
          pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;

        case TYPE_CODE_STRUCT:
        case TYPE_CODE_ARRAY:
        case TYPE_CODE_UNION:
          if (len > 16)
            {
              /* PCS B.7 Aggregates larger than 16 bytes are passed by
                 invisible reference.  */

              /* Allocate aligned storage.  */
              sp = align_down (sp - len, 16);

              /* Write the real data into the stack.  */
              write_memory (sp, value_contents (arg), len);

              /* Construct the indirection.  */
              arg_type = lookup_pointer_type (arg_type);
              arg = value_from_pointer (arg_type, sp);
              pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
            }
          else
            /* PCS C.15 / C.18 multiple values pass.  */
            pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;

        default:
          pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;
        }
    }

  /* Make sure stack retains 16 byte alignment.  */
  if (info.nsaa & 15)
    sp -= 16 - (info.nsaa & 15);

  while (!info.si.empty ())
    {
      const stack_item_t &si = info.si.back ();

      sp -= si.len;
      if (si.data != NULL)
        write_memory (sp, si.data, si.len);
      info.si.pop_back ();
    }

  /* Finally, update the SP register.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);

  return sp;
}

/* Implement the "frame_align" gdbarch method.  */

static CORE_ADDR
aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
{
  /* Align the stack to sixteen bytes.  */
  return sp & ~(CORE_ADDR) 15;
}

/* Return the type for an AdvSIMD Q register.  */

static struct type *
aarch64_vnq_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnq_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint128;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int128;
      append_composite_type_field (t, "s", elem);

      tdep->vnq_type = t;
    }

  return tdep->vnq_type;
}

/* Return the type for an AdvSIMD D register.  */

static struct type *
aarch64_vnd_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnd_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_double;
      append_composite_type_field (t, "f", elem);

      elem = builtin_type (gdbarch)->builtin_uint64;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int64;
      append_composite_type_field (t, "s", elem);

      tdep->vnd_type = t;
    }

  return tdep->vnd_type;
}

/* Return the type for an AdvSIMD S register.  */

static struct type *
aarch64_vns_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vns_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_float;
      append_composite_type_field (t, "f", elem);

      elem = builtin_type (gdbarch)->builtin_uint32;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int32;
      append_composite_type_field (t, "s", elem);

      tdep->vns_type = t;
    }

  return tdep->vns_type;
}

/* Return the type for an AdvSIMD H register.  */

static struct type *
aarch64_vnh_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnh_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_half;
      append_composite_type_field (t, "f", elem);

      elem = builtin_type (gdbarch)->builtin_uint16;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int16;
      append_composite_type_field (t, "s", elem);

      tdep->vnh_type = t;
    }

  return tdep->vnh_type;
}

/* Return the type for an AdvSIMD B register.  */

static struct type *
aarch64_vnb_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnb_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint8;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int8;
      append_composite_type_field (t, "s", elem);

      tdep->vnb_type = t;
    }

  return tdep->vnb_type;
}

1937 /* Return the type for an AdvSIMD V register. */
1938
1939 static struct type *
1940 aarch64_vnv_type (struct gdbarch *gdbarch)
1941 {
1942 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1943
1944 if (tdep->vnv_type == NULL)
1945 {
1946 /* The other AArch64 pseudo registers (Q,D,H,S,B) refer to a single value
1947 slice from the non-pseudo vector registers. However, NEON V registers
1948 are always vector registers, and need constructing as such. */
1949 const struct builtin_type *bt = builtin_type (gdbarch);
1950
1951 struct type *t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnv",
1952 TYPE_CODE_UNION);
1953
1954 struct type *sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
1955 TYPE_CODE_UNION);
1956 append_composite_type_field (sub, "f",
1957 init_vector_type (bt->builtin_double, 2));
1958 append_composite_type_field (sub, "u",
1959 init_vector_type (bt->builtin_uint64, 2));
1960 append_composite_type_field (sub, "s",
1961 init_vector_type (bt->builtin_int64, 2));
1962 append_composite_type_field (t, "d", sub);
1963
1964 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
1965 TYPE_CODE_UNION);
1966 append_composite_type_field (sub, "f",
1967 init_vector_type (bt->builtin_float, 4));
1968 append_composite_type_field (sub, "u",
1969 init_vector_type (bt->builtin_uint32, 4));
1970 append_composite_type_field (sub, "s",
1971 init_vector_type (bt->builtin_int32, 4));
1972 append_composite_type_field (t, "s", sub);
1973
1974 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
1975 TYPE_CODE_UNION);
1976 append_composite_type_field (sub, "f",
1977 init_vector_type (bt->builtin_half, 8));
1978 append_composite_type_field (sub, "u",
1979 init_vector_type (bt->builtin_uint16, 8));
1980 append_composite_type_field (sub, "s",
1981 init_vector_type (bt->builtin_int16, 8));
1982 append_composite_type_field (t, "h", sub);
1983
1984 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
1985 TYPE_CODE_UNION);
1986 append_composite_type_field (sub, "u",
1987 init_vector_type (bt->builtin_uint8, 16));
1988 append_composite_type_field (sub, "s",
1989 init_vector_type (bt->builtin_int8, 16));
1990 append_composite_type_field (t, "b", sub);
1991
1992 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
1993 TYPE_CODE_UNION);
1994 append_composite_type_field (sub, "u",
1995 init_vector_type (bt->builtin_uint128, 1));
1996 append_composite_type_field (sub, "s",
1997 init_vector_type (bt->builtin_int128, 1));
1998 append_composite_type_field (t, "q", sub);
1999
2000 tdep->vnv_type = t;
2001 }
2002
2003 return tdep->vnv_type;
2004 }
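
/* A hedged sketch (editorial) of the union built above, written as a
   roughly equivalent C declaration; the field names mirror the members
   appended by aarch64_vnv_type, with _Float16 standing in for the
   half-float type:

     union __gdb_builtin_type_vnv
     {
       union { double f[2];   uint64_t u[2];  int64_t s[2];  } d;
       union { float f[4];    uint32_t u[4];  int32_t s[4];  } s;
       union { _Float16 f[8]; uint16_t u[8];  int16_t s[8];  } h;
       union { uint8_t u[16]; int8_t s[16]; } b;
       union { unsigned __int128 u[1]; __int128 s[1]; } q;
     };

   so a user can write e.g. "print $v0.d.f[1]" to view the second
   double lane of V0. */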
2005
2006 /* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
2007
2008 static int
2009 aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
2010 {
2011 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2012
2013 if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
2014 return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;
2015
2016 if (reg == AARCH64_DWARF_SP)
2017 return AARCH64_SP_REGNUM;
2018
2019 if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
2020 return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;
2021
2022 if (reg == AARCH64_DWARF_SVE_VG)
2023 return AARCH64_SVE_VG_REGNUM;
2024
2025 if (reg == AARCH64_DWARF_SVE_FFR)
2026 return AARCH64_SVE_FFR_REGNUM;
2027
2028 if (reg >= AARCH64_DWARF_SVE_P0 && reg <= AARCH64_DWARF_SVE_P0 + 15)
2029 return AARCH64_SVE_P0_REGNUM + reg - AARCH64_DWARF_SVE_P0;
2030
2031 if (reg >= AARCH64_DWARF_SVE_Z0 && reg <= AARCH64_DWARF_SVE_Z0 + 15)
2032 return AARCH64_SVE_Z0_REGNUM + reg - AARCH64_DWARF_SVE_Z0;
2033
2034 if (tdep->has_pauth ())
2035 {
2036 if (reg >= AARCH64_DWARF_PAUTH_DMASK && reg <= AARCH64_DWARF_PAUTH_CMASK)
2037 return tdep->pauth_reg_base + reg - AARCH64_DWARF_PAUTH_DMASK;
2038
2039 if (reg == AARCH64_DWARF_PAUTH_RA_STATE)
2040 return tdep->pauth_ra_state_regnum;
2041 }
2042
2043 return -1;
2044 }
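
/* Worked examples of the mapping above, using the standard AArch64
   DWARF register numbers (editorial illustration):

     DWARF 0  (x0) -> AARCH64_X0_REGNUM
     DWARF 31 (sp) -> AARCH64_SP_REGNUM
     DWARF 64 (v0) -> AARCH64_V0_REGNUM

   Anything unrecognized maps to -1, meaning "no such register". */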
2045
2046 /* Implement the "print_insn" gdbarch method. */
2047
2048 static int
2049 aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
2050 {
2051 info->symbols = NULL;
2052 return default_print_insn (memaddr, info);
2053 }
2054
2055 /* AArch64 BRK software debug mode instruction.
2056 Note that AArch64 code is always little-endian.
2057 1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000. */
2058 constexpr gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
2059
2060 typedef BP_MANIPULATION (aarch64_default_breakpoint) aarch64_breakpoint;
2061
2062 /* Extract from an array REGS containing the (raw) register state a
2063 function return value of type TYPE, and copy that, in virtual
2064 format, into VALBUF. */
2065
2066 static void
2067 aarch64_extract_return_value (struct type *type, struct regcache *regs,
2068 gdb_byte *valbuf)
2069 {
2070 struct gdbarch *gdbarch = regs->arch ();
2071 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2072 int elements;
2073 struct type *fundamental_type;
2074
2075 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2076 &fundamental_type))
2077 {
2078 int len = TYPE_LENGTH (fundamental_type);
2079
2080 for (int i = 0; i < elements; i++)
2081 {
2082 int regno = AARCH64_V0_REGNUM + i;
2083 /* Enough space for a full vector register. */
2084 gdb_byte buf[register_size (gdbarch, regno)];
2085 gdb_assert (len <= sizeof (buf));
2086
2087 if (aarch64_debug)
2088 {
2089 debug_printf ("read HFA or HVA return value element %d from %s\n",
2090 i + 1,
2091 gdbarch_register_name (gdbarch, regno));
2092 }
2093 regs->cooked_read (regno, buf);
2094
2095 memcpy (valbuf, buf, len);
2096 valbuf += len;
2097 }
2098 }
2099 else if (TYPE_CODE (type) == TYPE_CODE_INT
2100 || TYPE_CODE (type) == TYPE_CODE_CHAR
2101 || TYPE_CODE (type) == TYPE_CODE_BOOL
2102 || TYPE_CODE (type) == TYPE_CODE_PTR
2103 || TYPE_IS_REFERENCE (type)
2104 || TYPE_CODE (type) == TYPE_CODE_ENUM)
2105 {
2106 /* If the type is a plain integer, then the access is
2107 straightforward. Otherwise we have to do a little more
2108 work. */
2109 int len = TYPE_LENGTH (type);
2110 int regno = AARCH64_X0_REGNUM;
2111 ULONGEST tmp;
2112
2113 while (len > 0)
2114 {
2115 /* By using store_unsigned_integer we avoid having to do
2116 anything special for small big-endian values. */
2117 regcache_cooked_read_unsigned (regs, regno++, &tmp);
2118 store_unsigned_integer (valbuf,
2119 (len > X_REGISTER_SIZE
2120 ? X_REGISTER_SIZE : len), byte_order, tmp);
2121 len -= X_REGISTER_SIZE;
2122 valbuf += X_REGISTER_SIZE;
2123 }
2124 }
2125 else
2126 {
2127 /* For a structure or union the behaviour is as if the value had
2128 been stored to word-aligned memory and then loaded into
2129 registers with 64-bit load instruction(s). */
2130 int len = TYPE_LENGTH (type);
2131 int regno = AARCH64_X0_REGNUM;
2132 bfd_byte buf[X_REGISTER_SIZE];
2133
2134 while (len > 0)
2135 {
2136 regs->cooked_read (regno++, buf);
2137 memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2138 len -= X_REGISTER_SIZE;
2139 valbuf += X_REGISTER_SIZE;
2140 }
2141 }
2142 }
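
/* Editorial example of the aggregate case above: a 12-byte type such
   as

     struct { int32_t a[3]; }

   is returned in X0 plus the low four bytes of X1; the copy loop
   reads X0 and X1 and transfers 8 + 4 bytes into VALBUF. */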
2143
2144
2145 /* Will a function return an aggregate type in memory or in a
2146 register? Return 0 if an aggregate type can be returned in a
2147 register, 1 if it must be returned in memory. */
2148
2149 static int
2150 aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
2151 {
2152 type = check_typedef (type);
2153 int elements;
2154 struct type *fundamental_type;
2155
2156 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2157 &fundamental_type))
2158 {
2159 /* V0-V7 are used to return values, with one register allocated per
2160 member. An HFA or HVA has at most four members, so it always fits. */
2161 return 0;
2162 }
2163
2164 if (TYPE_LENGTH (type) > 16)
2165 {
2166 /* PCS B.6 Aggregates larger than 16 bytes are passed by
2167 invisible reference. */
2168
2169 return 1;
2170 }
2171
2172 return 0;
2173 }
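
/* Illustrative cases (editorial, assuming AAPCS64 rules):

     struct { double x, y, z; }  -> 0, an HFA of three doubles, in V0-V2
     struct { int64_t a, b; }    -> 0, 16 bytes fit in X0/X1
     struct { char buf[17]; }    -> 1, larger than 16 bytes and not an
                                     HFA/HVA, so returned in memory */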
2174
2175 /* Write into appropriate registers a function return value of type
2176 TYPE, given in virtual format. */
2177
2178 static void
2179 aarch64_store_return_value (struct type *type, struct regcache *regs,
2180 const gdb_byte *valbuf)
2181 {
2182 struct gdbarch *gdbarch = regs->arch ();
2183 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2184 int elements;
2185 struct type *fundamental_type;
2186
2187 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2188 &fundamental_type))
2189 {
2190 int len = TYPE_LENGTH (fundamental_type);
2191
2192 for (int i = 0; i < elements; i++)
2193 {
2194 int regno = AARCH64_V0_REGNUM + i;
2195 /* Enough space for a full vector register. */
2196 gdb_byte tmpbuf[register_size (gdbarch, regno)];
2197 gdb_assert (len <= sizeof (tmpbuf));
2198
2199 if (aarch64_debug)
2200 {
2201 debug_printf ("write HFA or HVA return value element %d to %s\n",
2202 i + 1,
2203 gdbarch_register_name (gdbarch, regno));
2204 }
2205
2206 memcpy (tmpbuf, valbuf,
2207 len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
2208 regs->cooked_write (regno, tmpbuf);
2209 valbuf += len;
2210 }
2211 }
2212 else if (TYPE_CODE (type) == TYPE_CODE_INT
2213 || TYPE_CODE (type) == TYPE_CODE_CHAR
2214 || TYPE_CODE (type) == TYPE_CODE_BOOL
2215 || TYPE_CODE (type) == TYPE_CODE_PTR
2216 || TYPE_IS_REFERENCE (type)
2217 || TYPE_CODE (type) == TYPE_CODE_ENUM)
2218 {
2219 if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
2220 {
2221 /* Values of one word or less are zero/sign-extended and
2222 returned in X0. */
2223 bfd_byte tmpbuf[X_REGISTER_SIZE];
2224 LONGEST val = unpack_long (type, valbuf);
2225
2226 store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
2227 regs->cooked_write (AARCH64_X0_REGNUM, tmpbuf);
2228 }
2229 else
2230 {
2231 /* Integral values greater than one word are stored in
2232 consecutive registers starting with X0. This will always
2233 be a multiple of the register size. */
2234 int len = TYPE_LENGTH (type);
2235 int regno = AARCH64_X0_REGNUM;
2236
2237 while (len > 0)
2238 {
2239 regs->cooked_write (regno++, valbuf);
2240 len -= X_REGISTER_SIZE;
2241 valbuf += X_REGISTER_SIZE;
2242 }
2243 }
2244 }
2245 else
2246 {
2247 /* For a structure or union the behaviour is as if the value had
2248 been stored to word-aligned memory and then loaded into
2249 registers with 64-bit load instruction(s). */
2250 int len = TYPE_LENGTH (type);
2251 int regno = AARCH64_X0_REGNUM;
2252 bfd_byte tmpbuf[X_REGISTER_SIZE];
2253
2254 while (len > 0)
2255 {
2256 memcpy (tmpbuf, valbuf,
2257 len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2258 regs->cooked_write (regno++, tmpbuf);
2259 len -= X_REGISTER_SIZE;
2260 valbuf += X_REGISTER_SIZE;
2261 }
2262 }
2263 }
2264
2265 /* Implement the "return_value" gdbarch method. */
2266
2267 static enum return_value_convention
2268 aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
2269 struct type *valtype, struct regcache *regcache,
2270 gdb_byte *readbuf, const gdb_byte *writebuf)
2271 {
2272
2273 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
2274 || TYPE_CODE (valtype) == TYPE_CODE_UNION
2275 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
2276 {
2277 if (aarch64_return_in_memory (gdbarch, valtype))
2278 {
2279 if (aarch64_debug)
2280 debug_printf ("return value in memory\n");
2281 return RETURN_VALUE_STRUCT_CONVENTION;
2282 }
2283 }
2284
2285 if (writebuf)
2286 aarch64_store_return_value (valtype, regcache, writebuf);
2287
2288 if (readbuf)
2289 aarch64_extract_return_value (valtype, regcache, readbuf);
2290
2291 if (aarch64_debug)
2292 debug_printf ("return value in registers\n");
2293
2294 return RETURN_VALUE_REGISTER_CONVENTION;
2295 }
2296
2297 /* Implement the "get_longjmp_target" gdbarch method. */
2298
2299 static int
2300 aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2301 {
2302 CORE_ADDR jb_addr;
2303 gdb_byte buf[X_REGISTER_SIZE];
2304 struct gdbarch *gdbarch = get_frame_arch (frame);
2305 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2306 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2307
2308 jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
2309
2310 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
2311 X_REGISTER_SIZE))
2312 return 0;
2313
2314 *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
2315 return 1;
2316 }
2317
2318 /* Implement the "gen_return_address" gdbarch method. */
2319
2320 static void
2321 aarch64_gen_return_address (struct gdbarch *gdbarch,
2322 struct agent_expr *ax, struct axs_value *value,
2323 CORE_ADDR scope)
2324 {
2325 value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
2326 value->kind = axs_lvalue_register;
2327 value->u.reg = AARCH64_LR_REGNUM;
2328 }
2329 \f
2330
2331 /* Return the pseudo register name corresponding to register regnum. */
2332
2333 static const char *
2334 aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
2335 {
2336 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2337
2338 static const char *const q_name[] =
2339 {
2340 "q0", "q1", "q2", "q3",
2341 "q4", "q5", "q6", "q7",
2342 "q8", "q9", "q10", "q11",
2343 "q12", "q13", "q14", "q15",
2344 "q16", "q17", "q18", "q19",
2345 "q20", "q21", "q22", "q23",
2346 "q24", "q25", "q26", "q27",
2347 "q28", "q29", "q30", "q31",
2348 };
2349
2350 static const char *const d_name[] =
2351 {
2352 "d0", "d1", "d2", "d3",
2353 "d4", "d5", "d6", "d7",
2354 "d8", "d9", "d10", "d11",
2355 "d12", "d13", "d14", "d15",
2356 "d16", "d17", "d18", "d19",
2357 "d20", "d21", "d22", "d23",
2358 "d24", "d25", "d26", "d27",
2359 "d28", "d29", "d30", "d31",
2360 };
2361
2362 static const char *const s_name[] =
2363 {
2364 "s0", "s1", "s2", "s3",
2365 "s4", "s5", "s6", "s7",
2366 "s8", "s9", "s10", "s11",
2367 "s12", "s13", "s14", "s15",
2368 "s16", "s17", "s18", "s19",
2369 "s20", "s21", "s22", "s23",
2370 "s24", "s25", "s26", "s27",
2371 "s28", "s29", "s30", "s31",
2372 };
2373
2374 static const char *const h_name[] =
2375 {
2376 "h0", "h1", "h2", "h3",
2377 "h4", "h5", "h6", "h7",
2378 "h8", "h9", "h10", "h11",
2379 "h12", "h13", "h14", "h15",
2380 "h16", "h17", "h18", "h19",
2381 "h20", "h21", "h22", "h23",
2382 "h24", "h25", "h26", "h27",
2383 "h28", "h29", "h30", "h31",
2384 };
2385
2386 static const char *const b_name[] =
2387 {
2388 "b0", "b1", "b2", "b3",
2389 "b4", "b5", "b6", "b7",
2390 "b8", "b9", "b10", "b11",
2391 "b12", "b13", "b14", "b15",
2392 "b16", "b17", "b18", "b19",
2393 "b20", "b21", "b22", "b23",
2394 "b24", "b25", "b26", "b27",
2395 "b28", "b29", "b30", "b31",
2396 };
2397
2398 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
2399
2400 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
2401 return q_name[p_regnum - AARCH64_Q0_REGNUM];
2402
2403 if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
2404 return d_name[p_regnum - AARCH64_D0_REGNUM];
2405
2406 if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
2407 return s_name[p_regnum - AARCH64_S0_REGNUM];
2408
2409 if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
2410 return h_name[p_regnum - AARCH64_H0_REGNUM];
2411
2412 if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
2413 return b_name[p_regnum - AARCH64_B0_REGNUM];
2414
2415 if (tdep->has_sve ())
2416 {
2417 static const char *const sve_v_name[] =
2418 {
2419 "v0", "v1", "v2", "v3",
2420 "v4", "v5", "v6", "v7",
2421 "v8", "v9", "v10", "v11",
2422 "v12", "v13", "v14", "v15",
2423 "v16", "v17", "v18", "v19",
2424 "v20", "v21", "v22", "v23",
2425 "v24", "v25", "v26", "v27",
2426 "v28", "v29", "v30", "v31",
2427 };
2428
2429 if (p_regnum >= AARCH64_SVE_V0_REGNUM
2430 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2431 return sve_v_name[p_regnum - AARCH64_SVE_V0_REGNUM];
2432 }
2433
2434 /* RA_STATE is used for unwinding only. Do not assign it a name - this
2435 prevents it from being read by methods such as
2436 mi_cmd_trace_frame_collected. */
2437 if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
2438 return "";
2439
2440 internal_error (__FILE__, __LINE__,
2441 _("aarch64_pseudo_register_name: bad register number %d"),
2442 p_regnum);
2443 }
2444
2445 /* Implement the "pseudo_register_type" tdesc_arch_data method. */
2446
2447 static struct type *
2448 aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
2449 {
2450 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2451
2452 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
2453
2454 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
2455 return aarch64_vnq_type (gdbarch);
2456
2457 if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
2458 return aarch64_vnd_type (gdbarch);
2459
2460 if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
2461 return aarch64_vns_type (gdbarch);
2462
2463 if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
2464 return aarch64_vnh_type (gdbarch);
2465
2466 if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
2467 return aarch64_vnb_type (gdbarch);
2468
2469 if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
2470 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2471 return aarch64_vnv_type (gdbarch);
2472
2473 if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
2474 return builtin_type (gdbarch)->builtin_uint64;
2475
2476 internal_error (__FILE__, __LINE__,
2477 _("aarch64_pseudo_register_type: bad register number %d"),
2478 p_regnum);
2479 }
2480
2481 /* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2482
2483 static int
2484 aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
2485 struct reggroup *group)
2486 {
2487 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2488
2489 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
2490
2491 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
2492 return group == all_reggroup || group == vector_reggroup;
2493 else if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
2494 return (group == all_reggroup || group == vector_reggroup
2495 || group == float_reggroup);
2496 else if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
2497 return (group == all_reggroup || group == vector_reggroup
2498 || group == float_reggroup);
2499 else if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
2500 return group == all_reggroup || group == vector_reggroup;
2501 else if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
2502 return group == all_reggroup || group == vector_reggroup;
2503 else if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
2504 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2505 return group == all_reggroup || group == vector_reggroup;
2506 /* RA_STATE is used for unwinding only. Do not assign it to any groups. */
2507 if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
2508 return 0;
2509
2510 return group == all_reggroup;
2511 }
2512
2513 /* Helper for aarch64_pseudo_read_value. */
2514
2515 static struct value *
2516 aarch64_pseudo_read_value_1 (struct gdbarch *gdbarch,
2517 readable_regcache *regcache, int regnum_offset,
2518 int regsize, struct value *result_value)
2519 {
2520 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
2521
2522 /* Enough space for a full vector register. */
2523 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2524 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2525
2526 if (regcache->raw_read (v_regnum, reg_buf) != REG_VALID)
2527 mark_value_bytes_unavailable (result_value, 0,
2528 TYPE_LENGTH (value_type (result_value)));
2529 else
2530 memcpy (value_contents_raw (result_value), reg_buf, regsize);
2531
2532 return result_value;
2533 }
2534
2535 /* Implement the "pseudo_register_read_value" gdbarch method. */
2536
2537 static struct value *
2538 aarch64_pseudo_read_value (struct gdbarch *gdbarch, readable_regcache *regcache,
2539 int regnum)
2540 {
2541 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2542 struct value *result_value = allocate_value (register_type (gdbarch, regnum));
2543
2544 VALUE_LVAL (result_value) = lval_register;
2545 VALUE_REGNUM (result_value) = regnum;
2546
2547 regnum -= gdbarch_num_regs (gdbarch);
2548
2549 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2550 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2551 regnum - AARCH64_Q0_REGNUM,
2552 Q_REGISTER_SIZE, result_value);
2553
2554 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2555 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2556 regnum - AARCH64_D0_REGNUM,
2557 D_REGISTER_SIZE, result_value);
2558
2559 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2560 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2561 regnum - AARCH64_S0_REGNUM,
2562 S_REGISTER_SIZE, result_value);
2563
2564 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2565 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2566 regnum - AARCH64_H0_REGNUM,
2567 H_REGISTER_SIZE, result_value);
2568
2569 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2570 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2571 regnum - AARCH64_B0_REGNUM,
2572 B_REGISTER_SIZE, result_value);
2573
2574 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2575 && regnum < AARCH64_SVE_V0_REGNUM + 32)
2576 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2577 regnum - AARCH64_SVE_V0_REGNUM,
2578 V_REGISTER_SIZE, result_value);
2579
2580 gdb_assert_not_reached ("regnum out of bounds");
2581 }
2582
2583 /* Helper for aarch64_pseudo_write. */
2584
2585 static void
2586 aarch64_pseudo_write_1 (struct gdbarch *gdbarch, struct regcache *regcache,
2587 int regnum_offset, int regsize, const gdb_byte *buf)
2588 {
2589 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
2590
2591 /* Enough space for a full vector register. */
2592 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2593 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2594
2595 /* Ensure the register buffer is zero; we want GDB writes of the
2596 various 'scalar' pseudo registers to behave like architectural
2597 writes: register-width bytes are written and the remainder is
2598 set to zero. */
2599 memset (reg_buf, 0, register_size (gdbarch, AARCH64_V0_REGNUM));
2600
2601 memcpy (reg_buf, buf, regsize);
2602 regcache->raw_write (v_regnum, reg_buf);
2603 }
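
/* Editorial example of the scalar pseudo semantics implemented by
   aarch64_pseudo_read_value_1 and aarch64_pseudo_write_1, roughly:

     (gdb) set $s5.f = 1.5
     (gdb) print/x $q5.u

   The write stores four bytes and zeroes the rest of V5, so the Q
   view shows the S value zero-extended to 128 bits. */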
2604
2605 /* Implement the "pseudo_register_write" gdbarch method. */
2606
2607 static void
2608 aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2609 int regnum, const gdb_byte *buf)
2610 {
2611 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2612 regnum -= gdbarch_num_regs (gdbarch);
2613
2614 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2615 return aarch64_pseudo_write_1 (gdbarch, regcache,
2616 regnum - AARCH64_Q0_REGNUM, Q_REGISTER_SIZE,
2617 buf);
2618
2619 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2620 return aarch64_pseudo_write_1 (gdbarch, regcache,
2621 regnum - AARCH64_D0_REGNUM, D_REGISTER_SIZE,
2622 buf);
2623
2624 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2625 return aarch64_pseudo_write_1 (gdbarch, regcache,
2626 regnum - AARCH64_S0_REGNUM, S_REGISTER_SIZE,
2627 buf);
2628
2629 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2630 return aarch64_pseudo_write_1 (gdbarch, regcache,
2631 regnum - AARCH64_H0_REGNUM, H_REGISTER_SIZE,
2632 buf);
2633
2634 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2635 return aarch64_pseudo_write_1 (gdbarch, regcache,
2636 regnum - AARCH64_B0_REGNUM, B_REGISTER_SIZE,
2637 buf);
2638
2639 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2640 && regnum < AARCH64_SVE_V0_REGNUM + 32)
2641 return aarch64_pseudo_write_1 (gdbarch, regcache,
2642 regnum - AARCH64_SVE_V0_REGNUM,
2643 V_REGISTER_SIZE, buf);
2644
2645 gdb_assert_not_reached ("regnum out of bounds");
2646 }
2647
2648 /* Callback function for user_reg_add. */
2649
2650 static struct value *
2651 value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
2652 {
2653 const int *reg_p = (const int *) baton;
2654
2655 return value_of_register (*reg_p, frame);
2656 }
2657 \f
2658
2659 /* Implement the "software_single_step" gdbarch method, needed to
2660 single step through atomic sequences on AArch64. */
2661
2662 static std::vector<CORE_ADDR>
2663 aarch64_software_single_step (struct regcache *regcache)
2664 {
2665 struct gdbarch *gdbarch = regcache->arch ();
2666 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2667 const int insn_size = 4;
2668 const int atomic_sequence_length = 16; /* Instruction sequence length. */
2669 CORE_ADDR pc = regcache_read_pc (regcache);
2670 CORE_ADDR breaks[2] = { CORE_ADDR_MAX, CORE_ADDR_MAX };
2671 CORE_ADDR loc = pc;
2672 CORE_ADDR closing_insn = 0;
2673 uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
2674 byte_order_for_code);
2675 int index;
2676 int insn_count;
2677 int bc_insn_count = 0; /* Conditional branch instruction count. */
2678 int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed). */
2679 aarch64_inst inst;
2680
2681 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2682 return {};
2683
2684 /* Look for a Load Exclusive instruction which begins the sequence. */
2685 if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
2686 return {};
2687
2688 for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
2689 {
2690 loc += insn_size;
2691 insn = read_memory_unsigned_integer (loc, insn_size,
2692 byte_order_for_code);
2693
2694 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2695 return {};
2696 /* Check if the instruction is a conditional branch. */
2697 if (inst.opcode->iclass == condbranch)
2698 {
2699 gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);
2700
2701 if (bc_insn_count >= 1)
2702 return {};
2703
2704 /* It is, so we'll try to set a breakpoint at the destination. */
2705 breaks[1] = loc + inst.operands[0].imm.value;
2706
2707 bc_insn_count++;
2708 last_breakpoint++;
2709 }
2710
2711 /* Look for the Store Exclusive which closes the atomic sequence. */
2712 if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
2713 {
2714 closing_insn = loc;
2715 break;
2716 }
2717 }
2718
2719 /* We didn't find a closing Store Exclusive instruction, fall back. */
2720 if (!closing_insn)
2721 return {};
2722
2723 /* Insert breakpoint after the end of the atomic sequence. */
2724 breaks[0] = loc + insn_size;
2725
2726 /* Check for duplicated breakpoints, and also check that the second
2727 breakpoint is not within the atomic sequence. */
2728 if (last_breakpoint
2729 && (breaks[1] == breaks[0]
2730 || (breaks[1] >= pc && breaks[1] <= closing_insn)))
2731 last_breakpoint = 0;
2732
2733 std::vector<CORE_ADDR> next_pcs;
2734
2735 /* Insert the breakpoint at the end of the sequence, and one at the
2736 destination of the conditional branch, if it exists. */
2737 for (index = 0; index <= last_breakpoint; index++)
2738 next_pcs.push_back (breaks[index]);
2739
2740 return next_pcs;
2741 }
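
/* Editorial sketch of the kind of sequence being stepped over;
   compilers emit atomic read-modify-write loops of roughly this
   shape:

     retry:
       ldaxr  w1, [x0]       // Load Exclusive: begins the sequence
       add    w1, w1, #1
       stlxr  w2, w1, [x0]   // Store Exclusive: closes the sequence
       cbnz   w2, retry      // loop until the store succeeds

   A breakpoint inside the sequence would clear the exclusive monitor
   and the store could then never succeed, which is why the function
   above steps over the whole sequence, stopping after the Store
   Exclusive (and at the target of any B.cond found inside it). */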
2742
2743 struct aarch64_displaced_step_closure : public displaced_step_closure
2744 {
2745 /* Non-zero when a conditional instruction, such as B.COND or TBZ,
2746 is being displaced stepped. */
2747 int cond = 0;
2748
2749 /* PC adjustment offset after displaced stepping. */
2750 int32_t pc_adjust = 0;
2751 };
2752
2753 /* Data when visiting instructions for displaced stepping. */
2754
2755 struct aarch64_displaced_step_data
2756 {
2757 struct aarch64_insn_data base;
2758
2759 /* The address at which the instruction will be executed. */
2760 CORE_ADDR new_addr;
2761 /* Buffer of instructions to be copied to NEW_ADDR to execute. */
2762 uint32_t insn_buf[AARCH64_DISPLACED_MODIFIED_INSNS];
2763 /* Number of instructions in INSN_BUF. */
2764 unsigned insn_count;
2765 /* Registers when doing displaced stepping. */
2766 struct regcache *regs;
2767
2768 aarch64_displaced_step_closure *dsc;
2769 };
2770
2771 /* Implementation of aarch64_insn_visitor method "b". */
2772
2773 static void
2774 aarch64_displaced_step_b (const int is_bl, const int32_t offset,
2775 struct aarch64_insn_data *data)
2776 {
2777 struct aarch64_displaced_step_data *dsd
2778 = (struct aarch64_displaced_step_data *) data;
2779 int64_t new_offset = data->insn_addr - dsd->new_addr + offset;
2780
2781 if (can_encode_int32 (new_offset, 28))
2782 {
2783 /* Emit B rather than BL, because executing BL on a new address
2784 will get the wrong address into LR. In order to avoid this,
2785 we emit B, and update LR if the instruction is BL. */
2786 emit_b (dsd->insn_buf, 0, new_offset);
2787 dsd->insn_count++;
2788 }
2789 else
2790 {
2791 /* Write NOP. */
2792 emit_nop (dsd->insn_buf);
2793 dsd->insn_count++;
2794 dsd->dsc->pc_adjust = offset;
2795 }
2796
2797 if (is_bl)
2798 {
2799 /* Update LR. */
2800 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
2801 data->insn_addr + 4);
2802 }
2803 }
2804
2805 /* Implementation of aarch64_insn_visitor method "b_cond". */
2806
2807 static void
2808 aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
2809 struct aarch64_insn_data *data)
2810 {
2811 struct aarch64_displaced_step_data *dsd
2812 = (struct aarch64_displaced_step_data *) data;
2813
2814 /* GDB has to fix up the PC after displaced stepping this instruction
2815 differently, depending on whether the condition is true or false.
2816 Instead of checking COND against the condition flags, we can emit
2817 the following instructions, and GDB can then tell how to fix up the
2818 PC from the resulting PC value.
2819
2820 B.COND TAKEN ; If cond is true, then jump to TAKEN.
2821 INSN1 ;
2822 TAKEN:
2823 INSN2
2824 */
2825
2826 emit_bcond (dsd->insn_buf, cond, 8);
2827 dsd->dsc->cond = 1;
2828 dsd->dsc->pc_adjust = offset;
2829 dsd->insn_count = 1;
2830 }
2831
2832 /* Build an aarch64_register operand dynamically. If the register
2833 is known statically, it should be defined as a global, as above,
2834 instead of using this helper function. */
2835
2836 static struct aarch64_register
2837 aarch64_register (unsigned num, int is64)
2838 {
2839 return (struct aarch64_register) { num, is64 };
2840 }
2841
2842 /* Implementation of aarch64_insn_visitor method "cb". */
2843
2844 static void
2845 aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
2846 const unsigned rn, int is64,
2847 struct aarch64_insn_data *data)
2848 {
2849 struct aarch64_displaced_step_data *dsd
2850 = (struct aarch64_displaced_step_data *) data;
2851
2852 /* The offset is out of range for a compare and branch
2853 instruction. We can use the following instructions instead:
2854
2855 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
2856 INSN1 ;
2857 TAKEN:
2858 INSN2
2859 */
2860 emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
2861 dsd->insn_count = 1;
2862 dsd->dsc->cond = 1;
2863 dsd->dsc->pc_adjust = offset;
2864 }
2865
2866 /* Implementation of aarch64_insn_visitor method "tb". */
2867
2868 static void
2869 aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
2870 const unsigned rt, unsigned bit,
2871 struct aarch64_insn_data *data)
2872 {
2873 struct aarch64_displaced_step_data *dsd
2874 = (struct aarch64_displaced_step_data *) data;
2875
2876 /* The offset is out of range for a test bit and branch
2877 instruction. We can use the following instructions instead:
2878
2879 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
2880 INSN1 ;
2881 TAKEN:
2882 INSN2
2883
2884 */
2885 emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
2886 dsd->insn_count = 1;
2887 dsd->dsc->cond = 1;
2888 dsd->dsc->pc_adjust = offset;
2889 }
2890
2891 /* Implementation of aarch64_insn_visitor method "adr". */
2892
2893 static void
2894 aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
2895 const int is_adrp, struct aarch64_insn_data *data)
2896 {
2897 struct aarch64_displaced_step_data *dsd
2898 = (struct aarch64_displaced_step_data *) data;
2899 /* We know exactly the address the ADR{P,} instruction will compute.
2900 We can just write it to the destination register. */
2901 CORE_ADDR address = data->insn_addr + offset;
2902
2903 if (is_adrp)
2904 {
2905 /* Clear the lower 12 bits of the address to get its 4K page base. */
2906 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2907 address & ~0xfff);
2908 }
2909 else
2910 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2911 address);
2912
2913 dsd->dsc->pc_adjust = 4;
2914 emit_nop (dsd->insn_buf);
2915 dsd->insn_count = 1;
2916 }
2917
2918 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
2919
2920 static void
2921 aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
2922 const unsigned rt, const int is64,
2923 struct aarch64_insn_data *data)
2924 {
2925 struct aarch64_displaced_step_data *dsd
2926 = (struct aarch64_displaced_step_data *) data;
2927 CORE_ADDR address = data->insn_addr + offset;
2928 struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };
2929
2930 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
2931 address);
2932
2933 if (is_sw)
2934 dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
2935 aarch64_register (rt, 1), zero);
2936 else
2937 dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
2938 aarch64_register (rt, 1), zero);
2939
2940 dsd->dsc->pc_adjust = 4;
2941 }
2942
2943 /* Implementation of aarch64_insn_visitor method "others". */
2944
2945 static void
2946 aarch64_displaced_step_others (const uint32_t insn,
2947 struct aarch64_insn_data *data)
2948 {
2949 struct aarch64_displaced_step_data *dsd
2950 = (struct aarch64_displaced_step_data *) data;
2951
2952 aarch64_emit_insn (dsd->insn_buf, insn);
2953 dsd->insn_count = 1;
2954
2955 if ((insn & 0xfffffc1f) == 0xd65f0000)
2956 {
2957 /* RET */
2958 dsd->dsc->pc_adjust = 0;
2959 }
2960 else
2961 dsd->dsc->pc_adjust = 4;
2962 }
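
/* Editorial note on the mask test above: 0xfffffc1f clears bits 5-9,
   the Rn field of RET, so every RET encoding matches 0xd65f0000.
   For instance the common RET (i.e. RET X30):

     0xd65f03c0 & 0xfffffc1f == 0xd65f0000

   Since a RET lands directly on the saved return address, no PC
   adjustment is applied. */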
2963
2964 static const struct aarch64_insn_visitor visitor =
2965 {
2966 aarch64_displaced_step_b,
2967 aarch64_displaced_step_b_cond,
2968 aarch64_displaced_step_cb,
2969 aarch64_displaced_step_tb,
2970 aarch64_displaced_step_adr,
2971 aarch64_displaced_step_ldr_literal,
2972 aarch64_displaced_step_others,
2973 };
2974
2975 /* Implement the "displaced_step_copy_insn" gdbarch method. */
2976
2977 struct displaced_step_closure *
2978 aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
2979 CORE_ADDR from, CORE_ADDR to,
2980 struct regcache *regs)
2981 {
2982 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2983 uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
2984 struct aarch64_displaced_step_data dsd;
2985 aarch64_inst inst;
2986
2987 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2988 return NULL;
2989
2990 /* Look for a Load Exclusive instruction which begins the sequence. */
2991 if (inst.opcode->iclass == ldstexcl && bit (insn, 22))
2992 {
2993 /* We can't displaced step atomic sequences. */
2994 return NULL;
2995 }
2996
2997 std::unique_ptr<aarch64_displaced_step_closure> dsc
2998 (new aarch64_displaced_step_closure);
2999 dsd.base.insn_addr = from;
3000 dsd.new_addr = to;
3001 dsd.regs = regs;
3002 dsd.dsc = dsc.get ();
3003 dsd.insn_count = 0;
3004 aarch64_relocate_instruction (insn, &visitor,
3005 (struct aarch64_insn_data *) &dsd);
3006 gdb_assert (dsd.insn_count <= AARCH64_DISPLACED_MODIFIED_INSNS);
3007
3008 if (dsd.insn_count != 0)
3009 {
3010 int i;
3011
3012 /* Instruction can be relocated to scratch pad. Copy
3013 relocated instruction(s) there. */
3014 for (i = 0; i < dsd.insn_count; i++)
3015 {
3016 if (debug_displaced)
3017 {
3018 debug_printf ("displaced: writing insn ");
3019 debug_printf ("%.8x", dsd.insn_buf[i]);
3020 debug_printf (" at %s\n", paddress (gdbarch, to + i * 4));
3021 }
3022 write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
3023 (ULONGEST) dsd.insn_buf[i]);
3024 }
3025 }
3026 else
3027 {
3028 dsc = NULL;
3029 }
3030
3031 return dsc.release ();
3032 }
3033
3034 /* Implement the "displaced_step_fixup" gdbarch method. */
3035
3036 void
3037 aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
3038 struct displaced_step_closure *dsc_,
3039 CORE_ADDR from, CORE_ADDR to,
3040 struct regcache *regs)
3041 {
3042 aarch64_displaced_step_closure *dsc = (aarch64_displaced_step_closure *) dsc_;
3043
3044 if (dsc->cond)
3045 {
3046 ULONGEST pc;
3047
3048 regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc);
3049 if (pc - to == 8)
3050 {
3051 /* Condition is true. */
3052 }
3053 else if (pc - to == 4)
3054 {
3055 /* Condition is false. */
3056 dsc->pc_adjust = 4;
3057 }
3058 else
3059 gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
3060 }
3061
3062 if (dsc->pc_adjust != 0)
3063 {
3064 if (debug_displaced)
3065 {
3066 debug_printf ("displaced: fixup: set PC to %s:%d\n",
3067 paddress (gdbarch, from), dsc->pc_adjust);
3068 }
3069 regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
3070 from + dsc->pc_adjust);
3071 }
3072 }
3073
3074 /* Implement the "displaced_step_hw_singlestep" gdbarch method. */
3075
3076 int
3077 aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch,
3078 struct displaced_step_closure *closure)
3079 {
3080 return 1;
3081 }
3082
3083 /* Get the correct target description for the given VQ value.
3084 If VQ is zero then it is assumed SVE is not supported.
3085 (It is not possible to set VQ to zero on an SVE system). */
3086
3087 const target_desc *
3088 aarch64_read_description (uint64_t vq, bool pauth_p)
3089 {
3090 if (vq > AARCH64_MAX_SVE_VQ)
3091 error (_("VQ is %" PRIu64 ", maximum supported value is %d"), vq,
3092 AARCH64_MAX_SVE_VQ);
3093
3094 struct target_desc *tdesc = tdesc_aarch64_list[vq][pauth_p];
3095
3096 if (tdesc == NULL)
3097 {
3098 tdesc = aarch64_create_target_description (vq, pauth_p);
3099 tdesc_aarch64_list[vq][pauth_p] = tdesc;
3100 }
3101
3102 return tdesc;
3103 }
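
/* For example, aarch64_read_description (2, false) returns (and
   caches in tdesc_aarch64_list) a description with SVE registers
   sized for VQ 2, i.e. 256-bit vectors, and no pauth feature. */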
3104
3105 /* Return the VQ used when creating the target description TDESC. */
3106
3107 static uint64_t
3108 aarch64_get_tdesc_vq (const struct target_desc *tdesc)
3109 {
3110 const struct tdesc_feature *feature_sve;
3111
3112 if (!tdesc_has_registers (tdesc))
3113 return 0;
3114
3115 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
3116
3117 if (feature_sve == nullptr)
3118 return 0;
3119
3120 uint64_t vl = tdesc_register_bitsize (feature_sve,
3121 aarch64_sve_register_names[0]) / 8;
3122 return sve_vq_from_vl (vl);
3123 }
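
/* Editorial example: with 256-bit SVE vectors the Z registers are 32
   bytes wide, so

     vl = 256 bits / 8 = 32 bytes,  vq = sve_vq_from_vl (32) = 2

   since VQ counts 128-bit (16-byte) quadword granules. */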
3124
3125 /* Add all the expected register sets into GDBARCH. */
3126
3127 static void
3128 aarch64_add_reggroups (struct gdbarch *gdbarch)
3129 {
3130 reggroup_add (gdbarch, general_reggroup);
3131 reggroup_add (gdbarch, float_reggroup);
3132 reggroup_add (gdbarch, system_reggroup);
3133 reggroup_add (gdbarch, vector_reggroup);
3134 reggroup_add (gdbarch, all_reggroup);
3135 reggroup_add (gdbarch, save_reggroup);
3136 reggroup_add (gdbarch, restore_reggroup);
3137 }
3138
3139 /* Implement the "cannot_store_register" gdbarch method. */
3140
3141 static int
3142 aarch64_cannot_store_register (struct gdbarch *gdbarch, int regnum)
3143 {
3144 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3145
3146 if (!tdep->has_pauth ())
3147 return 0;
3148
3149 /* Pointer authentication registers are read-only. */
3150 return (regnum == AARCH64_PAUTH_DMASK_REGNUM (tdep->pauth_reg_base)
3151 || regnum == AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base));
3152 }
3153
3154 /* Initialize the current architecture based on INFO. If possible,
3155 re-use an architecture from ARCHES, which is a list of
3156 architectures already created during this debugging session.
3157
3158 Called e.g. at program startup, when reading a core file, and when
3159 reading a binary file. */
3160
3161 static struct gdbarch *
3162 aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
3163 {
3164 const struct tdesc_feature *feature_core, *feature_fpu, *feature_sve;
3165 const struct tdesc_feature *feature_pauth;
3166 bool valid_p = true;
3167 int i, num_regs = 0, num_pseudo_regs = 0;
3168 int first_pauth_regnum = -1, pauth_ra_state_offset = -1;
3169
3170 /* Use the vector length passed via the target info. Here -1 is used for no
3171 SVE, and 0 is unset. If unset then use the vector length from the existing
3172 tdesc. */
3173 uint64_t vq = 0;
3174 if (info.id == (int *) -1)
3175 vq = 0;
3176 else if (info.id != 0)
3177 vq = (uint64_t) info.id;
3178 else
3179 vq = aarch64_get_tdesc_vq (info.target_desc);
3180
3181 if (vq > AARCH64_MAX_SVE_VQ)
3182 internal_error (__FILE__, __LINE__, _("VQ out of bounds: %s (max %d)"),
3183 pulongest (vq), AARCH64_MAX_SVE_VQ);
3184
3185 /* If there is already a candidate, use it. */
3186 for (gdbarch_list *best_arch = gdbarch_list_lookup_by_info (arches, &info);
3187 best_arch != nullptr;
3188 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
3189 {
3190 struct gdbarch_tdep *tdep = gdbarch_tdep (best_arch->gdbarch);
3191 if (tdep && tdep->vq == vq)
3192 return best_arch->gdbarch;
3193 }
3194
3195 /* Ensure we always have a target descriptor, and that it is for the given VQ
3196 value. */
3197 const struct target_desc *tdesc = info.target_desc;
3198 if (!tdesc_has_registers (tdesc) || vq != aarch64_get_tdesc_vq (tdesc))
3199 tdesc = aarch64_read_description (vq, false);
3200 gdb_assert (tdesc);
3201
3202 feature_core = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");
3203 feature_fpu = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
3204 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
3205 feature_pauth = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.pauth");
3206
3207 if (feature_core == nullptr)
3208 return nullptr;
3209
3210 struct tdesc_arch_data *tdesc_data = tdesc_data_alloc ();
3211
3212 /* Validate the description provides the mandatory core R registers
3213 and allocate their numbers. */
3214 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
3215 valid_p &= tdesc_numbered_register (feature_core, tdesc_data,
3216 AARCH64_X0_REGNUM + i,
3217 aarch64_r_register_names[i]);
3218
3219 num_regs = AARCH64_X0_REGNUM + i;
3220
3221 /* Add the V registers. */
3222 if (feature_fpu != nullptr)
3223 {
3224 if (feature_sve != nullptr)
3225 error (_("Program contains both fpu and SVE features."));
3226
3227 /* Validate the description provides the mandatory V registers
3228 and allocate their numbers. */
3229 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
3230 valid_p &= tdesc_numbered_register (feature_fpu, tdesc_data,
3231 AARCH64_V0_REGNUM + i,
3232 aarch64_v_register_names[i]);
3233
3234 num_regs = AARCH64_V0_REGNUM + i;
3235 }
3236
3237 /* Add the SVE registers. */
3238 if (feature_sve != nullptr)
3239 {
3240 /* Validate the description provides the mandatory SVE registers
3241 and allocate their numbers. */
3242 for (i = 0; i < ARRAY_SIZE (aarch64_sve_register_names); i++)
3243 valid_p &= tdesc_numbered_register (feature_sve, tdesc_data,
3244 AARCH64_SVE_Z0_REGNUM + i,
3245 aarch64_sve_register_names[i]);
3246
3247 num_regs = AARCH64_SVE_Z0_REGNUM + i;
3248 num_pseudo_regs += 32; /* Add the Vn register pseudos. */
3249 }
3250
3251 if (feature_fpu != nullptr || feature_sve != nullptr)
3252 {
3253 num_pseudo_regs += 32; /* Add the Qn scalar register pseudos. */
3254 num_pseudo_regs += 32; /* Add the Dn scalar register pseudos. */
3255 num_pseudo_regs += 32; /* Add the Sn scalar register pseudos. */
3256 num_pseudo_regs += 32; /* Add the Hn scalar register pseudos. */
3257 num_pseudo_regs += 32; /* Add the Bn scalar register pseudos. */
3258 }
3259
3260 /* Add the pauth registers. */
3261 if (feature_pauth != NULL)
3262 {
3263 first_pauth_regnum = num_regs;
3264 pauth_ra_state_offset = num_pseudo_regs;
3265 /* Validate the descriptor provides the mandatory PAUTH registers and
3266 allocate their numbers. */
3267 for (i = 0; i < ARRAY_SIZE (aarch64_pauth_register_names); i++)
3268 valid_p &= tdesc_numbered_register (feature_pauth, tdesc_data,
3269 first_pauth_regnum + i,
3270 aarch64_pauth_register_names[i]);
3271
3272 num_regs += i;
3273 num_pseudo_regs += 1; /* Count RA_STATE pseudo register. */
3274 }
3275
3276 if (!valid_p)
3277 {
3278 tdesc_data_cleanup (tdesc_data);
3279 return nullptr;
3280 }
3281
3282 /* AArch64 code is always little-endian. */
3283 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
3284
3285 struct gdbarch_tdep *tdep = XCNEW (struct gdbarch_tdep);
3286 struct gdbarch *gdbarch = gdbarch_alloc (&info, tdep);
3287
3288 /* This should be low enough for everything. */
3289 tdep->lowest_pc = 0x20;
3290 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
3291 tdep->jb_elt_size = 8;
3292 tdep->vq = vq;
3293 tdep->pauth_reg_base = first_pauth_regnum;
3294 tdep->pauth_ra_state_regnum = (feature_pauth == NULL) ? -1
3295 : pauth_ra_state_offset + num_regs;
3296
3297 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
3298 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
3299
3300 /* Advance PC across function entry code. */
3301 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
3302
3303 /* The stack grows downward. */
3304 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
3305
3306 /* Breakpoint manipulation. */
3307 set_gdbarch_breakpoint_kind_from_pc (gdbarch,
3308 aarch64_breakpoint::kind_from_pc);
3309 set_gdbarch_sw_breakpoint_from_kind (gdbarch,
3310 aarch64_breakpoint::bp_from_kind);
3311 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
3312 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
3313
3314 /* Information about registers, etc. */
3315 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
3316 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
3317 set_gdbarch_num_regs (gdbarch, num_regs);
3318
3319 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
3320 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
3321 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
3322 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
3323 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
3324 set_tdesc_pseudo_register_reggroup_p (gdbarch,
3325 aarch64_pseudo_register_reggroup_p);
3326 set_gdbarch_cannot_store_register (gdbarch, aarch64_cannot_store_register);
3327
3328 /* ABI */
3329 set_gdbarch_short_bit (gdbarch, 16);
3330 set_gdbarch_int_bit (gdbarch, 32);
3331 set_gdbarch_float_bit (gdbarch, 32);
3332 set_gdbarch_double_bit (gdbarch, 64);
3333 set_gdbarch_long_double_bit (gdbarch, 128);
3334 set_gdbarch_long_bit (gdbarch, 64);
3335 set_gdbarch_long_long_bit (gdbarch, 64);
3336 set_gdbarch_ptr_bit (gdbarch, 64);
3337 set_gdbarch_char_signed (gdbarch, 0);
3338 set_gdbarch_wchar_signed (gdbarch, 0);
3339 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
3340 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
3341 set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);
3342 set_gdbarch_type_align (gdbarch, aarch64_type_align);
3343
3344 /* Internal <-> external register number maps. */
3345 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
3346
3347 /* Returning results. */
3348 set_gdbarch_return_value (gdbarch, aarch64_return_value);
3349
3350 /* Disassembly. */
3351 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
3352
3353 /* Virtual tables. */
3354 set_gdbarch_vbit_in_delta (gdbarch, 1);
3355
3356 /* Register architecture. */
3357 aarch64_add_reggroups (gdbarch);
3358
3359 /* Hook in the ABI-specific overrides, if they have been registered. */
3360 info.target_desc = tdesc;
3361 info.tdesc_data = tdesc_data;
3362 gdbarch_init_osabi (info, gdbarch);
3363
3364 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
3365 /* Register DWARF CFA vendor handler. */
3366 set_gdbarch_execute_dwarf_cfa_vendor_op (gdbarch,
3367 aarch64_execute_dwarf_cfa_vendor_op);
3368
3369 /* Add some default predicates. */
3370 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
3371 dwarf2_append_unwinders (gdbarch);
3372 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
3373
3374 frame_base_set_default (gdbarch, &aarch64_normal_base);
3375
3376 /* Now we have tuned the configuration, set a few final things,
3377 based on what the OS ABI has told us. */
3378
3379 if (tdep->jb_pc >= 0)
3380 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
3381
3382 set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);
3383
3384 set_gdbarch_get_pc_address_flags (gdbarch, aarch64_get_pc_address_flags);
3385
3386 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
3387
3388 /* Add standard register aliases. */
3389 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
3390 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
3391 value_of_aarch64_user_reg,
3392 &aarch64_register_aliases[i].regnum);
3393
3394 register_aarch64_ravenscar_ops (gdbarch);
3395
3396 return gdbarch;
3397 }
3398
3399 static void
3400 aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
3401 {
3402 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3403
3404 if (tdep == NULL)
3405 return;
3406
3407 fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
3408 paddress (gdbarch, tdep->lowest_pc));
3409 }
3410
3411 #if GDB_SELF_TEST
3412 namespace selftests
3413 {
3414 static void aarch64_process_record_test (void);
3415 }
3416 #endif
3417
3418 void
3419 _initialize_aarch64_tdep (void)
3420 {
3421 gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
3422 aarch64_dump_tdep);
3423
3424 /* Debug this file's internals. */
3425 add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
3426 Set AArch64 debugging."), _("\
3427 Show AArch64 debugging."), _("\
3428 When on, AArch64 specific debugging is enabled."),
3429 NULL,
3430 show_aarch64_debug,
3431 &setdebuglist, &showdebuglist);
3432
3433 #if GDB_SELF_TEST
3434 selftests::register_test ("aarch64-analyze-prologue",
3435 selftests::aarch64_analyze_prologue_test);
3436 selftests::register_test ("aarch64-process-record",
3437 selftests::aarch64_process_record_test);
3438 #endif
3439 }
3440
3441 /* AArch64 process record-replay related structures, defines etc. */
3442
3443 #define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
3444 do \
3445 { \
3446 unsigned int reg_len = LENGTH; \
3447 if (reg_len) \
3448 { \
3449 REGS = XNEWVEC (uint32_t, reg_len); \
3450 memcpy (&REGS[0], &RECORD_BUF[0], sizeof (uint32_t) * LENGTH); \
3451 } \
3452 } \
3453 while (0)
3454
3455 #define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
3456 do \
3457 { \
3458 unsigned int mem_len = LENGTH; \
3459 if (mem_len) \
3460 { \
3461 MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
3462 memcpy (&MEMS->len, &RECORD_BUF[0], \
3463 sizeof (struct aarch64_mem_r) * LENGTH); \
3464 } \
3465 } \
3466 while (0)
3467
3468 /* AArch64 record/replay structures and enumerations. */
3469
3470 struct aarch64_mem_r
3471 {
3472 uint64_t len; /* Record length. */
3473 uint64_t addr; /* Memory address. */
3474 };
3475
3476 enum aarch64_record_result
3477 {
3478 AARCH64_RECORD_SUCCESS,
3479 AARCH64_RECORD_UNSUPPORTED,
3480 AARCH64_RECORD_UNKNOWN
3481 };
3482
3483 typedef struct insn_decode_record_t
3484 {
3485 struct gdbarch *gdbarch;
3486 struct regcache *regcache;
3487 CORE_ADDR this_addr; /* Address of insn to be recorded. */
3488 uint32_t aarch64_insn; /* Insn to be recorded. */
3489 uint32_t mem_rec_count; /* Count of memory records. */
3490 uint32_t reg_rec_count; /* Count of register records. */
3491 uint32_t *aarch64_regs; /* Registers to be recorded. */
3492 struct aarch64_mem_r *aarch64_mems; /* Memory locations to be recorded. */
3493 } insn_decode_record;
3494
3495 /* Record handler for data processing - register instructions. */
3496
3497 static unsigned int
3498 aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
3499 {
3500 uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
3501 uint32_t record_buf[4];
3502
3503 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3504 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3505 insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);
3506
3507 if (!bit (aarch64_insn_r->aarch64_insn, 28))
3508 {
3509 uint8_t setflags;
3510
3511 /* Logical (shifted register). */
3512 if (insn_bits24_27 == 0x0a)
3513 setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
3514 /* Add/subtract. */
3515 else if (insn_bits24_27 == 0x0b)
3516 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3517 else
3518 return AARCH64_RECORD_UNKNOWN;
3519
3520 record_buf[0] = reg_rd;
3521 aarch64_insn_r->reg_rec_count = 1;
3522 if (setflags)
3523 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3524 }
3525 else
3526 {
3527 if (insn_bits24_27 == 0x0b)
3528 {
3529 /* Data-processing (3 source). */
3530 record_buf[0] = reg_rd;
3531 aarch64_insn_r->reg_rec_count = 1;
3532 }
3533 else if (insn_bits24_27 == 0x0a)
3534 {
3535 if (insn_bits21_23 == 0x00)
3536 {
3537 /* Add/subtract (with carry). */
3538 record_buf[0] = reg_rd;
3539 aarch64_insn_r->reg_rec_count = 1;
3540 if (bit (aarch64_insn_r->aarch64_insn, 29))
3541 {
3542 record_buf[1] = AARCH64_CPSR_REGNUM;
3543 aarch64_insn_r->reg_rec_count = 2;
3544 }
3545 }
3546 else if (insn_bits21_23 == 0x02)
3547 {
3548 /* Conditional compare (register) and conditional compare
3549 (immediate) instructions. */
3550 record_buf[0] = AARCH64_CPSR_REGNUM;
3551 aarch64_insn_r->reg_rec_count = 1;
3552 }
3553 else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
3554 {
3555 /* Conditional select. */
3556 /* Data-processing (2 source). */
3557 /* Data-processing (1 source). */
3558 record_buf[0] = reg_rd;
3559 aarch64_insn_r->reg_rec_count = 1;
3560 }
3561 else
3562 return AARCH64_RECORD_UNKNOWN;
3563 }
3564 }
3565
3566 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3567 record_buf);
3568 return AARCH64_RECORD_SUCCESS;
3569 }
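
/* Editorial example: for "ADDS X0, X1, X2" (add/subtract, S bit set)
   the handler above records X0 and CPSR, which is what "record" needs
   to undo the instruction during reverse execution. */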
3570
3571 /* Record handler for data processing - immediate instructions. */
3572
3573 static unsigned int
3574 aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
3575 {
3576 uint8_t reg_rd, insn_bit23, insn_bits24_27, setflags;
3577 uint32_t record_buf[4];
3578
3579 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3580 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3581 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3582
3583 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
3584 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
3585 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
3586 {
3587 record_buf[0] = reg_rd;
3588 aarch64_insn_r->reg_rec_count = 1;
3589 }
3590 else if (insn_bits24_27 == 0x01)
3591 {
3592 /* Add/Subtract (immediate). */
3593 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3594 record_buf[0] = reg_rd;
3595 aarch64_insn_r->reg_rec_count = 1;
3596 if (setflags)
3597 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3598 }
3599 else if (insn_bits24_27 == 0x02 && !insn_bit23)
3600 {
3601 /* Logical (immediate). */
3602 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
3603 record_buf[0] = reg_rd;
3604 aarch64_insn_r->reg_rec_count = 1;
3605 if (setflags)
3606 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3607 }
3608 else
3609 return AARCH64_RECORD_UNKNOWN;
3610
3611 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3612 record_buf);
3613 return AARCH64_RECORD_SUCCESS;
3614 }
3615
3616 /* Record handler for branch, exception generation and system instructions. */
3617
3618 static unsigned int
3619 aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
3620 {
3621 struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
3622 uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
3623 uint32_t record_buf[4];
3624
3625 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3626 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3627 insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3628
3629 if (insn_bits28_31 == 0x0d)
3630 {
3631 /* Exception generation instructions. */
3632 if (insn_bits24_27 == 0x04)
3633 {
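/* SVC is the only exception-generating instruction recorded: it
   has opc (bits 21-23) and op2 (bits 2-4) of zero and LL (bits 0-1)
   of 0x01; HVC, SMC, BRK, HLT and the rest of this group are
   reported as unsupported below.  */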
3634 if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
3635 && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
3636 && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
3637 {
3638 ULONGEST svc_number;
3639
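/* On AArch64 GNU/Linux the supervisor call number is passed in x8;
   since AARCH64_X0_REGNUM is 0, raw register 8 below is x8.  */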
3640 regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
3641 &svc_number);
3642 return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
3643 svc_number);
3644 }
3645 else
3646 return AARCH64_RECORD_UNSUPPORTED;
3647 }
3648 /* System instructions. */
3649 else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
3650 {
3651 uint32_t reg_rt, reg_crn;
3652
3653 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3654 reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3655
3656 /* Record Rt for SYSL and MRS instructions. */
3657 if (bit (aarch64_insn_r->aarch64_insn, 21))
3658 {
3659 record_buf[0] = reg_rt;
3660 aarch64_insn_r->reg_rec_count = 1;
3661 }
3662 /* Record CPSR for HINT and MSR (immediate) instructions. */
3663 else if (reg_crn == 0x02 || reg_crn == 0x04)
3664 {
3665 record_buf[0] = AARCH64_CPSR_REGNUM;
3666 aarch64_insn_r->reg_rec_count = 1;
3667 }
3668 }
3669 /* Unconditional branch (register). */
3670 else if ((insn_bits24_27 & 0x0e) == 0x06)
3671 {
3672 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3673 if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
3674 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3675 }
3676 else
3677 return AARCH64_RECORD_UNKNOWN;
3678 }
3679 /* Unconditional branch (immediate). */
3680 else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
3681 {
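/* B changes only the PC; BL (bit 31 set) also writes the return
   address to LR, so LR must be recorded as well.  */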
3682 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3683 if (bit (aarch64_insn_r->aarch64_insn, 31))
3684 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3685 }
3686 else
3687 /* Compare & branch (immediate), Test & branch (immediate) and
3688 Conditional branch (immediate). */
3689 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3690
3691 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3692 record_buf);
3693 return AARCH64_RECORD_SUCCESS;
3694 }
3695
3696 /* Record handler for advanced SIMD load and store instructions. */
3697
3698 static unsigned int
3699 aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
3700 {
3701 CORE_ADDR address;
3702 uint64_t addr_offset = 0;
3703 uint32_t record_buf[24];
3704 uint64_t record_buf_mem[24];
3705 uint32_t reg_rn, reg_rt;
3706 uint32_t reg_index = 0, mem_index = 0;
3707 uint8_t opcode_bits, size_bits;
3708
3709 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3710 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3711 size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3712 opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3713 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);
3714
3715 if (record_debug)
3716 debug_printf ("Process record: Advanced SIMD load/store\n");
3717
3718 /* Load/store single structure. */
3719 if (bit (aarch64_insn_r->aarch64_insn, 24))
3720 {
3721 uint8_t sindex, scale, selem, esize, replicate = 0;
3722 scale = opcode_bits >> 2;
3723 selem = ((opcode_bits & 0x02)
3724 | bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
3725 switch (scale)
3726 {
3727 case 1:
3728 if (size_bits & 0x01)
3729 return AARCH64_RECORD_UNKNOWN;
3730 break;
3731 case 2:
3732 if ((size_bits >> 1) & 0x01)
3733 return AARCH64_RECORD_UNKNOWN;
3734 if (size_bits & 0x01)
3735 {
3736 if (!((opcode_bits >> 1) & 0x01))
3737 scale = 3;
3738 else
3739 return AARCH64_RECORD_UNKNOWN;
3740 }
3741 break;
3742 case 3:
3743 if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
3744 {
3745 scale = size_bits;
3746 replicate = 1;
3747 break;
3748 }
3749 else
3750 return AARCH64_RECORD_UNKNOWN;
3751 default:
3752 break;
3753 }
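/* The element size follows from the (possibly adjusted) scale:
   scale 0/1/2/3 gives an esize of 8/16/32/64 bits.  */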
3754 esize = 8 << scale;
3755 if (replicate)
3756 for (sindex = 0; sindex < selem; sindex++)
3757 {
3758 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3759 reg_rt = (reg_rt + 1) % 32;
3760 }
3761 else
3762 {
3763 for (sindex = 0; sindex < selem; sindex++)
3764 {
3765 if (bit (aarch64_insn_r->aarch64_insn, 22))
3766 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3767 else
3768 {
3769 record_buf_mem[mem_index++] = esize / 8;
3770 record_buf_mem[mem_index++] = address + addr_offset;
3771 }
3772 addr_offset = addr_offset + (esize / 8);
3773 reg_rt = (reg_rt + 1) % 32;
3774 }
3775 }
3776 }
3777 /* Load/store multiple structure. */
3778 else
3779 {
3780 uint8_t selem, esize, rpt, elements;
3781 uint8_t eindex, rindex;
3782
3783 esize = 8 << size_bits;
3784 if (bit (aarch64_insn_r->aarch64_insn, 30))
3785 elements = 128 / esize;
3786 else
3787 elements = 64 / esize;
3788
3789 switch (opcode_bits)
3790 {
3791 /* LD/ST4 (4 Registers). */
3792 case 0:
3793 rpt = 1;
3794 selem = 4;
3795 break;
3796 /* LD/ST1 (4 Registers). */
3797 case 2:
3798 rpt = 4;
3799 selem = 1;
3800 break;
3801 /* LD/ST3 (3 Registers). */
3802 case 4:
3803 rpt = 1;
3804 selem = 3;
3805 break;
3806 /* LD/ST1 (3 Registers). */
3807 case 6:
3808 rpt = 3;
3809 selem = 1;
3810 break;
3811 /* LD/ST1 (1 Register). */
3812 case 7:
3813 rpt = 1;
3814 selem = 1;
3815 break;
3816 /* LD/ST2 (2 Registers). */
3817 case 8:
3818 rpt = 1;
3819 selem = 2;
3820 break;
3821 /* LD/ST1 (2 Registers). */
3822 case 10:
3823 rpt = 2;
3824 selem = 1;
3825 break;
3826 default:
3827 return AARCH64_RECORD_UNSUPPORTED;
3828 break;
3829 }
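/* Illustrative example: LD4 {v0.4s-v3.4s}, [x0] has rpt = 1,
   selem = 4 and, with esize = 32 and the Q bit set, elements = 4;
   the loops below then record v0..v3 (or the touched memory words
   in the store case).  */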
3830 for (rindex = 0; rindex < rpt; rindex++)
3831 for (eindex = 0; eindex < elements; eindex++)
3832 {
3833 uint8_t reg_tt, sindex;
3834 reg_tt = (reg_rt + rindex) % 32;
3835 for (sindex = 0; sindex < selem; sindex++)
3836 {
3837 if (bit (aarch64_insn_r->aarch64_insn, 22))
3838 record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
3839 else
3840 {
3841 record_buf_mem[mem_index++] = esize / 8;
3842 record_buf_mem[mem_index++] = address + addr_offset;
3843 }
3844 addr_offset = addr_offset + (esize / 8);
3845 reg_tt = (reg_tt + 1) % 32;
3846 }
3847 }
3848 }
3849
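/* Bit 23 set selects the post-indexed form, which also writes the
   updated address back to the base register.  */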
3850 if (bit (aarch64_insn_r->aarch64_insn, 23))
3851 record_buf[reg_index++] = reg_rn;
3852
3853 aarch64_insn_r->reg_rec_count = reg_index;
3854 aarch64_insn_r->mem_rec_count = mem_index / 2;
3855 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3856 record_buf_mem);
3857 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3858 record_buf);
3859 return AARCH64_RECORD_SUCCESS;
3860 }
3861
3862 /* Record handler for load and store instructions. */
3863
3864 static unsigned int
3865 aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
3866 {
3867 uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
3868 uint8_t insn_bit23, insn_bit21;
3869 uint8_t opc, size_bits, ld_flag, vector_flag;
3870 uint32_t reg_rn, reg_rt, reg_rt2;
3871 uint64_t datasize, offset;
3872 uint32_t record_buf[8];
3873 uint64_t record_buf_mem[8];
3874 CORE_ADDR address;
3875
3876 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3877 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3878 insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
3879 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3880 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3881 ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
3882 vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
3883 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3884 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3885 reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
3886 size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);
3887
3888 /* Load/store exclusive. */
3889 if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
3890 {
3891 if (record_debug)
3892 debug_printf ("Process record: load/store exclusive\n");
3893
3894 if (ld_flag)
3895 {
3896 record_buf[0] = reg_rt;
3897 aarch64_insn_r->reg_rec_count = 1;
3898 if (insn_bit21)
3899 {
3900 record_buf[1] = reg_rt2;
3901 aarch64_insn_r->reg_rec_count = 2;
3902 }
3903 }
3904 else
3905 {
3906 if (insn_bit21)
3907 datasize = (8 << size_bits) * 2;
3908 else
3909 datasize = (8 << size_bits);
3910 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3911 &address);
3912 record_buf_mem[0] = datasize / 8;
3913 record_buf_mem[1] = address;
3914 aarch64_insn_r->mem_rec_count = 1;
3915 if (!insn_bit23)
3916 {
3917 /* Save register rs. */
3918 record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
3919 aarch64_insn_r->reg_rec_count = 1;
3920 }
3921 }
3922 }
3923 /* Load register (literal) instructions. */
3924 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
3925 {
3926 if (record_debug)
3927 debug_printf ("Process record: load register (literal)\n");
3928 if (vector_flag)
3929 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3930 else
3931 record_buf[0] = reg_rt;
3932 aarch64_insn_r->reg_rec_count = 1;
3933 }
3934 /* All types of load/store pair instructions. */
3935 else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
3936 {
3937 if (record_debug)
3938 debug_printf ("Process record: load/store pair\n");
3939
3940 if (ld_flag)
3941 {
3942 if (vector_flag)
3943 {
3944 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3945 record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
3946 }
3947 else
3948 {
3949 record_buf[0] = reg_rt;
3950 record_buf[1] = reg_rt2;
3951 }
3952 aarch64_insn_r->reg_rec_count = 2;
3953 }
3954 else
3955 {
3956 uint16_t imm7_off;
3957 imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
3958 if (!vector_flag)
3959 size_bits = size_bits >> 1;
3960 datasize = 8 << (2 + size_bits);
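/* imm7 is a signed, scaled offset: when its sign bit (0x40) is
   set, recover the magnitude via two's complement so it can be
   subtracted from the base address below.  */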
3961 offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
3962 offset = offset << (2 + size_bits);
3963 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3964 &address);
3965 if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
3966 {
3967 if (imm7_off & 0x40)
3968 address = address - offset;
3969 else
3970 address = address + offset;
3971 }
3972
3973 record_buf_mem[0] = datasize / 8;
3974 record_buf_mem[1] = address;
3975 record_buf_mem[2] = datasize / 8;
3976 record_buf_mem[3] = address + (datasize / 8);
3977 aarch64_insn_r->mem_rec_count = 2;
3978 }
3979 if (bit (aarch64_insn_r->aarch64_insn, 23))
3980 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3981 }
3982 /* Load/store register (unsigned immediate) instructions. */
3983 else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
3984 {
3985 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3986 if (!(opc >> 1))
3987 {
3988 if (opc & 0x01)
3989 ld_flag = 0x01;
3990 else
3991 ld_flag = 0x0;
3992 }
3993 else
3994 {
3995 if (size_bits == 0x3 && vector_flag == 0x0 && opc == 0x2)
3996 {
3997 /* PRFM (immediate) */
3998 return AARCH64_RECORD_SUCCESS;
3999 }
4000 else if (size_bits == 0x2 && vector_flag == 0x0 && opc == 0x2)
4001 {
4002 /* LDRSW (immediate) */
4003 ld_flag = 0x1;
4004 }
4005 else
4006 {
4007 if (opc & 0x01)
4008 ld_flag = 0x01;
4009 else
4010 ld_flag = 0x0;
4011 }
4012 }
4013
4014 if (record_debug)
4015 {
4016 debug_printf ("Process record: load/store (unsigned immediate):"
4017 " size %x V %d opc %x\n", size_bits, vector_flag,
4018 opc);
4019 }
4020
4021 if (!ld_flag)
4022 {
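/* The unsigned 12-bit immediate is scaled by the access size;
   e.g. STR X0, [X1, #8] has size_bits = 3 and imm12 = 1.  */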
4023 offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
4024 datasize = 8 << size_bits;
4025 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4026 &address);
4027 offset = offset << size_bits;
4028 address = address + offset;
4029
4030 record_buf_mem[0] = datasize >> 3;
4031 record_buf_mem[1] = address;
4032 aarch64_insn_r->mem_rec_count = 1;
4033 }
4034 else
4035 {
4036 if (vector_flag)
4037 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4038 else
4039 record_buf[0] = reg_rt;
4040 aarch64_insn_r->reg_rec_count = 1;
4041 }
4042 }
4043 /* Load/store register (register offset) instructions. */
4044 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
4045 && insn_bits10_11 == 0x02 && insn_bit21)
4046 {
4047 if (record_debug)
4048 debug_printf ("Process record: load/store (register offset)\n");
4049 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
4050 if (!(opc >> 1))
4051 if (opc & 0x01)
4052 ld_flag = 0x01;
4053 else
4054 ld_flag = 0x0;
4055 else
4056 if (size_bits != 0x03)
4057 ld_flag = 0x01;
4058 else
4059 return AARCH64_RECORD_UNKNOWN;
4060
4061 if (!ld_flag)
4062 {
4063 ULONGEST reg_rm_val;
4064
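/* With the S bit (bit 12) set, the index register is scaled by the
   access size before being added to the base; otherwise it is used
   unscaled.  */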
4065 regcache_raw_read_unsigned (aarch64_insn_r->regcache,
4066 bits (aarch64_insn_r->aarch64_insn, 16, 20), &reg_rm_val);
4067 if (bit (aarch64_insn_r->aarch64_insn, 12))
4068 offset = reg_rm_val << size_bits;
4069 else
4070 offset = reg_rm_val;
4071 datasize = 8 << size_bits;
4072 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4073 &address);
4074 address = address + offset;
4075 record_buf_mem[0] = datasize >> 3;
4076 record_buf_mem[1] = address;
4077 aarch64_insn_r->mem_rec_count = 1;
4078 }
4079 else
4080 {
4081 if (vector_flag)
4082 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4083 else
4084 record_buf[0] = reg_rt;
4085 aarch64_insn_r->reg_rec_count = 1;
4086 }
4087 }
4088 /* Load/store register (immediate and unprivileged) instructions. */
4089 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
4090 && !insn_bit21)
4091 {
4092 if (record_debug)
4093 {
4094 debug_printf ("Process record: load/store "
4095 "(immediate and unprivileged)\n");
4096 }
4097 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
4098 if (!(opc >> 1))
4099 if (opc & 0x01)
4100 ld_flag = 0x01;
4101 else
4102 ld_flag = 0x0;
4103 else
4104 if (size_bits != 0x03)
4105 ld_flag = 0x01;
4106 else
4107 return AARCH64_RECORD_UNKNOWN;
4108
4109 if (!ld_flag)
4110 {
4111 uint16_t imm9_off;
4112 imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
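/* imm9 is a signed 9-bit offset with its sign bit at 0x0100; the
   same two's complement trick as for imm7 recovers the magnitude.  */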
4113 offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
4114 datasize = 8 << size_bits;
4115 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4116 &address);
4117 if (insn_bits10_11 != 0x01)
4118 {
4119 if (imm9_off & 0x0100)
4120 address = address - offset;
4121 else
4122 address = address + offset;
4123 }
4124 record_buf_mem[0] = datasize >> 3;
4125 record_buf_mem[1] = address;
4126 aarch64_insn_r->mem_rec_count = 1;
4127 }
4128 else
4129 {
4130 if (vector_flag)
4131 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4132 else
4133 record_buf[0] = reg_rt;
4134 aarch64_insn_r->reg_rec_count = 1;
4135 }
4136 if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
4137 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
4138 }
4139 /* Advanced SIMD load/store instructions. */
4140 else
4141 return aarch64_record_asimd_load_store (aarch64_insn_r);
4142
4143 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
4144 record_buf_mem);
4145 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4146 record_buf);
4147 return AARCH64_RECORD_SUCCESS;
4148 }
4149
4150 /* Record handler for data processing SIMD and floating point instructions. */
4151
4152 static unsigned int
4153 aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
4154 {
4155 uint8_t insn_bit21, opcode, rmode, reg_rd;
4156 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
4157 uint8_t insn_bits11_14;
4158 uint32_t record_buf[2];
4159
4160 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
4161 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
4162 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
4163 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
4164 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
4165 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
4166 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
4167 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
4168 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
4169
4170 if (record_debug)
4171 debug_printf ("Process record: data processing SIMD/FP: ");
4172
4173 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
4174 {
4175 /* Floating point - fixed point conversion instructions. */
4176 if (!insn_bit21)
4177 {
4178 if (record_debug)
4179 debug_printf ("FP - fixed point conversion");
4180
4181 if ((opcode >> 1) == 0x0 && rmode == 0x03)
4182 record_buf[0] = reg_rd;
4183 else
4184 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4185 }
4186 /* Floating point - conditional compare instructions. */
4187 else if (insn_bits10_11 == 0x01)
4188 {
4189 if (record_debug)
4190 debug_printf ("FP - conditional compare");
4191
4192 record_buf[0] = AARCH64_CPSR_REGNUM;
4193 }
4194 /* Floating point - data processing (2-source) and
4195 conditional select instructions. */
4196 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
4197 {
4198 if (record_debug)
4199 debug_printf ("FP - DP (2-source)");
4200
4201 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4202 }
4203 else if (insn_bits10_11 == 0x00)
4204 {
4205 /* Floating point - immediate instructions. */
4206 if ((insn_bits12_15 & 0x01) == 0x01
4207 || (insn_bits12_15 & 0x07) == 0x04)
4208 {
4209 if (record_debug)
4210 debug_printf ("FP - immediate");
4211 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4212 }
4213 /* Floating point - compare instructions. */
4214 else if ((insn_bits12_15 & 0x03) == 0x02)
4215 {
4216 if (record_debug)
4217 debug_printf ("FP - compare");
4218 record_buf[0] = AARCH64_CPSR_REGNUM;
4219 }
4220 /* Floating point - integer conversions instructions. */
4221 else if (insn_bits12_15 == 0x00)
4222 {
4223 /* Convert float to integer instruction. */
4224 if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
4225 {
4226 if (record_debug)
4227 debug_printf ("float to int conversion");
4228
4229 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4230 }
4231 /* Convert integer to float instruction. */
4232 else if ((opcode >> 1) == 0x01 && !rmode)
4233 {
4234 if (record_debug)
4235 debug_printf ("int to float conversion");
4236
4237 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4238 }
4239 /* Move float to integer instruction. */
4240 else if ((opcode >> 1) == 0x03)
4241 {
4242 if (record_debug)
4243 debug_printf ("move float to int");
4244
4245 if (!(opcode & 0x01))
4246 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4247 else
4248 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4249 }
4250 else
4251 return AARCH64_RECORD_UNKNOWN;
4252 }
4253 else
4254 return AARCH64_RECORD_UNKNOWN;
4255 }
4256 else
4257 return AARCH64_RECORD_UNKNOWN;
4258 }
4259 else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
4260 {
4261 if (record_debug)
4262 debug_printf ("SIMD copy");
4263
4264 /* Advanced SIMD copy instructions. */
4265 if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
4266 && !bit (aarch64_insn_r->aarch64_insn, 15)
4267 && bit (aarch64_insn_r->aarch64_insn, 10))
4268 {
4269 if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
4270 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4271 else
4272 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4273 }
4274 else
4275 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4276 }
4277 /* All remaining floating point or advanced SIMD instructions. */
4278 else
4279 {
4280 if (record_debug)
4281 debug_printf ("all remaining");
4282
4283 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4284 }
4285
4286 if (record_debug)
4287 debug_printf ("\n");
4288
4289 aarch64_insn_r->reg_rec_count++;
4290 gdb_assert (aarch64_insn_r->reg_rec_count == 1);
4291 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4292 record_buf);
4293 return AARCH64_RECORD_SUCCESS;
4294 }
4295
4296 /* Decode the instruction type and invoke the matching record handler. */
4297
4298 static unsigned int
4299 aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
4300 {
4301 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
4302
4303 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
4304 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
4305 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
4306 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
4307
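/* Dispatch on the A64 top-level encoding group, selected by op0
   (bits 28-25); only the bits that distinguish the groups handled
   here are tested.  */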
4308 /* Data processing - immediate instructions. */
4309 if (!ins_bit26 && !ins_bit27 && ins_bit28)
4310 return aarch64_record_data_proc_imm (aarch64_insn_r);
4311
4312 /* Branch, exception generation and system instructions. */
4313 if (ins_bit26 && !ins_bit27 && ins_bit28)
4314 return aarch64_record_branch_except_sys (aarch64_insn_r);
4315
4316 /* Load and store instructions. */
4317 if (!ins_bit25 && ins_bit27)
4318 return aarch64_record_load_store (aarch64_insn_r);
4319
4320 /* Data processing - register instructions. */
4321 if (ins_bit25 && !ins_bit26 && ins_bit27)
4322 return aarch64_record_data_proc_reg (aarch64_insn_r);
4323
4324 /* Data processing - SIMD and floating point instructions. */
4325 if (ins_bit25 && ins_bit26 && ins_bit27)
4326 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
4327
4328 return AARCH64_RECORD_UNSUPPORTED;
4329 }
4330
4331 /* Free the record's register and memory allocation buffers. */
4332
4333 static void
4334 deallocate_reg_mem (insn_decode_record *record)
4335 {
4336 xfree (record->aarch64_regs);
4337 xfree (record->aarch64_mems);
4338 }
4339
4340 #if GDB_SELF_TEST
4341 namespace selftests {
4342
4343 static void
4344 aarch64_process_record_test (void)
4345 {
4346 struct gdbarch_info info;
4347 uint32_t ret;
4348
4349 gdbarch_info_init (&info);
4350 info.bfd_arch_info = bfd_scan_arch ("aarch64");
4351
4352 struct gdbarch *gdbarch = gdbarch_find_by_info (info);
4353 SELF_CHECK (gdbarch != NULL);
4354
4355 insn_decode_record aarch64_record;
4356
4357 memset (&aarch64_record, 0, sizeof (insn_decode_record));
4358 aarch64_record.regcache = NULL;
4359 aarch64_record.this_addr = 0;
4360 aarch64_record.gdbarch = gdbarch;
4361
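/* PRFM is only a prefetch hint: it changes neither registers nor
   memory, so a successful record with empty register and memory
   lists is expected.  */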
4362 /* 20 00 80 f9 prfm pldl1keep, [x1] */
4363 aarch64_record.aarch64_insn = 0xf9800020;
4364 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4365 SELF_CHECK (ret == AARCH64_RECORD_SUCCESS);
4366 SELF_CHECK (aarch64_record.reg_rec_count == 0);
4367 SELF_CHECK (aarch64_record.mem_rec_count == 0);
4368
4369 deallocate_reg_mem (&aarch64_record);
4370 }
4371
4372 } // namespace selftests
4373 #endif /* GDB_SELF_TEST */
4374
4375 /* Parse the current instruction, and record to record_arch_list the
4376 values of the registers and memory that the instruction will change.
4377 Return -1 if something goes wrong, 0 otherwise. */
4378
4379 int
4380 aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
4381 CORE_ADDR insn_addr)
4382 {
4383 uint32_t rec_no = 0;
4384 uint8_t insn_size = 4;
4385 uint32_t ret = 0;
4386 gdb_byte buf[insn_size];
4387 insn_decode_record aarch64_record;
4388
4389 memset (&buf[0], 0, insn_size);
4390 memset (&aarch64_record, 0, sizeof (insn_decode_record));
4391 target_read_memory (insn_addr, &buf[0], insn_size);
4392 aarch64_record.aarch64_insn
4393 = (uint32_t) extract_unsigned_integer (&buf[0],
4394 insn_size,
4395 gdbarch_byte_order (gdbarch));
4396 aarch64_record.regcache = regcache;
4397 aarch64_record.this_addr = insn_addr;
4398 aarch64_record.gdbarch = gdbarch;
4399
4400 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4401 if (ret == AARCH64_RECORD_UNSUPPORTED)
4402 {
4403 printf_unfiltered (_("Process record does not support instruction "
4404 "0x%0x at address %s.\n"),
4405 aarch64_record.aarch64_insn,
4406 paddress (gdbarch, insn_addr));
4407 ret = -1;
4408 }
4409
4410 if (0 == ret)
4411 {
4412 /* Record registers. */
4413 record_full_arch_list_add_reg (aarch64_record.regcache,
4414 AARCH64_PC_REGNUM);
4415 /* Always record register CPSR. */
4416 record_full_arch_list_add_reg (aarch64_record.regcache,
4417 AARCH64_CPSR_REGNUM);
4418 if (aarch64_record.aarch64_regs)
4419 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
4420 if (record_full_arch_list_add_reg (aarch64_record.regcache,
4421 aarch64_record.aarch64_regs[rec_no]))
4422 ret = -1;
4423
4424 /* Record memories. */
4425 if (aarch64_record.aarch64_mems)
4426 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
4427 if (record_full_arch_list_add_mem
4428 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
4429 aarch64_record.aarch64_mems[rec_no].len))
4430 ret = -1;
4431
4432 if (record_full_arch_list_add_end ())
4433 ret = -1;
4434 }
4435
4436 deallocate_reg_mem (&aarch64_record);
4437 return ret;
4438 }