AArch64: Add pauth DWARF registers
[deliverable/binutils-gdb.git] / gdb / aarch64-tdep.c
1 /* Common target dependent code for GDB on AArch64 systems.
2
3 Copyright (C) 2009-2019 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21 #include "defs.h"
22
23 #include "frame.h"
24 #include "inferior.h"
25 #include "gdbcmd.h"
26 #include "gdbcore.h"
27 #include "dis-asm.h"
28 #include "regcache.h"
29 #include "reggroups.h"
30 #include "value.h"
31 #include "arch-utils.h"
32 #include "osabi.h"
33 #include "frame-unwind.h"
34 #include "frame-base.h"
35 #include "trad-frame.h"
36 #include "objfiles.h"
37 #include "dwarf2-frame.h"
38 #include "gdbtypes.h"
39 #include "prologue-value.h"
40 #include "target-descriptions.h"
41 #include "user-regs.h"
42 #include "language.h"
43 #include "infcall.h"
44 #include "ax.h"
45 #include "ax-gdb.h"
46 #include "common/selftest.h"
47
48 #include "aarch64-tdep.h"
49 #include "aarch64-ravenscar-thread.h"
50
51 #include "elf-bfd.h"
52 #include "elf/aarch64.h"
53
54 #include "common/vec.h"
55
56 #include "record.h"
57 #include "record-full.h"
58 #include "arch/aarch64-insn.h"
59
60 #include "opcode/aarch64.h"
61 #include <algorithm>
62
63 #define submask(x) ((1L << ((x) + 1)) - 1)
64 #define bit(obj,st) (((obj) >> (st)) & 1)
65 #define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
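
/* For illustration (a sketch, not referenced below): in the encoding
   0x910003fd ("mov x29, sp", an alias of "add x29, sp, #0"),
   bits (insn, 0, 4) extracts the Rd field and yields 29 (x29), while
   bits (insn, 5, 9) extracts the Rn field and yields 31 (SP), since
   bits (obj, st, fn) keeps bit positions ST through FN inclusive.  */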
66
67 /* A Homogeneous Floating-Point or Short-Vector Aggregate may have at most
68 four members. */
69 #define HA_MAX_NUM_FLDS 4
70
71 /* All possible aarch64 target descriptors. */
72 struct target_desc *tdesc_aarch64_list[AARCH64_MAX_SVE_VQ + 1][2/*pauth*/];
73
74 /* The standard register names, and all the valid aliases for them. */
75 static const struct
76 {
77 const char *const name;
78 int regnum;
79 } aarch64_register_aliases[] =
80 {
81 /* 64-bit register names. */
82 {"fp", AARCH64_FP_REGNUM},
83 {"lr", AARCH64_LR_REGNUM},
84 {"sp", AARCH64_SP_REGNUM},
85
86 /* 32-bit register names. */
87 {"w0", AARCH64_X0_REGNUM + 0},
88 {"w1", AARCH64_X0_REGNUM + 1},
89 {"w2", AARCH64_X0_REGNUM + 2},
90 {"w3", AARCH64_X0_REGNUM + 3},
91 {"w4", AARCH64_X0_REGNUM + 4},
92 {"w5", AARCH64_X0_REGNUM + 5},
93 {"w6", AARCH64_X0_REGNUM + 6},
94 {"w7", AARCH64_X0_REGNUM + 7},
95 {"w8", AARCH64_X0_REGNUM + 8},
96 {"w9", AARCH64_X0_REGNUM + 9},
97 {"w10", AARCH64_X0_REGNUM + 10},
98 {"w11", AARCH64_X0_REGNUM + 11},
99 {"w12", AARCH64_X0_REGNUM + 12},
100 {"w13", AARCH64_X0_REGNUM + 13},
101 {"w14", AARCH64_X0_REGNUM + 14},
102 {"w15", AARCH64_X0_REGNUM + 15},
103 {"w16", AARCH64_X0_REGNUM + 16},
104 {"w17", AARCH64_X0_REGNUM + 17},
105 {"w18", AARCH64_X0_REGNUM + 18},
106 {"w19", AARCH64_X0_REGNUM + 19},
107 {"w20", AARCH64_X0_REGNUM + 20},
108 {"w21", AARCH64_X0_REGNUM + 21},
109 {"w22", AARCH64_X0_REGNUM + 22},
110 {"w23", AARCH64_X0_REGNUM + 23},
111 {"w24", AARCH64_X0_REGNUM + 24},
112 {"w25", AARCH64_X0_REGNUM + 25},
113 {"w26", AARCH64_X0_REGNUM + 26},
114 {"w27", AARCH64_X0_REGNUM + 27},
115 {"w28", AARCH64_X0_REGNUM + 28},
116 {"w29", AARCH64_X0_REGNUM + 29},
117 {"w30", AARCH64_X0_REGNUM + 30},
118
119 /* specials */
120 {"ip0", AARCH64_X0_REGNUM + 16},
121 {"ip1", AARCH64_X0_REGNUM + 17}
122 };
123
124 /* The required core 'R' registers. */
125 static const char *const aarch64_r_register_names[] =
126 {
127 /* These registers must appear in consecutive RAW register number
128 order and they must begin with AARCH64_X0_REGNUM! */
129 "x0", "x1", "x2", "x3",
130 "x4", "x5", "x6", "x7",
131 "x8", "x9", "x10", "x11",
132 "x12", "x13", "x14", "x15",
133 "x16", "x17", "x18", "x19",
134 "x20", "x21", "x22", "x23",
135 "x24", "x25", "x26", "x27",
136 "x28", "x29", "x30", "sp",
137 "pc", "cpsr"
138 };
139
140 /* The FP/SIMD 'V' registers. */
141 static const char *const aarch64_v_register_names[] =
142 {
143 /* These registers must appear in consecutive RAW register number
144 order and they must begin with AARCH64_V0_REGNUM! */
145 "v0", "v1", "v2", "v3",
146 "v4", "v5", "v6", "v7",
147 "v8", "v9", "v10", "v11",
148 "v12", "v13", "v14", "v15",
149 "v16", "v17", "v18", "v19",
150 "v20", "v21", "v22", "v23",
151 "v24", "v25", "v26", "v27",
152 "v28", "v29", "v30", "v31",
153 "fpsr",
154 "fpcr"
155 };
156
157 /* The SVE 'Z' and 'P' registers. */
158 static const char *const aarch64_sve_register_names[] =
159 {
160 /* These registers must appear in consecutive RAW register number
161 order and they must begin with AARCH64_SVE_Z0_REGNUM! */
162 "z0", "z1", "z2", "z3",
163 "z4", "z5", "z6", "z7",
164 "z8", "z9", "z10", "z11",
165 "z12", "z13", "z14", "z15",
166 "z16", "z17", "z18", "z19",
167 "z20", "z21", "z22", "z23",
168 "z24", "z25", "z26", "z27",
169 "z28", "z29", "z30", "z31",
170 "fpsr", "fpcr",
171 "p0", "p1", "p2", "p3",
172 "p4", "p5", "p6", "p7",
173 "p8", "p9", "p10", "p11",
174 "p12", "p13", "p14", "p15",
175 "ffr", "vg"
176 };
177
178 static const char *const aarch64_pauth_register_names[] =
179 {
180 /* Authentication mask for data pointer. */
181 "pauth_dmask",
182 /* Authentication mask for code pointer. */
183 "pauth_cmask"
184 };
185
186 /* AArch64 prologue cache structure. */
187 struct aarch64_prologue_cache
188 {
189 /* The program counter at the start of the function. It is used to
190 identify this frame as a prologue frame. */
191 CORE_ADDR func;
192
193 /* The program counter at the time this frame was created; i.e. where
194 this function was called from. It is used to identify this frame as a
195 stub frame. */
196 CORE_ADDR prev_pc;
197
198 /* The stack pointer at the time this frame was created; i.e. the
199 caller's stack pointer when this function was called. It is used
200 to identify this frame. */
201 CORE_ADDR prev_sp;
202
203 /* Is the target available to read from? */
204 int available_p;
205
206 /* The frame base for this frame is just prev_sp - frame size.
207 FRAMESIZE is the distance from the frame pointer to the
208 initial stack pointer. */
209 int framesize;
210
211 /* The register used to hold the frame pointer for this frame. */
212 int framereg;
213
214 /* Saved register offsets. */
215 struct trad_frame_saved_reg *saved_regs;
216 };
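
/* As a worked example (the common GCC frame setup exercised by the
   selftests below): after the prologue "stp x29, x30, [sp, #-272]!;
   mov x29, sp", the analysis records framereg = AARCH64_FP_REGNUM and
   framesize = 272, so the caller's stack pointer is reconstructed as
   prev_sp = FP + 272 and saved_regs holds x29 and x30 at offsets -272
   and -264 from prev_sp.  */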
217
218 static void
219 show_aarch64_debug (struct ui_file *file, int from_tty,
220 struct cmd_list_element *c, const char *value)
221 {
222 fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
223 }
224
225 namespace {
226
227 /* Abstract instruction reader. */
228
229 class abstract_instruction_reader
230 {
231 public:
232 /* Read in one instruction. */
233 virtual ULONGEST read (CORE_ADDR memaddr, int len,
234 enum bfd_endian byte_order) = 0;
235 };
236
237 /* Instruction reader from real target. */
238
239 class instruction_reader : public abstract_instruction_reader
240 {
241 public:
242 ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
243 override
244 {
245 return read_code_unsigned_integer (memaddr, len, byte_order);
246 }
247 };
248
249 } // namespace
250
251 /* Analyze a prologue, looking for a recognizable stack frame
252 and frame pointer. Scan until we encounter a store that could
253 clobber the stack frame unexpectedly, or an unknown instruction. */
254
255 static CORE_ADDR
256 aarch64_analyze_prologue (struct gdbarch *gdbarch,
257 CORE_ADDR start, CORE_ADDR limit,
258 struct aarch64_prologue_cache *cache,
259 abstract_instruction_reader& reader)
260 {
261 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
262 int i;
263 /* Track X registers and D registers in prologue. */
264 pv_t regs[AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT];
265
266 for (i = 0; i < AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT; i++)
267 regs[i] = pv_register (i, 0);
268 pv_area stack (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));
269
270 for (; start < limit; start += 4)
271 {
272 uint32_t insn;
273 aarch64_inst inst;
274
275 insn = reader.read (start, 4, byte_order_for_code);
276
277 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
278 break;
279
280 if (inst.opcode->iclass == addsub_imm
281 && (inst.opcode->op == OP_ADD
282 || strcmp ("sub", inst.opcode->name) == 0))
283 {
284 unsigned rd = inst.operands[0].reg.regno;
285 unsigned rn = inst.operands[1].reg.regno;
286
287 gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
288 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
289 gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
290 gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM);
291
292 if (inst.opcode->op == OP_ADD)
293 {
294 regs[rd] = pv_add_constant (regs[rn],
295 inst.operands[2].imm.value);
296 }
297 else
298 {
299 regs[rd] = pv_add_constant (regs[rn],
300 -inst.operands[2].imm.value);
301 }
302 }
303 else if (inst.opcode->iclass == pcreladdr
304 && inst.operands[1].type == AARCH64_OPND_ADDR_ADRP)
305 {
306 gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
307 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
308
309 regs[inst.operands[0].reg.regno] = pv_unknown ();
310 }
311 else if (inst.opcode->iclass == branch_imm)
312 {
313 /* Stop analysis on branch. */
314 break;
315 }
316 else if (inst.opcode->iclass == condbranch)
317 {
318 /* Stop analysis on branch. */
319 break;
320 }
321 else if (inst.opcode->iclass == branch_reg)
322 {
323 /* Stop analysis on branch. */
324 break;
325 }
326 else if (inst.opcode->iclass == compbranch)
327 {
328 /* Stop analysis on branch. */
329 break;
330 }
331 else if (inst.opcode->op == OP_MOVZ)
332 {
333 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
334 regs[inst.operands[0].reg.regno] = pv_unknown ();
335 }
336 else if (inst.opcode->iclass == log_shift
337 && strcmp (inst.opcode->name, "orr") == 0)
338 {
339 unsigned rd = inst.operands[0].reg.regno;
340 unsigned rn = inst.operands[1].reg.regno;
341 unsigned rm = inst.operands[2].reg.regno;
342
343 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
344 gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn);
345 gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT);
346
347 if (inst.operands[2].shifter.amount == 0
348 && rn == AARCH64_SP_REGNUM)
349 regs[rd] = regs[rm];
350 else
351 {
352 if (aarch64_debug)
353 {
354 debug_printf ("aarch64: prologue analysis gave up "
355 "addr=%s opcode=0x%x (orr x register)\n",
356 core_addr_to_string_nz (start), insn);
357 }
358 break;
359 }
360 }
361 else if (inst.opcode->op == OP_STUR)
362 {
363 unsigned rt = inst.operands[0].reg.regno;
364 unsigned rn = inst.operands[1].addr.base_regno;
365 int is64
366 = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);
367
368 gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
369 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
370 gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
371 gdb_assert (!inst.operands[1].addr.offset.is_reg);
372
373 stack.store (pv_add_constant (regs[rn],
374 inst.operands[1].addr.offset.imm),
375 is64 ? 8 : 4, regs[rt]);
376 }
377 else if ((inst.opcode->iclass == ldstpair_off
378 || (inst.opcode->iclass == ldstpair_indexed
379 && inst.operands[2].addr.preind))
380 && strcmp ("stp", inst.opcode->name) == 0)
381 {
382 /* STP with addressing mode Pre-indexed and Base register. */
383 unsigned rt1;
384 unsigned rt2;
385 unsigned rn = inst.operands[2].addr.base_regno;
386 int32_t imm = inst.operands[2].addr.offset.imm;
387
388 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
389 || inst.operands[0].type == AARCH64_OPND_Ft);
390 gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2
391 || inst.operands[1].type == AARCH64_OPND_Ft2);
392 gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7);
393 gdb_assert (!inst.operands[2].addr.offset.is_reg);
394
395 /* If recording this store would invalidate the store area
396 (perhaps because rn is not known) then we should abandon
397 further prologue analysis. */
398 if (stack.store_would_trash (pv_add_constant (regs[rn], imm)))
399 break;
400
401 if (stack.store_would_trash (pv_add_constant (regs[rn], imm + 8)))
402 break;
403
404 rt1 = inst.operands[0].reg.regno;
405 rt2 = inst.operands[1].reg.regno;
406 if (inst.operands[0].type == AARCH64_OPND_Ft)
407 {
408 /* Only the bottom 64 bits of each V register (the D register)
409 need to be preserved. */
410 gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
411 rt1 += AARCH64_X_REGISTER_COUNT;
412 rt2 += AARCH64_X_REGISTER_COUNT;
413 }
414
415 stack.store (pv_add_constant (regs[rn], imm), 8,
416 regs[rt1]);
417 stack.store (pv_add_constant (regs[rn], imm + 8), 8,
418 regs[rt2]);
419
420 if (inst.operands[2].addr.writeback)
421 regs[rn] = pv_add_constant (regs[rn], imm);
422
423 }
424 else if ((inst.opcode->iclass == ldst_imm9 /* Signed immediate. */
425 || (inst.opcode->iclass == ldst_pos /* Unsigned immediate. */
426 && (inst.opcode->op == OP_STR_POS
427 || inst.opcode->op == OP_STRF_POS)))
428 && inst.operands[1].addr.base_regno == AARCH64_SP_REGNUM
429 && strcmp ("str", inst.opcode->name) == 0)
430 {
431 /* STR (immediate) */
432 unsigned int rt = inst.operands[0].reg.regno;
433 int32_t imm = inst.operands[1].addr.offset.imm;
434 unsigned int rn = inst.operands[1].addr.base_regno;
435 bool is64
436 = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);
437 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
438 || inst.operands[0].type == AARCH64_OPND_Ft);
439
440 if (inst.operands[0].type == AARCH64_OPND_Ft)
441 {
442 /* Only the bottom 64 bits of each V register (the D register)
443 need to be preserved. */
444 gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
445 rt += AARCH64_X_REGISTER_COUNT;
446 }
447
448 stack.store (pv_add_constant (regs[rn], imm),
449 is64 ? 8 : 4, regs[rt]);
450 if (inst.operands[1].addr.writeback)
451 regs[rn] = pv_add_constant (regs[rn], imm);
452 }
453 else if (inst.opcode->iclass == testbranch)
454 {
455 /* Stop analysis on branch. */
456 break;
457 }
458 else
459 {
460 if (aarch64_debug)
461 {
462 debug_printf ("aarch64: prologue analysis gave up addr=%s"
463 " opcode=0x%x\n",
464 core_addr_to_string_nz (start), insn);
465 }
466 break;
467 }
468 }
469
470 if (cache == NULL)
471 return start;
472
473 if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
474 {
475 /* Frame pointer is fp. Frame size is constant. */
476 cache->framereg = AARCH64_FP_REGNUM;
477 cache->framesize = -regs[AARCH64_FP_REGNUM].k;
478 }
479 else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
480 {
481 /* Try the stack pointer. */
482 cache->framesize = -regs[AARCH64_SP_REGNUM].k;
483 cache->framereg = AARCH64_SP_REGNUM;
484 }
485 else
486 {
487 /* We're just out of luck. We don't know where the frame is. */
488 cache->framereg = -1;
489 cache->framesize = 0;
490 }
491
492 for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
493 {
494 CORE_ADDR offset;
495
496 if (stack.find_reg (gdbarch, i, &offset))
497 cache->saved_regs[i].addr = offset;
498 }
499
500 for (i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
501 {
502 int regnum = gdbarch_num_regs (gdbarch);
503 CORE_ADDR offset;
504
505 if (stack.find_reg (gdbarch, i + AARCH64_X_REGISTER_COUNT,
506 &offset))
507 cache->saved_regs[i + regnum + AARCH64_D0_REGNUM].addr = offset;
508 }
509
510 return start;
511 }
512
513 static CORE_ADDR
514 aarch64_analyze_prologue (struct gdbarch *gdbarch,
515 CORE_ADDR start, CORE_ADDR limit,
516 struct aarch64_prologue_cache *cache)
517 {
518 instruction_reader reader;
519
520 return aarch64_analyze_prologue (gdbarch, start, limit, cache,
521 reader);
522 }
523
524 #if GDB_SELF_TEST
525
526 namespace selftests {
527
528 /* Instruction reader from manually cooked instruction sequences. */
529
530 class instruction_reader_test : public abstract_instruction_reader
531 {
532 public:
533 template<size_t SIZE>
534 explicit instruction_reader_test (const uint32_t (&insns)[SIZE])
535 : m_insns (insns), m_insns_size (SIZE)
536 {}
537
538 ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
539 override
540 {
541 SELF_CHECK (len == 4);
542 SELF_CHECK (memaddr % 4 == 0);
543 SELF_CHECK (memaddr / 4 < m_insns_size);
544
545 return m_insns[memaddr / 4];
546 }
547
548 private:
549 const uint32_t *m_insns;
550 size_t m_insns_size;
551 };
552
553 static void
554 aarch64_analyze_prologue_test (void)
555 {
556 struct gdbarch_info info;
557
558 gdbarch_info_init (&info);
559 info.bfd_arch_info = bfd_scan_arch ("aarch64");
560
561 struct gdbarch *gdbarch = gdbarch_find_by_info (info);
562 SELF_CHECK (gdbarch != NULL);
563
564 /* Test the simple prologue in which the frame pointer is used. */
565 {
566 struct aarch64_prologue_cache cache;
567 cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);
568
569 static const uint32_t insns[] = {
570 0xa9af7bfd, /* stp x29, x30, [sp,#-272]! */
571 0x910003fd, /* mov x29, sp */
572 0x97ffffe6, /* bl 0x400580 */
573 };
574 instruction_reader_test reader (insns);
575
576 CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
577 SELF_CHECK (end == 4 * 2);
578
579 SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
580 SELF_CHECK (cache.framesize == 272);
581
582 for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
583 {
584 if (i == AARCH64_FP_REGNUM)
585 SELF_CHECK (cache.saved_regs[i].addr == -272);
586 else if (i == AARCH64_LR_REGNUM)
587 SELF_CHECK (cache.saved_regs[i].addr == -264);
588 else
589 SELF_CHECK (cache.saved_regs[i].addr == -1);
590 }
591
592 for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
593 {
594 int regnum = gdbarch_num_regs (gdbarch);
595
596 SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
597 == -1);
598 }
599 }
600
601 /* Test a prologue in which STR is used and the frame pointer is
602 not used. */
603 {
604 struct aarch64_prologue_cache cache;
605 cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);
606
607 static const uint32_t insns[] = {
608 0xf81d0ff3, /* str x19, [sp, #-48]! */
609 0xb9002fe0, /* str w0, [sp, #44] */
610 0xf90013e1, /* str x1, [sp, #32]*/
611 0xfd000fe0, /* str d0, [sp, #24] */
612 0xaa0203f3, /* mov x19, x2 */
613 0xf94013e0, /* ldr x0, [sp, #32] */
614 };
615 instruction_reader_test reader (insns);
616
617 CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
618
619 SELF_CHECK (end == 4 * 5);
620
621 SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
622 SELF_CHECK (cache.framesize == 48);
623
624 for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
625 {
626 if (i == 1)
627 SELF_CHECK (cache.saved_regs[i].addr == -16);
628 else if (i == 19)
629 SELF_CHECK (cache.saved_regs[i].addr == -48);
630 else
631 SELF_CHECK (cache.saved_regs[i].addr == -1);
632 }
633
634 for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
635 {
636 int regnum = gdbarch_num_regs (gdbarch);
637
638 if (i == 0)
639 SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
640 == -24);
641 else
642 SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
643 == -1);
644 }
645 }
646 }
647 } // namespace selftests
648 #endif /* GDB_SELF_TEST */
649
650 /* Implement the "skip_prologue" gdbarch method. */
651
652 static CORE_ADDR
653 aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
654 {
655 CORE_ADDR func_addr, limit_pc;
656
657 /* See if we can determine the end of the prologue via the symbol
658 table. If so, then return either PC, or the PC after the
659 prologue, whichever is greater. */
660 if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
661 {
662 CORE_ADDR post_prologue_pc
663 = skip_prologue_using_sal (gdbarch, func_addr);
664
665 if (post_prologue_pc != 0)
666 return std::max (pc, post_prologue_pc);
667 }
668
669 /* Can't determine prologue from the symbol table, need to examine
670 instructions. */
671
672 /* Find an upper limit on the function prologue using the debug
673 information. If the debug information could not be used to
674 provide that bound, then use an arbitrary large number as the
675 upper bound. */
676 limit_pc = skip_prologue_using_sal (gdbarch, pc);
677 if (limit_pc == 0)
678 limit_pc = pc + 128; /* Magic. */
679
680 /* Try disassembling prologue. */
681 return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
682 }
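
/* For instance, given the "stp x29, x30, [sp, #-272]!; mov x29, sp;
   bl ..." sequence used in the selftests above, and no usable
   line-table information, the analysis stops at the branch and the
   skipped prologue ends at PC + 8.  */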
683
684 /* Scan the function prologue for THIS_FRAME and populate the prologue
685 cache CACHE. */
686
687 static void
688 aarch64_scan_prologue (struct frame_info *this_frame,
689 struct aarch64_prologue_cache *cache)
690 {
691 CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
692 CORE_ADDR prologue_start;
693 CORE_ADDR prologue_end;
694 CORE_ADDR prev_pc = get_frame_pc (this_frame);
695 struct gdbarch *gdbarch = get_frame_arch (this_frame);
696
697 cache->prev_pc = prev_pc;
698
699 /* Assume we do not find a frame. */
700 cache->framereg = -1;
701 cache->framesize = 0;
702
703 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
704 &prologue_end))
705 {
706 struct symtab_and_line sal = find_pc_line (prologue_start, 0);
707
708 if (sal.line == 0)
709 {
710 /* No line info so use the current PC. */
711 prologue_end = prev_pc;
712 }
713 else if (sal.end < prologue_end)
714 {
715 /* The next line begins after the function end. */
716 prologue_end = sal.end;
717 }
718
719 prologue_end = std::min (prologue_end, prev_pc);
720 aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
721 }
722 else
723 {
724 CORE_ADDR frame_loc;
725
726 frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
727 if (frame_loc == 0)
728 return;
729
730 cache->framereg = AARCH64_FP_REGNUM;
731 cache->framesize = 16;
732 cache->saved_regs[29].addr = 0;
733 cache->saved_regs[30].addr = 8;
734 }
735 }
736
737 /* Fill in *CACHE with information about the prologue of *THIS_FRAME. This
738 function may throw an exception if the inferior's registers or memory are
739 not available. */
740
741 static void
742 aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
743 struct aarch64_prologue_cache *cache)
744 {
745 CORE_ADDR unwound_fp;
746 int reg;
747
748 aarch64_scan_prologue (this_frame, cache);
749
750 if (cache->framereg == -1)
751 return;
752
753 unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
754 if (unwound_fp == 0)
755 return;
756
757 cache->prev_sp = unwound_fp + cache->framesize;
758
759 /* Calculate actual addresses of saved registers using offsets
760 determined by aarch64_analyze_prologue. */
761 for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
762 if (trad_frame_addr_p (cache->saved_regs, reg))
763 cache->saved_regs[reg].addr += cache->prev_sp;
764
765 cache->func = get_frame_func (this_frame);
766
767 cache->available_p = 1;
768 }
769
770 /* Allocate and fill in *THIS_CACHE with information about the prologue of
771 *THIS_FRAME. Do not do this if *THIS_CACHE was already allocated.
772 Return a pointer to the current aarch64_prologue_cache in
773 *THIS_CACHE. */
774
775 static struct aarch64_prologue_cache *
776 aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
777 {
778 struct aarch64_prologue_cache *cache;
779
780 if (*this_cache != NULL)
781 return (struct aarch64_prologue_cache *) *this_cache;
782
783 cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
784 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
785 *this_cache = cache;
786
787 TRY
788 {
789 aarch64_make_prologue_cache_1 (this_frame, cache);
790 }
791 CATCH (ex, RETURN_MASK_ERROR)
792 {
793 if (ex.error != NOT_AVAILABLE_ERROR)
794 throw_exception (ex);
795 }
796 END_CATCH
797
798 return cache;
799 }
800
801 /* Implement the "stop_reason" frame_unwind method. */
802
803 static enum unwind_stop_reason
804 aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
805 void **this_cache)
806 {
807 struct aarch64_prologue_cache *cache
808 = aarch64_make_prologue_cache (this_frame, this_cache);
809
810 if (!cache->available_p)
811 return UNWIND_UNAVAILABLE;
812
813 /* Halt the backtrace at "_start". */
814 if (cache->prev_pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
815 return UNWIND_OUTERMOST;
816
817 /* We've hit a wall, stop. */
818 if (cache->prev_sp == 0)
819 return UNWIND_OUTERMOST;
820
821 return UNWIND_NO_REASON;
822 }
823
824 /* Our frame ID for a normal frame is the current function's starting
825 PC and the caller's SP when we were called. */
826
827 static void
828 aarch64_prologue_this_id (struct frame_info *this_frame,
829 void **this_cache, struct frame_id *this_id)
830 {
831 struct aarch64_prologue_cache *cache
832 = aarch64_make_prologue_cache (this_frame, this_cache);
833
834 if (!cache->available_p)
835 *this_id = frame_id_build_unavailable_stack (cache->func);
836 else
837 *this_id = frame_id_build (cache->prev_sp, cache->func);
838 }
839
840 /* Implement the "prev_register" frame_unwind method. */
841
842 static struct value *
843 aarch64_prologue_prev_register (struct frame_info *this_frame,
844 void **this_cache, int prev_regnum)
845 {
846 struct aarch64_prologue_cache *cache
847 = aarch64_make_prologue_cache (this_frame, this_cache);
848
849 /* If we are asked to unwind the PC, then we need to return the LR
850 instead. The prologue may save PC, but it will point into this
851 frame's prologue, not the next frame's resume location. */
852 if (prev_regnum == AARCH64_PC_REGNUM)
853 {
854 CORE_ADDR lr;
855
856 lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
857 return frame_unwind_got_constant (this_frame, prev_regnum, lr);
858 }
859
860 /* SP is generally not saved to the stack, but this frame is
861 identified by the next frame's stack pointer at the time of the
862 call. The value was already reconstructed into PREV_SP. */
863 /*
864 +----------+ ^
865 | saved lr | |
866 +->| saved fp |--+
867 | | |
868 | | | <- Previous SP
869 | +----------+
870 | | saved lr |
871 +--| saved fp |<- FP
872 | |
873 | |<- SP
874 +----------+ */
875 if (prev_regnum == AARCH64_SP_REGNUM)
876 return frame_unwind_got_constant (this_frame, prev_regnum,
877 cache->prev_sp);
878
879 return trad_frame_get_prev_register (this_frame, cache->saved_regs,
880 prev_regnum);
881 }
882
883 /* AArch64 prologue unwinder. */
884 struct frame_unwind aarch64_prologue_unwind =
885 {
886 NORMAL_FRAME,
887 aarch64_prologue_frame_unwind_stop_reason,
888 aarch64_prologue_this_id,
889 aarch64_prologue_prev_register,
890 NULL,
891 default_frame_sniffer
892 };
893
894 /* Allocate and fill in *THIS_CACHE with information about the prologue of
895 *THIS_FRAME. Do not do this if *THIS_CACHE was already allocated.
896 Return a pointer to the current aarch64_prologue_cache in
897 *THIS_CACHE. */
898
899 static struct aarch64_prologue_cache *
900 aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
901 {
902 struct aarch64_prologue_cache *cache;
903
904 if (*this_cache != NULL)
905 return (struct aarch64_prologue_cache *) *this_cache;
906
907 cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
908 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
909 *this_cache = cache;
910
911 TRY
912 {
913 cache->prev_sp = get_frame_register_unsigned (this_frame,
914 AARCH64_SP_REGNUM);
915 cache->prev_pc = get_frame_pc (this_frame);
916 cache->available_p = 1;
917 }
918 CATCH (ex, RETURN_MASK_ERROR)
919 {
920 if (ex.error != NOT_AVAILABLE_ERROR)
921 throw_exception (ex);
922 }
923 END_CATCH
924
925 return cache;
926 }
927
928 /* Implement the "stop_reason" frame_unwind method. */
929
930 static enum unwind_stop_reason
931 aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame,
932 void **this_cache)
933 {
934 struct aarch64_prologue_cache *cache
935 = aarch64_make_stub_cache (this_frame, this_cache);
936
937 if (!cache->available_p)
938 return UNWIND_UNAVAILABLE;
939
940 return UNWIND_NO_REASON;
941 }
942
943 /* Our frame ID for a stub frame is the current SP and LR. */
944
945 static void
946 aarch64_stub_this_id (struct frame_info *this_frame,
947 void **this_cache, struct frame_id *this_id)
948 {
949 struct aarch64_prologue_cache *cache
950 = aarch64_make_stub_cache (this_frame, this_cache);
951
952 if (cache->available_p)
953 *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
954 else
955 *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
956 }
957
958 /* Implement the "sniffer" frame_unwind method. */
959
960 static int
961 aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
962 struct frame_info *this_frame,
963 void **this_prologue_cache)
964 {
965 CORE_ADDR addr_in_block;
966 gdb_byte dummy[4];
967
968 addr_in_block = get_frame_address_in_block (this_frame);
969 if (in_plt_section (addr_in_block)
970 /* We also use the stub unwinder if the target memory is unreadable
971 to avoid having the prologue unwinder try to read it. */
972 || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
973 return 1;
974
975 return 0;
976 }
977
978 /* AArch64 stub unwinder. */
979 struct frame_unwind aarch64_stub_unwind =
980 {
981 NORMAL_FRAME,
982 aarch64_stub_frame_unwind_stop_reason,
983 aarch64_stub_this_id,
984 aarch64_prologue_prev_register,
985 NULL,
986 aarch64_stub_unwind_sniffer
987 };
988
989 /* Return the frame base address of *THIS_FRAME. */
990
991 static CORE_ADDR
992 aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
993 {
994 struct aarch64_prologue_cache *cache
995 = aarch64_make_prologue_cache (this_frame, this_cache);
996
997 return cache->prev_sp - cache->framesize;
998 }
999
1000 /* AArch64 default frame base information. */
1001 struct frame_base aarch64_normal_base =
1002 {
1003 &aarch64_prologue_unwind,
1004 aarch64_normal_frame_base,
1005 aarch64_normal_frame_base,
1006 aarch64_normal_frame_base
1007 };
1008
1009 /* Return the value of the REGNUM register in the previous frame of
1010 *THIS_FRAME. */
1011
1012 static struct value *
1013 aarch64_dwarf2_prev_register (struct frame_info *this_frame,
1014 void **this_cache, int regnum)
1015 {
1016 CORE_ADDR lr;
1017
1018 switch (regnum)
1019 {
1020 case AARCH64_PC_REGNUM:
1021 lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
1022 return frame_unwind_got_constant (this_frame, regnum, lr);
1023
1024 default:
1025 internal_error (__FILE__, __LINE__,
1026 _("Unexpected register %d"), regnum);
1027 }
1028 }
1029
1030 /* Implement the "init_reg" dwarf2_frame_ops method. */
1031
1032 static void
1033 aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
1034 struct dwarf2_frame_state_reg *reg,
1035 struct frame_info *this_frame)
1036 {
1037 switch (regnum)
1038 {
1039 case AARCH64_PC_REGNUM:
1040 reg->how = DWARF2_FRAME_REG_FN;
1041 reg->loc.fn = aarch64_dwarf2_prev_register;
1042 break;
1043 case AARCH64_SP_REGNUM:
1044 reg->how = DWARF2_FRAME_REG_CFA;
1045 break;
1046 }
1047 }
1048
1049 /* When arguments must be pushed onto the stack, they go on in reverse
1050 order. The code below implements a FILO (stack) to do this. */
1051
1052 typedef struct
1053 {
1054 /* Value to pass on stack. It can be NULL if this item is for stack
1055 padding. */
1056 const gdb_byte *data;
1057
1058 /* Size in bytes of value to pass on stack. */
1059 int len;
1060 } stack_item_t;
1061
1062 DEF_VEC_O (stack_item_t);
1063
1064 /* Return the alignment (in bytes) of the given type. */
1065
1066 static int
1067 aarch64_type_align (struct type *t)
1068 {
1069 int n;
1070 int align;
1071 int falign;
1072
1073 t = check_typedef (t);
1074 switch (TYPE_CODE (t))
1075 {
1076 default:
1077 /* Should never happen. */
1078 internal_error (__FILE__, __LINE__, _("unknown type alignment"));
1079 return 4;
1080
1081 case TYPE_CODE_PTR:
1082 case TYPE_CODE_ENUM:
1083 case TYPE_CODE_INT:
1084 case TYPE_CODE_FLT:
1085 case TYPE_CODE_SET:
1086 case TYPE_CODE_RANGE:
1087 case TYPE_CODE_BITSTRING:
1088 case TYPE_CODE_REF:
1089 case TYPE_CODE_RVALUE_REF:
1090 case TYPE_CODE_CHAR:
1091 case TYPE_CODE_BOOL:
1092 return TYPE_LENGTH (t);
1093
1094 case TYPE_CODE_ARRAY:
1095 if (TYPE_VECTOR (t))
1096 {
1097 /* Use the natural alignment for vector types (the same as for
1098 scalar types), but cap the alignment at 128 bits. */
1099 if (TYPE_LENGTH (t) > 16)
1100 return 16;
1101 else
1102 return TYPE_LENGTH (t);
1103 }
1104 else
1105 return aarch64_type_align (TYPE_TARGET_TYPE (t));
1106 case TYPE_CODE_COMPLEX:
1107 return aarch64_type_align (TYPE_TARGET_TYPE (t));
1108
1109 case TYPE_CODE_STRUCT:
1110 case TYPE_CODE_UNION:
1111 align = 1;
1112 for (n = 0; n < TYPE_NFIELDS (t); n++)
1113 {
1114 falign = aarch64_type_align (TYPE_FIELD_TYPE (t, n));
1115 if (falign > align)
1116 align = falign;
1117 }
1118 return align;
1119 }
1120 }
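
/* For example, under these rules "struct { char c; double d; }" aligns
   to 8 (the largest field alignment), while a 32-byte vector type is
   capped at the 16-byte maximum.  */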
1121
1122 /* Worker function for aapcs_is_vfp_call_or_return_candidate.
1123
1124 Return the number of registers required, or -1 on failure.
1125
1126 When encountering a base element, if FUNDAMENTAL_TYPE is not yet set, set it
1127 to that element's type; otherwise fail if the type of this element does not
1128 match the existing value. */
1129
1130 static int
1131 aapcs_is_vfp_call_or_return_candidate_1 (struct type *type,
1132 struct type **fundamental_type)
1133 {
1134 if (type == nullptr)
1135 return -1;
1136
1137 switch (TYPE_CODE (type))
1138 {
1139 case TYPE_CODE_FLT:
1140 if (TYPE_LENGTH (type) > 16)
1141 return -1;
1142
1143 if (*fundamental_type == nullptr)
1144 *fundamental_type = type;
1145 else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
1146 || TYPE_CODE (type) != TYPE_CODE (*fundamental_type))
1147 return -1;
1148
1149 return 1;
1150
1151 case TYPE_CODE_COMPLEX:
1152 {
1153 struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
1154 if (TYPE_LENGTH (target_type) > 16)
1155 return -1;
1156
1157 if (*fundamental_type == nullptr)
1158 *fundamental_type = target_type;
1159 else if (TYPE_LENGTH (target_type) != TYPE_LENGTH (*fundamental_type)
1160 || TYPE_CODE (target_type) != TYPE_CODE (*fundamental_type))
1161 return -1;
1162
1163 return 2;
1164 }
1165
1166 case TYPE_CODE_ARRAY:
1167 {
1168 if (TYPE_VECTOR (type))
1169 {
1170 if (TYPE_LENGTH (type) != 8 && TYPE_LENGTH (type) != 16)
1171 return -1;
1172
1173 if (*fundamental_type == nullptr)
1174 *fundamental_type = type;
1175 else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
1176 || TYPE_CODE (type) != TYPE_CODE (*fundamental_type))
1177 return -1;
1178
1179 return 1;
1180 }
1181 else
1182 {
1183 struct type *target_type = TYPE_TARGET_TYPE (type);
1184 int count = aapcs_is_vfp_call_or_return_candidate_1
1185 (target_type, fundamental_type);
1186
1187 if (count == -1)
1188 return count;
1189
1190 count *= (TYPE_LENGTH (type) / TYPE_LENGTH (target_type));
1191 return count;
1192 }
1193 }
1194
1195 case TYPE_CODE_STRUCT:
1196 case TYPE_CODE_UNION:
1197 {
1198 int count = 0;
1199
1200 for (int i = 0; i < TYPE_NFIELDS (type); i++)
1201 {
1202 /* Ignore any static fields. */
1203 if (field_is_static (&TYPE_FIELD (type, i)))
1204 continue;
1205
1206 struct type *member = check_typedef (TYPE_FIELD_TYPE (type, i));
1207
1208 int sub_count = aapcs_is_vfp_call_or_return_candidate_1
1209 (member, fundamental_type);
1210 if (sub_count == -1)
1211 return -1;
1212 count += sub_count;
1213 }
1214
1215 /* Ensure there is no padding between the fields (allowing for empty,
1216 zero-length structs). */
1217 int ftype_length = (*fundamental_type == nullptr)
1218 ? 0 : TYPE_LENGTH (*fundamental_type);
1219 if (count * ftype_length != TYPE_LENGTH (type))
1220 return -1;
1221
1222 return count;
1223 }
1224
1225 default:
1226 break;
1227 }
1228
1229 return -1;
1230 }
1231
1232 /* Return true if an argument, whose type is described by TYPE, can be passed or
1233 returned in SIMD/FP registers, provided enough parameter-passing registers
1234 are available. This is as described in the AAPCS64.
1235
1236 Upon successful return, *COUNT returns the number of needed registers,
1237 *FUNDAMENTAL_TYPE contains the type of those registers.
1238
1239 Candidate as per the AAPCS64 5.4.2.C is either a:
1240 - float.
1241 - short-vector.
1242 - HFA (Homogeneous Floating-point Aggregate, 4.3.5.1). A Composite type where
1243 all the members are floats and has at most 4 members.
1244 - HVA (Homogeneous Short-vector Aggregate, 4.3.5.2). A Composite type where
1245 all the members are short vectors and has at most 4 members.
1246 - Complex (7.1.1)
1247
1248 Note that HFAs and HVAs can include nested structures and arrays. */
1249
1250 static bool
1251 aapcs_is_vfp_call_or_return_candidate (struct type *type, int *count,
1252 struct type **fundamental_type)
1253 {
1254 if (type == nullptr)
1255 return false;
1256
1257 *fundamental_type = nullptr;
1258
1259 int ag_count = aapcs_is_vfp_call_or_return_candidate_1 (type,
1260 fundamental_type);
1261
1262 if (ag_count > 0 && ag_count <= HA_MAX_NUM_FLDS)
1263 {
1264 *count = ag_count;
1265 return true;
1266 }
1267 else
1268 return false;
1269 }
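
/* For illustration: "struct { double x, y; }" is an HFA, so the worker
   returns a count of 2 with *FUNDAMENTAL_TYPE set to double and this
   function returns true; "struct { float f; double d; }" mixes base
   types, so the worker fails and this function returns false.  */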
1270
1271 /* AArch64 function call information structure. */
1272 struct aarch64_call_info
1273 {
1274 /* the current argument number. */
1275 unsigned argnum;
1276
1277 /* The next general purpose register number, equivalent to NGRN as
1278 described in the AArch64 Procedure Call Standard. */
1279 unsigned ngrn;
1280
1281 /* The next SIMD and floating point register number, equivalent to
1282 NSRN as described in the AArch64 Procedure Call Standard. */
1283 unsigned nsrn;
1284
1285 /* The next stacked argument address, equivalent to NSAA as
1286 described in the AArch64 Procedure Call Standard. */
1287 unsigned nsaa;
1288
1289 /* Stack item vector. */
1290 VEC(stack_item_t) *si;
1291 };
1292
1293 /* Pass a value in a sequence of consecutive X registers. The caller
1294 is responsible for ensuring sufficient registers are available. */
1295
1296 static void
1297 pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
1298 struct aarch64_call_info *info, struct type *type,
1299 struct value *arg)
1300 {
1301 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1302 int len = TYPE_LENGTH (type);
1303 enum type_code typecode = TYPE_CODE (type);
1304 int regnum = AARCH64_X0_REGNUM + info->ngrn;
1305 const bfd_byte *buf = value_contents (arg);
1306
1307 info->argnum++;
1308
1309 while (len > 0)
1310 {
1311 int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
1312 CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
1313 byte_order);
1314
1315
1316 /* Adjust sub-word struct/union args when big-endian. */
1317 if (byte_order == BFD_ENDIAN_BIG
1318 && partial_len < X_REGISTER_SIZE
1319 && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
1320 regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);
1321
1322 if (aarch64_debug)
1323 {
1324 debug_printf ("arg %d in %s = 0x%s\n", info->argnum,
1325 gdbarch_register_name (gdbarch, regnum),
1326 phex (regval, X_REGISTER_SIZE));
1327 }
1328 regcache_cooked_write_unsigned (regcache, regnum, regval);
1329 len -= partial_len;
1330 buf += partial_len;
1331 regnum++;
1332 }
1333 }
1334
1335 /* Attempt to marshall a value in a V register. Return 1 if
1336 successful, or 0 if insufficient registers are available. This
1337 function, unlike the equivalent pass_in_x() function, does not
1338 handle arguments spread across multiple registers. */
1339
1340 static int
1341 pass_in_v (struct gdbarch *gdbarch,
1342 struct regcache *regcache,
1343 struct aarch64_call_info *info,
1344 int len, const bfd_byte *buf)
1345 {
1346 if (info->nsrn < 8)
1347 {
1348 int regnum = AARCH64_V0_REGNUM + info->nsrn;
1349 /* Enough space for a full vector register. */
1350 gdb_byte reg[register_size (gdbarch, regnum)];
1351 gdb_assert (len <= sizeof (reg));
1352
1353 info->argnum++;
1354 info->nsrn++;
1355
1356 memset (reg, 0, sizeof (reg));
1357 /* PCS C.1, the argument is allocated to the least significant
1358 bits of the V register. */
1359 memcpy (reg, buf, len);
1360 regcache->cooked_write (regnum, reg);
1361
1362 if (aarch64_debug)
1363 {
1364 debug_printf ("arg %d in %s\n", info->argnum,
1365 gdbarch_register_name (gdbarch, regnum));
1366 }
1367 return 1;
1368 }
1369 info->nsrn = 8;
1370 return 0;
1371 }
1372
1373 /* Marshall an argument onto the stack. */
1374
1375 static void
1376 pass_on_stack (struct aarch64_call_info *info, struct type *type,
1377 struct value *arg)
1378 {
1379 const bfd_byte *buf = value_contents (arg);
1380 int len = TYPE_LENGTH (type);
1381 int align;
1382 stack_item_t item;
1383
1384 info->argnum++;
1385
1386 align = aarch64_type_align (type);
1387
1388 /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
1389 natural alignment of the argument's type. */
1390 align = align_up (align, 8);
1391
1392 /* The AArch64 PCS requires at most doubleword alignment. */
1393 if (align > 16)
1394 align = 16;
1395
1396 if (aarch64_debug)
1397 {
1398 debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
1399 info->nsaa);
1400 }
1401
1402 item.len = len;
1403 item.data = buf;
1404 VEC_safe_push (stack_item_t, info->si, &item);
1405
1406 info->nsaa += len;
1407 if (info->nsaa & (align - 1))
1408 {
1409 /* Push stack alignment padding. */
1410 int pad = align - (info->nsaa & (align - 1));
1411
1412 item.len = pad;
1413 item.data = NULL;
1414
1415 VEC_safe_push (stack_item_t, info->si, &item);
1416 info->nsaa += pad;
1417 }
1418 }
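
/* A worked example of the padding above: pushing a 12-byte struct whose
   natural alignment is 4 uses align = 8 (the PCS minimum), so NSAA
   advances from 0 to 12 and a 4-byte padding item then brings it to 16
   before the next argument is placed.  */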
1419
1420 /* Marshall an argument into a sequence of one or more consecutive X
1421 registers or, if insufficient X registers are available then onto
1422 the stack. */
1423
1424 static void
1425 pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
1426 struct aarch64_call_info *info, struct type *type,
1427 struct value *arg)
1428 {
1429 int len = TYPE_LENGTH (type);
1430 int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;
1431
1432 /* PCS C.13 - Pass in registers if we have enough spare. */
1433 if (info->ngrn + nregs <= 8)
1434 {
1435 pass_in_x (gdbarch, regcache, info, type, arg);
1436 info->ngrn += nregs;
1437 }
1438 else
1439 {
1440 info->ngrn = 8;
1441 pass_on_stack (info, type, arg);
1442 }
1443 }
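
/* E.g. a 16-byte struct needs nregs = 2; if NGRN is already 7, only one
   X register remains, so the whole value goes onto the stack and NGRN
   is set to 8; no value is ever split between registers and the
   stack.  */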
1444
1445 /* Pass a value, which is of type arg_type, in a V register. Assumes the value
1446 is an aapcs_is_vfp_call_or_return_candidate and that there are enough spare V
1447 registers. A return value of false is an error state, as the value will have
1448 been partially passed to the stack. */
1449 static bool
1450 pass_in_v_vfp_candidate (struct gdbarch *gdbarch, struct regcache *regcache,
1451 struct aarch64_call_info *info, struct type *arg_type,
1452 struct value *arg)
1453 {
1454 switch (TYPE_CODE (arg_type))
1455 {
1456 case TYPE_CODE_FLT:
1457 return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type),
1458 value_contents (arg));
1459 break;
1460
1461 case TYPE_CODE_COMPLEX:
1462 {
1463 const bfd_byte *buf = value_contents (arg);
1464 struct type *target_type = check_typedef (TYPE_TARGET_TYPE (arg_type));
1465
1466 if (!pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type),
1467 buf))
1468 return false;
1469
1470 return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type),
1471 buf + TYPE_LENGTH (target_type));
1472 }
1473
1474 case TYPE_CODE_ARRAY:
1475 if (TYPE_VECTOR (arg_type))
1476 return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type),
1477 value_contents (arg));
1478 /* fall through. */
1479
1480 case TYPE_CODE_STRUCT:
1481 case TYPE_CODE_UNION:
1482 for (int i = 0; i < TYPE_NFIELDS (arg_type); i++)
1483 {
1484 /* Don't include static fields. */
1485 if (field_is_static (&TYPE_FIELD (arg_type, i)))
1486 continue;
1487
1488 struct value *field = value_primitive_field (arg, 0, i, arg_type);
1489 struct type *field_type = check_typedef (value_type (field));
1490
1491 if (!pass_in_v_vfp_candidate (gdbarch, regcache, info, field_type,
1492 field))
1493 return false;
1494 }
1495 return true;
1496
1497 default:
1498 return false;
1499 }
1500 }
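
/* For instance, with NSRN initially 0 a "double _Complex" argument is
   passed as two 8-byte pieces: the real part into the low bits of v0
   and the imaginary part into v1, each pass_in_v call bumping NSRN.  */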
1501
1502 /* Implement the "push_dummy_call" gdbarch method. */
1503
1504 static CORE_ADDR
1505 aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
1506 struct regcache *regcache, CORE_ADDR bp_addr,
1507 int nargs,
1508 struct value **args, CORE_ADDR sp,
1509 function_call_return_method return_method,
1510 CORE_ADDR struct_addr)
1511 {
1512 int argnum;
1513 struct aarch64_call_info info;
1514
1515 memset (&info, 0, sizeof (info));
1516
1517 /* We need to know what the type of the called function is in order
1518 to determine the number of named/anonymous arguments for the
1519 actual argument placement, and the return type in order to handle
1520 return value correctly.
1521
1522 The generic code above us views the decision of return in memory
1523 or return in registers as a two-stage process. The language
1524 handler is consulted first and may decide to return in memory (e.g. a
1525 class with a copy constructor returned by value); this will cause
1526 the generic code to allocate space AND insert an initial leading
1527 argument.
1528
1529 If the language code does not decide to pass in memory then the
1530 target code is consulted.
1531
1532 If the language code decides to pass in memory we want to move
1533 the pointer inserted as the initial argument from the argument
1534 list and into X8, the conventional AArch64 struct return pointer
1535 register. */
1536
1537 /* Set the return address. For the AArch64, the return breakpoint
1538 is always at BP_ADDR. */
1539 regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);
1540
1541 /* If we were given an initial argument for the return slot, lose it. */
1542 if (return_method == return_method_hidden_param)
1543 {
1544 args++;
1545 nargs--;
1546 }
1547
1548 /* The struct_return pointer occupies X8. */
1549 if (return_method != return_method_normal)
1550 {
1551 if (aarch64_debug)
1552 {
1553 debug_printf ("struct return in %s = 0x%s\n",
1554 gdbarch_register_name (gdbarch,
1555 AARCH64_STRUCT_RETURN_REGNUM),
1556 paddress (gdbarch, struct_addr));
1557 }
1558 regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
1559 struct_addr);
1560 }
1561
1562 for (argnum = 0; argnum < nargs; argnum++)
1563 {
1564 struct value *arg = args[argnum];
1565 struct type *arg_type, *fundamental_type;
1566 int len, elements;
1567
1568 arg_type = check_typedef (value_type (arg));
1569 len = TYPE_LENGTH (arg_type);
1570
1571 /* If arg can be passed in v registers as per the AAPCS64, then do so
1572 if there are enough spare registers. */
1573 if (aapcs_is_vfp_call_or_return_candidate (arg_type, &elements,
1574 &fundamental_type))
1575 {
1576 if (info.nsrn + elements <= 8)
1577 {
1578 /* We know that we have sufficient registers available, therefore
1579 this will never need to fall back to the stack. */
1580 if (!pass_in_v_vfp_candidate (gdbarch, regcache, &info, arg_type,
1581 arg))
1582 gdb_assert_not_reached ("Failed to push args");
1583 }
1584 else
1585 {
1586 info.nsrn = 8;
1587 pass_on_stack (&info, arg_type, arg);
1588 }
1589 continue;
1590 }
1591
1592 switch (TYPE_CODE (arg_type))
1593 {
1594 case TYPE_CODE_INT:
1595 case TYPE_CODE_BOOL:
1596 case TYPE_CODE_CHAR:
1597 case TYPE_CODE_RANGE:
1598 case TYPE_CODE_ENUM:
1599 if (len < 4)
1600 {
1601 /* Promote to 32 bit integer. */
1602 if (TYPE_UNSIGNED (arg_type))
1603 arg_type = builtin_type (gdbarch)->builtin_uint32;
1604 else
1605 arg_type = builtin_type (gdbarch)->builtin_int32;
1606 arg = value_cast (arg_type, arg);
1607 }
1608 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1609 break;
1610
1611 case TYPE_CODE_STRUCT:
1612 case TYPE_CODE_ARRAY:
1613 case TYPE_CODE_UNION:
1614 if (len > 16)
1615 {
1616 /* PCS B.7 Aggregates larger than 16 bytes are passed by
1617 invisible reference. */
1618
1619 /* Allocate aligned storage. */
1620 sp = align_down (sp - len, 16);
1621
1622 /* Write the real data into the stack. */
1623 write_memory (sp, value_contents (arg), len);
1624
1625 /* Construct the indirection. */
1626 arg_type = lookup_pointer_type (arg_type);
1627 arg = value_from_pointer (arg_type, sp);
1628 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1629 }
1630 else
1631 /* PCS C.15 / C.18 multiple values pass. */
1632 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1633 break;
1634
1635 default:
1636 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1637 break;
1638 }
1639 }
1640
1641 /* Make sure stack retains 16 byte alignment. */
1642 if (info.nsaa & 15)
1643 sp -= 16 - (info.nsaa & 15);
1644
1645 while (!VEC_empty (stack_item_t, info.si))
1646 {
1647 stack_item_t *si = VEC_last (stack_item_t, info.si);
1648
1649 sp -= si->len;
1650 if (si->data != NULL)
1651 write_memory (sp, si->data, si->len);
1652 VEC_pop (stack_item_t, info.si);
1653 }
1654
1655 VEC_free (stack_item_t, info.si);
1656
1657 /* Finally, update the SP register. */
1658 regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);
1659
1660 return sp;
1661 }
1662
1663 /* Implement the "frame_align" gdbarch method. */
1664
1665 static CORE_ADDR
1666 aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
1667 {
1668 /* Align the stack to sixteen bytes. */
1669 return sp & ~(CORE_ADDR) 15;
1670 }
1671
1672 /* Return the type for an AdvSIMD Q register. */
1673
1674 static struct type *
1675 aarch64_vnq_type (struct gdbarch *gdbarch)
1676 {
1677 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1678
1679 if (tdep->vnq_type == NULL)
1680 {
1681 struct type *t;
1682 struct type *elem;
1683
1684 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
1685 TYPE_CODE_UNION);
1686
1687 elem = builtin_type (gdbarch)->builtin_uint128;
1688 append_composite_type_field (t, "u", elem);
1689
1690 elem = builtin_type (gdbarch)->builtin_int128;
1691 append_composite_type_field (t, "s", elem);
1692
1693 tdep->vnq_type = t;
1694 }
1695
1696 return tdep->vnq_type;
1697 }
1698
1699 /* Return the type for an AdvSIMD D register. */
1700
1701 static struct type *
1702 aarch64_vnd_type (struct gdbarch *gdbarch)
1703 {
1704 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1705
1706 if (tdep->vnd_type == NULL)
1707 {
1708 struct type *t;
1709 struct type *elem;
1710
1711 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
1712 TYPE_CODE_UNION);
1713
1714 elem = builtin_type (gdbarch)->builtin_double;
1715 append_composite_type_field (t, "f", elem);
1716
1717 elem = builtin_type (gdbarch)->builtin_uint64;
1718 append_composite_type_field (t, "u", elem);
1719
1720 elem = builtin_type (gdbarch)->builtin_int64;
1721 append_composite_type_field (t, "s", elem);
1722
1723 tdep->vnd_type = t;
1724 }
1725
1726 return tdep->vnd_type;
1727 }
1728
1729 /* Return the type for an AdvSIMD S register. */
1730
1731 static struct type *
1732 aarch64_vns_type (struct gdbarch *gdbarch)
1733 {
1734 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1735
1736 if (tdep->vns_type == NULL)
1737 {
1738 struct type *t;
1739 struct type *elem;
1740
1741 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
1742 TYPE_CODE_UNION);
1743
1744 elem = builtin_type (gdbarch)->builtin_float;
1745 append_composite_type_field (t, "f", elem);
1746
1747 elem = builtin_type (gdbarch)->builtin_uint32;
1748 append_composite_type_field (t, "u", elem);
1749
1750 elem = builtin_type (gdbarch)->builtin_int32;
1751 append_composite_type_field (t, "s", elem);
1752
1753 tdep->vns_type = t;
1754 }
1755
1756 return tdep->vns_type;
1757 }
1758
1759 /* Return the type for an AdvSIMD H register. */
1760
1761 static struct type *
1762 aarch64_vnh_type (struct gdbarch *gdbarch)
1763 {
1764 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1765
1766 if (tdep->vnh_type == NULL)
1767 {
1768 struct type *t;
1769 struct type *elem;
1770
1771 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
1772 TYPE_CODE_UNION);
1773
1774 elem = builtin_type (gdbarch)->builtin_uint16;
1775 append_composite_type_field (t, "u", elem);
1776
1777 elem = builtin_type (gdbarch)->builtin_int16;
1778 append_composite_type_field (t, "s", elem);
1779
1780 tdep->vnh_type = t;
1781 }
1782
1783 return tdep->vnh_type;
1784 }
1785
1786 /* Return the type for an AdvSIMD B register. */
1787
1788 static struct type *
1789 aarch64_vnb_type (struct gdbarch *gdbarch)
1790 {
1791 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1792
1793 if (tdep->vnb_type == NULL)
1794 {
1795 struct type *t;
1796 struct type *elem;
1797
1798 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
1799 TYPE_CODE_UNION);
1800
1801 elem = builtin_type (gdbarch)->builtin_uint8;
1802 append_composite_type_field (t, "u", elem);
1803
1804 elem = builtin_type (gdbarch)->builtin_int8;
1805 append_composite_type_field (t, "s", elem);
1806
1807 tdep->vnb_type = t;
1808 }
1809
1810 return tdep->vnb_type;
1811 }
1812
1813 /* Return the type for an AdvSIMD V register. */
1814
1815 static struct type *
1816 aarch64_vnv_type (struct gdbarch *gdbarch)
1817 {
1818 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1819
1820 if (tdep->vnv_type == NULL)
1821 {
1822 struct type *t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnv",
1823 TYPE_CODE_UNION);
1824
1825 append_composite_type_field (t, "d", aarch64_vnd_type (gdbarch));
1826 append_composite_type_field (t, "s", aarch64_vns_type (gdbarch));
1827 append_composite_type_field (t, "h", aarch64_vnh_type (gdbarch));
1828 append_composite_type_field (t, "b", aarch64_vnb_type (gdbarch));
1829 append_composite_type_field (t, "q", aarch64_vnq_type (gdbarch));
1830
1831 tdep->vnv_type = t;
1832 }
1833
1834 return tdep->vnv_type;
1835 }
1836
1837 /* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
1838
1839 static int
1840 aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
1841 {
1842 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1843
1844 if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
1845 return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;
1846
1847 if (reg == AARCH64_DWARF_SP)
1848 return AARCH64_SP_REGNUM;
1849
1850 if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
1851 return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;
1852
1853 if (reg == AARCH64_DWARF_SVE_VG)
1854 return AARCH64_SVE_VG_REGNUM;
1855
1856 if (reg == AARCH64_DWARF_SVE_FFR)
1857 return AARCH64_SVE_FFR_REGNUM;
1858
1859 if (reg >= AARCH64_DWARF_SVE_P0 && reg <= AARCH64_DWARF_SVE_P0 + 15)
1860 return AARCH64_SVE_P0_REGNUM + reg - AARCH64_DWARF_SVE_P0;
1861
1862 if (reg >= AARCH64_DWARF_SVE_Z0 && reg <= AARCH64_DWARF_SVE_Z0 + 15)
1863 return AARCH64_SVE_Z0_REGNUM + reg - AARCH64_DWARF_SVE_Z0;
1864
1865 if (tdep->has_pauth ())
1866 {
1867 if (reg >= AARCH64_DWARF_PAUTH_DMASK && reg <= AARCH64_DWARF_PAUTH_CMASK)
1868 return tdep->pauth_reg_base + reg - AARCH64_DWARF_PAUTH_DMASK;
1869
1870 if (reg == AARCH64_DWARF_PAUTH_RA_STATE)
1871 return tdep->pauth_ra_state_regnum;
1872 }
1873
1874 return -1;
1875 }
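
/* In the AArch64 DWARF numbering, registers 0-30 are x0-x30, 31 is SP
   and 64-95 are v0-v31, so for example DWARF register 64 maps to
   AARCH64_V0_REGNUM here. Anything unrecognized, including the pauth
   masks on a target without pauth, yields -1.  */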
1876
1877 /* Implement the "print_insn" gdbarch method. */
1878
1879 static int
1880 aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
1881 {
1882 info->symbols = NULL;
1883 return default_print_insn (memaddr, info);
1884 }
1885
1886 /* AArch64 BRK software debug mode instruction.
1887 Note that AArch64 code is always little-endian.
1888 1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000. */
1889 constexpr gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
1890
1891 typedef BP_MANIPULATION (aarch64_default_breakpoint) aarch64_breakpoint;
1892
1893 /* Extract from an array REGS containing the (raw) register state a
1894 function return value of type TYPE, and copy that, in virtual
1895 format, into VALBUF. */
1896
1897 static void
1898 aarch64_extract_return_value (struct type *type, struct regcache *regs,
1899 gdb_byte *valbuf)
1900 {
1901 struct gdbarch *gdbarch = regs->arch ();
1902 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1903 int elements;
1904 struct type *fundamental_type;
1905
1906 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
1907 &fundamental_type))
1908 {
1909 int len = TYPE_LENGTH (fundamental_type);
1910
1911 for (int i = 0; i < elements; i++)
1912 {
1913 int regno = AARCH64_V0_REGNUM + i;
1914 /* Enough space for a full vector register. */
1915 gdb_byte buf[register_size (gdbarch, regno)];
1916 gdb_assert (len <= sizeof (buf));
1917
1918 if (aarch64_debug)
1919 {
1920 debug_printf ("read HFA or HVA return value element %d from %s\n",
1921 i + 1,
1922 gdbarch_register_name (gdbarch, regno));
1923 }
1924 regs->cooked_read (regno, buf);
1925
1926 memcpy (valbuf, buf, len);
1927 valbuf += len;
1928 }
1929 }
1930 else if (TYPE_CODE (type) == TYPE_CODE_INT
1931 || TYPE_CODE (type) == TYPE_CODE_CHAR
1932 || TYPE_CODE (type) == TYPE_CODE_BOOL
1933 || TYPE_CODE (type) == TYPE_CODE_PTR
1934 || TYPE_IS_REFERENCE (type)
1935 || TYPE_CODE (type) == TYPE_CODE_ENUM)
1936 {
1937 /* If the type is a plain integer, then the access is
1938 straightforward. Otherwise we have to play around a bit
1939 more. */
1940 int len = TYPE_LENGTH (type);
1941 int regno = AARCH64_X0_REGNUM;
1942 ULONGEST tmp;
1943
1944 while (len > 0)
1945 {
1946 /* By using store_unsigned_integer we avoid having to do
1947 anything special for small big-endian values. */
1948 regcache_cooked_read_unsigned (regs, regno++, &tmp);
1949 store_unsigned_integer (valbuf,
1950 (len > X_REGISTER_SIZE
1951 ? X_REGISTER_SIZE : len), byte_order, tmp);
1952 len -= X_REGISTER_SIZE;
1953 valbuf += X_REGISTER_SIZE;
1954 }
1955 }
1956 else
1957 {
1958 /* For a structure or union the behaviour is as if the value had
1959 been stored to word-aligned memory and then loaded into
1960 registers with 64-bit load instruction(s). */
1961 int len = TYPE_LENGTH (type);
1962 int regno = AARCH64_X0_REGNUM;
1963 bfd_byte buf[X_REGISTER_SIZE];
1964
1965 while (len > 0)
1966 {
1967 regs->cooked_read (regno++, buf);
1968 memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
1969 len -= X_REGISTER_SIZE;
1970 valbuf += X_REGISTER_SIZE;
1971 }
1972 }
1973 }
1974
1975
1976 /* Will a function return an aggregate type in memory or in a
1977 register? Return 0 if an aggregate type can be returned in a
1978 register, 1 if it must be returned in memory. */
1979
1980 static int
1981 aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
1982 {
1983 type = check_typedef (type);
1984 int elements;
1985 struct type *fundamental_type;
1986
1987 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
1988 &fundamental_type))
1989 {
1990 /* v0-v7 are used to return values; one register is allocated
1991 per member. An HFA or HVA has at most four members. */
1992 return 0;
1993 }
1994
1995 if (TYPE_LENGTH (type) > 16)
1996 {
1997 /* PCS B.6 Aggregates larger than 16 bytes are passed by
1998 invisible reference. */
1999
2000 return 1;
2001 }
2002
2003 return 0;
2004 }
2005
2006 /* Write into appropriate registers a function return value of type
2007 TYPE, given in virtual format. */
2008
2009 static void
2010 aarch64_store_return_value (struct type *type, struct regcache *regs,
2011 const gdb_byte *valbuf)
2012 {
2013 struct gdbarch *gdbarch = regs->arch ();
2014 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2015 int elements;
2016 struct type *fundamental_type;
2017
2018 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2019 &fundamental_type))
2020 {
2021 int len = TYPE_LENGTH (fundamental_type);
2022
2023 for (int i = 0; i < elements; i++)
2024 {
2025 int regno = AARCH64_V0_REGNUM + i;
2026 /* Enough space for a full vector register. */
2027 gdb_byte tmpbuf[register_size (gdbarch, regno)];
2028 gdb_assert (len <= sizeof (tmpbuf));
2029
2030 if (aarch64_debug)
2031 {
2032 debug_printf ("write HFA or HVA return value element %d to %s\n",
2033 i + 1,
2034 gdbarch_register_name (gdbarch, regno));
2035 }
2036
2037 memcpy (tmpbuf, valbuf,
2038 len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
2039 regs->cooked_write (regno, tmpbuf);
2040 valbuf += len;
2041 }
2042 }
2043 else if (TYPE_CODE (type) == TYPE_CODE_INT
2044 || TYPE_CODE (type) == TYPE_CODE_CHAR
2045 || TYPE_CODE (type) == TYPE_CODE_BOOL
2046 || TYPE_CODE (type) == TYPE_CODE_PTR
2047 || TYPE_IS_REFERENCE (type)
2048 || TYPE_CODE (type) == TYPE_CODE_ENUM)
2049 {
2050 if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
2051 {
2052 /* Values of one word or less are zero/sign-extended and
2053 returned in X0. */
2054 bfd_byte tmpbuf[X_REGISTER_SIZE];
2055 LONGEST val = unpack_long (type, valbuf);
2056
2057 store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
2058 regs->cooked_write (AARCH64_X0_REGNUM, tmpbuf);
2059 }
2060 else
2061 {
2062 /* Integral values greater than one word are stored in
2063 consecutive registers starting with X0. This will always
2064 be a multiple of the register size. */
2065 int len = TYPE_LENGTH (type);
2066 int regno = AARCH64_X0_REGNUM;
2067
2068 while (len > 0)
2069 {
2070 regs->cooked_write (regno++, valbuf);
2071 len -= X_REGISTER_SIZE;
2072 valbuf += X_REGISTER_SIZE;
2073 }
2074 }
2075 }
2076 else
2077 {
2078 /* For a structure or union the behaviour is as if the value had
2079 been stored to word-aligned memory and then loaded into
2080 registers with 64-bit load instruction(s). */
2081 int len = TYPE_LENGTH (type);
2082 int regno = AARCH64_X0_REGNUM;
2083 bfd_byte tmpbuf[X_REGISTER_SIZE];
2084
2085 while (len > 0)
2086 {
2087 memcpy (tmpbuf, valbuf,
2088 len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2089 regs->cooked_write (regno++, tmpbuf);
2090 len -= X_REGISTER_SIZE;
2091 valbuf += X_REGISTER_SIZE;
2092 }
2093 }
2094 }
2095
2096 /* Implement the "return_value" gdbarch method. */
2097
2098 static enum return_value_convention
2099 aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
2100 struct type *valtype, struct regcache *regcache,
2101 gdb_byte *readbuf, const gdb_byte *writebuf)
2102 {
2103
2104 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
2105 || TYPE_CODE (valtype) == TYPE_CODE_UNION
2106 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
2107 {
2108 if (aarch64_return_in_memory (gdbarch, valtype))
2109 {
2110 if (aarch64_debug)
2111 debug_printf ("return value in memory\n");
2112 return RETURN_VALUE_STRUCT_CONVENTION;
2113 }
2114 }
2115
2116 if (writebuf)
2117 aarch64_store_return_value (valtype, regcache, writebuf);
2118
2119 if (readbuf)
2120 aarch64_extract_return_value (valtype, regcache, readbuf);
2121
2122 if (aarch64_debug)
2123 debug_printf ("return value in registers\n");
2124
2125 return RETURN_VALUE_REGISTER_CONVENTION;
2126 }
2127
2128 /* Implement the "get_longjmp_target" gdbarch method. */
2129
2130 static int
2131 aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2132 {
2133 CORE_ADDR jb_addr;
2134 gdb_byte buf[X_REGISTER_SIZE];
2135 struct gdbarch *gdbarch = get_frame_arch (frame);
2136 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2137 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2138
2139 jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
2140
2141 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
2142 X_REGISTER_SIZE))
2143 return 0;
2144
2145 *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
2146 return 1;
2147 }
2148
2149 /* Implement the "gen_return_address" gdbarch method. */
2150
2151 static void
2152 aarch64_gen_return_address (struct gdbarch *gdbarch,
2153 struct agent_expr *ax, struct axs_value *value,
2154 CORE_ADDR scope)
2155 {
2156 value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
2157 value->kind = axs_lvalue_register;
2158 value->u.reg = AARCH64_LR_REGNUM;
2159 }
2160 \f
2161
2162 /* Return the pseudo register name corresponding to register regnum. */
2163
2164 static const char *
2165 aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
2166 {
2167 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2168
2169 static const char *const q_name[] =
2170 {
2171 "q0", "q1", "q2", "q3",
2172 "q4", "q5", "q6", "q7",
2173 "q8", "q9", "q10", "q11",
2174 "q12", "q13", "q14", "q15",
2175 "q16", "q17", "q18", "q19",
2176 "q20", "q21", "q22", "q23",
2177 "q24", "q25", "q26", "q27",
2178 "q28", "q29", "q30", "q31",
2179 };
2180
2181 static const char *const d_name[] =
2182 {
2183 "d0", "d1", "d2", "d3",
2184 "d4", "d5", "d6", "d7",
2185 "d8", "d9", "d10", "d11",
2186 "d12", "d13", "d14", "d15",
2187 "d16", "d17", "d18", "d19",
2188 "d20", "d21", "d22", "d23",
2189 "d24", "d25", "d26", "d27",
2190 "d28", "d29", "d30", "d31",
2191 };
2192
2193 static const char *const s_name[] =
2194 {
2195 "s0", "s1", "s2", "s3",
2196 "s4", "s5", "s6", "s7",
2197 "s8", "s9", "s10", "s11",
2198 "s12", "s13", "s14", "s15",
2199 "s16", "s17", "s18", "s19",
2200 "s20", "s21", "s22", "s23",
2201 "s24", "s25", "s26", "s27",
2202 "s28", "s29", "s30", "s31",
2203 };
2204
2205 static const char *const h_name[] =
2206 {
2207 "h0", "h1", "h2", "h3",
2208 "h4", "h5", "h6", "h7",
2209 "h8", "h9", "h10", "h11",
2210 "h12", "h13", "h14", "h15",
2211 "h16", "h17", "h18", "h19",
2212 "h20", "h21", "h22", "h23",
2213 "h24", "h25", "h26", "h27",
2214 "h28", "h29", "h30", "h31",
2215 };
2216
2217 static const char *const b_name[] =
2218 {
2219 "b0", "b1", "b2", "b3",
2220 "b4", "b5", "b6", "b7",
2221 "b8", "b9", "b10", "b11",
2222 "b12", "b13", "b14", "b15",
2223 "b16", "b17", "b18", "b19",
2224 "b20", "b21", "b22", "b23",
2225 "b24", "b25", "b26", "b27",
2226 "b28", "b29", "b30", "b31",
2227 };
2228
2229 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
2230
2231 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
2232 return q_name[p_regnum - AARCH64_Q0_REGNUM];
2233
2234 if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
2235 return d_name[p_regnum - AARCH64_D0_REGNUM];
2236
2237 if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
2238 return s_name[p_regnum - AARCH64_S0_REGNUM];
2239
2240 if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
2241 return h_name[p_regnum - AARCH64_H0_REGNUM];
2242
2243 if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
2244 return b_name[p_regnum - AARCH64_B0_REGNUM];
2245
2246 if (tdep->has_sve ())
2247 {
2248 static const char *const sve_v_name[] =
2249 {
2250 "v0", "v1", "v2", "v3",
2251 "v4", "v5", "v6", "v7",
2252 "v8", "v9", "v10", "v11",
2253 "v12", "v13", "v14", "v15",
2254 "v16", "v17", "v18", "v19",
2255 "v20", "v21", "v22", "v23",
2256 "v24", "v25", "v26", "v27",
2257 "v28", "v29", "v30", "v31",
2258 };
2259
2260 if (p_regnum >= AARCH64_SVE_V0_REGNUM
2261 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2262 return sve_v_name[p_regnum - AARCH64_SVE_V0_REGNUM];
2263 }
2264
2265 /* RA_STATE is used for unwinding only. Do not assign it a name - this
2266 prevents it from being read by methods such as
2267 mi_cmd_trace_frame_collected. */
2268 if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
2269 return "";
2270
2271 internal_error (__FILE__, __LINE__,
2272 _("aarch64_pseudo_register_name: bad register number %d"),
2273 p_regnum);
2274 }
2275
2276 /* Implement the "pseudo_register_type" tdesc_arch_data method. */
2277
2278 static struct type *
2279 aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
2280 {
2281 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2282
2283 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
2284
2285 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
2286 return aarch64_vnq_type (gdbarch);
2287
2288 if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
2289 return aarch64_vnd_type (gdbarch);
2290
2291 if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
2292 return aarch64_vns_type (gdbarch);
2293
2294 if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
2295 return aarch64_vnh_type (gdbarch);
2296
2297 if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
2298 return aarch64_vnb_type (gdbarch);
2299
2300 if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
2301 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2302 return aarch64_vnv_type (gdbarch);
2303
2304 if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
2305 return builtin_type (gdbarch)->builtin_uint64;
2306
2307 internal_error (__FILE__, __LINE__,
2308 _("aarch64_pseudo_register_type: bad register number %d"),
2309 p_regnum);
2310 }
2311
2312 /* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2313
2314 static int
2315 aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
2316 struct reggroup *group)
2317 {
2318 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2319
2320 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
2321
2322 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
2323 return group == all_reggroup || group == vector_reggroup;
2324 else if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
2325 return (group == all_reggroup || group == vector_reggroup
2326 || group == float_reggroup);
2327 else if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
2328 return (group == all_reggroup || group == vector_reggroup
2329 || group == float_reggroup);
2330 else if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
2331 return group == all_reggroup || group == vector_reggroup;
2332 else if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
2333 return group == all_reggroup || group == vector_reggroup;
2334 else if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
2335 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2336 return group == all_reggroup || group == vector_reggroup;
2337 /* RA_STATE is used for unwinding only. Do not assign it to any groups. */
2338 if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
2339 return 0;
2340
2341 return group == all_reggroup;
2342 }
2343
2344 /* Helper for aarch64_pseudo_read_value. */
2345
2346 static struct value *
2347 aarch64_pseudo_read_value_1 (struct gdbarch *gdbarch,
2348 readable_regcache *regcache, int regnum_offset,
2349 int regsize, struct value *result_value)
2350 {
2351 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
2352
2353 /* Enough space for a full vector register. */
2354 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2355 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2356
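/* The Qn/Dn/Sn/Hn/Bn (and SVE Vn) pseudo registers all alias the
   low bytes of the corresponding V or Z raw register, so a read is
   simply the raw register truncated to REGSIZE bytes.  */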
2357 if (regcache->raw_read (v_regnum, reg_buf) != REG_VALID)
2358 mark_value_bytes_unavailable (result_value, 0,
2359 TYPE_LENGTH (value_type (result_value)));
2360 else
2361 memcpy (value_contents_raw (result_value), reg_buf, regsize);
2362
2363 return result_value;
2364 }
2365
2366 /* Implement the "pseudo_register_read_value" gdbarch method. */
2367
2368 static struct value *
2369 aarch64_pseudo_read_value (struct gdbarch *gdbarch, readable_regcache *regcache,
2370 int regnum)
2371 {
2372 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2373 struct value *result_value = allocate_value (register_type (gdbarch, regnum));
2374
2375 VALUE_LVAL (result_value) = lval_register;
2376 VALUE_REGNUM (result_value) = regnum;
2377
2378 regnum -= gdbarch_num_regs (gdbarch);
2379
2380 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2381 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2382 regnum - AARCH64_Q0_REGNUM,
2383 Q_REGISTER_SIZE, result_value);
2384
2385 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2386 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2387 regnum - AARCH64_D0_REGNUM,
2388 D_REGISTER_SIZE, result_value);
2389
2390 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2391 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2392 regnum - AARCH64_S0_REGNUM,
2393 S_REGISTER_SIZE, result_value);
2394
2395 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2396 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2397 regnum - AARCH64_H0_REGNUM,
2398 H_REGISTER_SIZE, result_value);
2399
2400 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2401 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2402 regnum - AARCH64_B0_REGNUM,
2403 B_REGISTER_SIZE, result_value);
2404
2405 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2406 && regnum < AARCH64_SVE_V0_REGNUM + 32)
2407 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2408 regnum - AARCH64_SVE_V0_REGNUM,
2409 V_REGISTER_SIZE, result_value);
2410
2411 gdb_assert_not_reached ("regnum out of bounds");
2412 }
2413
2414 /* Helper for aarch64_pseudo_write. */
2415
2416 static void
2417 aarch64_pseudo_write_1 (struct gdbarch *gdbarch, struct regcache *regcache,
2418 int regnum_offset, int regsize, const gdb_byte *buf)
2419 {
2420 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
2421
2422 /* Enough space for a full vector register. */
2423 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2424 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2425
2426 /* Ensure the register buffer is zero.  We want GDB writes of the
2427 various 'scalar' pseudo registers to behave like architectural
2428 writes: register-width bytes are written and the remainder is
2429 set to zero. */
2430 memset (reg_buf, 0, register_size (gdbarch, AARCH64_V0_REGNUM));
2431
2432 memcpy (reg_buf, buf, regsize);
2433 regcache->raw_write (v_regnum, reg_buf);
2434 }
2435
2436 /* Implement the "pseudo_register_write" gdbarch method. */
2437
2438 static void
2439 aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2440 int regnum, const gdb_byte *buf)
2441 {
2442 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2443 regnum -= gdbarch_num_regs (gdbarch);
2444
2445 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2446 return aarch64_pseudo_write_1 (gdbarch, regcache,
2447 regnum - AARCH64_Q0_REGNUM, Q_REGISTER_SIZE,
2448 buf);
2449
2450 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2451 return aarch64_pseudo_write_1 (gdbarch, regcache,
2452 regnum - AARCH64_D0_REGNUM, D_REGISTER_SIZE,
2453 buf);
2454
2455 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2456 return aarch64_pseudo_write_1 (gdbarch, regcache,
2457 regnum - AARCH64_S0_REGNUM, S_REGISTER_SIZE,
2458 buf);
2459
2460 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2461 return aarch64_pseudo_write_1 (gdbarch, regcache,
2462 regnum - AARCH64_H0_REGNUM, H_REGISTER_SIZE,
2463 buf);
2464
2465 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2466 return aarch64_pseudo_write_1 (gdbarch, regcache,
2467 regnum - AARCH64_B0_REGNUM, B_REGISTER_SIZE,
2468 buf);
2469
2470 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2471 && regnum < AARCH64_SVE_V0_REGNUM + 32)
2472 return aarch64_pseudo_write_1 (gdbarch, regcache,
2473 regnum - AARCH64_SVE_V0_REGNUM,
2474 V_REGISTER_SIZE, buf);
2475
2476 gdb_assert_not_reached ("regnum out of bounds");
2477 }
2478
2479 /* Callback function for user_reg_add. */
2480
2481 static struct value *
2482 value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
2483 {
2484 const int *reg_p = (const int *) baton;
2485
2486 return value_of_register (*reg_p, frame);
2487 }
2488 \f
2489
2490 /* Implement the "software_single_step" gdbarch method, needed to
2491 single step through atomic sequences on AArch64. */
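/* A typical atomic sequence emitted by a compiler looks roughly like
   this (illustrative only, not taken from any particular program):

     loop: LDAXR w1, [x0]       ; Load Exclusive (bit 22 set).
           ADD   w1, w1, #1
           STLXR w2, w1, [x0]   ; Store Exclusive (bit 22 clear).
           CBNZ  w2, loop       ; Retry if exclusivity was lost.

   Trapping inside the sequence would clear the exclusive monitor and
   the sequence could never complete, so breakpoints are instead
   placed past the Store Exclusive and at the target of any
   conditional branch within the sequence.  */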
2492
2493 static std::vector<CORE_ADDR>
2494 aarch64_software_single_step (struct regcache *regcache)
2495 {
2496 struct gdbarch *gdbarch = regcache->arch ();
2497 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2498 const int insn_size = 4;
2499 const int atomic_sequence_length = 16; /* Instruction sequence length. */
2500 CORE_ADDR pc = regcache_read_pc (regcache);
2501 CORE_ADDR breaks[2] = { CORE_ADDR_MAX, CORE_ADDR_MAX };
2502 CORE_ADDR loc = pc;
2503 CORE_ADDR closing_insn = 0;
2504 uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
2505 byte_order_for_code);
2506 int index;
2507 int insn_count;
2508 int bc_insn_count = 0; /* Conditional branch instruction count. */
2509 int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed). */
2510 aarch64_inst inst;
2511
2512 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2513 return {};
2514
2515 /* Look for a Load Exclusive instruction which begins the sequence. */
2516 if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
2517 return {};
2518
2519 for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
2520 {
2521 loc += insn_size;
2522 insn = read_memory_unsigned_integer (loc, insn_size,
2523 byte_order_for_code);
2524
2525 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2526 return {};
2527 /* Check if the instruction is a conditional branch. */
2528 if (inst.opcode->iclass == condbranch)
2529 {
2530 gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);
2531
2532 if (bc_insn_count >= 1)
2533 return {};
2534
2535 /* It is, so we'll try to set a breakpoint at the destination. */
2536 breaks[1] = loc + inst.operands[0].imm.value;
2537
2538 bc_insn_count++;
2539 last_breakpoint++;
2540 }
2541
2542 /* Look for the Store Exclusive which closes the atomic sequence. */
2543 if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
2544 {
2545 closing_insn = loc;
2546 break;
2547 }
2548 }
2549
2550 /* We didn't find a closing Store Exclusive instruction; fall back. */
2551 if (!closing_insn)
2552 return {};
2553
2554 /* Insert breakpoint after the end of the atomic sequence. */
2555 breaks[0] = loc + insn_size;
2556
2557 /* Check for duplicated breakpoints, and also check that the second
2558 breakpoint is not within the atomic sequence. */
2559 if (last_breakpoint
2560 && (breaks[1] == breaks[0]
2561 || (breaks[1] >= pc && breaks[1] <= closing_insn)))
2562 last_breakpoint = 0;
2563
2564 std::vector<CORE_ADDR> next_pcs;
2565
2566 /* Insert the breakpoint at the end of the sequence, and one at the
2567 destination of the conditional branch, if it exists. */
2568 for (index = 0; index <= last_breakpoint; index++)
2569 next_pcs.push_back (breaks[index]);
2570
2571 return next_pcs;
2572 }
2573
2574 struct aarch64_displaced_step_closure : public displaced_step_closure
2575 {
2576 /* True when a conditional instruction, such as B.COND, TBZ or CBZ,
2577 is being displaced stepped. */
2578 int cond = 0;
2579
2580 /* PC adjustment offset after displaced stepping. */
2581 int32_t pc_adjust = 0;
2582 };
2583
2584 /* Data when visiting instructions for displaced stepping. */
2585
2586 struct aarch64_displaced_step_data
2587 {
2588 struct aarch64_insn_data base;
2589
2590 /* The address at which the instruction will be executed. */
2591 CORE_ADDR new_addr;
2592 /* Buffer of instructions to be copied to NEW_ADDR to execute. */
2593 uint32_t insn_buf[DISPLACED_MODIFIED_INSNS];
2594 /* Number of instructions in INSN_BUF. */
2595 unsigned insn_count;
2596 /* Registers when doing displaced stepping. */
2597 struct regcache *regs;
2598
2599 aarch64_displaced_step_closure *dsc;
2600 };
2601
2602 /* Implementation of aarch64_insn_visitor method "b". */
2603
2604 static void
2605 aarch64_displaced_step_b (const int is_bl, const int32_t offset,
2606 struct aarch64_insn_data *data)
2607 {
2608 struct aarch64_displaced_step_data *dsd
2609 = (struct aarch64_displaced_step_data *) data;
2610 int64_t new_offset = data->insn_addr - dsd->new_addr + offset;
2611
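/* B and BL encode a signed 26-bit word offset, i.e. a 28-bit byte
   offset, hence the range check below.  */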
2612 if (can_encode_int32 (new_offset, 28))
2613 {
2614 /* Emit B rather than BL, because executing BL on a new address
2615 will get the wrong address into LR. In order to avoid this,
2616 we emit B, and update LR if the instruction is BL. */
2617 emit_b (dsd->insn_buf, 0, new_offset);
2618 dsd->insn_count++;
2619 }
2620 else
2621 {
2622 /* Write NOP. */
2623 emit_nop (dsd->insn_buf);
2624 dsd->insn_count++;
2625 dsd->dsc->pc_adjust = offset;
2626 }
2627
2628 if (is_bl)
2629 {
2630 /* Update LR. */
2631 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
2632 data->insn_addr + 4);
2633 }
2634 }
2635
2636 /* Implementation of aarch64_insn_visitor method "b_cond". */
2637
2638 static void
2639 aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
2640 struct aarch64_insn_data *data)
2641 {
2642 struct aarch64_displaced_step_data *dsd
2643 = (struct aarch64_displaced_step_data *) data;
2644
2645 /* GDB has to fix up the PC after displaced stepping this instruction
2646 differently depending on whether the condition is true or false.
2647 Instead of checking COND against the condition flags, we emit
2648 the following instructions, and GDB can tell how to fix up the PC
2649 from the resulting PC value.
2650
2651 B.COND TAKEN ; If cond is true, then jump to TAKEN.
2652 INSN1 ;
2653 TAKEN:
2654 INSN2
2655 */
2656
2657 emit_bcond (dsd->insn_buf, cond, 8);
2658 dsd->dsc->cond = 1;
2659 dsd->dsc->pc_adjust = offset;
2660 dsd->insn_count = 1;
2661 }
2662
2663 /* Dynamically allocate a new register. If we know the register
2664 statically, we should make it a global constant instead of using
2665 this helper function. */
2666
2667 static struct aarch64_register
2668 aarch64_register (unsigned num, int is64)
2669 {
2670 return (struct aarch64_register) { num, is64 };
2671 }
2672
2673 /* Implementation of aarch64_insn_visitor method "cb". */
2674
2675 static void
2676 aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
2677 const unsigned rn, int is64,
2678 struct aarch64_insn_data *data)
2679 {
2680 struct aarch64_displaced_step_data *dsd
2681 = (struct aarch64_displaced_step_data *) data;
2682
2683 /* The offset is out of range for a compare and branch
2684 instruction. We can use the following instructions instead:
2685
2686 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
2687 INSN1 ;
2688 TAKEN:
2689 INSN2
2690 */
2691 emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
2692 dsd->insn_count = 1;
2693 dsd->dsc->cond = 1;
2694 dsd->dsc->pc_adjust = offset;
2695 }
2696
2697 /* Implementation of aarch64_insn_visitor method "tb". */
2698
2699 static void
2700 aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
2701 const unsigned rt, unsigned bit,
2702 struct aarch64_insn_data *data)
2703 {
2704 struct aarch64_displaced_step_data *dsd
2705 = (struct aarch64_displaced_step_data *) data;
2706
2707 /* The offset is out of range for a test bit and branch
2708 instruction.  We can use the following instructions instead:
2709
2710 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
2711 INSN1 ;
2712 TAKEN:
2713 INSN2
2714
2715 */
2716 emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
2717 dsd->insn_count = 1;
2718 dsd->dsc->cond = 1;
2719 dsd->dsc->pc_adjust = offset;
2720 }
2721
2722 /* Implementation of aarch64_insn_visitor method "adr". */
2723
2724 static void
2725 aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
2726 const int is_adrp, struct aarch64_insn_data *data)
2727 {
2728 struct aarch64_displaced_step_data *dsd
2729 = (struct aarch64_displaced_step_data *) data;
2730 /* We know exactly the address the ADR{P,} instruction will compute.
2731 We can just write it to the destination register. */
2732 CORE_ADDR address = data->insn_addr + offset;
2733
2734 if (is_adrp)
2735 {
2736 /* Clear the lower 12 bits of the offset to get the 4K page. */
2737 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2738 address & ~0xfff);
2739 }
2740 else
2741 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2742 address);
2743
2744 dsd->dsc->pc_adjust = 4;
2745 emit_nop (dsd->insn_buf);
2746 dsd->insn_count = 1;
2747 }
2748
2749 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
2750
2751 static void
2752 aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
2753 const unsigned rt, const int is64,
2754 struct aarch64_insn_data *data)
2755 {
2756 struct aarch64_displaced_step_data *dsd
2757 = (struct aarch64_displaced_step_data *) data;
2758 CORE_ADDR address = data->insn_addr + offset;
2759 struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };
2760
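/* A PC-relative literal load cannot run from the scratch pad, so
   materialize the literal's absolute address in Rt and replace the
   load with an LDR{SW} Rt, [Rt, #0].  */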
2761 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
2762 address);
2763
2764 if (is_sw)
2765 dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
2766 aarch64_register (rt, 1), zero);
2767 else
2768 dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
2769 aarch64_register (rt, 1), zero);
2770
2771 dsd->dsc->pc_adjust = 4;
2772 }
2773
2774 /* Implementation of aarch64_insn_visitor method "others". */
2775
2776 static void
2777 aarch64_displaced_step_others (const uint32_t insn,
2778 struct aarch64_insn_data *data)
2779 {
2780 struct aarch64_displaced_step_data *dsd
2781 = (struct aarch64_displaced_step_data *) data;
2782
2783 aarch64_emit_insn (dsd->insn_buf, insn);
2784 dsd->insn_count = 1;
2785
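/* 0xd65f0000 is RET; the mask ignores the Rn field (bits 5-9).
   RET loads the PC directly from Xn, so the resulting PC is already
   correct and no adjustment is wanted.  */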
2786 if ((insn & 0xfffffc1f) == 0xd65f0000)
2787 {
2788 /* RET */
2789 dsd->dsc->pc_adjust = 0;
2790 }
2791 else
2792 dsd->dsc->pc_adjust = 4;
2793 }
2794
2795 static const struct aarch64_insn_visitor visitor =
2796 {
2797 aarch64_displaced_step_b,
2798 aarch64_displaced_step_b_cond,
2799 aarch64_displaced_step_cb,
2800 aarch64_displaced_step_tb,
2801 aarch64_displaced_step_adr,
2802 aarch64_displaced_step_ldr_literal,
2803 aarch64_displaced_step_others,
2804 };
2805
2806 /* Implement the "displaced_step_copy_insn" gdbarch method. */
2807
2808 struct displaced_step_closure *
2809 aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
2810 CORE_ADDR from, CORE_ADDR to,
2811 struct regcache *regs)
2812 {
2813 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2814 uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
2815 struct aarch64_displaced_step_data dsd;
2816 aarch64_inst inst;
2817
2818 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2819 return NULL;
2820
2821 /* Look for a Load Exclusive instruction which begins the sequence. */
2822 if (inst.opcode->iclass == ldstexcl && bit (insn, 22))
2823 {
2824 /* We can't displaced step atomic sequences. */
2825 return NULL;
2826 }
2827
2828 std::unique_ptr<aarch64_displaced_step_closure> dsc
2829 (new aarch64_displaced_step_closure);
2830 dsd.base.insn_addr = from;
2831 dsd.new_addr = to;
2832 dsd.regs = regs;
2833 dsd.dsc = dsc.get ();
2834 dsd.insn_count = 0;
2835 aarch64_relocate_instruction (insn, &visitor,
2836 (struct aarch64_insn_data *) &dsd);
2837 gdb_assert (dsd.insn_count <= DISPLACED_MODIFIED_INSNS);
2838
2839 if (dsd.insn_count != 0)
2840 {
2841 int i;
2842
2843 /* Instruction can be relocated to scratch pad. Copy
2844 relocated instruction(s) there. */
2845 for (i = 0; i < dsd.insn_count; i++)
2846 {
2847 if (debug_displaced)
2848 {
2849 debug_printf ("displaced: writing insn ");
2850 debug_printf ("%.8x", dsd.insn_buf[i]);
2851 debug_printf (" at %s\n", paddress (gdbarch, to + i * 4));
2852 }
2853 write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
2854 (ULONGEST) dsd.insn_buf[i]);
2855 }
2856 }
2857 else
2858 {
2859 dsc = NULL;
2860 }
2861
2862 return dsc.release ();
2863 }
2864
2865 /* Implement the "displaced_step_fixup" gdbarch method. */
2866
2867 void
2868 aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
2869 struct displaced_step_closure *dsc_,
2870 CORE_ADDR from, CORE_ADDR to,
2871 struct regcache *regs)
2872 {
2873 aarch64_displaced_step_closure *dsc = (aarch64_displaced_step_closure *) dsc_;
2874
2875 if (dsc->cond)
2876 {
2877 ULONGEST pc;
2878
2879 regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc);
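/* The scratch pad at TO holds the emitted conditional branch
   (B.COND/CBZ/TBZ) targeting TO + 8, followed by the next slot at
   TO + 4; so ending up at TO + 8 means the condition held, and
   TO + 4 means it did not (see the visitor methods above).  */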
2880 if (pc - to == 8)
2881 {
2882 /* Condition is true. */
2883 }
2884 else if (pc - to == 4)
2885 {
2886 /* Condition is false. */
2887 dsc->pc_adjust = 4;
2888 }
2889 else
2890 gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
2891 }
2892
2893 if (dsc->pc_adjust != 0)
2894 {
2895 if (debug_displaced)
2896 {
2897 debug_printf ("displaced: fixup: set PC to %s:%d\n",
2898 paddress (gdbarch, from), dsc->pc_adjust);
2899 }
2900 regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
2901 from + dsc->pc_adjust);
2902 }
2903 }
2904
2905 /* Implement the "displaced_step_hw_singlestep" gdbarch method. */
2906
2907 int
2908 aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch,
2909 struct displaced_step_closure *closure)
2910 {
2911 return 1;
2912 }
2913
2914 /* Get the correct target description for the given VQ value.
2915 If VQ is zero then it is assumed SVE is not supported.
2916 (It is not possible to set VQ to zero on an SVE system). */
2917
2918 const target_desc *
2919 aarch64_read_description (uint64_t vq, bool pauth_p)
2920 {
2921 if (vq > AARCH64_MAX_SVE_VQ)
2922 error (_("VQ is %" PRIu64 ", maximum supported value is %d"), vq,
2923 AARCH64_MAX_SVE_VQ);
2924
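/* Target descriptions are cached per (VQ, pauth) pair, so each
   variant is only created once per session.  */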
2925 struct target_desc *tdesc = tdesc_aarch64_list[vq][pauth_p];
2926
2927 if (tdesc == NULL)
2928 {
2929 tdesc = aarch64_create_target_description (vq, pauth_p);
2930 tdesc_aarch64_list[vq][pauth_p] = tdesc;
2931 }
2932
2933 return tdesc;
2934 }
2935
2936 /* Return the VQ used when creating the target description TDESC. */
2937
2938 static uint64_t
2939 aarch64_get_tdesc_vq (const struct target_desc *tdesc)
2940 {
2941 const struct tdesc_feature *feature_sve;
2942
2943 if (!tdesc_has_registers (tdesc))
2944 return 0;
2945
2946 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
2947
2948 if (feature_sve == nullptr)
2949 return 0;
2950
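/* The first SVE register is a Z register; its bitsize gives the
   vector length VL in bytes, and VQ is VL expressed in 128-bit
   quadwords, i.e. VL / 16.  */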
2951 uint64_t vl = tdesc_register_bitsize (feature_sve,
2952 aarch64_sve_register_names[0]) / 8;
2953 return sve_vq_from_vl (vl);
2954 }
2955
2956 /* Add all the expected register sets into GDBARCH. */
2957
2958 static void
2959 aarch64_add_reggroups (struct gdbarch *gdbarch)
2960 {
2961 reggroup_add (gdbarch, general_reggroup);
2962 reggroup_add (gdbarch, float_reggroup);
2963 reggroup_add (gdbarch, system_reggroup);
2964 reggroup_add (gdbarch, vector_reggroup);
2965 reggroup_add (gdbarch, all_reggroup);
2966 reggroup_add (gdbarch, save_reggroup);
2967 reggroup_add (gdbarch, restore_reggroup);
2968 }
2969
2970 /* Implement the "cannot_store_register" gdbarch method. */
2971
2972 static int
2973 aarch64_cannot_store_register (struct gdbarch *gdbarch, int regnum)
2974 {
2975 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2976
2977 if (!tdep->has_pauth ())
2978 return 0;
2979
2980 /* Pointer authentication registers are read-only. */
2981 return (regnum == AARCH64_PAUTH_DMASK_REGNUM (tdep->pauth_reg_base)
2982 || regnum == AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base));
2983 }
2984
2985 /* Initialize the current architecture based on INFO. If possible,
2986 re-use an architecture from ARCHES, which is a list of
2987 architectures already created during this debugging session.
2988
2989 Called e.g. at program startup, when reading a core file, and when
2990 reading a binary file. */
2991
2992 static struct gdbarch *
2993 aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
2994 {
2995 struct gdbarch_tdep *tdep;
2996 struct gdbarch *gdbarch;
2997 struct gdbarch_list *best_arch;
2998 struct tdesc_arch_data *tdesc_data = NULL;
2999 const struct target_desc *tdesc = info.target_desc;
3000 int i;
3001 int valid_p = 1;
3002 const struct tdesc_feature *feature_core;
3003 const struct tdesc_feature *feature_fpu;
3004 const struct tdesc_feature *feature_sve;
3005 const struct tdesc_feature *feature_pauth;
3006 int num_regs = 0;
3007 int num_pseudo_regs = 0;
3008 int first_pauth_regnum = -1;
3009 int pauth_ra_state_offset = -1;
3010
3011 /* Ensure we always have a target description. */
3012 if (!tdesc_has_registers (tdesc))
3013 tdesc = aarch64_read_description (0, false);
3014 gdb_assert (tdesc);
3015
3016 feature_core = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");
3017 feature_fpu = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
3018 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
3019 feature_pauth = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.pauth");
3020
3021 if (feature_core == NULL)
3022 return NULL;
3023
3024 tdesc_data = tdesc_data_alloc ();
3025
3026 /* Validate the description provides the mandatory core R registers
3027 and allocate their numbers. */
3028 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
3029 valid_p &= tdesc_numbered_register (feature_core, tdesc_data,
3030 AARCH64_X0_REGNUM + i,
3031 aarch64_r_register_names[i]);
3032
3033 num_regs = AARCH64_X0_REGNUM + i;
3034
3035 /* Add the V registers. */
3036 if (feature_fpu != NULL)
3037 {
3038 if (feature_sve != NULL)
3039 error (_("Program contains both fpu and SVE features."));
3040
3041 /* Validate the description provides the mandatory V registers
3042 and allocate their numbers. */
3043 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
3044 valid_p &= tdesc_numbered_register (feature_fpu, tdesc_data,
3045 AARCH64_V0_REGNUM + i,
3046 aarch64_v_register_names[i]);
3047
3048 num_regs = AARCH64_V0_REGNUM + i;
3049 }
3050
3051 /* Add the SVE registers. */
3052 if (feature_sve != NULL)
3053 {
3054 /* Validate the description provides the mandatory SVE registers
3055 and allocate their numbers. */
3056 for (i = 0; i < ARRAY_SIZE (aarch64_sve_register_names); i++)
3057 valid_p &= tdesc_numbered_register (feature_sve, tdesc_data,
3058 AARCH64_SVE_Z0_REGNUM + i,
3059 aarch64_sve_register_names[i]);
3060
3061 num_regs = AARCH64_SVE_Z0_REGNUM + i;
3062 num_pseudo_regs += 32; /* add the Vn register pseudos. */
3063 }
3064
3065 if (feature_fpu != NULL || feature_sve != NULL)
3066 {
3067 num_pseudo_regs += 32; /* add the Qn scalar register pseudos */
3068 num_pseudo_regs += 32; /* add the Dn scalar register pseudos */
3069 num_pseudo_regs += 32; /* add the Sn scalar register pseudos */
3070 num_pseudo_regs += 32; /* add the Hn scalar register pseudos */
3071 num_pseudo_regs += 32; /* add the Bn scalar register pseudos */
3072 }
3073
3074 /* Add the pauth registers. */
3075 if (feature_pauth != NULL)
3076 {
3077 first_pauth_regnum = num_regs;
3078 pauth_ra_state_offset = num_pseudo_regs;
3079 /* Validate the descriptor provides the mandatory PAUTH registers and
3080 allocate their numbers. */
3081 for (i = 0; i < ARRAY_SIZE (aarch64_pauth_register_names); i++)
3082 valid_p &= tdesc_numbered_register (feature_pauth, tdesc_data,
3083 first_pauth_regnum + i,
3084 aarch64_pauth_register_names[i]);
3085
3086 num_regs += i;
3087 num_pseudo_regs += 1; /* Count RA_STATE pseudo register. */
3088 }
3089
3090 if (!valid_p)
3091 {
3092 tdesc_data_cleanup (tdesc_data);
3093 return NULL;
3094 }
3095
3096 /* AArch64 code is always little-endian. */
3097 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
3098
3099 /* If there is already a candidate, use it. */
3100 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
3101 best_arch != NULL;
3102 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
3103 {
3104 /* Found a match. */
3105 break;
3106 }
3107
3108 if (best_arch != NULL)
3109 {
3110 if (tdesc_data != NULL)
3111 tdesc_data_cleanup (tdesc_data);
3112 return best_arch->gdbarch;
3113 }
3114
3115 tdep = XCNEW (struct gdbarch_tdep);
3116 gdbarch = gdbarch_alloc (&info, tdep);
3117
3118 /* This should be low enough for everything. */
3119 tdep->lowest_pc = 0x20;
3120 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
3121 tdep->jb_elt_size = 8;
3122 tdep->vq = aarch64_get_tdesc_vq (tdesc);
3123 tdep->pauth_reg_base = first_pauth_regnum;
3124 tdep->pauth_ra_state_regnum = (feature_pauth == NULL) ? -1
3125 : pauth_ra_state_offset + num_regs;
3126
3127
3128 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
3129 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
3130
3131 /* Advance PC across function entry code. */
3132 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
3133
3134 /* The stack grows downward. */
3135 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
3136
3137 /* Breakpoint manipulation. */
3138 set_gdbarch_breakpoint_kind_from_pc (gdbarch,
3139 aarch64_breakpoint::kind_from_pc);
3140 set_gdbarch_sw_breakpoint_from_kind (gdbarch,
3141 aarch64_breakpoint::bp_from_kind);
3142 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
3143 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
3144
3145 /* Information about registers, etc. */
3146 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
3147 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
3148 set_gdbarch_num_regs (gdbarch, num_regs);
3149
3150 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
3151 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
3152 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
3153 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
3154 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
3155 set_tdesc_pseudo_register_reggroup_p (gdbarch,
3156 aarch64_pseudo_register_reggroup_p);
3157 set_gdbarch_cannot_store_register (gdbarch, aarch64_cannot_store_register);
3158
3159 /* ABI */
3160 set_gdbarch_short_bit (gdbarch, 16);
3161 set_gdbarch_int_bit (gdbarch, 32);
3162 set_gdbarch_float_bit (gdbarch, 32);
3163 set_gdbarch_double_bit (gdbarch, 64);
3164 set_gdbarch_long_double_bit (gdbarch, 128);
3165 set_gdbarch_long_bit (gdbarch, 64);
3166 set_gdbarch_long_long_bit (gdbarch, 64);
3167 set_gdbarch_ptr_bit (gdbarch, 64);
3168 set_gdbarch_char_signed (gdbarch, 0);
3169 set_gdbarch_wchar_signed (gdbarch, 0);
3170 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
3171 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
3172 set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);
3173
3174 /* Internal <-> external register number maps. */
3175 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
3176
3177 /* Returning results. */
3178 set_gdbarch_return_value (gdbarch, aarch64_return_value);
3179
3180 /* Disassembly. */
3181 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
3182
3183 /* Virtual tables. */
3184 set_gdbarch_vbit_in_delta (gdbarch, 1);
3185
3186 /* Register architecture. */
3187 aarch64_add_reggroups (gdbarch);
3188
3189 /* Hook in the ABI-specific overrides, if they have been registered. */
3190 info.target_desc = tdesc;
3191 info.tdesc_data = tdesc_data;
3192 gdbarch_init_osabi (info, gdbarch);
3193
3194 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
3195
3196 /* Add some default predicates. */
3197 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
3198 dwarf2_append_unwinders (gdbarch);
3199 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
3200
3201 frame_base_set_default (gdbarch, &aarch64_normal_base);
3202
3203 /* Now we have tuned the configuration, set a few final things,
3204 based on what the OS ABI has told us. */
3205
3206 if (tdep->jb_pc >= 0)
3207 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
3208
3209 set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);
3210
3211 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
3212
3213 /* Add standard register aliases. */
3214 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
3215 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
3216 value_of_aarch64_user_reg,
3217 &aarch64_register_aliases[i].regnum);
3218
3219 register_aarch64_ravenscar_ops (gdbarch);
3220
3221 return gdbarch;
3222 }
3223
3224 static void
3225 aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
3226 {
3227 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3228
3229 if (tdep == NULL)
3230 return;
3231
3232 fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
3233 paddress (gdbarch, tdep->lowest_pc));
3234 }
3235
3236 #if GDB_SELF_TEST
3237 namespace selftests
3238 {
3239 static void aarch64_process_record_test (void);
3240 }
3241 #endif
3242
3243 void
3244 _initialize_aarch64_tdep (void)
3245 {
3246 gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
3247 aarch64_dump_tdep);
3248
3249 /* Debug this file's internals. */
3250 add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
3251 Set AArch64 debugging."), _("\
3252 Show AArch64 debugging."), _("\
3253 When on, AArch64 specific debugging is enabled."),
3254 NULL,
3255 show_aarch64_debug,
3256 &setdebuglist, &showdebuglist);
3257
3258 #if GDB_SELF_TEST
3259 selftests::register_test ("aarch64-analyze-prologue",
3260 selftests::aarch64_analyze_prologue_test);
3261 selftests::register_test ("aarch64-process-record",
3262 selftests::aarch64_process_record_test);
3263 selftests::record_xml_tdesc ("aarch64.xml",
3264 aarch64_create_target_description (0, false));
3265 #endif
3266 }
3267
3268 /* AArch64 process record-replay related structures, defines etc. */
3269
3270 #define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
3271 do \
3272 { \
3273 unsigned int reg_len = LENGTH; \
3274 if (reg_len) \
3275 { \
3276 REGS = XNEWVEC (uint32_t, reg_len); \
3277 memcpy (&REGS[0], &RECORD_BUF[0], sizeof (uint32_t) * LENGTH); \
3278 } \
3279 } \
3280 while (0)
3281
3282 #define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
3283 do \
3284 { \
3285 unsigned int mem_len = LENGTH; \
3286 if (mem_len) \
3287 { \
3288 MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
3289 memcpy (&MEMS->len, &RECORD_BUF[0], \
3290 sizeof (struct aarch64_mem_r) * LENGTH); \
3291 } \
3292 } \
3293 while (0)
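/* Both macros copy LENGTH entries from a handler's local RECORD_BUF
   into freshly XNEWVEC'd storage hung off the insn record, for later
   consumption by the record-full machinery.  */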
3294
3295 /* AArch64 record/replay structures and enumerations. */
3296
3297 struct aarch64_mem_r
3298 {
3299 uint64_t len; /* Record length. */
3300 uint64_t addr; /* Memory address. */
3301 };
3302
3303 enum aarch64_record_result
3304 {
3305 AARCH64_RECORD_SUCCESS,
3306 AARCH64_RECORD_UNSUPPORTED,
3307 AARCH64_RECORD_UNKNOWN
3308 };
3309
3310 typedef struct insn_decode_record_t
3311 {
3312 struct gdbarch *gdbarch;
3313 struct regcache *regcache;
3314 CORE_ADDR this_addr; /* Address of insn to be recorded. */
3315 uint32_t aarch64_insn; /* Insn to be recorded. */
3316 uint32_t mem_rec_count; /* Count of memory records. */
3317 uint32_t reg_rec_count; /* Count of register records. */
3318 uint32_t *aarch64_regs; /* Registers to be recorded. */
3319 struct aarch64_mem_r *aarch64_mems; /* Memory locations to be recorded. */
3320 } insn_decode_record;
3321
3322 /* Record handler for data processing - register instructions. */
3323
3324 static unsigned int
3325 aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
3326 {
3327 uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
3328 uint32_t record_buf[4];
3329
3330 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3331 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3332 insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);
3333
3334 if (!bit (aarch64_insn_r->aarch64_insn, 28))
3335 {
3336 uint8_t setflags;
3337
3338 /* Logical (shifted register). */
3339 if (insn_bits24_27 == 0x0a)
3340 setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
3341 /* Add/subtract. */
3342 else if (insn_bits24_27 == 0x0b)
3343 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3344 else
3345 return AARCH64_RECORD_UNKNOWN;
3346
3347 record_buf[0] = reg_rd;
3348 aarch64_insn_r->reg_rec_count = 1;
3349 if (setflags)
3350 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3351 }
3352 else
3353 {
3354 if (insn_bits24_27 == 0x0b)
3355 {
3356 /* Data-processing (3 source). */
3357 record_buf[0] = reg_rd;
3358 aarch64_insn_r->reg_rec_count = 1;
3359 }
3360 else if (insn_bits24_27 == 0x0a)
3361 {
3362 if (insn_bits21_23 == 0x00)
3363 {
3364 /* Add/subtract (with carry). */
3365 record_buf[0] = reg_rd;
3366 aarch64_insn_r->reg_rec_count = 1;
3367 if (bit (aarch64_insn_r->aarch64_insn, 29))
3368 {
3369 record_buf[1] = AARCH64_CPSR_REGNUM;
3370 aarch64_insn_r->reg_rec_count = 2;
3371 }
3372 }
3373 else if (insn_bits21_23 == 0x02)
3374 {
3375 /* Conditional compare (register) and conditional compare
3376 (immediate) instructions. */
3377 record_buf[0] = AARCH64_CPSR_REGNUM;
3378 aarch64_insn_r->reg_rec_count = 1;
3379 }
3380 else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
3381 {
3382 /* Conditional select. */
3383 /* Data-processing (2 source). */
3384 /* Data-processing (1 source). */
3385 record_buf[0] = reg_rd;
3386 aarch64_insn_r->reg_rec_count = 1;
3387 }
3388 else
3389 return AARCH64_RECORD_UNKNOWN;
3390 }
3391 }
3392
3393 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3394 record_buf);
3395 return AARCH64_RECORD_SUCCESS;
3396 }
3397
3398 /* Record handler for data processing - immediate instructions. */
3399
3400 static unsigned int
3401 aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
3402 {
3403 uint8_t reg_rd, insn_bit23, insn_bits24_27, setflags;
3404 uint32_t record_buf[4];
3405
3406 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3407 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3408 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3409
3410 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
3411 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
3412 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
3413 {
3414 record_buf[0] = reg_rd;
3415 aarch64_insn_r->reg_rec_count = 1;
3416 }
3417 else if (insn_bits24_27 == 0x01)
3418 {
3419 /* Add/Subtract (immediate). */
3420 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3421 record_buf[0] = reg_rd;
3422 aarch64_insn_r->reg_rec_count = 1;
3423 if (setflags)
3424 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3425 }
3426 else if (insn_bits24_27 == 0x02 && !insn_bit23)
3427 {
3428 /* Logical (immediate). */
3429 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
3430 record_buf[0] = reg_rd;
3431 aarch64_insn_r->reg_rec_count = 1;
3432 if (setflags)
3433 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3434 }
3435 else
3436 return AARCH64_RECORD_UNKNOWN;
3437
3438 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3439 record_buf);
3440 return AARCH64_RECORD_SUCCESS;
3441 }
3442
3443 /* Record handler for branch, exception generation and system instructions. */
3444
3445 static unsigned int
3446 aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
3447 {
3448 struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
3449 uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
3450 uint32_t record_buf[4];
3451
3452 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3453 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3454 insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3455
3456 if (insn_bits28_31 == 0x0d)
3457 {
3458 /* Exception generation instructions. */
3459 if (insn_bits24_27 == 0x04)
3460 {
3461 if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
3462 && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
3463 && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
3464 {
3465 ULONGEST svc_number;
3466
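/* This is an SVC instruction; on GNU/Linux the syscall number is
   passed in x8 (register 8).  */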
3467 regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
3468 &svc_number);
3469 return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
3470 svc_number);
3471 }
3472 else
3473 return AARCH64_RECORD_UNSUPPORTED;
3474 }
3475 /* System instructions. */
3476 else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
3477 {
3478 uint32_t reg_rt, reg_crn;
3479
3480 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3481 reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3482
3483 /* Record rt in case of sysl and mrs instructions. */
3484 if (bit (aarch64_insn_r->aarch64_insn, 21))
3485 {
3486 record_buf[0] = reg_rt;
3487 aarch64_insn_r->reg_rec_count = 1;
3488 }
3489 /* Record cpsr for hint and msr (immediate) instructions. */
3490 else if (reg_crn == 0x02 || reg_crn == 0x04)
3491 {
3492 record_buf[0] = AARCH64_CPSR_REGNUM;
3493 aarch64_insn_r->reg_rec_count = 1;
3494 }
3495 }
3496 /* Unconditional branch (register). */
3497 else if ((insn_bits24_27 & 0x0e) == 0x06)
3498 {
3499 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3500 if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
3501 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3502 }
3503 else
3504 return AARCH64_RECORD_UNKNOWN;
3505 }
3506 /* Unconditional branch (immediate). */
3507 else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
3508 {
3509 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3510 if (bit (aarch64_insn_r->aarch64_insn, 31))
3511 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3512 }
3513 else
3514 /* Compare & branch (immediate), Test & branch (immediate) and
3515 Conditional branch (immediate). */
3516 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3517
3518 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3519 record_buf);
3520 return AARCH64_RECORD_SUCCESS;
3521 }
3522
3523 /* Record handler for advanced SIMD load and store instructions. */
3524
3525 static unsigned int
3526 aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
3527 {
3528 CORE_ADDR address;
3529 uint64_t addr_offset = 0;
3530 uint32_t record_buf[24];
3531 uint64_t record_buf_mem[24];
3532 uint32_t reg_rn, reg_rt;
3533 uint32_t reg_index = 0, mem_index = 0;
3534 uint8_t opcode_bits, size_bits;
3535
3536 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3537 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3538 size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3539 opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3540 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);
3541
3542 if (record_debug)
3543 debug_printf ("Process record: Advanced SIMD load/store\n");
3544
3545 /* Load/store single structure. */
3546 if (bit (aarch64_insn_r->aarch64_insn, 24))
3547 {
3548 uint8_t sindex, scale, selem, esize, replicate = 0;
3549 scale = opcode_bits >> 2;
3550 selem = ((opcode_bits & 0x02) |
3551 bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
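/* SELEM is the number of structure elements (1-4) and SCALE selects
   the element size, esize = 8 << scale bits; invalid size/opcode
   combinations are rejected by the switch below.  */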
3552 switch (scale)
3553 {
3554 case 1:
3555 if (size_bits & 0x01)
3556 return AARCH64_RECORD_UNKNOWN;
3557 break;
3558 case 2:
3559 if ((size_bits >> 1) & 0x01)
3560 return AARCH64_RECORD_UNKNOWN;
3561 if (size_bits & 0x01)
3562 {
3563 if (!((opcode_bits >> 1) & 0x01))
3564 scale = 3;
3565 else
3566 return AARCH64_RECORD_UNKNOWN;
3567 }
3568 break;
3569 case 3:
3570 if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
3571 {
3572 scale = size_bits;
3573 replicate = 1;
3574 break;
3575 }
3576 else
3577 return AARCH64_RECORD_UNKNOWN;
3578 default:
3579 break;
3580 }
3581 esize = 8 << scale;
3582 if (replicate)
3583 for (sindex = 0; sindex < selem; sindex++)
3584 {
3585 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3586 reg_rt = (reg_rt + 1) % 32;
3587 }
3588 else
3589 {
3590 for (sindex = 0; sindex < selem; sindex++)
3591 {
3592 if (bit (aarch64_insn_r->aarch64_insn, 22))
3593 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3594 else
3595 {
3596 record_buf_mem[mem_index++] = esize / 8;
3597 record_buf_mem[mem_index++] = address + addr_offset;
3598 }
3599 addr_offset = addr_offset + (esize / 8);
3600 reg_rt = (reg_rt + 1) % 32;
3601 }
3602 }
3603 }
3604 /* Load/store multiple structure. */
3605 else
3606 {
3607 uint8_t selem, esize, rpt, elements;
3608 uint8_t eindex, rindex;
3609
3610 esize = 8 << size_bits;
3611 if (bit (aarch64_insn_r->aarch64_insn, 30))
3612 elements = 128 / esize;
3613 else
3614 elements = 64 / esize;
3615
3616 switch (opcode_bits)
3617 {
3618 /* LD/ST4 (4 Registers). */
3619 case 0:
3620 rpt = 1;
3621 selem = 4;
3622 break;
3623 /* LD/ST1 (4 Registers). */
3624 case 2:
3625 rpt = 4;
3626 selem = 1;
3627 break;
3628 /* LD/ST3 (3 Registers). */
3629 case 4:
3630 rpt = 1;
3631 selem = 3;
3632 break;
3633 /* LD/ST1 (3 Registers). */
3634 case 6:
3635 rpt = 3;
3636 selem = 1;
3637 break;
3638 /* LD/ST1 (1 Register). */
3639 case 7:
3640 rpt = 1;
3641 selem = 1;
3642 break;
3643 /* LD/ST2 (2 Registers). */
3644 case 8:
3645 rpt = 1;
3646 selem = 2;
3647 break;
3648 /* LD/ST1 (2 Registers). */
3649 case 10:
3650 rpt = 2;
3651 selem = 1;
3652 break;
3653 default:
3654 return AARCH64_RECORD_UNSUPPORTED;
3655 break;
3656 }
3657 for (rindex = 0; rindex < rpt; rindex++)
3658 for (eindex = 0; eindex < elements; eindex++)
3659 {
3660 uint8_t reg_tt, sindex;
3661 reg_tt = (reg_rt + rindex) % 32;
3662 for (sindex = 0; sindex < selem; sindex++)
3663 {
3664 if (bit (aarch64_insn_r->aarch64_insn, 22))
3665 record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
3666 else
3667 {
3668 record_buf_mem[mem_index++] = esize / 8;
3669 record_buf_mem[mem_index++] = address + addr_offset;
3670 }
3671 addr_offset = addr_offset + (esize / 8);
3672 reg_tt = (reg_tt + 1) % 32;
3673 }
3674 }
3675 }
3676
3677 if (bit (aarch64_insn_r->aarch64_insn, 23))
3678 record_buf[reg_index++] = reg_rn;
3679
3680 aarch64_insn_r->reg_rec_count = reg_index;
3681 aarch64_insn_r->mem_rec_count = mem_index / 2;
3682 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3683 record_buf_mem);
3684 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3685 record_buf);
3686 return AARCH64_RECORD_SUCCESS;
3687 }
3688
3689 /* Record handler for load and store instructions. */
3690
3691 static unsigned int
3692 aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
3693 {
3694 uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
3695 uint8_t insn_bit23, insn_bit21;
3696 uint8_t opc, size_bits, ld_flag, vector_flag;
3697 uint32_t reg_rn, reg_rt, reg_rt2;
3698 uint64_t datasize, offset;
3699 uint32_t record_buf[8];
3700 uint64_t record_buf_mem[8];
3701 CORE_ADDR address;
3702
3703 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3704 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3705 insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
3706 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3707 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3708 ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
3709 vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
3710 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3711 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3712 reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
3713 size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);
3714
3715 /* Load/store exclusive. */
3716 if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
3717 {
3718 if (record_debug)
3719 debug_printf ("Process record: load/store exclusive\n");
3720
3721 if (ld_flag)
3722 {
3723 record_buf[0] = reg_rt;
3724 aarch64_insn_r->reg_rec_count = 1;
3725 if (insn_bit21)
3726 {
3727 record_buf[1] = reg_rt2;
3728 aarch64_insn_r->reg_rec_count = 2;
3729 }
3730 }
3731 else
3732 {
3733 if (insn_bit21)
3734 datasize = (8 << size_bits) * 2;
3735 else
3736 datasize = (8 << size_bits);
3737 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3738 &address);
3739 record_buf_mem[0] = datasize / 8;
3740 record_buf_mem[1] = address;
3741 aarch64_insn_r->mem_rec_count = 1;
3742 if (!insn_bit23)
3743 {
3744 	      /* Save register Rs: an exclusive store also writes its status result there. */
3745 record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
3746 aarch64_insn_r->reg_rec_count = 1;
3747 }
3748 }
3749 }
3750   /* Load register (literal) instructions. */
3751 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
3752 {
3753 if (record_debug)
3754 debug_printf ("Process record: load register (literal)\n");
3755 if (vector_flag)
3756 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3757 else
3758 record_buf[0] = reg_rt;
3759 aarch64_insn_r->reg_rec_count = 1;
3760 }
3761   /* Load/store pair instructions (all addressing forms). */
3762 else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
3763 {
3764 if (record_debug)
3765 debug_printf ("Process record: load/store pair\n");
3766
3767 if (ld_flag)
3768 {
3769 if (vector_flag)
3770 {
3771 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3772 record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
3773 }
3774 else
3775 {
3776 record_buf[0] = reg_rt;
3777 record_buf[1] = reg_rt2;
3778 }
3779 aarch64_insn_r->reg_rec_count = 2;
3780 }
3781 else
3782 {
3783 uint16_t imm7_off;
3784 imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
3785 if (!vector_flag)
3786 size_bits = size_bits >> 1;
3787 datasize = 8 << (2 + size_bits);
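	  /* imm7 is a signed offset scaled by the access size; recover its
	     magnitude from the two's complement encoding before applying
	     it below.  */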
3788 offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
3789 offset = offset << (2 + size_bits);
3790 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3791 &address);
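	  /* Post-indexed pairs access memory at the unmodified base
	     address; all other forms apply the signed offset first.  */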
3792 if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
3793 {
3794 if (imm7_off & 0x40)
3795 address = address - offset;
3796 else
3797 address = address + offset;
3798 }
3799
3800 record_buf_mem[0] = datasize / 8;
3801 record_buf_mem[1] = address;
3802 record_buf_mem[2] = datasize / 8;
3803 record_buf_mem[3] = address + (datasize / 8);
3804 aarch64_insn_r->mem_rec_count = 2;
3805 }
3806 if (bit (aarch64_insn_r->aarch64_insn, 23))
3807 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3808 }
3809 /* Load/store register (unsigned immediate) instructions. */
3810 else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
3811 {
3812 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
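      /* opc<0> normally distinguishes a load from a store; the opc = 1x
	 encodings cover prefetch and the sign-extending loads handled
	 below.  */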
3813 if (!(opc >> 1))
3814 {
3815 if (opc & 0x01)
3816 ld_flag = 0x01;
3817 else
3818 ld_flag = 0x0;
3819 }
3820 else
3821 {
3822 if (size_bits == 0x3 && vector_flag == 0x0 && opc == 0x2)
3823 {
3824 /* PRFM (immediate) */
3825 return AARCH64_RECORD_SUCCESS;
3826 }
3827 else if (size_bits == 0x2 && vector_flag == 0x0 && opc == 0x2)
3828 {
3829 /* LDRSW (immediate) */
3830 ld_flag = 0x1;
3831 }
3832 	  else
3833 	    {
3834 	      /* LDRSB/LDRSH (immediate), 64-bit variants, are also loads. */
3835 	      if ((opc & 0x01) || (vector_flag == 0x0 && opc == 0x2))
3836 		ld_flag = 0x01;
3837 	      else
3838 		ld_flag = 0x0;
	    }
3839 }
3840
3841 if (record_debug)
3842 {
3843 debug_printf ("Process record: load/store (unsigned immediate):"
3844 " size %x V %d opc %x\n", size_bits, vector_flag,
3845 opc);
3846 }
3847
3848 if (!ld_flag)
3849 {
3850 offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
3851 datasize = 8 << size_bits;
3852 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3853 &address);
3854 offset = offset << size_bits;
3855 address = address + offset;
3856
3857 record_buf_mem[0] = datasize >> 3;
3858 record_buf_mem[1] = address;
3859 aarch64_insn_r->mem_rec_count = 1;
3860 }
3861 else
3862 {
3863 if (vector_flag)
3864 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3865 else
3866 record_buf[0] = reg_rt;
3867 aarch64_insn_r->reg_rec_count = 1;
3868 }
3869 }
3870 /* Load/store register (register offset) instructions. */
3871 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3872 && insn_bits10_11 == 0x02 && insn_bit21)
3873 {
3874 if (record_debug)
3875 debug_printf ("Process record: load/store (register offset)\n");
3876 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3877 if (!(opc >> 1))
3878 if (opc & 0x01)
3879 ld_flag = 0x01;
3880 else
3881 ld_flag = 0x0;
3882 else
3883 if (size_bits != 0x03)
3884 ld_flag = 0x01;
3885 else
3886 return AARCH64_RECORD_UNKNOWN;
3887
3888 if (!ld_flag)
3889 {
3890 ULONGEST reg_rm_val;
3891
3892 regcache_raw_read_unsigned (aarch64_insn_r->regcache,
3893 bits (aarch64_insn_r->aarch64_insn, 16, 20), &reg_rm_val);
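	  /* When the S bit (bit 12) is set, the index register is scaled
	     by the transfer size.  */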
3894 if (bit (aarch64_insn_r->aarch64_insn, 12))
3895 offset = reg_rm_val << size_bits;
3896 else
3897 offset = reg_rm_val;
3898 datasize = 8 << size_bits;
3899 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3900 &address);
3901 address = address + offset;
3902 record_buf_mem[0] = datasize >> 3;
3903 record_buf_mem[1] = address;
3904 aarch64_insn_r->mem_rec_count = 1;
3905 }
3906 else
3907 {
3908 if (vector_flag)
3909 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3910 else
3911 record_buf[0] = reg_rt;
3912 aarch64_insn_r->reg_rec_count = 1;
3913 }
3914 }
3915 /* Load/store register (immediate and unprivileged) instructions. */
3916 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3917 && !insn_bit21)
3918 {
3919 if (record_debug)
3920 {
3921 debug_printf ("Process record: load/store "
3922 "(immediate and unprivileged)\n");
3923 }
3924 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3925 if (!(opc >> 1))
3926 if (opc & 0x01)
3927 ld_flag = 0x01;
3928 else
3929 ld_flag = 0x0;
3930 else
3931 if (size_bits != 0x03)
3932 ld_flag = 0x01;
3933 else
3934 return AARCH64_RECORD_UNKNOWN;
3935
3936 if (!ld_flag)
3937 {
3938 uint16_t imm9_off;
3939 imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
3940 offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
3941 datasize = 8 << size_bits;
3942 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3943 &address);
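	  /* Only the post-indexed form accesses memory at the unmodified
	     base address.  */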
3944 if (insn_bits10_11 != 0x01)
3945 {
3946 if (imm9_off & 0x0100)
3947 address = address - offset;
3948 else
3949 address = address + offset;
3950 }
3951 record_buf_mem[0] = datasize >> 3;
3952 record_buf_mem[1] = address;
3953 aarch64_insn_r->mem_rec_count = 1;
3954 }
3955 else
3956 {
3957 if (vector_flag)
3958 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3959 else
3960 record_buf[0] = reg_rt;
3961 aarch64_insn_r->reg_rec_count = 1;
3962 }
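      /* The post-indexed (0b01) and pre-indexed (0b11) forms write back
	 to the base register.  */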
3963 if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
3964 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3965 }
3966 /* Advanced SIMD load/store instructions. */
3967 else
3968 return aarch64_record_asimd_load_store (aarch64_insn_r);
3969
3970 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3971 record_buf_mem);
3972 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3973 record_buf);
3974 return AARCH64_RECORD_SUCCESS;
3975 }
3976
3977 /* Record handler for data processing SIMD and floating point instructions. */
3978
3979 static unsigned int
3980 aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
3981 {
3982 uint8_t insn_bit21, opcode, rmode, reg_rd;
3983 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
3984 uint8_t insn_bits11_14;
3985 uint32_t record_buf[2];
3986
3987 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3988 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3989 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3990 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3991 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
3992 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
3993 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
3994 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3995 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3996
3997 if (record_debug)
3998 debug_printf ("Process record: data processing SIMD/FP: ");
3999
4000 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
4001 {
4002 /* Floating point - fixed point conversion instructions. */
4003 if (!insn_bit21)
4004 {
4005 if (record_debug)
4006 debug_printf ("FP - fixed point conversion");
4007
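	  /* FCVTZ{S,U} (fixed-point) write a general-purpose register;
	     SCVTF/UCVTF write a vector register.  */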
4008 if ((opcode >> 1) == 0x0 && rmode == 0x03)
4009 record_buf[0] = reg_rd;
4010 else
4011 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4012 }
4013 /* Floating point - conditional compare instructions. */
4014 else if (insn_bits10_11 == 0x01)
4015 {
4016 if (record_debug)
4017 debug_printf ("FP - conditional compare");
4018
4019 record_buf[0] = AARCH64_CPSR_REGNUM;
4020 }
4021 /* Floating point - data processing (2-source) and
4022 conditional select instructions. */
4023 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
4024 {
4025 if (record_debug)
4026 debug_printf ("FP - DP (2-source)");
4027
4028 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4029 }
4030 else if (insn_bits10_11 == 0x00)
4031 {
4032 /* Floating point - immediate instructions. */
4033 if ((insn_bits12_15 & 0x01) == 0x01
4034 || (insn_bits12_15 & 0x07) == 0x04)
4035 {
4036 if (record_debug)
4037 debug_printf ("FP - immediate");
4038 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4039 }
4040 /* Floating point - compare instructions. */
4041 else if ((insn_bits12_15 & 0x03) == 0x02)
4042 {
4043 if (record_debug)
4044 		    debug_printf ("FP - compare");
4045 record_buf[0] = AARCH64_CPSR_REGNUM;
4046 }
4047 /* Floating point - integer conversions instructions. */
4048 else if (insn_bits12_15 == 0x00)
4049 {
4050 /* Convert float to integer instruction. */
4051 if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
4052 {
4053 if (record_debug)
4054 debug_printf ("float to int conversion");
4055
4056 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4057 }
4058 /* Convert integer to float instruction. */
4059 else if ((opcode >> 1) == 0x01 && !rmode)
4060 {
4061 if (record_debug)
4062 debug_printf ("int to float conversion");
4063
4064 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4065 }
4066 /* Move float to integer instruction. */
4067 else if ((opcode >> 1) == 0x03)
4068 {
4069 if (record_debug)
4070 debug_printf ("move float to int");
4071
4072 if (!(opcode & 0x01))
4073 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4074 else
4075 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4076 }
4077 else
4078 return AARCH64_RECORD_UNKNOWN;
4079 }
4080 else
4081 return AARCH64_RECORD_UNKNOWN;
4082 }
4083 else
4084 return AARCH64_RECORD_UNKNOWN;
4085 }
4086 else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
4087 {
4088 if (record_debug)
4089 debug_printf ("SIMD copy");
4090
4091 /* Advanced SIMD copy instructions. */
4092 if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
4093 && !bit (aarch64_insn_r->aarch64_insn, 15)
4094 && bit (aarch64_insn_r->aarch64_insn, 10))
4095 {
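	  /* SMOV/UMOV move a vector element to a general-purpose
	     register; every other copy targets a vector register.  */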
4096 if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
4097 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4098 else
4099 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4100 }
4101 else
4102 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4103 }
4104 /* All remaining floating point or advanced SIMD instructions. */
4105 else
4106 {
4107 if (record_debug)
4108 	debug_printf ("all remaining");
4109
4110 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4111 }
4112
4113 if (record_debug)
4114 debug_printf ("\n");
4115
4116 aarch64_insn_r->reg_rec_count++;
4117 gdb_assert (aarch64_insn_r->reg_rec_count == 1);
4118 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4119 record_buf);
4120 return AARCH64_RECORD_SUCCESS;
4121 }
4122
4123 /* Decode the type of the given instruction and invoke its record handler. */
4124
4125 static unsigned int
4126 aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
4127 {
4128 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
4129
4130 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
4131 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
4132 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
4133 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
4134
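  /* Dispatch on bits 25-28, the top-level op0 encoding group of the
     ARMv8-A instruction set.  */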
4135 /* Data processing - immediate instructions. */
4136 if (!ins_bit26 && !ins_bit27 && ins_bit28)
4137 return aarch64_record_data_proc_imm (aarch64_insn_r);
4138
4139 /* Branch, exception generation and system instructions. */
4140 if (ins_bit26 && !ins_bit27 && ins_bit28)
4141 return aarch64_record_branch_except_sys (aarch64_insn_r);
4142
4143 /* Load and store instructions. */
4144 if (!ins_bit25 && ins_bit27)
4145 return aarch64_record_load_store (aarch64_insn_r);
4146
4147 /* Data processing - register instructions. */
4148 if (ins_bit25 && !ins_bit26 && ins_bit27)
4149 return aarch64_record_data_proc_reg (aarch64_insn_r);
4150
4151 /* Data processing - SIMD and floating point instructions. */
4152 if (ins_bit25 && ins_bit26 && ins_bit27)
4153 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
4154
4155 return AARCH64_RECORD_UNSUPPORTED;
4156 }
4157
4158 /* Free the register and memory record lists allocated for RECORD. */
4159
4160 static void
4161 deallocate_reg_mem (insn_decode_record *record)
4162 {
4163 xfree (record->aarch64_regs);
4164 xfree (record->aarch64_mems);
4165 }
4166
4167 #if GDB_SELF_TEST
4168 namespace selftests {
4169
4170 static void
4171 aarch64_process_record_test (void)
4172 {
4173 struct gdbarch_info info;
4174 uint32_t ret;
4175
4176 gdbarch_info_init (&info);
4177 info.bfd_arch_info = bfd_scan_arch ("aarch64");
4178
4179 struct gdbarch *gdbarch = gdbarch_find_by_info (info);
4180 SELF_CHECK (gdbarch != NULL);
4181
4182 insn_decode_record aarch64_record;
4183
4184 memset (&aarch64_record, 0, sizeof (insn_decode_record));
4185 aarch64_record.regcache = NULL;
4186 aarch64_record.this_addr = 0;
4187 aarch64_record.gdbarch = gdbarch;
4188
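  /* PRFM is a prefetch hint and architecturally writes no register or
     memory state, so a successful record must log zero register and
     memory entries.  */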
4189 /* 20 00 80 f9 prfm pldl1keep, [x1] */
4190 aarch64_record.aarch64_insn = 0xf9800020;
4191 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4192 SELF_CHECK (ret == AARCH64_RECORD_SUCCESS);
4193 SELF_CHECK (aarch64_record.reg_rec_count == 0);
4194 SELF_CHECK (aarch64_record.mem_rec_count == 0);
4195
4196 deallocate_reg_mem (&aarch64_record);
4197 }
4198
4199 } // namespace selftests
4200 #endif /* GDB_SELF_TEST */
4201
4202 /* Parse the current instruction and record the values of the registers and
4203    memory that will be changed by the current instruction to record_arch_list.
4204    Return -1 if something goes wrong. */
4205
4206 int
4207 aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
4208 CORE_ADDR insn_addr)
4209 {
4210 uint32_t rec_no = 0;
4211 uint8_t insn_size = 4;
4212 uint32_t ret = 0;
4213 gdb_byte buf[insn_size];
4214 insn_decode_record aarch64_record;
4215
4216 memset (&buf[0], 0, insn_size);
4217 memset (&aarch64_record, 0, sizeof (insn_decode_record));
4218 target_read_memory (insn_addr, &buf[0], insn_size);
4219 aarch64_record.aarch64_insn
4220 = (uint32_t) extract_unsigned_integer (&buf[0],
4221 insn_size,
4222 gdbarch_byte_order (gdbarch));
4223 aarch64_record.regcache = regcache;
4224 aarch64_record.this_addr = insn_addr;
4225 aarch64_record.gdbarch = gdbarch;
4226
4227 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4228 if (ret == AARCH64_RECORD_UNSUPPORTED)
4229 {
4230 printf_unfiltered (_("Process record does not support instruction "
4231 			 "0x%08x at address %s.\n"),
4232 aarch64_record.aarch64_insn,
4233 paddress (gdbarch, insn_addr));
4234 ret = -1;
4235 }
4236
4237 if (0 == ret)
4238 {
4239 /* Record registers. */
4240 record_full_arch_list_add_reg (aarch64_record.regcache,
4241 AARCH64_PC_REGNUM);
4242 /* Always record register CPSR. */
4243 record_full_arch_list_add_reg (aarch64_record.regcache,
4244 AARCH64_CPSR_REGNUM);
4245 if (aarch64_record.aarch64_regs)
4246 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
4247 if (record_full_arch_list_add_reg (aarch64_record.regcache,
4248 aarch64_record.aarch64_regs[rec_no]))
4249 ret = -1;
4250
4251 /* Record memories. */
4252 if (aarch64_record.aarch64_mems)
4253 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
4254 if (record_full_arch_list_add_mem
4255 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
4256 aarch64_record.aarch64_mems[rec_no].len))
4257 ret = -1;
4258
4259 if (record_full_arch_list_add_end ())
4260 ret = -1;
4261 }
4262
4263 deallocate_reg_mem (&aarch64_record);
4264 return ret;
4265 }