/* Common target dependent code for GDB on AArch64 systems.

   Copyright (C) 2009-2019 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"

#include "frame.h"
#include "inferior.h"
#include "gdbcmd.h"
#include "gdbcore.h"
#include "dis-asm.h"
#include "regcache.h"
#include "reggroups.h"
#include "value.h"
#include "arch-utils.h"
#include "osabi.h"
#include "frame-unwind.h"
#include "frame-base.h"
#include "trad-frame.h"
#include "objfiles.h"
#include "dwarf2-frame.h"
#include "gdbtypes.h"
#include "prologue-value.h"
#include "target-descriptions.h"
#include "user-regs.h"
#include "language.h"
#include "infcall.h"
#include "ax.h"
#include "ax-gdb.h"
#include "common/selftest.h"

#include "aarch64-tdep.h"
#include "aarch64-ravenscar-thread.h"

#include "elf-bfd.h"
#include "elf/aarch64.h"

#include "common/vec.h"

#include "record.h"
#include "record-full.h"
#include "arch/aarch64-insn.h"

#include "opcode/aarch64.h"
#include <algorithm>

#define submask(x) ((1L << ((x) + 1)) - 1)
#define bit(obj,st) (((obj) >> (st)) & 1)
#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))

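/* For illustration only (not part of the original source): these macros
   extract bit fields from instruction words.  For the BRK #0 instruction
   word 0xd4200000, bit (0xd4200000, 31) is 1 (the top bit of the 0xd
   nibble), and bits (0xd4200000, 5, 20) yields the 16-bit immediate
   field of the instruction, here 0.  */
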
/* A Homogeneous Floating-Point or Short-Vector Aggregate may have at most
   four members.  */
#define HA_MAX_NUM_FLDS 4
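
/* For illustration only (not part of the original source): under the
   AAPCS64, a type such as

     struct hfa { float a, b, c, d; };

   is a Homogeneous Floating-point Aggregate (four members, all of the
   same floating-point type).  A fifth float member, or a mix of float
   and double members, would disqualify it.  */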

/* All possible aarch64 target descriptors.  */
struct target_desc *tdesc_aarch64_list[AARCH64_MAX_SVE_VQ + 1][2/*pauth*/];

/* The standard register names, and all the valid aliases for them.  */
static const struct
{
  const char *const name;
  int regnum;
} aarch64_register_aliases[] =
{
  /* 64-bit register names.  */
  {"fp", AARCH64_FP_REGNUM},
  {"lr", AARCH64_LR_REGNUM},
  {"sp", AARCH64_SP_REGNUM},

  /* 32-bit register names.  */
  {"w0", AARCH64_X0_REGNUM + 0},
  {"w1", AARCH64_X0_REGNUM + 1},
  {"w2", AARCH64_X0_REGNUM + 2},
  {"w3", AARCH64_X0_REGNUM + 3},
  {"w4", AARCH64_X0_REGNUM + 4},
  {"w5", AARCH64_X0_REGNUM + 5},
  {"w6", AARCH64_X0_REGNUM + 6},
  {"w7", AARCH64_X0_REGNUM + 7},
  {"w8", AARCH64_X0_REGNUM + 8},
  {"w9", AARCH64_X0_REGNUM + 9},
  {"w10", AARCH64_X0_REGNUM + 10},
  {"w11", AARCH64_X0_REGNUM + 11},
  {"w12", AARCH64_X0_REGNUM + 12},
  {"w13", AARCH64_X0_REGNUM + 13},
  {"w14", AARCH64_X0_REGNUM + 14},
  {"w15", AARCH64_X0_REGNUM + 15},
  {"w16", AARCH64_X0_REGNUM + 16},
  {"w17", AARCH64_X0_REGNUM + 17},
  {"w18", AARCH64_X0_REGNUM + 18},
  {"w19", AARCH64_X0_REGNUM + 19},
  {"w20", AARCH64_X0_REGNUM + 20},
  {"w21", AARCH64_X0_REGNUM + 21},
  {"w22", AARCH64_X0_REGNUM + 22},
  {"w23", AARCH64_X0_REGNUM + 23},
  {"w24", AARCH64_X0_REGNUM + 24},
  {"w25", AARCH64_X0_REGNUM + 25},
  {"w26", AARCH64_X0_REGNUM + 26},
  {"w27", AARCH64_X0_REGNUM + 27},
  {"w28", AARCH64_X0_REGNUM + 28},
  {"w29", AARCH64_X0_REGNUM + 29},
  {"w30", AARCH64_X0_REGNUM + 30},

  /* specials */
  {"ip0", AARCH64_X0_REGNUM + 16},
  {"ip1", AARCH64_X0_REGNUM + 17}
};
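
/* Not part of the original source: the table above is what makes names
   like "$fp", "$lr" and "$w0" resolve to the corresponding raw registers
   in user expressions, e.g. "print $w0", alongside the canonical
   "x0" ... "x30" names.  */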

/* The required core 'R' registers.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM!  */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  "pc", "cpsr"
};

/* The FP/SIMD 'V' registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM!  */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  "fpsr",
  "fpcr"
};

/* The SVE 'Z' and 'P' registers.  */
static const char *const aarch64_sve_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_SVE_Z0_REGNUM!  */
  "z0", "z1", "z2", "z3",
  "z4", "z5", "z6", "z7",
  "z8", "z9", "z10", "z11",
  "z12", "z13", "z14", "z15",
  "z16", "z17", "z18", "z19",
  "z20", "z21", "z22", "z23",
  "z24", "z25", "z26", "z27",
  "z28", "z29", "z30", "z31",
  "fpsr", "fpcr",
  "p0", "p1", "p2", "p3",
  "p4", "p5", "p6", "p7",
  "p8", "p9", "p10", "p11",
  "p12", "p13", "p14", "p15",
  "ffr", "vg"
};

static const char *const aarch64_pauth_register_names[] =
{
  /* Authentication mask for data pointer.  */
  "pauth_dmask",
  /* Authentication mask for code pointer.  */
  "pauth_cmask"
};

/* AArch64 prologue cache structure.  */
struct aarch64_prologue_cache
{
  /* The program counter at the start of the function.  It is used to
     identify this frame as a prologue frame.  */
  CORE_ADDR func;

  /* The program counter at the time this frame was created; i.e. where
     this function was called from.  It is used to identify this frame as a
     stub frame.  */
  CORE_ADDR prev_pc;

  /* The stack pointer at the time this frame was created; i.e. the
     caller's stack pointer when this function was called.  It is used
     to identify this frame.  */
  CORE_ADDR prev_sp;

  /* Is the target available to read from?  */
  int available_p;

  /* The frame base for this frame is just prev_sp - frame size.
     FRAMESIZE is the distance from the frame pointer to the
     initial stack pointer.  */
  int framesize;

  /* The register used to hold the frame pointer for this frame.  */
  int framereg;

  /* Saved register offsets.  */
  struct trad_frame_saved_reg *saved_regs;
};

static void
show_aarch64_debug (struct ui_file *file, int from_tty,
                    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
}

namespace {

/* Abstract instruction reader.  */

class abstract_instruction_reader
{
public:
  /* Read in one instruction.  */
  virtual ULONGEST read (CORE_ADDR memaddr, int len,
                         enum bfd_endian byte_order) = 0;
};

/* Instruction reader from real target.  */

class instruction_reader : public abstract_instruction_reader
{
public:
  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    override
  {
    return read_code_unsigned_integer (memaddr, len, byte_order);
  }
};

} // namespace

/* Analyze a prologue, looking for a recognizable stack frame
   and frame pointer.  Scan until we encounter a store that could
   clobber the stack frame unexpectedly, or an unknown instruction.  */

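/* For illustration only (not part of the original source), a typical
   prologue that the analyzer below recognizes looks like:

     stp x29, x30, [sp, #-32]!   // save FP and LR, allocate 32 bytes
     mov x29, sp                 // establish the frame pointer
     str x19, [sp, #16]          // save a callee-saved register

   Analysis stops at the first branch or at any instruction the analyzer
   does not understand.  */
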
static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
                          CORE_ADDR start, CORE_ADDR limit,
                          struct aarch64_prologue_cache *cache,
                          abstract_instruction_reader& reader)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  int i;
  /* Track X registers and D registers in prologue.  */
  pv_t regs[AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT];

  for (i = 0; i < AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT; i++)
    regs[i] = pv_register (i, 0);
  pv_area stack (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));

  for (; start < limit; start += 4)
    {
      uint32_t insn;
      aarch64_inst inst;

      insn = reader.read (start, 4, byte_order_for_code);

      if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
        break;

      if (inst.opcode->iclass == addsub_imm
          && (inst.opcode->op == OP_ADD
              || strcmp ("sub", inst.opcode->name) == 0))
        {
          unsigned rd = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].reg.regno;

          gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM);

          if (inst.opcode->op == OP_ADD)
            {
              regs[rd] = pv_add_constant (regs[rn],
                                          inst.operands[2].imm.value);
            }
          else
            {
              regs[rd] = pv_add_constant (regs[rn],
                                          -inst.operands[2].imm.value);
            }
        }
      else if (inst.opcode->iclass == pcreladdr
               && inst.operands[1].type == AARCH64_OPND_ADDR_ADRP)
        {
          gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);

          regs[inst.operands[0].reg.regno] = pv_unknown ();
        }
      else if (inst.opcode->iclass == branch_imm)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == condbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == branch_reg)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == compbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->op == OP_MOVZ)
        {
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
          regs[inst.operands[0].reg.regno] = pv_unknown ();
        }
      else if (inst.opcode->iclass == log_shift
               && strcmp (inst.opcode->name, "orr") == 0)
        {
          unsigned rd = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].reg.regno;
          unsigned rm = inst.operands[2].reg.regno;

          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT);

          if (inst.operands[2].shifter.amount == 0
              && rn == AARCH64_SP_REGNUM)
            regs[rd] = regs[rm];
          else
            {
              if (aarch64_debug)
                {
                  debug_printf ("aarch64: prologue analysis gave up "
                                "addr=%s opcode=0x%x (orr x register)\n",
                                core_addr_to_string_nz (start), insn);
                }
              break;
            }
        }
      else if (inst.opcode->op == OP_STUR)
        {
          unsigned rt = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].addr.base_regno;
          int is64
            = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);

          gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
          gdb_assert (!inst.operands[1].addr.offset.is_reg);

          stack.store (pv_add_constant (regs[rn],
                                        inst.operands[1].addr.offset.imm),
                       is64 ? 8 : 4, regs[rt]);
        }
      else if ((inst.opcode->iclass == ldstpair_off
                || (inst.opcode->iclass == ldstpair_indexed
                    && inst.operands[2].addr.preind))
               && strcmp ("stp", inst.opcode->name) == 0)
        {
          /* STP with addressing mode Pre-indexed and Base register.  */
          unsigned rt1;
          unsigned rt2;
          unsigned rn = inst.operands[2].addr.base_regno;
          int32_t imm = inst.operands[2].addr.offset.imm;

          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
                      || inst.operands[0].type == AARCH64_OPND_Ft);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2
                      || inst.operands[1].type == AARCH64_OPND_Ft2);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7);
          gdb_assert (!inst.operands[2].addr.offset.is_reg);

          /* If recording this store would invalidate the store area
             (perhaps because rn is not known) then we should abandon
             further prologue analysis.  */
          if (stack.store_would_trash (pv_add_constant (regs[rn], imm)))
            break;

          if (stack.store_would_trash (pv_add_constant (regs[rn], imm + 8)))
            break;

          rt1 = inst.operands[0].reg.regno;
          rt2 = inst.operands[1].reg.regno;
          if (inst.operands[0].type == AARCH64_OPND_Ft)
            {
              /* Only the bottom 64 bits of each V register (the D register)
                 need to be preserved.  */
              gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
              rt1 += AARCH64_X_REGISTER_COUNT;
              rt2 += AARCH64_X_REGISTER_COUNT;
            }

          stack.store (pv_add_constant (regs[rn], imm), 8,
                       regs[rt1]);
          stack.store (pv_add_constant (regs[rn], imm + 8), 8,
                       regs[rt2]);

          if (inst.operands[2].addr.writeback)
            regs[rn] = pv_add_constant (regs[rn], imm);

        }
      else if ((inst.opcode->iclass == ldst_imm9 /* Signed immediate.  */
                || (inst.opcode->iclass == ldst_pos /* Unsigned immediate.  */
                    && (inst.opcode->op == OP_STR_POS
                        || inst.opcode->op == OP_STRF_POS)))
               && inst.operands[1].addr.base_regno == AARCH64_SP_REGNUM
               && strcmp ("str", inst.opcode->name) == 0)
        {
          /* STR (immediate) */
          unsigned int rt = inst.operands[0].reg.regno;
          int32_t imm = inst.operands[1].addr.offset.imm;
          unsigned int rn = inst.operands[1].addr.base_regno;
          bool is64
            = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
                      || inst.operands[0].type == AARCH64_OPND_Ft);

          if (inst.operands[0].type == AARCH64_OPND_Ft)
            {
              /* Only the bottom 64 bits of each V register (the D register)
                 need to be preserved.  */
              gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
              rt += AARCH64_X_REGISTER_COUNT;
            }

          stack.store (pv_add_constant (regs[rn], imm),
                       is64 ? 8 : 4, regs[rt]);
          if (inst.operands[1].addr.writeback)
            regs[rn] = pv_add_constant (regs[rn], imm);
        }
      else if (inst.opcode->iclass == testbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else
        {
          if (aarch64_debug)
            {
              debug_printf ("aarch64: prologue analysis gave up addr=%s"
                            " opcode=0x%x\n",
                            core_addr_to_string_nz (start), insn);
            }
          break;
        }
    }

  if (cache == NULL)
    return start;

  if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Frame pointer is fp.  Frame size is constant.  */
      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = -regs[AARCH64_FP_REGNUM].k;
    }
  else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Try the stack pointer.  */
      cache->framesize = -regs[AARCH64_SP_REGNUM].k;
      cache->framereg = AARCH64_SP_REGNUM;
    }
  else
    {
      /* We're just out of luck.  We don't know where the frame is.  */
      cache->framereg = -1;
      cache->framesize = 0;
    }

  for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
    {
      CORE_ADDR offset;

      if (stack.find_reg (gdbarch, i, &offset))
        cache->saved_regs[i].addr = offset;
    }

  for (i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
    {
      int regnum = gdbarch_num_regs (gdbarch);
      CORE_ADDR offset;

      if (stack.find_reg (gdbarch, i + AARCH64_X_REGISTER_COUNT,
                          &offset))
        cache->saved_regs[i + regnum + AARCH64_D0_REGNUM].addr = offset;
    }

  return start;
}

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
                          CORE_ADDR start, CORE_ADDR limit,
                          struct aarch64_prologue_cache *cache)
{
  instruction_reader reader;

  return aarch64_analyze_prologue (gdbarch, start, limit, cache,
                                   reader);
}

#if GDB_SELF_TEST

namespace selftests {

/* Instruction reader from manually cooked instruction sequences.  */

class instruction_reader_test : public abstract_instruction_reader
{
public:
  template<size_t SIZE>
  explicit instruction_reader_test (const uint32_t (&insns)[SIZE])
    : m_insns (insns), m_insns_size (SIZE)
  {}

  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    override
  {
    SELF_CHECK (len == 4);
    SELF_CHECK (memaddr % 4 == 0);
    SELF_CHECK (memaddr / 4 < m_insns_size);

    return m_insns[memaddr / 4];
  }

private:
  const uint32_t *m_insns;
  size_t m_insns_size;
};

static void
aarch64_analyze_prologue_test (void)
{
  struct gdbarch_info info;

  gdbarch_info_init (&info);
  info.bfd_arch_info = bfd_scan_arch ("aarch64");

  struct gdbarch *gdbarch = gdbarch_find_by_info (info);
  SELF_CHECK (gdbarch != NULL);

  /* Test the simple prologue in which the frame pointer is used.  */
  {
    struct aarch64_prologue_cache cache;
    cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);

    static const uint32_t insns[] = {
      0xa9af7bfd, /* stp x29, x30, [sp,#-272]! */
      0x910003fd, /* mov x29, sp */
      0x97ffffe6, /* bl 0x400580 */
    };
    instruction_reader_test reader (insns);

    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
    SELF_CHECK (end == 4 * 2);

    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 272);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
        if (i == AARCH64_FP_REGNUM)
          SELF_CHECK (cache.saved_regs[i].addr == -272);
        else if (i == AARCH64_LR_REGNUM)
          SELF_CHECK (cache.saved_regs[i].addr == -264);
        else
          SELF_CHECK (cache.saved_regs[i].addr == -1);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
        int regnum = gdbarch_num_regs (gdbarch);

        SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
                    == -1);
      }
  }

  /* Test a prologue in which STR is used and the frame pointer is not
     used.  */
  {
    struct aarch64_prologue_cache cache;
    cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);

    static const uint32_t insns[] = {
      0xf81d0ff3, /* str x19, [sp, #-48]! */
      0xb9002fe0, /* str w0, [sp, #44] */
      0xf90013e1, /* str x1, [sp, #32] */
      0xfd000fe0, /* str d0, [sp, #24] */
      0xaa0203f3, /* mov x19, x2 */
      0xf94013e0, /* ldr x0, [sp, #32] */
    };
    instruction_reader_test reader (insns);

    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    SELF_CHECK (end == 4 * 5);

    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 48);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
        if (i == 1)
          SELF_CHECK (cache.saved_regs[i].addr == -16);
        else if (i == 19)
          SELF_CHECK (cache.saved_regs[i].addr == -48);
        else
          SELF_CHECK (cache.saved_regs[i].addr == -1);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
        int regnum = gdbarch_num_regs (gdbarch);

        if (i == 0)
          SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
                      == -24);
        else
          SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
                      == -1);
      }
  }
}
} // namespace selftests
#endif /* GDB_SELF_TEST */

/* Implement the "skip_prologue" gdbarch method.  */

static CORE_ADDR
aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  CORE_ADDR func_addr, limit_pc;

  /* See if we can determine the end of the prologue via the symbol
     table.  If so, then return either PC, or the PC after the
     prologue, whichever is greater.  */
  if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
    {
      CORE_ADDR post_prologue_pc
        = skip_prologue_using_sal (gdbarch, func_addr);

      if (post_prologue_pc != 0)
        return std::max (pc, post_prologue_pc);
    }

  /* Can't determine prologue from the symbol table, need to examine
     instructions.  */

  /* Find an upper limit on the function prologue using the debug
     information.  If the debug information could not be used to
     provide that bound, then use an arbitrary large number as the
     upper bound.  */
  limit_pc = skip_prologue_using_sal (gdbarch, pc);
  if (limit_pc == 0)
    limit_pc = pc + 128;  /* Magic.  */

  /* Try disassembling prologue.  */
  return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
}

/* Scan the function prologue for THIS_FRAME and populate the prologue
   cache CACHE.  */

static void
aarch64_scan_prologue (struct frame_info *this_frame,
                       struct aarch64_prologue_cache *cache)
{
  CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
  CORE_ADDR prologue_start;
  CORE_ADDR prologue_end;
  CORE_ADDR prev_pc = get_frame_pc (this_frame);
  struct gdbarch *gdbarch = get_frame_arch (this_frame);

  cache->prev_pc = prev_pc;

  /* Assume we do not find a frame.  */
  cache->framereg = -1;
  cache->framesize = 0;

  if (find_pc_partial_function (block_addr, NULL, &prologue_start,
                                &prologue_end))
    {
      struct symtab_and_line sal = find_pc_line (prologue_start, 0);

      if (sal.line == 0)
        {
          /* No line info so use the current PC.  */
          prologue_end = prev_pc;
        }
      else if (sal.end < prologue_end)
        {
          /* The next line begins after the function end.  */
          prologue_end = sal.end;
        }

      prologue_end = std::min (prologue_end, prev_pc);
      aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
    }
  else
    {
      CORE_ADDR frame_loc;

      frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
      if (frame_loc == 0)
        return;

      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = 16;
      cache->saved_regs[29].addr = 0;
      cache->saved_regs[30].addr = 8;
    }
}

/* Fill in *CACHE with information about the prologue of *THIS_FRAME.  This
   function may throw an exception if the inferior's registers or memory is
   not available.  */

static void
aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
                               struct aarch64_prologue_cache *cache)
{
  CORE_ADDR unwound_fp;
  int reg;

  aarch64_scan_prologue (this_frame, cache);

  if (cache->framereg == -1)
    return;

  unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
  if (unwound_fp == 0)
    return;

  cache->prev_sp = unwound_fp + cache->framesize;

  /* Calculate actual addresses of saved registers using offsets
     determined by aarch64_analyze_prologue.  */
  for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
    if (trad_frame_addr_p (cache->saved_regs, reg))
      cache->saved_regs[reg].addr += cache->prev_sp;

  cache->func = get_frame_func (this_frame);

  cache->available_p = 1;
}

/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  TRY
    {
      aarch64_make_prologue_cache_1 (this_frame, cache);
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
        throw_exception (ex);
    }
  END_CATCH

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
                                           void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  /* Halt the backtrace at "_start".  */
  if (cache->prev_pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
    return UNWIND_OUTERMOST;

  /* We've hit a wall, stop.  */
  if (cache->prev_sp == 0)
    return UNWIND_OUTERMOST;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a normal frame is the current function's starting
   PC and the caller's SP when we were called.  */

static void
aarch64_prologue_this_id (struct frame_info *this_frame,
                          void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    *this_id = frame_id_build_unavailable_stack (cache->func);
  else
    *this_id = frame_id_build (cache->prev_sp, cache->func);
}

/* Implement the "prev_register" frame_unwind method.  */

static struct value *
aarch64_prologue_prev_register (struct frame_info *this_frame,
                                void **this_cache, int prev_regnum)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  /* If we are asked to unwind the PC, then we need to return the LR
     instead.  The prologue may save PC, but it will point into this
     frame's prologue, not the next frame's resume location.  */
  if (prev_regnum == AARCH64_PC_REGNUM)
    {
      CORE_ADDR lr;

      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      return frame_unwind_got_constant (this_frame, prev_regnum, lr);
    }

  /* SP is generally not saved to the stack, but this frame is
     identified by the next frame's stack pointer at the time of the
     call.  The value was already reconstructed into PREV_SP.  */
  /*
         +----------+  ^
         | saved lr |  |
      +->| saved fp |--+
      |  |          |
      |  |          |  <- Previous SP
      |  +----------+
      |  | saved lr |
      +--| saved fp |  <- FP
         |          |
         |          |  <- SP
         +----------+  */
  if (prev_regnum == AARCH64_SP_REGNUM)
    return frame_unwind_got_constant (this_frame, prev_regnum,
                                      cache->prev_sp);

  return trad_frame_get_prev_register (this_frame, cache->saved_regs,
                                       prev_regnum);
}

/* AArch64 prologue unwinder.  */
struct frame_unwind aarch64_prologue_unwind =
{
  NORMAL_FRAME,
  aarch64_prologue_frame_unwind_stop_reason,
  aarch64_prologue_this_id,
  aarch64_prologue_prev_register,
  NULL,
  default_frame_sniffer
};

/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  TRY
    {
      cache->prev_sp = get_frame_register_unsigned (this_frame,
                                                    AARCH64_SP_REGNUM);
      cache->prev_pc = get_frame_pc (this_frame);
      cache->available_p = 1;
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
        throw_exception (ex);
    }
  END_CATCH

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame,
                                       void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a stub frame is the current SP and LR.  */

static void
aarch64_stub_this_id (struct frame_info *this_frame,
                      void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (cache->available_p)
    *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
  else
    *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
}

/* Implement the "sniffer" frame_unwind method.  */

static int
aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
                             struct frame_info *this_frame,
                             void **this_prologue_cache)
{
  CORE_ADDR addr_in_block;
  gdb_byte dummy[4];

  addr_in_block = get_frame_address_in_block (this_frame);
  if (in_plt_section (addr_in_block)
      /* We also use the stub unwinder if the target memory is unreadable,
         to avoid having the prologue unwinder try to read it.  */
      || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
    return 1;

  return 0;
}

/* AArch64 stub unwinder.  */
struct frame_unwind aarch64_stub_unwind =
{
  NORMAL_FRAME,
  aarch64_stub_frame_unwind_stop_reason,
  aarch64_stub_this_id,
  aarch64_prologue_prev_register,
  NULL,
  aarch64_stub_unwind_sniffer
};

/* Return the frame base address of *THIS_FRAME.  */

static CORE_ADDR
aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  return cache->prev_sp - cache->framesize;
}

/* AArch64 default frame base information.  */
struct frame_base aarch64_normal_base =
{
  &aarch64_prologue_unwind,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base
};

/* Return the value of the REGNUM register in the previous frame of
   *THIS_FRAME.  */

static struct value *
aarch64_dwarf2_prev_register (struct frame_info *this_frame,
                              void **this_cache, int regnum)
{
  CORE_ADDR lr;

  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      return frame_unwind_got_constant (this_frame, regnum, lr);

    default:
      internal_error (__FILE__, __LINE__,
                      _("Unexpected register %d"), regnum);
    }
}

/* Implement the "init_reg" dwarf2_frame_ops method.  */

static void
aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
                               struct dwarf2_frame_state_reg *reg,
                               struct frame_info *this_frame)
{
  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      reg->how = DWARF2_FRAME_REG_FN;
      reg->loc.fn = aarch64_dwarf2_prev_register;
      break;
    case AARCH64_SP_REGNUM:
      reg->how = DWARF2_FRAME_REG_CFA;
      break;
    }
}

/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

typedef struct
{
  /* Value to pass on stack.  It can be NULL if this item is for stack
     padding.  */
  const gdb_byte *data;

  /* Size in bytes of value to pass on stack.  */
  int len;
} stack_item_t;

DEF_VEC_O (stack_item_t);

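/* For illustration only (not part of the original source): if arguments
   A and B are pushed onto this vector in that order, aarch64_push_dummy_call
   below pops them in reverse, decrementing SP before each write.  B is
   therefore written at the higher address and A at the lower, so after the
   final SP update A sits at [sp] and B at [sp + len(A)], which is the
   layout the AAPCS64 expects for stacked arguments.  */
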
/* Return the alignment (in bytes) of the given type.  */

static int
aarch64_type_align (struct type *t)
{
  int n;
  int align;
  int falign;

  t = check_typedef (t);
  switch (TYPE_CODE (t))
    {
    default:
      /* Should never happen.  */
      internal_error (__FILE__, __LINE__, _("unknown type alignment"));
      return 4;

    case TYPE_CODE_PTR:
    case TYPE_CODE_ENUM:
    case TYPE_CODE_INT:
    case TYPE_CODE_FLT:
    case TYPE_CODE_SET:
    case TYPE_CODE_RANGE:
    case TYPE_CODE_BITSTRING:
    case TYPE_CODE_REF:
    case TYPE_CODE_RVALUE_REF:
    case TYPE_CODE_CHAR:
    case TYPE_CODE_BOOL:
      return TYPE_LENGTH (t);

    case TYPE_CODE_ARRAY:
      if (TYPE_VECTOR (t))
        {
          /* Use the natural alignment for vector types (the same as for
             scalar types), but cap the alignment at 128 bits.  */
          if (TYPE_LENGTH (t) > 16)
            return 16;
          else
            return TYPE_LENGTH (t);
        }
      else
        return aarch64_type_align (TYPE_TARGET_TYPE (t));
    case TYPE_CODE_COMPLEX:
      return aarch64_type_align (TYPE_TARGET_TYPE (t));

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      align = 1;
      for (n = 0; n < TYPE_NFIELDS (t); n++)
        {
          falign = aarch64_type_align (TYPE_FIELD_TYPE (t, n));
          if (falign > align)
            align = falign;
        }
      return align;
    }
}

/* Worker function for aapcs_is_vfp_call_or_return_candidate.

   Return the number of registers required, or -1 on failure.

   When encountering a base element, if FUNDAMENTAL_TYPE is not set then set
   it to the element, else fail if the type of this element does not match
   the existing value.  */

static int
aapcs_is_vfp_call_or_return_candidate_1 (struct type *type,
                                         struct type **fundamental_type)
{
  if (type == nullptr)
    return -1;

  switch (TYPE_CODE (type))
    {
    case TYPE_CODE_FLT:
      if (TYPE_LENGTH (type) > 16)
        return -1;

      if (*fundamental_type == nullptr)
        *fundamental_type = type;
      else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
               || TYPE_CODE (type) != TYPE_CODE (*fundamental_type))
        return -1;

      return 1;

    case TYPE_CODE_COMPLEX:
      {
        struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
        if (TYPE_LENGTH (target_type) > 16)
          return -1;

        if (*fundamental_type == nullptr)
          *fundamental_type = target_type;
        else if (TYPE_LENGTH (target_type) != TYPE_LENGTH (*fundamental_type)
                 || TYPE_CODE (target_type) != TYPE_CODE (*fundamental_type))
          return -1;

        return 2;
      }

    case TYPE_CODE_ARRAY:
      {
        if (TYPE_VECTOR (type))
          {
            if (TYPE_LENGTH (type) != 8 && TYPE_LENGTH (type) != 16)
              return -1;

            if (*fundamental_type == nullptr)
              *fundamental_type = type;
            else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
                     || TYPE_CODE (type) != TYPE_CODE (*fundamental_type))
              return -1;

            return 1;
          }
        else
          {
            struct type *target_type = TYPE_TARGET_TYPE (type);
            int count = aapcs_is_vfp_call_or_return_candidate_1
                          (target_type, fundamental_type);

            if (count == -1)
              return count;

            count *= (TYPE_LENGTH (type) / TYPE_LENGTH (target_type));
            return count;
          }
      }

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      {
        int count = 0;

        for (int i = 0; i < TYPE_NFIELDS (type); i++)
          {
            /* Ignore any static fields.  */
            if (field_is_static (&TYPE_FIELD (type, i)))
              continue;

            struct type *member = check_typedef (TYPE_FIELD_TYPE (type, i));

            int sub_count = aapcs_is_vfp_call_or_return_candidate_1
                              (member, fundamental_type);
            if (sub_count == -1)
              return -1;
            count += sub_count;
          }

        /* Ensure there is no padding between the fields (allowing for empty
           zero-length structs).  */
        int ftype_length = (*fundamental_type == nullptr)
                           ? 0 : TYPE_LENGTH (*fundamental_type);
        if (count * ftype_length != TYPE_LENGTH (type))
          return -1;

        return count;
      }

    default:
      break;
    }

  return -1;
}

/* Return true if an argument, whose type is described by TYPE, can be passed
   or returned in simd/fp registers, provided enough parameter-passing
   registers are available.  This is as described in the AAPCS64.

   Upon successful return, *COUNT returns the number of needed registers and
   *FUNDAMENTAL_TYPE contains the type of those registers.

   A candidate, as per AAPCS64 5.4.2.C, is either a:
   - float.
   - short-vector.
   - HFA (Homogeneous Floating-point Aggregate, 4.3.5.1).  A composite type
     in which all the members are floats and which has at most 4 members.
   - HVA (Homogeneous Short-vector Aggregate, 4.3.5.2).  A composite type
     in which all the members are short vectors and which has at most 4
     members.
   - Complex (7.1.1).

   Note that HFAs and HVAs can include nested structures and arrays.  */

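/* For illustration only (not part of the original source), some example
   classifications by the predicate below:

     float                           -> candidate, *COUNT = 1
     _Complex double                 -> candidate, *COUNT = 2
     struct { double d[4]; }         -> HFA, *COUNT = 4
     struct { float f; double d; }   -> not a candidate (mixed base types)
     struct { float f[5]; }          -> not a candidate (five members)  */
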
static bool
aapcs_is_vfp_call_or_return_candidate (struct type *type, int *count,
                                       struct type **fundamental_type)
{
  if (type == nullptr)
    return false;

  *fundamental_type = nullptr;

  int ag_count = aapcs_is_vfp_call_or_return_candidate_1 (type,
                                                          fundamental_type);

  if (ag_count > 0 && ag_count <= HA_MAX_NUM_FLDS)
    {
      *count = ag_count;
      return true;
    }
  else
    return false;
}

/* AArch64 function call information structure.  */
struct aarch64_call_info
{
  /* The current argument number.  */
  unsigned argnum;

  /* The next general purpose register number, equivalent to NGRN as
     described in the AArch64 Procedure Call Standard.  */
  unsigned ngrn;

  /* The next SIMD and floating point register number, equivalent to
     NSRN as described in the AArch64 Procedure Call Standard.  */
  unsigned nsrn;

  /* The next stacked argument address, equivalent to NSAA as
     described in the AArch64 Procedure Call Standard.  */
  unsigned nsaa;

  /* Stack item vector.  */
  VEC(stack_item_t) *si;
};

/* Pass a value in a sequence of consecutive X registers.  The caller
   is responsible for ensuring sufficient registers are available.  */

static void
pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
           struct aarch64_call_info *info, struct type *type,
           struct value *arg)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int len = TYPE_LENGTH (type);
  enum type_code typecode = TYPE_CODE (type);
  int regnum = AARCH64_X0_REGNUM + info->ngrn;
  const bfd_byte *buf = value_contents (arg);

  info->argnum++;

  while (len > 0)
    {
      int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
      CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
                                                   byte_order);

      /* Adjust sub-word struct/union args when big-endian.  */
      if (byte_order == BFD_ENDIAN_BIG
          && partial_len < X_REGISTER_SIZE
          && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
        regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);

      if (aarch64_debug)
        {
          debug_printf ("arg %d in %s = 0x%s\n", info->argnum,
                        gdbarch_register_name (gdbarch, regnum),
                        phex (regval, X_REGISTER_SIZE));
        }
      regcache_cooked_write_unsigned (regcache, regnum, regval);
      len -= partial_len;
      buf += partial_len;
      regnum++;
    }
}

/* Attempt to marshall a value in a V register.  Return 1 if
   successful, or 0 if insufficient registers are available.  This
   function, unlike the equivalent pass_in_x() function, does not
   handle arguments spread across multiple registers.  */

static int
pass_in_v (struct gdbarch *gdbarch,
           struct regcache *regcache,
           struct aarch64_call_info *info,
           int len, const bfd_byte *buf)
{
  if (info->nsrn < 8)
    {
      int regnum = AARCH64_V0_REGNUM + info->nsrn;
      /* Enough space for a full vector register.  */
      gdb_byte reg[register_size (gdbarch, regnum)];
      gdb_assert (len <= sizeof (reg));

      info->argnum++;
      info->nsrn++;

      memset (reg, 0, sizeof (reg));
      /* PCS C.1, the argument is allocated to the least significant
         bits of V register.  */
      memcpy (reg, buf, len);
      regcache->cooked_write (regnum, reg);

      if (aarch64_debug)
        {
          debug_printf ("arg %d in %s\n", info->argnum,
                        gdbarch_register_name (gdbarch, regnum));
        }
      return 1;
    }
  info->nsrn = 8;
  return 0;
}

/* Marshall an argument onto the stack.  */

static void
pass_on_stack (struct aarch64_call_info *info, struct type *type,
               struct value *arg)
{
  const bfd_byte *buf = value_contents (arg);
  int len = TYPE_LENGTH (type);
  int align;
  stack_item_t item;

  info->argnum++;

  align = aarch64_type_align (type);

  /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
     natural alignment of the argument's type.  */
  align = align_up (align, 8);

  /* The AArch64 PCS requires at most doubleword alignment.  */
  if (align > 16)
    align = 16;

  if (aarch64_debug)
    {
      debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
                    info->nsaa);
    }

  item.len = len;
  item.data = buf;
  VEC_safe_push (stack_item_t, info->si, &item);

  info->nsaa += len;
  if (info->nsaa & (align - 1))
    {
      /* Push stack alignment padding.  */
      int pad = align - (info->nsaa & (align - 1));

      item.len = pad;
      item.data = NULL;

      VEC_safe_push (stack_item_t, info->si, &item);
      info->nsaa += pad;
    }
}
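
/* For illustration only (not part of the original source): passing a
   12-byte struct when NSAA is 0 copies 12 bytes of data (NSAA becomes 12)
   and, with the required 8-byte alignment, then pushes 4 bytes of padding,
   so the next stacked argument starts at NSAA == 16.  */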

/* Marshall an argument into a sequence of one or more consecutive X
   registers or, if insufficient X registers are available, onto
   the stack.  */

static void
pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
                    struct aarch64_call_info *info, struct type *type,
                    struct value *arg)
{
  int len = TYPE_LENGTH (type);
  int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;

  /* PCS C.13 - Pass in registers if we have enough spare.  */
  if (info->ngrn + nregs <= 8)
    {
      pass_in_x (gdbarch, regcache, info, type, arg);
      info->ngrn += nregs;
    }
  else
    {
      info->ngrn = 8;
      pass_on_stack (info, type, arg);
    }
}

/* Pass a value, which is of type arg_type, in a V register.  Assumes value
   is an aapcs_is_vfp_call_or_return_candidate and there are enough spare V
   registers.  A return value of false is an error state as the value will
   have been partially passed to the stack.  */
static bool
pass_in_v_vfp_candidate (struct gdbarch *gdbarch, struct regcache *regcache,
                         struct aarch64_call_info *info, struct type *arg_type,
                         struct value *arg)
{
  switch (TYPE_CODE (arg_type))
    {
    case TYPE_CODE_FLT:
      return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type),
                        value_contents (arg));
      break;

    case TYPE_CODE_COMPLEX:
      {
        const bfd_byte *buf = value_contents (arg);
        struct type *target_type = check_typedef (TYPE_TARGET_TYPE (arg_type));

        if (!pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type),
                        buf))
          return false;

        return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type),
                          buf + TYPE_LENGTH (target_type));
      }

    case TYPE_CODE_ARRAY:
      if (TYPE_VECTOR (arg_type))
        return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type),
                          value_contents (arg));
      /* fall through.  */

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      for (int i = 0; i < TYPE_NFIELDS (arg_type); i++)
        {
          /* Don't include static fields.  */
          if (field_is_static (&TYPE_FIELD (arg_type, i)))
            continue;

          struct value *field = value_primitive_field (arg, 0, i, arg_type);
          struct type *field_type = check_typedef (value_type (field));

          if (!pass_in_v_vfp_candidate (gdbarch, regcache, info, field_type,
                                        field))
            return false;
        }
      return true;

    default:
      return false;
    }
}

/* Implement the "push_dummy_call" gdbarch method.  */

static CORE_ADDR
aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
                         struct regcache *regcache, CORE_ADDR bp_addr,
                         int nargs,
                         struct value **args, CORE_ADDR sp,
                         function_call_return_method return_method,
                         CORE_ADDR struct_addr)
{
  int argnum;
  struct aarch64_call_info info;

  memset (&info, 0, sizeof (info));

  /* We need to know what the type of the called function is in order
     to determine the number of named/anonymous arguments for the
     actual argument placement, and the return type in order to handle
     return value correctly.

     The generic code above us views the decision of return in memory
     or return in registers as a two-stage process.  The language
     handler is consulted first and may decide to return in memory (e.g.
     a class with a copy constructor returned by value); this will cause
     the generic code to allocate space AND insert an initial leading
     argument.

     If the language code does not decide to pass in memory then the
     target code is consulted.

     If the language code decides to pass in memory we want to move
     the pointer inserted as the initial argument from the argument
     list and into X8, the conventional AArch64 struct return pointer
     register.  */

  /* Set the return address.  For the AArch64, the return breakpoint
     is always at BP_ADDR.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);

  /* If we were given an initial argument for the return slot, lose it.  */
  if (return_method == return_method_hidden_param)
    {
      args++;
      nargs--;
    }

  /* The struct_return pointer occupies X8.  */
  if (return_method != return_method_normal)
    {
      if (aarch64_debug)
        {
          debug_printf ("struct return in %s = 0x%s\n",
                        gdbarch_register_name (gdbarch,
                                               AARCH64_STRUCT_RETURN_REGNUM),
                        paddress (gdbarch, struct_addr));
        }
      regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
                                      struct_addr);
    }

  for (argnum = 0; argnum < nargs; argnum++)
    {
      struct value *arg = args[argnum];
      struct type *arg_type, *fundamental_type;
      int len, elements;

      arg_type = check_typedef (value_type (arg));
      len = TYPE_LENGTH (arg_type);

      /* If arg can be passed in v registers as per the AAPCS64, then do so
         if there are enough spare registers.  */
      if (aapcs_is_vfp_call_or_return_candidate (arg_type, &elements,
                                                 &fundamental_type))
        {
          if (info.nsrn + elements <= 8)
            {
              /* We know that we have sufficient registers available,
                 therefore this will never need to fall back to the
                 stack.  */
              if (!pass_in_v_vfp_candidate (gdbarch, regcache, &info, arg_type,
                                            arg))
                gdb_assert_not_reached ("Failed to push args");
            }
          else
            {
              info.nsrn = 8;
              pass_on_stack (&info, arg_type, arg);
            }
          continue;
        }

      switch (TYPE_CODE (arg_type))
        {
        case TYPE_CODE_INT:
        case TYPE_CODE_BOOL:
        case TYPE_CODE_CHAR:
        case TYPE_CODE_RANGE:
        case TYPE_CODE_ENUM:
          if (len < 4)
            {
              /* Promote to 32 bit integer.  */
              if (TYPE_UNSIGNED (arg_type))
                arg_type = builtin_type (gdbarch)->builtin_uint32;
              else
                arg_type = builtin_type (gdbarch)->builtin_int32;
              arg = value_cast (arg_type, arg);
            }
          pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;

        case TYPE_CODE_STRUCT:
        case TYPE_CODE_ARRAY:
        case TYPE_CODE_UNION:
          if (len > 16)
            {
              /* PCS B.7 Aggregates larger than 16 bytes are passed by
                 invisible reference.  */

              /* Allocate aligned storage.  */
              sp = align_down (sp - len, 16);

              /* Write the real data into the stack.  */
              write_memory (sp, value_contents (arg), len);

              /* Construct the indirection.  */
              arg_type = lookup_pointer_type (arg_type);
              arg = value_from_pointer (arg_type, sp);
              pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
            }
          else
            /* PCS C.15 / C.18 multiple values pass.  */
            pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;

        default:
          pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;
        }
    }

  /* Make sure stack retains 16 byte alignment.  */
  if (info.nsaa & 15)
    sp -= 16 - (info.nsaa & 15);

  while (!VEC_empty (stack_item_t, info.si))
    {
      stack_item_t *si = VEC_last (stack_item_t, info.si);

      sp -= si->len;
      if (si->data != NULL)
        write_memory (sp, si->data, si->len);
      VEC_pop (stack_item_t, info.si);
    }

  VEC_free (stack_item_t, info.si);

  /* Finally, update the SP register.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);

  return sp;
}

/* Implement the "frame_align" gdbarch method.  */

static CORE_ADDR
aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
{
  /* Align the stack to sixteen bytes.  */
  return sp & ~(CORE_ADDR) 15;
}
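
/* For example (illustrative only, not part of the original source):
   aarch64_frame_align maps an SP value of 0x7ffffffff8f4 down to
   0x7ffffffff8f0.  */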

/* Return the type for an AdvSIMD Q register.  */

static struct type *
aarch64_vnq_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnq_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint128;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int128;
      append_composite_type_field (t, "s", elem);

      tdep->vnq_type = t;
    }

  return tdep->vnq_type;
}

/* Return the type for an AdvSIMD D register.  */

static struct type *
aarch64_vnd_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnd_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_double;
      append_composite_type_field (t, "f", elem);

      elem = builtin_type (gdbarch)->builtin_uint64;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int64;
      append_composite_type_field (t, "s", elem);

      tdep->vnd_type = t;
    }

  return tdep->vnd_type;
}

/* Return the type for an AdvSIMD S register.  */

static struct type *
aarch64_vns_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vns_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_float;
      append_composite_type_field (t, "f", elem);

      elem = builtin_type (gdbarch)->builtin_uint32;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int32;
      append_composite_type_field (t, "s", elem);

      tdep->vns_type = t;
    }

  return tdep->vns_type;
}

/* Return the type for an AdvSIMD H register.  */

static struct type *
aarch64_vnh_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnh_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint16;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int16;
      append_composite_type_field (t, "s", elem);

      tdep->vnh_type = t;
    }

  return tdep->vnh_type;
}

/* Return the type for an AdvSIMD B register.  */

static struct type *
aarch64_vnb_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnb_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint8;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int8;
      append_composite_type_field (t, "s", elem);

      tdep->vnb_type = t;
    }

  return tdep->vnb_type;
}

/* Return the type for an AdvSIMD V register.  */

static struct type *
aarch64_vnv_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnv_type == NULL)
    {
      struct type *t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnv",
                                            TYPE_CODE_UNION);

      append_composite_type_field (t, "d", aarch64_vnd_type (gdbarch));
      append_composite_type_field (t, "s", aarch64_vns_type (gdbarch));
      append_composite_type_field (t, "h", aarch64_vnh_type (gdbarch));
      append_composite_type_field (t, "b", aarch64_vnb_type (gdbarch));
      append_composite_type_field (t, "q", aarch64_vnq_type (gdbarch));

      tdep->vnv_type = t;
    }

  return tdep->vnv_type;
}

/* Implement the "dwarf2_reg_to_regnum" gdbarch method.  */

static int
aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
{
  if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
    return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;

  if (reg == AARCH64_DWARF_SP)
    return AARCH64_SP_REGNUM;

  if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
    return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;

  if (reg == AARCH64_DWARF_SVE_VG)
    return AARCH64_SVE_VG_REGNUM;

  if (reg == AARCH64_DWARF_SVE_FFR)
    return AARCH64_SVE_FFR_REGNUM;

  if (reg >= AARCH64_DWARF_SVE_P0 && reg <= AARCH64_DWARF_SVE_P0 + 15)
    return AARCH64_SVE_P0_REGNUM + reg - AARCH64_DWARF_SVE_P0;

  if (reg >= AARCH64_DWARF_SVE_Z0 && reg <= AARCH64_DWARF_SVE_Z0 + 15)
    return AARCH64_SVE_Z0_REGNUM + reg - AARCH64_DWARF_SVE_Z0;

  return -1;
}

/* Implement the "print_insn" gdbarch method.  */

static int
aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
{
  info->symbols = NULL;
  return default_print_insn (memaddr, info);
}

/* AArch64 BRK software debug mode instruction.
   Note that AArch64 code is always little-endian.
   1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000.  */
constexpr gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};

typedef BP_MANIPULATION (aarch64_default_breakpoint) aarch64_breakpoint;
1881
1882 /* Extract from an array REGS containing the (raw) register state a
1883 function return value of type TYPE, and copy that, in virtual
1884 format, into VALBUF. */
1885
1886 static void
1887 aarch64_extract_return_value (struct type *type, struct regcache *regs,
1888 gdb_byte *valbuf)
1889 {
1890 struct gdbarch *gdbarch = regs->arch ();
1891 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1892 int elements;
1893 struct type *fundamental_type;
1894
1895 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
1896 &fundamental_type))
1897 {
1898 int len = TYPE_LENGTH (fundamental_type);
1899
1900 for (int i = 0; i < elements; i++)
1901 {
1902 int regno = AARCH64_V0_REGNUM + i;
1903 /* Enough space for a full vector register. */
1904 gdb_byte buf[register_size (gdbarch, regno)];
1905 gdb_assert (len <= sizeof (buf));
1906
1907 if (aarch64_debug)
1908 {
1909 debug_printf ("read HFA or HVA return value element %d from %s\n",
1910 i + 1,
1911 gdbarch_register_name (gdbarch, regno));
1912 }
1913 regs->cooked_read (regno, buf);
1914
1915 memcpy (valbuf, buf, len);
1916 valbuf += len;
1917 }
1918 }
1919 else if (TYPE_CODE (type) == TYPE_CODE_INT
1920 || TYPE_CODE (type) == TYPE_CODE_CHAR
1921 || TYPE_CODE (type) == TYPE_CODE_BOOL
1922 || TYPE_CODE (type) == TYPE_CODE_PTR
1923 || TYPE_IS_REFERENCE (type)
1924 || TYPE_CODE (type) == TYPE_CODE_ENUM)
1925 {
1926 /* If the type is a plain integer, then the access is
1927 straightforward.  Otherwise we have to play around a bit
1928 more.  */
1929 int len = TYPE_LENGTH (type);
1930 int regno = AARCH64_X0_REGNUM;
1931 ULONGEST tmp;
1932
1933 while (len > 0)
1934 {
1935 /* By using store_unsigned_integer we avoid having to do
1936 anything special for small big-endian values. */
1937 regcache_cooked_read_unsigned (regs, regno++, &tmp);
1938 store_unsigned_integer (valbuf,
1939 (len > X_REGISTER_SIZE
1940 ? X_REGISTER_SIZE : len), byte_order, tmp);
1941 len -= X_REGISTER_SIZE;
1942 valbuf += X_REGISTER_SIZE;
1943 }
1944 }
1945 else
1946 {
1947 /* For a structure or union the behaviour is as if the value had
1948 been stored to word-aligned memory and then loaded into
1949 registers with 64-bit load instruction(s). */
1950 int len = TYPE_LENGTH (type);
1951 int regno = AARCH64_X0_REGNUM;
1952 bfd_byte buf[X_REGISTER_SIZE];
1953
1954 while (len > 0)
1955 {
1956 regs->cooked_read (regno++, buf);
1957 memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
1958 len -= X_REGISTER_SIZE;
1959 valbuf += X_REGISTER_SIZE;
1960 }
1961 }
1962 }
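
/* As a concrete example of the above: returning struct { float x, y; }
   is an HFA with two float members, so the VFP branch copies four bytes
   from each of v0 and v1, while a plain "long" result is instead read
   from x0 by the integer branch.  */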
1963
1964
1965 /* Will a function return an aggregate type in memory or in a
1966 register? Return 0 if an aggregate type can be returned in a
1967 register, 1 if it must be returned in memory. */
1968
1969 static int
1970 aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
1971 {
1972 type = check_typedef (type);
1973 int elements;
1974 struct type *fundamental_type;
1975
1976 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
1977 &fundamental_type))
1978 {
1979 /* v0-v7 are used to return values and one register is allocated
1980 for one member. However, HFA or HVA has at most four members. */
1981 return 0;
1982 }
1983
1984 if (TYPE_LENGTH (type) > 16)
1985 {
1986 /* PCS B.6 Aggregates larger than 16 bytes are passed by
1987 invisible reference. */
1988
1989 return 1;
1990 }
1991
1992 return 0;
1993 }
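
/* For instance, a struct of four doubles is an HFA and stays in
   registers (v0-v3), whereas a 24-byte struct of ints exceeds the
   16-byte limit above and is returned in memory by invisible
   reference.  */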
1994
1995 /* Write into appropriate registers a function return value of type
1996 TYPE, given in virtual format. */
1997
1998 static void
1999 aarch64_store_return_value (struct type *type, struct regcache *regs,
2000 const gdb_byte *valbuf)
2001 {
2002 struct gdbarch *gdbarch = regs->arch ();
2003 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2004 int elements;
2005 struct type *fundamental_type;
2006
2007 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2008 &fundamental_type))
2009 {
2010 int len = TYPE_LENGTH (fundamental_type);
2011
2012 for (int i = 0; i < elements; i++)
2013 {
2014 int regno = AARCH64_V0_REGNUM + i;
2015 /* Enough space for a full vector register. */
2016 gdb_byte tmpbuf[register_size (gdbarch, regno)];
2017 gdb_assert (len <= sizeof (tmpbuf));
2018
2019 if (aarch64_debug)
2020 {
2021 debug_printf ("write HFA or HVA return value element %d to %s\n",
2022 i + 1,
2023 gdbarch_register_name (gdbarch, regno));
2024 }
2025
2026 memcpy (tmpbuf, valbuf,
2027 len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
2028 regs->cooked_write (regno, tmpbuf);
2029 valbuf += len;
2030 }
2031 }
2032 else if (TYPE_CODE (type) == TYPE_CODE_INT
2033 || TYPE_CODE (type) == TYPE_CODE_CHAR
2034 || TYPE_CODE (type) == TYPE_CODE_BOOL
2035 || TYPE_CODE (type) == TYPE_CODE_PTR
2036 || TYPE_IS_REFERENCE (type)
2037 || TYPE_CODE (type) == TYPE_CODE_ENUM)
2038 {
2039 if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
2040 {
2041 /* Values of one word or less are zero/sign-extended and
2042 returned in X0.  */
2043 bfd_byte tmpbuf[X_REGISTER_SIZE];
2044 LONGEST val = unpack_long (type, valbuf);
2045
2046 store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
2047 regs->cooked_write (AARCH64_X0_REGNUM, tmpbuf);
2048 }
2049 else
2050 {
2051 /* Integral values greater than one word are stored in
2052 consecutive registers starting with X0.  This will always
2053 be a multiple of the register size.  */
2054 int len = TYPE_LENGTH (type);
2055 int regno = AARCH64_X0_REGNUM;
2056
2057 while (len > 0)
2058 {
2059 regs->cooked_write (regno++, valbuf);
2060 len -= X_REGISTER_SIZE;
2061 valbuf += X_REGISTER_SIZE;
2062 }
2063 }
2064 }
2065 else
2066 {
2067 /* For a structure or union the behaviour is as if the value had
2068 been stored to word-aligned memory and then loaded into
2069 registers with 64-bit load instruction(s). */
2070 int len = TYPE_LENGTH (type);
2071 int regno = AARCH64_X0_REGNUM;
2072 bfd_byte tmpbuf[X_REGISTER_SIZE];
2073
2074 while (len > 0)
2075 {
2076 memcpy (tmpbuf, valbuf,
2077 len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2078 regs->cooked_write (regno++, tmpbuf);
2079 len -= X_REGISTER_SIZE;
2080 valbuf += X_REGISTER_SIZE;
2081 }
2082 }
2083 }
2084
2085 /* Implement the "return_value" gdbarch method. */
2086
2087 static enum return_value_convention
2088 aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
2089 struct type *valtype, struct regcache *regcache,
2090 gdb_byte *readbuf, const gdb_byte *writebuf)
2091 {
2092
2093 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
2094 || TYPE_CODE (valtype) == TYPE_CODE_UNION
2095 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
2096 {
2097 if (aarch64_return_in_memory (gdbarch, valtype))
2098 {
2099 if (aarch64_debug)
2100 debug_printf ("return value in memory\n");
2101 return RETURN_VALUE_STRUCT_CONVENTION;
2102 }
2103 }
2104
2105 if (writebuf)
2106 aarch64_store_return_value (valtype, regcache, writebuf);
2107
2108 if (readbuf)
2109 aarch64_extract_return_value (valtype, regcache, readbuf);
2110
2111 if (aarch64_debug)
2112 debug_printf ("return value in registers\n");
2113
2114 return RETURN_VALUE_REGISTER_CONVENTION;
2115 }
2116
2117 /* Implement the "get_longjmp_target" gdbarch method. */
2118
2119 static int
2120 aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2121 {
2122 CORE_ADDR jb_addr;
2123 gdb_byte buf[X_REGISTER_SIZE];
2124 struct gdbarch *gdbarch = get_frame_arch (frame);
2125 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2126 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2127
2128 jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
2129
2130 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
2131 X_REGISTER_SIZE))
2132 return 0;
2133
2134 *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
2135 return 1;
2136 }
2137
2138 /* Implement the "gen_return_address" gdbarch method. */
2139
2140 static void
2141 aarch64_gen_return_address (struct gdbarch *gdbarch,
2142 struct agent_expr *ax, struct axs_value *value,
2143 CORE_ADDR scope)
2144 {
2145 value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
2146 value->kind = axs_lvalue_register;
2147 value->u.reg = AARCH64_LR_REGNUM;
2148 }
2149 \f
2150
2151 /* Return the pseudo register name corresponding to register regnum. */
2152
2153 static const char *
2154 aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
2155 {
2156 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2157
2158 static const char *const q_name[] =
2159 {
2160 "q0", "q1", "q2", "q3",
2161 "q4", "q5", "q6", "q7",
2162 "q8", "q9", "q10", "q11",
2163 "q12", "q13", "q14", "q15",
2164 "q16", "q17", "q18", "q19",
2165 "q20", "q21", "q22", "q23",
2166 "q24", "q25", "q26", "q27",
2167 "q28", "q29", "q30", "q31",
2168 };
2169
2170 static const char *const d_name[] =
2171 {
2172 "d0", "d1", "d2", "d3",
2173 "d4", "d5", "d6", "d7",
2174 "d8", "d9", "d10", "d11",
2175 "d12", "d13", "d14", "d15",
2176 "d16", "d17", "d18", "d19",
2177 "d20", "d21", "d22", "d23",
2178 "d24", "d25", "d26", "d27",
2179 "d28", "d29", "d30", "d31",
2180 };
2181
2182 static const char *const s_name[] =
2183 {
2184 "s0", "s1", "s2", "s3",
2185 "s4", "s5", "s6", "s7",
2186 "s8", "s9", "s10", "s11",
2187 "s12", "s13", "s14", "s15",
2188 "s16", "s17", "s18", "s19",
2189 "s20", "s21", "s22", "s23",
2190 "s24", "s25", "s26", "s27",
2191 "s28", "s29", "s30", "s31",
2192 };
2193
2194 static const char *const h_name[] =
2195 {
2196 "h0", "h1", "h2", "h3",
2197 "h4", "h5", "h6", "h7",
2198 "h8", "h9", "h10", "h11",
2199 "h12", "h13", "h14", "h15",
2200 "h16", "h17", "h18", "h19",
2201 "h20", "h21", "h22", "h23",
2202 "h24", "h25", "h26", "h27",
2203 "h28", "h29", "h30", "h31",
2204 };
2205
2206 static const char *const b_name[] =
2207 {
2208 "b0", "b1", "b2", "b3",
2209 "b4", "b5", "b6", "b7",
2210 "b8", "b9", "b10", "b11",
2211 "b12", "b13", "b14", "b15",
2212 "b16", "b17", "b18", "b19",
2213 "b20", "b21", "b22", "b23",
2214 "b24", "b25", "b26", "b27",
2215 "b28", "b29", "b30", "b31",
2216 };
2217
2218 regnum -= gdbarch_num_regs (gdbarch);
2219
2220 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2221 return q_name[regnum - AARCH64_Q0_REGNUM];
2222
2223 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2224 return d_name[regnum - AARCH64_D0_REGNUM];
2225
2226 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2227 return s_name[regnum - AARCH64_S0_REGNUM];
2228
2229 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2230 return h_name[regnum - AARCH64_H0_REGNUM];
2231
2232 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2233 return b_name[regnum - AARCH64_B0_REGNUM];
2234
2235 if (tdep->has_sve ())
2236 {
2237 static const char *const sve_v_name[] =
2238 {
2239 "v0", "v1", "v2", "v3",
2240 "v4", "v5", "v6", "v7",
2241 "v8", "v9", "v10", "v11",
2242 "v12", "v13", "v14", "v15",
2243 "v16", "v17", "v18", "v19",
2244 "v20", "v21", "v22", "v23",
2245 "v24", "v25", "v26", "v27",
2246 "v28", "v29", "v30", "v31",
2247 };
2248
2249 if (regnum >= AARCH64_SVE_V0_REGNUM
2250 && regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2251 return sve_v_name[regnum - AARCH64_SVE_V0_REGNUM];
2252 }
2253
2254 internal_error (__FILE__, __LINE__,
2255 _("aarch64_pseudo_register_name: bad register number %d"),
2256 regnum);
2257 }
2258
2259 /* Implement the "pseudo_register_type" tdesc_arch_data method. */
2260
2261 static struct type *
2262 aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
2263 {
2264 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2265
2266 regnum -= gdbarch_num_regs (gdbarch);
2267
2268 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2269 return aarch64_vnq_type (gdbarch);
2270
2271 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2272 return aarch64_vnd_type (gdbarch);
2273
2274 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2275 return aarch64_vns_type (gdbarch);
2276
2277 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2278 return aarch64_vnh_type (gdbarch);
2279
2280 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2281 return aarch64_vnb_type (gdbarch);
2282
2283 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2284 && regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2285 return aarch64_vnv_type (gdbarch);
2286
2287 internal_error (__FILE__, __LINE__,
2288 _("aarch64_pseudo_register_type: bad register number %d"),
2289 regnum);
2290 }
2291
2292 /* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2293
2294 static int
2295 aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
2296 struct reggroup *group)
2297 {
2298 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2299
2300 regnum -= gdbarch_num_regs (gdbarch);
2301
2302 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2303 return group == all_reggroup || group == vector_reggroup;
2304 else if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2305 return (group == all_reggroup || group == vector_reggroup
2306 || group == float_reggroup);
2307 else if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2308 return (group == all_reggroup || group == vector_reggroup
2309 || group == float_reggroup);
2310 else if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2311 return group == all_reggroup || group == vector_reggroup;
2312 else if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2313 return group == all_reggroup || group == vector_reggroup;
2314 else if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2315 && regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2316 return group == all_reggroup || group == vector_reggroup;
2317
2318 return group == all_reggroup;
2319 }
2320
2321 /* Helper for aarch64_pseudo_read_value. */
2322
2323 static struct value *
2324 aarch64_pseudo_read_value_1 (struct gdbarch *gdbarch,
2325 readable_regcache *regcache, int regnum_offset,
2326 int regsize, struct value *result_value)
2327 {
2328 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
2329
2330 /* Enough space for a full vector register. */
2331 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2332 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2333
2334 if (regcache->raw_read (v_regnum, reg_buf) != REG_VALID)
2335 mark_value_bytes_unavailable (result_value, 0,
2336 TYPE_LENGTH (value_type (result_value)));
2337 else
2338 memcpy (value_contents_raw (result_value), reg_buf, regsize);
2339
2340 return result_value;
2341 }
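
/* The Q/D/S/H/B pseudo registers all alias the low-order bytes of the
   corresponding V (or, on SVE targets, Z) register, so reading, say,
   d5 simply copies the low eight bytes of v5.  */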
2342
2343 /* Implement the "pseudo_register_read_value" gdbarch method. */
2344
2345 static struct value *
2346 aarch64_pseudo_read_value (struct gdbarch *gdbarch, readable_regcache *regcache,
2347 int regnum)
2348 {
2349 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2350 struct value *result_value = allocate_value (register_type (gdbarch, regnum));
2351
2352 VALUE_LVAL (result_value) = lval_register;
2353 VALUE_REGNUM (result_value) = regnum;
2354
2355 regnum -= gdbarch_num_regs (gdbarch);
2356
2357 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2358 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2359 regnum - AARCH64_Q0_REGNUM,
2360 Q_REGISTER_SIZE, result_value);
2361
2362 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2363 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2364 regnum - AARCH64_D0_REGNUM,
2365 D_REGISTER_SIZE, result_value);
2366
2367 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2368 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2369 regnum - AARCH64_S0_REGNUM,
2370 S_REGISTER_SIZE, result_value);
2371
2372 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2373 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2374 regnum - AARCH64_H0_REGNUM,
2375 H_REGISTER_SIZE, result_value);
2376
2377 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2378 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2379 regnum - AARCH64_B0_REGNUM,
2380 B_REGISTER_SIZE, result_value);
2381
2382 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2383 && regnum < AARCH64_SVE_V0_REGNUM + 32)
2384 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2385 regnum - AARCH64_SVE_V0_REGNUM,
2386 V_REGISTER_SIZE, result_value);
2387
2388 gdb_assert_not_reached ("regnum out of bounds");
2389 }
2390
2391 /* Helper for aarch64_pseudo_write. */
2392
2393 static void
2394 aarch64_pseudo_write_1 (struct gdbarch *gdbarch, struct regcache *regcache,
2395 int regnum_offset, int regsize, const gdb_byte *buf)
2396 {
2397 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
2398
2399 /* Enough space for a full vector register. */
2400 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2401 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2402
2403 /* Ensure the register buffer is zero.  We want GDB writes of the
2404 various 'scalar' pseudo registers to behave like architectural
2405 writes: register-width bytes are written and the remainder is
2406 set to zero.  */
2407 memset (reg_buf, 0, register_size (gdbarch, AARCH64_V0_REGNUM));
2408
2409 memcpy (reg_buf, buf, regsize);
2410 regcache->raw_write (v_regnum, reg_buf);
2411 }
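
/* For example, writing s3 stores four bytes at the bottom of v3 and
   clears the rest of the register, mirroring the architectural rule
   that a scalar write zeroes the upper part of the vector register.  */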
2412
2413 /* Implement the "pseudo_register_write" gdbarch method. */
2414
2415 static void
2416 aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2417 int regnum, const gdb_byte *buf)
2418 {
2419 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2420 regnum -= gdbarch_num_regs (gdbarch);
2421
2422 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2423 return aarch64_pseudo_write_1 (gdbarch, regcache,
2424 regnum - AARCH64_Q0_REGNUM, Q_REGISTER_SIZE,
2425 buf);
2426
2427 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2428 return aarch64_pseudo_write_1 (gdbarch, regcache,
2429 regnum - AARCH64_D0_REGNUM, D_REGISTER_SIZE,
2430 buf);
2431
2432 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2433 return aarch64_pseudo_write_1 (gdbarch, regcache,
2434 regnum - AARCH64_S0_REGNUM, S_REGISTER_SIZE,
2435 buf);
2436
2437 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2438 return aarch64_pseudo_write_1 (gdbarch, regcache,
2439 regnum - AARCH64_H0_REGNUM, H_REGISTER_SIZE,
2440 buf);
2441
2442 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2443 return aarch64_pseudo_write_1 (gdbarch, regcache,
2444 regnum - AARCH64_B0_REGNUM, B_REGISTER_SIZE,
2445 buf);
2446
2447 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2448 && regnum < AARCH64_SVE_V0_REGNUM + 32)
2449 return aarch64_pseudo_write_1 (gdbarch, regcache,
2450 regnum - AARCH64_SVE_V0_REGNUM,
2451 V_REGISTER_SIZE, buf);
2452
2453 gdb_assert_not_reached ("regnum out of bounds");
2454 }
2455
2456 /* Callback function for user_reg_add. */
2457
2458 static struct value *
2459 value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
2460 {
2461 const int *reg_p = (const int *) baton;
2462
2463 return value_of_register (*reg_p, frame);
2464 }
2465 \f
2466
2467 /* Implement the "software_single_step" gdbarch method, needed to
2468 single step through atomic sequences on AArch64. */
2469
2470 static std::vector<CORE_ADDR>
2471 aarch64_software_single_step (struct regcache *regcache)
2472 {
2473 struct gdbarch *gdbarch = regcache->arch ();
2474 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2475 const int insn_size = 4;
2476 const int atomic_sequence_length = 16; /* Instruction sequence length. */
2477 CORE_ADDR pc = regcache_read_pc (regcache);
2478 CORE_ADDR breaks[2] = { CORE_ADDR_MAX, CORE_ADDR_MAX };
2479 CORE_ADDR loc = pc;
2480 CORE_ADDR closing_insn = 0;
2481 uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
2482 byte_order_for_code);
2483 int index;
2484 int insn_count;
2485 int bc_insn_count = 0; /* Conditional branch instruction count. */
2486 int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed). */
2487 aarch64_inst inst;
2488
2489 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2490 return {};
2491
2492 /* Look for a Load Exclusive instruction which begins the sequence. */
2493 if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
2494 return {};
2495
2496 for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
2497 {
2498 loc += insn_size;
2499 insn = read_memory_unsigned_integer (loc, insn_size,
2500 byte_order_for_code);
2501
2502 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2503 return {};
2504 /* Check if the instruction is a conditional branch. */
2505 if (inst.opcode->iclass == condbranch)
2506 {
2507 gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);
2508
2509 if (bc_insn_count >= 1)
2510 return {};
2511
2512 /* It is, so we'll try to set a breakpoint at the destination. */
2513 breaks[1] = loc + inst.operands[0].imm.value;
2514
2515 bc_insn_count++;
2516 last_breakpoint++;
2517 }
2518
2519 /* Look for the Store Exclusive which closes the atomic sequence. */
2520 if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
2521 {
2522 closing_insn = loc;
2523 break;
2524 }
2525 }
2526
2527 /* We didn't find a closing Store Exclusive instruction, fall back. */
2528 if (!closing_insn)
2529 return {};
2530
2531 /* Insert breakpoint after the end of the atomic sequence. */
2532 breaks[0] = loc + insn_size;
2533
2534 /* Check for duplicated breakpoints, and also check that the second
2535 breakpoint is not within the atomic sequence. */
2536 if (last_breakpoint
2537 && (breaks[1] == breaks[0]
2538 || (breaks[1] >= pc && breaks[1] <= closing_insn)))
2539 last_breakpoint = 0;
2540
2541 std::vector<CORE_ADDR> next_pcs;
2542
2543 /* Insert the breakpoint at the end of the sequence, and one at the
2544 destination of the conditional branch, if it exists. */
2545 for (index = 0; index <= last_breakpoint; index++)
2546 next_pcs.push_back (breaks[index]);
2547
2548 return next_pcs;
2549 }
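
/* A representative (hypothetical) sequence this handles is a
   load-exclusive/store-exclusive compare-exchange:

     retry:
       ldaxr  w1, [x0]      ; Load Exclusive opens the sequence.
       cmp    w1, w3
       b.ne   out           ; Conditional branch: "out" also gets a stop.
       stlxr  w2, w4, [x0]  ; Store Exclusive closes the sequence.
       cbnz   w2, retry     ; Retry on failure (outside the sequence).
     out:

   Trapping instruction-by-instruction inside such a sequence would
   clear the exclusive monitor and make the Store Exclusive fail every
   time, so the breakpoints are placed after the Store Exclusive and at
   the conditional branch destination instead.  */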
2550
2551 struct aarch64_displaced_step_closure : public displaced_step_closure
2552 {
2553 /* True when a conditional instruction, such as B.COND, TBZ, etc.,
2554 is being displaced stepped.  */
2555 int cond = 0;
2556
2557 /* PC adjustment offset after displaced stepping. */
2558 int32_t pc_adjust = 0;
2559 };
2560
2561 /* Data when visiting instructions for displaced stepping. */
2562
2563 struct aarch64_displaced_step_data
2564 {
2565 struct aarch64_insn_data base;
2566
2567 /* The address at which the instruction will be executed.  */
2568 CORE_ADDR new_addr;
2569 /* Buffer of instructions to be copied to NEW_ADDR to execute. */
2570 uint32_t insn_buf[DISPLACED_MODIFIED_INSNS];
2571 /* Number of instructions in INSN_BUF. */
2572 unsigned insn_count;
2573 /* Registers when doing displaced stepping. */
2574 struct regcache *regs;
2575
2576 aarch64_displaced_step_closure *dsc;
2577 };
2578
2579 /* Implementation of aarch64_insn_visitor method "b". */
2580
2581 static void
2582 aarch64_displaced_step_b (const int is_bl, const int32_t offset,
2583 struct aarch64_insn_data *data)
2584 {
2585 struct aarch64_displaced_step_data *dsd
2586 = (struct aarch64_displaced_step_data *) data;
2587 int64_t new_offset = data->insn_addr - dsd->new_addr + offset;
2588
2589 if (can_encode_int32 (new_offset, 28))
2590 {
2591 /* Emit B rather than BL, because executing BL on a new address
2592 will get the wrong address into LR. In order to avoid this,
2593 we emit B, and update LR if the instruction is BL. */
2594 emit_b (dsd->insn_buf, 0, new_offset);
2595 dsd->insn_count++;
2596 }
2597 else
2598 {
2599 /* Write NOP. */
2600 emit_nop (dsd->insn_buf);
2601 dsd->insn_count++;
2602 dsd->dsc->pc_adjust = offset;
2603 }
2604
2605 if (is_bl)
2606 {
2607 /* Update LR. */
2608 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
2609 data->insn_addr + 4);
2610 }
2611 }
2612
2613 /* Implementation of aarch64_insn_visitor method "b_cond". */
2614
2615 static void
2616 aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
2617 struct aarch64_insn_data *data)
2618 {
2619 struct aarch64_displaced_step_data *dsd
2620 = (struct aarch64_displaced_step_data *) data;
2621
2622 /* GDB has to fix up the PC after displaced stepping this instruction
2623 differently, depending on whether the condition is true or false.
2624 Instead of checking COND against the condition flags, we emit the
2625 following instructions, and GDB can tell how to fix up the PC from
2626 the resulting PC value.
2627
2628 B.COND TAKEN ; If cond is true, then jump to TAKEN.
2629 INSN1 ;
2630 TAKEN:
2631 INSN2
2632 */
2633
2634 emit_bcond (dsd->insn_buf, cond, 8);
2635 dsd->dsc->cond = 1;
2636 dsd->dsc->pc_adjust = offset;
2637 dsd->insn_count = 1;
2638 }
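
/* With the B.COND copied to the scratch address TO, the inferior stops
   either at TO + 8 (condition true, branch taken) or at TO + 4
   (condition false, fell through); aarch64_displaced_step_fixup below
   uses exactly this PC difference to pick the adjustment.  */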
2639
2640 /* Build an aarch64_register operand dynamically.  If the register is
2641 known statically, it should be made a global as above instead of
2642 using this helper function.  */
2643
2644 static struct aarch64_register
2645 aarch64_register (unsigned num, int is64)
2646 {
2647 return (struct aarch64_register) { num, is64 };
2648 }
2649
2650 /* Implementation of aarch64_insn_visitor method "cb". */
2651
2652 static void
2653 aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
2654 const unsigned rn, int is64,
2655 struct aarch64_insn_data *data)
2656 {
2657 struct aarch64_displaced_step_data *dsd
2658 = (struct aarch64_displaced_step_data *) data;
2659
2660 /* The offset is out of range for a compare and branch
2661 instruction. We can use the following instructions instead:
2662
2663 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
2664 INSN1 ;
2665 TAKEN:
2666 INSN2
2667 */
2668 emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
2669 dsd->insn_count = 1;
2670 dsd->dsc->cond = 1;
2671 dsd->dsc->pc_adjust = offset;
2672 }
2673
2674 /* Implementation of aarch64_insn_visitor method "tb". */
2675
2676 static void
2677 aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
2678 const unsigned rt, unsigned bit,
2679 struct aarch64_insn_data *data)
2680 {
2681 struct aarch64_displaced_step_data *dsd
2682 = (struct aarch64_displaced_step_data *) data;
2683
2684 /* The offset is out of range for a test bit and branch
2685 instruction.  We can use the following instructions instead:
2686
2687 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
2688 INSN1 ;
2689 TAKEN:
2690 INSN2
2691
2692 */
2693 emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
2694 dsd->insn_count = 1;
2695 dsd->dsc->cond = 1;
2696 dsd->dsc->pc_adjust = offset;
2697 }
2698
2699 /* Implementation of aarch64_insn_visitor method "adr". */
2700
2701 static void
2702 aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
2703 const int is_adrp, struct aarch64_insn_data *data)
2704 {
2705 struct aarch64_displaced_step_data *dsd
2706 = (struct aarch64_displaced_step_data *) data;
2707 /* We know exactly the address the ADR{P,} instruction will compute.
2708 We can just write it to the destination register. */
2709 CORE_ADDR address = data->insn_addr + offset;
2710
2711 if (is_adrp)
2712 {
2713 /* Clear the lower 12 bits of the offset to get the 4K page. */
2714 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2715 address & ~0xfff);
2716 }
2717 else
2718 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2719 address);
2720
2721 dsd->dsc->pc_adjust = 4;
2722 emit_nop (dsd->insn_buf);
2723 dsd->insn_count = 1;
2724 }
2725
2726 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
2727
2728 static void
2729 aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
2730 const unsigned rt, const int is64,
2731 struct aarch64_insn_data *data)
2732 {
2733 struct aarch64_displaced_step_data *dsd
2734 = (struct aarch64_displaced_step_data *) data;
2735 CORE_ADDR address = data->insn_addr + offset;
2736 struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };
2737
2738 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
2739 address);
2740
2741 if (is_sw)
2742 dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
2743 aarch64_register (rt, 1), zero);
2744 else
2745 dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
2746 aarch64_register (rt, 1), zero);
2747
2748 dsd->dsc->pc_adjust = 4;
2749 }
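
/* In effect a PC-relative load literal is rewritten as a write of the
   literal's absolute address into Rt (done via the regcache above),
   followed by "ldr Rt, [Rt, #0]" (or LDRSW for the sign-extending
   form), so the load still reads from the original literal address.  */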
2750
2751 /* Implementation of aarch64_insn_visitor method "others". */
2752
2753 static void
2754 aarch64_displaced_step_others (const uint32_t insn,
2755 struct aarch64_insn_data *data)
2756 {
2757 struct aarch64_displaced_step_data *dsd
2758 = (struct aarch64_displaced_step_data *) data;
2759
2760 aarch64_emit_insn (dsd->insn_buf, insn);
2761 dsd->insn_count = 1;
2762
2763 if ((insn & 0xfffffc1f) == 0xd65f0000)
2764 {
2765 /* RET */
2766 dsd->dsc->pc_adjust = 0;
2767 }
2768 else
2769 dsd->dsc->pc_adjust = 4;
2770 }
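
/* RET already transfers control through the register it names, so once
   the copied instruction has executed the PC is correct and needs no
   adjustment; every other instruction relocated via "others" simply
   falls through and advances the PC by 4.  */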
2771
2772 static const struct aarch64_insn_visitor visitor =
2773 {
2774 aarch64_displaced_step_b,
2775 aarch64_displaced_step_b_cond,
2776 aarch64_displaced_step_cb,
2777 aarch64_displaced_step_tb,
2778 aarch64_displaced_step_adr,
2779 aarch64_displaced_step_ldr_literal,
2780 aarch64_displaced_step_others,
2781 };
2782
2783 /* Implement the "displaced_step_copy_insn" gdbarch method. */
2784
2785 struct displaced_step_closure *
2786 aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
2787 CORE_ADDR from, CORE_ADDR to,
2788 struct regcache *regs)
2789 {
2790 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2791 uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
2792 struct aarch64_displaced_step_data dsd;
2793 aarch64_inst inst;
2794
2795 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2796 return NULL;
2797
2798 /* Look for a Load Exclusive instruction which begins the sequence. */
2799 if (inst.opcode->iclass == ldstexcl && bit (insn, 22))
2800 {
2801 /* We can't displaced-step atomic sequences.  */
2802 return NULL;
2803 }
2804
2805 std::unique_ptr<aarch64_displaced_step_closure> dsc
2806 (new aarch64_displaced_step_closure);
2807 dsd.base.insn_addr = from;
2808 dsd.new_addr = to;
2809 dsd.regs = regs;
2810 dsd.dsc = dsc.get ();
2811 dsd.insn_count = 0;
2812 aarch64_relocate_instruction (insn, &visitor,
2813 (struct aarch64_insn_data *) &dsd);
2814 gdb_assert (dsd.insn_count <= DISPLACED_MODIFIED_INSNS);
2815
2816 if (dsd.insn_count != 0)
2817 {
2818 int i;
2819
2820 /* The instruction can be relocated to the scratch pad.  Copy the
2821 relocated instruction(s) there.  */
2822 for (i = 0; i < dsd.insn_count; i++)
2823 {
2824 if (debug_displaced)
2825 {
2826 debug_printf ("displaced: writing insn ");
2827 debug_printf ("%.8x", dsd.insn_buf[i]);
2828 debug_printf (" at %s\n", paddress (gdbarch, to + i * 4));
2829 }
2830 write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
2831 (ULONGEST) dsd.insn_buf[i]);
2832 }
2833 }
2834 else
2835 {
2836 dsc = NULL;
2837 }
2838
2839 return dsc.release ();
2840 }
2841
2842 /* Implement the "displaced_step_fixup" gdbarch method. */
2843
2844 void
2845 aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
2846 struct displaced_step_closure *dsc_,
2847 CORE_ADDR from, CORE_ADDR to,
2848 struct regcache *regs)
2849 {
2850 aarch64_displaced_step_closure *dsc = (aarch64_displaced_step_closure *) dsc_;
2851
2852 if (dsc->cond)
2853 {
2854 ULONGEST pc;
2855
2856 regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc);
2857 if (pc - to == 8)
2858 {
2859 /* Condition is true. */
2860 }
2861 else if (pc - to == 4)
2862 {
2863 /* Condition is false. */
2864 dsc->pc_adjust = 4;
2865 }
2866 else
2867 gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
2868 }
2869
2870 if (dsc->pc_adjust != 0)
2871 {
2872 if (debug_displaced)
2873 {
2874 debug_printf ("displaced: fixup: set PC to %s:%d\n",
2875 paddress (gdbarch, from), dsc->pc_adjust);
2876 }
2877 regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
2878 from + dsc->pc_adjust);
2879 }
2880 }
2881
2882 /* Implement the "displaced_step_hw_singlestep" gdbarch method. */
2883
2884 int
2885 aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch,
2886 struct displaced_step_closure *closure)
2887 {
2888 return 1;
2889 }
2890
2891 /* Get the correct target description for the given VQ value.
2892 If VQ is zero then it is assumed SVE is not supported.
2893 (It is not possible to set VQ to zero on an SVE system). */
2894
2895 const target_desc *
2896 aarch64_read_description (uint64_t vq, bool pauth_p)
2897 {
2898 if (vq > AARCH64_MAX_SVE_VQ)
2899 error (_("VQ is %" PRIu64 ", maximum supported value is %d"), vq,
2900 AARCH64_MAX_SVE_VQ);
2901
2902 struct target_desc *tdesc = tdesc_aarch64_list[vq][pauth_p];
2903
2904 if (tdesc == NULL)
2905 {
2906 tdesc = aarch64_create_target_description (vq, pauth_p);
2907 tdesc_aarch64_list[vq][pauth_p] = tdesc;
2908 }
2909
2910 return tdesc;
2911 }
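
/* For example, aarch64_read_description (2, false) returns (and caches
   in tdesc_aarch64_list) a description for an SVE target with VQ = 2,
   i.e. 2 * 128 = 256-bit vectors, without pointer authentication.  */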
2912
2913 /* Return the VQ used when creating the target description TDESC. */
2914
2915 static uint64_t
2916 aarch64_get_tdesc_vq (const struct target_desc *tdesc)
2917 {
2918 const struct tdesc_feature *feature_sve;
2919
2920 if (!tdesc_has_registers (tdesc))
2921 return 0;
2922
2923 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
2924
2925 if (feature_sve == nullptr)
2926 return 0;
2927
2928 uint64_t vl = tdesc_register_bitsize (feature_sve,
2929 aarch64_sve_register_names[0]) / 8;
2930 return sve_vq_from_vl (vl);
2931 }
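
/* For instance, a description whose SVE registers are 512 bits wide
   yields vl = 512 / 8 = 64 bytes, which sve_vq_from_vl maps to
   VQ = 64 / 16 = 4.  */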
2932
2933 /* Add all the expected register sets into GDBARCH. */
2934
2935 static void
2936 aarch64_add_reggroups (struct gdbarch *gdbarch)
2937 {
2938 reggroup_add (gdbarch, general_reggroup);
2939 reggroup_add (gdbarch, float_reggroup);
2940 reggroup_add (gdbarch, system_reggroup);
2941 reggroup_add (gdbarch, vector_reggroup);
2942 reggroup_add (gdbarch, all_reggroup);
2943 reggroup_add (gdbarch, save_reggroup);
2944 reggroup_add (gdbarch, restore_reggroup);
2945 }
2946
2947 /* Implement the "cannot_store_register" gdbarch method. */
2948
2949 static int
2950 aarch64_cannot_store_register (struct gdbarch *gdbarch, int regnum)
2951 {
2952 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2953
2954 if (!tdep->has_pauth ())
2955 return 0;
2956
2957 /* Pointer authentication registers are read-only. */
2958 return (regnum == AARCH64_PAUTH_DMASK_REGNUM (tdep->pauth_reg_base)
2959 || regnum == AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base));
2960 }
2961
2962 /* Initialize the current architecture based on INFO. If possible,
2963 re-use an architecture from ARCHES, which is a list of
2964 architectures already created during this debugging session.
2965
2966 Called e.g. at program startup, when reading a core file, and when
2967 reading a binary file. */
2968
2969 static struct gdbarch *
2970 aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
2971 {
2972 struct gdbarch_tdep *tdep;
2973 struct gdbarch *gdbarch;
2974 struct gdbarch_list *best_arch;
2975 struct tdesc_arch_data *tdesc_data = NULL;
2976 const struct target_desc *tdesc = info.target_desc;
2977 int i;
2978 int valid_p = 1;
2979 const struct tdesc_feature *feature_core;
2980 const struct tdesc_feature *feature_fpu;
2981 const struct tdesc_feature *feature_sve;
2982 const struct tdesc_feature *feature_pauth;
2983 int num_regs = 0;
2984 int num_pseudo_regs = 0;
2985 int first_pauth_regnum = -1;
2986
2987 /* Ensure we always have a target description. */
2988 if (!tdesc_has_registers (tdesc))
2989 tdesc = aarch64_read_description (0, false);
2990 gdb_assert (tdesc);
2991
2992 feature_core = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");
2993 feature_fpu = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
2994 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
2995 feature_pauth = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.pauth");
2996
2997 if (feature_core == NULL)
2998 return NULL;
2999
3000 tdesc_data = tdesc_data_alloc ();
3001
3002 /* Validate the description provides the mandatory core R registers
3003 and allocate their numbers. */
3004 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
3005 valid_p &= tdesc_numbered_register (feature_core, tdesc_data,
3006 AARCH64_X0_REGNUM + i,
3007 aarch64_r_register_names[i]);
3008
3009 num_regs = AARCH64_X0_REGNUM + i;
3010
3011 /* Add the V registers. */
3012 if (feature_fpu != NULL)
3013 {
3014 if (feature_sve != NULL)
3015 error (_("Program contains both fpu and SVE features."));
3016
3017 /* Validate the description provides the mandatory V registers
3018 and allocate their numbers. */
3019 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
3020 valid_p &= tdesc_numbered_register (feature_fpu, tdesc_data,
3021 AARCH64_V0_REGNUM + i,
3022 aarch64_v_register_names[i]);
3023
3024 num_regs = AARCH64_V0_REGNUM + i;
3025 }
3026
3027 /* Add the SVE registers. */
3028 if (feature_sve != NULL)
3029 {
3030 /* Validate the description provides the mandatory SVE registers
3031 and allocate their numbers. */
3032 for (i = 0; i < ARRAY_SIZE (aarch64_sve_register_names); i++)
3033 valid_p &= tdesc_numbered_register (feature_sve, tdesc_data,
3034 AARCH64_SVE_Z0_REGNUM + i,
3035 aarch64_sve_register_names[i]);
3036
3037 num_regs = AARCH64_SVE_Z0_REGNUM + i;
3038 num_pseudo_regs += 32; /* add the Vn register pseudos. */
3039 }
3040
3041 if (feature_fpu != NULL || feature_sve != NULL)
3042 {
3043 num_pseudo_regs += 32; /* add the Qn scalar register pseudos */
3044 num_pseudo_regs += 32; /* add the Dn scalar register pseudos */
3045 num_pseudo_regs += 32; /* add the Sn scalar register pseudos */
3046 num_pseudo_regs += 32; /* add the Hn scalar register pseudos */
3047 num_pseudo_regs += 32; /* add the Bn scalar register pseudos */
3048 }
3049
3050 /* Add the pauth registers. */
3051 if (feature_pauth != NULL)
3052 {
3053 first_pauth_regnum = num_regs;
3054
3055 /* Validate the descriptor provides the mandatory PAUTH registers and
3056 allocate their numbers. */
3057 for (i = 0; i < ARRAY_SIZE (aarch64_pauth_register_names); i++)
3058 valid_p &= tdesc_numbered_register (feature_pauth, tdesc_data,
3059 first_pauth_regnum + i,
3060 aarch64_pauth_register_names[i]);
3061
3062 num_regs += i;
3063 }
3064
3065 if (!valid_p)
3066 {
3067 tdesc_data_cleanup (tdesc_data);
3068 return NULL;
3069 }
3070
3071 /* AArch64 code is always little-endian. */
3072 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
3073
3074 /* If there is already a candidate, use it. */
3075 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
3076 best_arch != NULL;
3077 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
3078 {
3079 /* Found a match. */
3080 break;
3081 }
3082
3083 if (best_arch != NULL)
3084 {
3085 if (tdesc_data != NULL)
3086 tdesc_data_cleanup (tdesc_data);
3087 return best_arch->gdbarch;
3088 }
3089
3090 tdep = XCNEW (struct gdbarch_tdep);
3091 gdbarch = gdbarch_alloc (&info, tdep);
3092
3093 /* This should be low enough for everything. */
3094 tdep->lowest_pc = 0x20;
3095 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
3096 tdep->jb_elt_size = 8;
3097 tdep->vq = aarch64_get_tdesc_vq (tdesc);
3098 tdep->pauth_reg_base = first_pauth_regnum;
3099
3100 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
3101 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
3102
3103 /* Advance PC across function entry code. */
3104 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
3105
3106 /* The stack grows downward. */
3107 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
3108
3109 /* Breakpoint manipulation. */
3110 set_gdbarch_breakpoint_kind_from_pc (gdbarch,
3111 aarch64_breakpoint::kind_from_pc);
3112 set_gdbarch_sw_breakpoint_from_kind (gdbarch,
3113 aarch64_breakpoint::bp_from_kind);
3114 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
3115 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
3116
3117 /* Information about registers, etc. */
3118 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
3119 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
3120 set_gdbarch_num_regs (gdbarch, num_regs);
3121
3122 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
3123 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
3124 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
3125 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
3126 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
3127 set_tdesc_pseudo_register_reggroup_p (gdbarch,
3128 aarch64_pseudo_register_reggroup_p);
3129 set_gdbarch_cannot_store_register (gdbarch, aarch64_cannot_store_register);
3130
3131 /* ABI */
3132 set_gdbarch_short_bit (gdbarch, 16);
3133 set_gdbarch_int_bit (gdbarch, 32);
3134 set_gdbarch_float_bit (gdbarch, 32);
3135 set_gdbarch_double_bit (gdbarch, 64);
3136 set_gdbarch_long_double_bit (gdbarch, 128);
3137 set_gdbarch_long_bit (gdbarch, 64);
3138 set_gdbarch_long_long_bit (gdbarch, 64);
3139 set_gdbarch_ptr_bit (gdbarch, 64);
3140 set_gdbarch_char_signed (gdbarch, 0);
3141 set_gdbarch_wchar_signed (gdbarch, 0);
3142 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
3143 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
3144 set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);
3145
3146 /* Internal <-> external register number maps. */
3147 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
3148
3149 /* Returning results. */
3150 set_gdbarch_return_value (gdbarch, aarch64_return_value);
3151
3152 /* Disassembly. */
3153 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
3154
3155 /* Virtual tables. */
3156 set_gdbarch_vbit_in_delta (gdbarch, 1);
3157
3158 /* Register architecture. */
3159 aarch64_add_reggroups (gdbarch);
3160
3161 /* Hook in the ABI-specific overrides, if they have been registered. */
3162 info.target_desc = tdesc;
3163 info.tdesc_data = tdesc_data;
3164 gdbarch_init_osabi (info, gdbarch);
3165
3166 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
3167
3168 /* Add some default predicates. */
3169 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
3170 dwarf2_append_unwinders (gdbarch);
3171 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
3172
3173 frame_base_set_default (gdbarch, &aarch64_normal_base);
3174
3175 /* Now we have tuned the configuration, set a few final things,
3176 based on what the OS ABI has told us. */
3177
3178 if (tdep->jb_pc >= 0)
3179 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
3180
3181 set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);
3182
3183 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
3184
3185 /* Add standard register aliases. */
3186 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
3187 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
3188 value_of_aarch64_user_reg,
3189 &aarch64_register_aliases[i].regnum);
3190
3191 register_aarch64_ravenscar_ops (gdbarch);
3192
3193 return gdbarch;
3194 }
3195
3196 static void
3197 aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
3198 {
3199 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3200
3201 if (tdep == NULL)
3202 return;
3203
3204 fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
3205 paddress (gdbarch, tdep->lowest_pc));
3206 }
3207
3208 #if GDB_SELF_TEST
3209 namespace selftests
3210 {
3211 static void aarch64_process_record_test (void);
3212 }
3213 #endif
3214
3215 void
3216 _initialize_aarch64_tdep (void)
3217 {
3218 gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
3219 aarch64_dump_tdep);
3220
3221 /* Debug this file's internals. */
3222 add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
3223 Set AArch64 debugging."), _("\
3224 Show AArch64 debugging."), _("\
3225 When on, AArch64 specific debugging is enabled."),
3226 NULL,
3227 show_aarch64_debug,
3228 &setdebuglist, &showdebuglist);
3229
3230 #if GDB_SELF_TEST
3231 selftests::register_test ("aarch64-analyze-prologue",
3232 selftests::aarch64_analyze_prologue_test);
3233 selftests::register_test ("aarch64-process-record",
3234 selftests::aarch64_process_record_test);
3235 selftests::record_xml_tdesc ("aarch64.xml",
3236 aarch64_create_target_description (0, false));
3237 #endif
3238 }
3239
3240 /* AArch64 process record-replay related structures, defines etc. */
3241
3242 #define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
3243 do \
3244 { \
3245 unsigned int reg_len = LENGTH; \
3246 if (reg_len) \
3247 { \
3248 REGS = XNEWVEC (uint32_t, reg_len); \
3249 memcpy (&REGS[0], &RECORD_BUF[0], sizeof (uint32_t) * LENGTH); \
3250 } \
3251 } \
3252 while (0)
3253
3254 #define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
3255 do \
3256 { \
3257 unsigned int mem_len = LENGTH; \
3258 if (mem_len) \
3259 { \
3260 MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
3261 memcpy (&MEMS->len, &RECORD_BUF[0], \
3262 sizeof (struct aarch64_mem_r) * LENGTH); \
3263 } \
3264 } \
3265 while (0)
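
/* Note that MEM_ALLOC copies into &MEMS->len, the address of the first
   member of the first element, which equals MEMS itself; RECORD_BUF is
   therefore expected to hold LENGTH aarch64_mem_r entries laid out as
   contiguous length/address pairs, which is how the record handlers
   below fill their record_buf_mem arrays.  */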
3266
3267 /* AArch64 record/replay structures and enumerations. */
3268
3269 struct aarch64_mem_r
3270 {
3271 uint64_t len; /* Record length. */
3272 uint64_t addr; /* Memory address. */
3273 };
3274
3275 enum aarch64_record_result
3276 {
3277 AARCH64_RECORD_SUCCESS,
3278 AARCH64_RECORD_UNSUPPORTED,
3279 AARCH64_RECORD_UNKNOWN
3280 };
3281
3282 typedef struct insn_decode_record_t
3283 {
3284 struct gdbarch *gdbarch;
3285 struct regcache *regcache;
3286 CORE_ADDR this_addr; /* Address of insn to be recorded. */
3287 uint32_t aarch64_insn; /* Insn to be recorded. */
3288 uint32_t mem_rec_count; /* Count of memory records. */
3289 uint32_t reg_rec_count; /* Count of register records. */
3290 uint32_t *aarch64_regs; /* Registers to be recorded. */
3291 struct aarch64_mem_r *aarch64_mems; /* Memory locations to be recorded. */
3292 } insn_decode_record;
3293
3294 /* Record handler for data processing - register instructions. */
3295
3296 static unsigned int
3297 aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
3298 {
3299 uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
3300 uint32_t record_buf[4];
3301
3302 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3303 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3304 insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);
3305
3306 if (!bit (aarch64_insn_r->aarch64_insn, 28))
3307 {
3308 uint8_t setflags;
3309
3310 /* Logical (shifted register). */
3311 if (insn_bits24_27 == 0x0a)
3312 setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
3313 /* Add/subtract. */
3314 else if (insn_bits24_27 == 0x0b)
3315 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3316 else
3317 return AARCH64_RECORD_UNKNOWN;
3318
3319 record_buf[0] = reg_rd;
3320 aarch64_insn_r->reg_rec_count = 1;
3321 if (setflags)
3322 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3323 }
3324 else
3325 {
3326 if (insn_bits24_27 == 0x0b)
3327 {
3328 /* Data-processing (3 source). */
3329 record_buf[0] = reg_rd;
3330 aarch64_insn_r->reg_rec_count = 1;
3331 }
3332 else if (insn_bits24_27 == 0x0a)
3333 {
3334 if (insn_bits21_23 == 0x00)
3335 {
3336 /* Add/subtract (with carry). */
3337 record_buf[0] = reg_rd;
3338 aarch64_insn_r->reg_rec_count = 1;
3339 if (bit (aarch64_insn_r->aarch64_insn, 29))
3340 {
3341 record_buf[1] = AARCH64_CPSR_REGNUM;
3342 aarch64_insn_r->reg_rec_count = 2;
3343 }
3344 }
3345 else if (insn_bits21_23 == 0x02)
3346 {
3347 /* Conditional compare (register) and conditional compare
3348 (immediate) instructions. */
3349 record_buf[0] = AARCH64_CPSR_REGNUM;
3350 aarch64_insn_r->reg_rec_count = 1;
3351 }
3352 else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
3353 {
3354 /* Conditional select.  */
3355 /* Data-processing (2 source). */
3356 /* Data-processing (1 source). */
3357 record_buf[0] = reg_rd;
3358 aarch64_insn_r->reg_rec_count = 1;
3359 }
3360 else
3361 return AARCH64_RECORD_UNKNOWN;
3362 }
3363 }
3364
3365 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3366 record_buf);
3367 return AARCH64_RECORD_SUCCESS;
3368 }
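
/* For example, "adds x0, x1, x2" has the setflags bit (bit 29) set, so
   both x0 and the CPSR are recorded, while plain "add x0, x1, x2"
   records only x0.  */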
3369
3370 /* Record handler for data processing - immediate instructions. */
3371
3372 static unsigned int
3373 aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
3374 {
3375 uint8_t reg_rd, insn_bit23, insn_bits24_27, setflags;
3376 uint32_t record_buf[4];
3377
3378 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3379 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3380 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3381
3382 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
3383 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
3384 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
3385 {
3386 record_buf[0] = reg_rd;
3387 aarch64_insn_r->reg_rec_count = 1;
3388 }
3389 else if (insn_bits24_27 == 0x01)
3390 {
3391 /* Add/Subtract (immediate). */
3392 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3393 record_buf[0] = reg_rd;
3394 aarch64_insn_r->reg_rec_count = 1;
3395 if (setflags)
3396 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3397 }
3398 else if (insn_bits24_27 == 0x02 && !insn_bit23)
3399 {
3400 /* Logical (immediate). */
3401 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
3402 record_buf[0] = reg_rd;
3403 aarch64_insn_r->reg_rec_count = 1;
3404 if (setflags)
3405 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3406 }
3407 else
3408 return AARCH64_RECORD_UNKNOWN;
3409
3410 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3411 record_buf);
3412 return AARCH64_RECORD_SUCCESS;
3413 }
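
/* For example, "subs x0, x1, #4" records x0 and the CPSR, whereas
   "movz x5, #1" (move wide immediate) records only x5.  */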
3414
3415 /* Record handler for branch, exception generation and system instructions. */
3416
3417 static unsigned int
3418 aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
3419 {
3420 struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
3421 uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
3422 uint32_t record_buf[4];
3423
3424 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3425 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3426 insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3427
3428 if (insn_bits28_31 == 0x0d)
3429 {
3430 /* Exception generation instructions. */
3431 if (insn_bits24_27 == 0x04)
3432 {
3433 if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
3434 && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
3435 && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
3436 {
3437 ULONGEST svc_number;
3438
3439 regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
3440 &svc_number);
3441 return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
3442 svc_number);
3443 }
3444 else
3445 return AARCH64_RECORD_UNSUPPORTED;
3446 }
3447 /* System instructions. */
3448 else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
3449 {
3450 uint32_t reg_rt, reg_crn;
3451
3452 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3453 reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3454
3455 /* Record Rt for SYSL and MRS instructions.  */
3456 if (bit (aarch64_insn_r->aarch64_insn, 21))
3457 {
3458 record_buf[0] = reg_rt;
3459 aarch64_insn_r->reg_rec_count = 1;
3460 }
3461 /* Record the CPSR for HINT and MSR (immediate) instructions.  */
3462 else if (reg_crn == 0x02 || reg_crn == 0x04)
3463 {
3464 record_buf[0] = AARCH64_CPSR_REGNUM;
3465 aarch64_insn_r->reg_rec_count = 1;
3466 }
3467 }
3468 /* Unconditional branch (register). */
3469 else if ((insn_bits24_27 & 0x0e) == 0x06)
3470 {
3471 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3472 if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
3473 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3474 }
3475 else
3476 return AARCH64_RECORD_UNKNOWN;
3477 }
3478 /* Unconditional branch (immediate). */
3479 else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
3480 {
3481 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3482 if (bit (aarch64_insn_r->aarch64_insn, 31))
3483 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3484 }
3485 else
3486 /* Compare & branch (immediate), Test & branch (immediate) and
3487 Conditional branch (immediate). */
3488 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3489
3490 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3491 record_buf);
3492 return AARCH64_RECORD_SUCCESS;
3493 }
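
/* For example, "bl foo" records both the PC and the LR (bit 31 set in
   an unconditional immediate branch), "b.eq foo" records only the PC,
   and "svc #0" is delegated to the OS-specific syscall record hook.  */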
3494
3495 /* Record handler for advanced SIMD load and store instructions. */
3496
3497 static unsigned int
3498 aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
3499 {
3500 CORE_ADDR address;
3501 uint64_t addr_offset = 0;
3502 uint32_t record_buf[24];
3503 uint64_t record_buf_mem[24];
3504 uint32_t reg_rn, reg_rt;
3505 uint32_t reg_index = 0, mem_index = 0;
3506 uint8_t opcode_bits, size_bits;
3507
3508 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3509 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3510 size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3511 opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3512 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);
3513
3514 if (record_debug)
3515 debug_printf ("Process record: Advanced SIMD load/store\n");
3516
3517 /* Load/store single structure. */
3518 if (bit (aarch64_insn_r->aarch64_insn, 24))
3519 {
3520 uint8_t sindex, scale, selem, esize, replicate = 0;
3521 scale = opcode_bits >> 2;
3522 selem = ((opcode_bits & 0x02)
3523 | bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
3524 switch (scale)
3525 {
3526 case 1:
3527 if (size_bits & 0x01)
3528 return AARCH64_RECORD_UNKNOWN;
3529 break;
3530 case 2:
3531 if ((size_bits >> 1) & 0x01)
3532 return AARCH64_RECORD_UNKNOWN;
3533 if (size_bits & 0x01)
3534 {
3535 if (!((opcode_bits >> 1) & 0x01))
3536 scale = 3;
3537 else
3538 return AARCH64_RECORD_UNKNOWN;
3539 }
3540 break;
3541 case 3:
3542 if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
3543 {
3544 scale = size_bits;
3545 replicate = 1;
3546 break;
3547 }
3548 else
3549 return AARCH64_RECORD_UNKNOWN;
3550 default:
3551 break;
3552 }
3553 esize = 8 << scale;
3554 if (replicate)
3555 for (sindex = 0; sindex < selem; sindex++)
3556 {
3557 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3558 reg_rt = (reg_rt + 1) % 32;
3559 }
3560 else
3561 {
3562 for (sindex = 0; sindex < selem; sindex++)
3563 {
3564 if (bit (aarch64_insn_r->aarch64_insn, 22))
3565 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3566 else
3567 {
3568 record_buf_mem[mem_index++] = esize / 8;
3569 record_buf_mem[mem_index++] = address + addr_offset;
3570 }
3571 addr_offset = addr_offset + (esize / 8);
3572 reg_rt = (reg_rt + 1) % 32;
3573 }
3574 }
3575 }
3576 /* Load/store multiple structure. */
3577 else
3578 {
3579 uint8_t selem, esize, rpt, elements;
3580 uint8_t eindex, rindex;
3581
3582 esize = 8 << size_bits;
3583 if (bit (aarch64_insn_r->aarch64_insn, 30))
3584 elements = 128 / esize;
3585 else
3586 elements = 64 / esize;
3587
3588 switch (opcode_bits)
3589 {
3590 /* LD/ST4 (4 Registers).  */
3591 case 0:
3592 rpt = 1;
3593 selem = 4;
3594 break;
3595 /* LD/ST1 (4 Registers).  */
3596 case 2:
3597 rpt = 4;
3598 selem = 1;
3599 break;
3600 /* LD/ST3 (3 Registers).  */
3601 case 4:
3602 rpt = 1;
3603 selem = 3;
3604 break;
3605 /* LD/ST1 (3 Registers).  */
3606 case 6:
3607 rpt = 3;
3608 selem = 1;
3609 break;
3610 /* LD/ST1 (1 Register).  */
3611 case 7:
3612 rpt = 1;
3613 selem = 1;
3614 break;
3615 /* LD/ST2 (2 Registers).  */
3616 case 8:
3617 rpt = 1;
3618 selem = 2;
3619 break;
3620 /* LD/ST1 (2 Registers).  */
3621 case 10:
3622 rpt = 2;
3623 selem = 1;
3624 break;
3625 default:
3626 return AARCH64_RECORD_UNSUPPORTED;
3628 }
3629 for (rindex = 0; rindex < rpt; rindex++)
3630 for (eindex = 0; eindex < elements; eindex++)
3631 {
3632 uint8_t reg_tt, sindex;
3633 reg_tt = (reg_rt + rindex) % 32;
3634 for (sindex = 0; sindex < selem; sindex++)
3635 {
3636 if (bit (aarch64_insn_r->aarch64_insn, 22))
3637 record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
3638 else
3639 {
3640 record_buf_mem[mem_index++] = esize / 8;
3641 record_buf_mem[mem_index++] = address + addr_offset;
3642 }
3643 addr_offset = addr_offset + (esize / 8);
3644 reg_tt = (reg_tt + 1) % 32;
3645 }
3646 }
3647 }
3648
3649 if (bit (aarch64_insn_r->aarch64_insn, 23))
3650 record_buf[reg_index++] = reg_rn;
3651
3652 aarch64_insn_r->reg_rec_count = reg_index;
3653 aarch64_insn_r->mem_rec_count = mem_index / 2;
3654 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3655 record_buf_mem);
3656 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3657 record_buf);
3658 return AARCH64_RECORD_SUCCESS;
3659 }
3660
3661 /* Record handler for load and store instructions. */
3662
3663 static unsigned int
3664 aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
3665 {
3666 uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
3667 uint8_t insn_bit23, insn_bit21;
3668 uint8_t opc, size_bits, ld_flag, vector_flag;
3669 uint32_t reg_rn, reg_rt, reg_rt2;
3670 uint64_t datasize, offset;
3671 uint32_t record_buf[8];
3672 uint64_t record_buf_mem[8];
3673 CORE_ADDR address;
3674
3675 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3676 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3677 insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
3678 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3679 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3680 ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
3681 vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
3682 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3683 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3684 reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
3685 size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);
3686
3687 /* Load/store exclusive. */
3688 if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
3689 {
3690 if (record_debug)
3691 debug_printf ("Process record: load/store exclusive\n");
3692
3693 if (ld_flag)
3694 {
3695 record_buf[0] = reg_rt;
3696 aarch64_insn_r->reg_rec_count = 1;
3697 if (insn_bit21)
3698 {
3699 record_buf[1] = reg_rt2;
3700 aarch64_insn_r->reg_rec_count = 2;
3701 }
3702 }
3703 else
3704 {
3705 if (insn_bit21)
3706 datasize = (8 << size_bits) * 2;
3707 else
3708 datasize = (8 << size_bits);
3709 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3710 &address);
3711 record_buf_mem[0] = datasize / 8;
3712 record_buf_mem[1] = address;
3713 aarch64_insn_r->mem_rec_count = 1;
3714 if (!insn_bit23)
3715 {
3716 /* Save register rs, which receives the store-exclusive status result. */
3717 record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
3718 aarch64_insn_r->reg_rec_count = 1;
3719 }
3720 }
3721 }
3722 /* Decode load register (literal) instructions. */
3723 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
3724 {
3725 if (record_debug)
3726 debug_printf ("Process record: load register (literal)\n");
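/* A literal load reads from a PC-relative address, so the only
architectural change is to the destination register. */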
3727 if (vector_flag)
3728 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3729 else
3730 record_buf[0] = reg_rt;
3731 aarch64_insn_r->reg_rec_count = 1;
3732 }
3733 /* Decode all types of load/store pair instructions. */
3734 else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
3735 {
3736 if (record_debug)
3737 debug_printf ("Process record: load/store pair\n");
3738
3739 if (ld_flag)
3740 {
3741 if (vector_flag)
3742 {
3743 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3744 record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
3745 }
3746 else
3747 {
3748 record_buf[0] = reg_rt;
3749 record_buf[1] = reg_rt2;
3750 }
3751 aarch64_insn_r->reg_rec_count = 2;
3752 }
3753 else
3754 {
3755 uint16_t imm7_off;
3756 imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
3757 if (!vector_flag)
3758 size_bits = size_bits >> 1;
3759 datasize = 8 << (2 + size_bits);
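/* imm7 is a signed, scaled offset: when the sign bit (0x40) is set,
take the two's complement to get the magnitude, then shift by the
access size; e.g. imm7 = 0x7c encodes -4, so offset = 4 << (2 + size_bits). */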
3760 offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
3761 offset = offset << (2 + size_bits);
3762 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3763 &address);
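/* The post-indexed variant accesses memory at the unmodified base
address; every other variant applies the immediate offset first. */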
3764 if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
3765 {
3766 if (imm7_off & 0x40)
3767 address = address - offset;
3768 else
3769 address = address + offset;
3770 }
3771
3772 record_buf_mem[0] = datasize / 8;
3773 record_buf_mem[1] = address;
3774 record_buf_mem[2] = datasize / 8;
3775 record_buf_mem[3] = address + (datasize / 8);
3776 aarch64_insn_r->mem_rec_count = 2;
3777 }
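/* Bit 23 set selects a pre- or post-indexed form, both of which write
the updated address back to Rn. */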
3778 if (bit (aarch64_insn_r->aarch64_insn, 23))
3779 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3780 }
3781 /* Load/store register (unsigned immediate) instructions. */
3782 else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
3783 {
3784 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3785 if (!(opc >> 1))
3786 {
3787 if (opc & 0x01)
3788 ld_flag = 0x01;
3789 else
3790 ld_flag = 0x0;
3791 }
3792 else
3793 {
3794 if (size_bits == 0x3 && vector_flag == 0x0 && opc == 0x2)
3795 {
3796 /* PRFM (immediate) */
3797 return AARCH64_RECORD_SUCCESS;
3798 }
3799 else if (size_bits == 0x2 && vector_flag == 0x0 && opc == 0x2)
3800 {
3801 /* LDRSW (immediate) */
3802 ld_flag = 0x1;
3803 }
3804 else
3805 {
3806 if (opc & 0x01)
3807 ld_flag = 0x01;
3808 else
3809 ld_flag = 0x0;
3810 }
3811 }
3812
3813 if (record_debug)
3814 {
3815 debug_printf ("Process record: load/store (unsigned immediate):"
3816 " size %x V %d opc %x\n", size_bits, vector_flag,
3817 opc);
3818 }
3819
3820 if (!ld_flag)
3821 {
3822 offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
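/* The 12-bit immediate is unsigned and scaled by the access size,
hence the extra shift by size_bits below. */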
3823 datasize = 8 << size_bits;
3824 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3825 &address);
3826 offset = offset << size_bits;
3827 address = address + offset;
3828
3829 record_buf_mem[0] = datasize >> 3;
3830 record_buf_mem[1] = address;
3831 aarch64_insn_r->mem_rec_count = 1;
3832 }
3833 else
3834 {
3835 if (vector_flag)
3836 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3837 else
3838 record_buf[0] = reg_rt;
3839 aarch64_insn_r->reg_rec_count = 1;
3840 }
3841 }
3842 /* Load/store register (register offset) instructions. */
3843 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3844 && insn_bits10_11 == 0x02 && insn_bit21)
3845 {
3846 if (record_debug)
3847 debug_printf ("Process record: load/store (register offset)\n");
3848 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3849 if (!(opc >> 1))
3850 if (opc & 0x01)
3851 ld_flag = 0x01;
3852 else
3853 ld_flag = 0x0;
3854 else
3855 if (size_bits != 0x03)
3856 ld_flag = 0x01;
3857 else
3858 return AARCH64_RECORD_UNKNOWN;
3859
3860 if (!ld_flag)
3861 {
3862 ULONGEST reg_rm_val;
3863
3864 regcache_raw_read_unsigned (aarch64_insn_r->regcache,
3865 bits (aarch64_insn_r->aarch64_insn, 16, 20), &reg_rm_val);
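/* Bit 12 is the S bit: when set, the register offset is scaled,
i.e. shifted left by size_bits. */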
3866 if (bit (aarch64_insn_r->aarch64_insn, 12))
3867 offset = reg_rm_val << size_bits;
3868 else
3869 offset = reg_rm_val;
3870 datasize = 8 << size_bits;
3871 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3872 &address);
3873 address = address + offset;
3874 record_buf_mem[0] = datasize >> 3;
3875 record_buf_mem[1] = address;
3876 aarch64_insn_r->mem_rec_count = 1;
3877 }
3878 else
3879 {
3880 if (vector_flag)
3881 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3882 else
3883 record_buf[0] = reg_rt;
3884 aarch64_insn_r->reg_rec_count = 1;
3885 }
3886 }
3887 /* Load/store register (immediate and unprivileged) instructions. */
3888 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3889 && !insn_bit21)
3890 {
3891 if (record_debug)
3892 {
3893 debug_printf ("Process record: load/store "
3894 "(immediate and unprivileged)\n");
3895 }
3896 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3897 if (!(opc >> 1))
3898 if (opc & 0x01)
3899 ld_flag = 0x01;
3900 else
3901 ld_flag = 0x0;
3902 else
3903 if (size_bits != 0x03)
3904 ld_flag = 0x01;
3905 else
3906 return AARCH64_RECORD_UNKNOWN;
3907
3908 if (!ld_flag)
3909 {
3910 uint16_t imm9_off;
3911 imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
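/* imm9 is a signed, unscaled offset; bit 8 (0x0100) is the sign bit
and the two's complement of the field gives the magnitude. */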
3912 offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
3913 datasize = 8 << size_bits;
3914 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3915 &address);
3916 if (insn_bits10_11 != 0x01)
3917 {
3918 if (imm9_off & 0x0100)
3919 address = address - offset;
3920 else
3921 address = address + offset;
3922 }
3923 record_buf_mem[0] = datasize >> 3;
3924 record_buf_mem[1] = address;
3925 aarch64_insn_r->mem_rec_count = 1;
3926 }
3927 else
3928 {
3929 if (vector_flag)
3930 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3931 else
3932 record_buf[0] = reg_rt;
3933 aarch64_insn_r->reg_rec_count = 1;
3934 }
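/* Bits 10-11 of 0x01 (post-indexed) or 0x03 (pre-indexed) imply
writeback, so the base register Rn is modified as well. */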
3935 if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
3936 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3937 }
3938 /* Advanced SIMD load/store instructions. */
3939 else
3940 return aarch64_record_asimd_load_store (aarch64_insn_r);
3941
3942 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3943 record_buf_mem);
3944 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3945 record_buf);
3946 return AARCH64_RECORD_SUCCESS;
3947 }
3948
3949 /* Record handler for data processing SIMD and floating point instructions. */
3950
3951 static unsigned int
3952 aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
3953 {
3954 uint8_t insn_bit21, opcode, rmode, reg_rd;
3955 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
3956 uint8_t insn_bits11_14;
3957 uint32_t record_buf[2];
3958
3959 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3960 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3961 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3962 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3963 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
3964 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
3965 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
3966 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3967 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3968
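/* Every instruction in this group writes at most one destination (a
general register, a SIMD/FP register, or NZCV), so a single record_buf
slot suffices; reg_rec_count is asserted to be 1 at the end. */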
3969 if (record_debug)
3970 debug_printf ("Process record: data processing SIMD/FP: ");
3971
3972 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
3973 {
3974 /* Floating point - fixed point conversion instructions. */
3975 if (!insn_bit21)
3976 {
3977 if (record_debug)
3978 debug_printf ("FP - fixed point conversion");
3979
3980 if ((opcode >> 1) == 0x0 && rmode == 0x03)
3981 record_buf[0] = reg_rd;
3982 else
3983 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3984 }
3985 /* Floating point - conditional compare instructions. */
3986 else if (insn_bits10_11 == 0x01)
3987 {
3988 if (record_debug)
3989 debug_printf ("FP - conditional compare");
3990
3991 record_buf[0] = AARCH64_CPSR_REGNUM;
3992 }
3993 /* Floating point - data processing (2-source) and
3994 conditional select instructions. */
3995 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
3996 {
3997 if (record_debug)
3998 debug_printf ("FP - DP (2-source)");
3999
4000 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4001 }
4002 else if (insn_bits10_11 == 0x00)
4003 {
4004 /* Floating point - immediate instructions. */
4005 if ((insn_bits12_15 & 0x01) == 0x01
4006 || (insn_bits12_15 & 0x07) == 0x04)
4007 {
4008 if (record_debug)
4009 debug_printf ("FP - immediate");
4010 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4011 }
4012 /* Floating point - compare instructions. */
4013 else if ((insn_bits12_15 & 0x03) == 0x02)
4014 {
4015 if (record_debug)
4016 debug_printf ("FP - compare");
4017 record_buf[0] = AARCH64_CPSR_REGNUM;
4018 }
4019 /* Floating point - integer conversion instructions. */
4020 else if (insn_bits12_15 == 0x00)
4021 {
4022 /* Convert float to integer instruction. */
4023 if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
4024 {
4025 if (record_debug)
4026 debug_printf ("float to int conversion");
4027
4028 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4029 }
4030 /* Convert integer to float instruction. */
4031 else if ((opcode >> 1) == 0x01 && !rmode)
4032 {
4033 if (record_debug)
4034 debug_printf ("int to float conversion");
4035
4036 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4037 }
4038 /* Move float to integer instruction. */
4039 else if ((opcode >> 1) == 0x03)
4040 {
4041 if (record_debug)
4042 debug_printf ("move float to int");
4043
4044 if (!(opcode & 0x01))
4045 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4046 else
4047 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4048 }
4049 else
4050 return AARCH64_RECORD_UNKNOWN;
4051 }
4052 else
4053 return AARCH64_RECORD_UNKNOWN;
4054 }
4055 else
4056 return AARCH64_RECORD_UNKNOWN;
4057 }
4058 else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
4059 {
4060 if (record_debug)
4061 debug_printf ("SIMD copy");
4062
4063 /* Advanced SIMD copy instructions. */
4064 if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
4065 && !bit (aarch64_insn_r->aarch64_insn, 15)
4066 && bit (aarch64_insn_r->aarch64_insn, 10))
4067 {
4068 if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
4069 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4070 else
4071 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4072 }
4073 else
4074 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4075 }
4076 /* All remaining floating point or advanced SIMD instructions. */
4077 else
4078 {
4079 if (record_debug)
4080 debug_printf ("all remaining");
4081
4082 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4083 }
4084
4085 if (record_debug)
4086 debug_printf ("\n");
4087
4088 aarch64_insn_r->reg_rec_count++;
4089 gdb_assert (aarch64_insn_r->reg_rec_count == 1);
4090 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4091 record_buf);
4092 return AARCH64_RECORD_SUCCESS;
4093 }
4094
4095 /* Decodes the instruction type and invokes the matching record handler. */
4096
4097 static unsigned int
4098 aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
4099 {
4100 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
4101
4102 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
4103 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
4104 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
4105 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
4106
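/* Bits 25-28 form the top-level op0 field of the A64 encoding, which
partitions the instruction space into the groups dispatched below. */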
4107 /* Data processing - immediate instructions. */
4108 if (!ins_bit26 && !ins_bit27 && ins_bit28)
4109 return aarch64_record_data_proc_imm (aarch64_insn_r);
4110
4111 /* Branch, exception generation and system instructions. */
4112 if (ins_bit26 && !ins_bit27 && ins_bit28)
4113 return aarch64_record_branch_except_sys (aarch64_insn_r);
4114
4115 /* Load and store instructions. */
4116 if (!ins_bit25 && ins_bit27)
4117 return aarch64_record_load_store (aarch64_insn_r);
4118
4119 /* Data processing - register instructions. */
4120 if (ins_bit25 && !ins_bit26 && ins_bit27)
4121 return aarch64_record_data_proc_reg (aarch64_insn_r);
4122
4123 /* Data processing - SIMD and floating point instructions. */
4124 if (ins_bit25 && ins_bit26 && ins_bit27)
4125 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
4126
4127 return AARCH64_RECORD_UNSUPPORTED;
4128 }
4129
4130 /* Frees the register and memory buffers allocated for the record. */
4131
4132 static void
4133 deallocate_reg_mem (insn_decode_record *record)
4134 {
4135 xfree (record->aarch64_regs);
4136 xfree (record->aarch64_mems);
4137 }
4138
4139 #if GDB_SELF_TEST
4140 namespace selftests {
4141
4142 static void
4143 aarch64_process_record_test (void)
4144 {
4145 struct gdbarch_info info;
4146 uint32_t ret;
4147
4148 gdbarch_info_init (&info);
4149 info.bfd_arch_info = bfd_scan_arch ("aarch64");
4150
4151 struct gdbarch *gdbarch = gdbarch_find_by_info (info);
4152 SELF_CHECK (gdbarch != NULL);
4153
4154 insn_decode_record aarch64_record;
4155
4156 memset (&aarch64_record, 0, sizeof (insn_decode_record));
4157 aarch64_record.regcache = NULL;
4158 aarch64_record.this_addr = 0;
4159 aarch64_record.gdbarch = gdbarch;
4160
4161 /* 20 00 80 f9 prfm pldl1keep, [x1] */
4162 aarch64_record.aarch64_insn = 0xf9800020;
4163 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4164 SELF_CHECK (ret == AARCH64_RECORD_SUCCESS);
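/* PRFM is only a hint and changes no architectural state, so nothing
should have been recorded. */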
4165 SELF_CHECK (aarch64_record.reg_rec_count == 0);
4166 SELF_CHECK (aarch64_record.mem_rec_count == 0);
4167
4168 deallocate_reg_mem (&aarch64_record);
4169 }
4170
4171 } // namespace selftests
4172 #endif /* GDB_SELF_TEST */
4173
4174 /* Parse the current instruction, and record to record_arch_list the
4175 values of the registers and memory that the instruction will change.
4176 Return -1 if something goes wrong. */
4177
4178 int
4179 aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
4180 CORE_ADDR insn_addr)
4181 {
4182 uint32_t rec_no = 0;
4183 uint8_t insn_size = 4;
4184 uint32_t ret = 0;
4185 gdb_byte buf[insn_size];
4186 insn_decode_record aarch64_record;
4187
4188 memset (&buf[0], 0, insn_size);
4189 memset (&aarch64_record, 0, sizeof (insn_decode_record));
4190 target_read_memory (insn_addr, &buf[0], insn_size);
4191 aarch64_record.aarch64_insn
4192 = (uint32_t) extract_unsigned_integer (&buf[0],
4193 insn_size,
4194 gdbarch_byte_order (gdbarch));
4195 aarch64_record.regcache = regcache;
4196 aarch64_record.this_addr = insn_addr;
4197 aarch64_record.gdbarch = gdbarch;
4198
4199 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4200 if (ret == AARCH64_RECORD_UNSUPPORTED)
4201 {
4202 printf_unfiltered (_("Process record does not support instruction "
4203 "0x%0x at address %s.\n"),
4204 aarch64_record.aarch64_insn,
4205 paddress (gdbarch, insn_addr));
4206 ret = -1;
4207 }
4208
4209 if (0 == ret)
4210 {
4211 /* Record registers. */
4212 record_full_arch_list_add_reg (aarch64_record.regcache,
4213 AARCH64_PC_REGNUM);
4214 /* Always record register CPSR. */
4215 record_full_arch_list_add_reg (aarch64_record.regcache,
4216 AARCH64_CPSR_REGNUM);
4217 if (aarch64_record.aarch64_regs)
4218 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
4219 if (record_full_arch_list_add_reg (aarch64_record.regcache,
4220 aarch64_record.aarch64_regs[rec_no]))
4221 ret = -1;
4222
4223 /* Record memories. */
4224 if (aarch64_record.aarch64_mems)
4225 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
4226 if (record_full_arch_list_add_mem
4227 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
4228 aarch64_record.aarch64_mems[rec_no].len))
4229 ret = -1;
4230
4231 if (record_full_arch_list_add_end ())
4232 ret = -1;
4233 }
4234
4235 deallocate_reg_mem (&aarch64_record);
4236 return ret;
4237 }