Aarch64 SVE: Fix stack smashing when calling functions
[deliverable/binutils-gdb.git] / gdb / aarch64-tdep.c
1 /* Common target dependent code for GDB on AArch64 systems.
2
3 Copyright (C) 2009-2018 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21 #include "defs.h"
22
23 #include "frame.h"
24 #include "inferior.h"
25 #include "gdbcmd.h"
26 #include "gdbcore.h"
27 #include "dis-asm.h"
28 #include "regcache.h"
29 #include "reggroups.h"
30 #include "value.h"
31 #include "arch-utils.h"
32 #include "osabi.h"
33 #include "frame-unwind.h"
34 #include "frame-base.h"
35 #include "trad-frame.h"
36 #include "objfiles.h"
37 #include "dwarf2-frame.h"
38 #include "gdbtypes.h"
39 #include "prologue-value.h"
40 #include "target-descriptions.h"
41 #include "user-regs.h"
42 #include "language.h"
43 #include "infcall.h"
44 #include "ax.h"
45 #include "ax-gdb.h"
46 #include "selftest.h"
47
48 #include "aarch64-tdep.h"
49
50 #include "elf-bfd.h"
51 #include "elf/aarch64.h"
52
53 #include "vec.h"
54
55 #include "record.h"
56 #include "record-full.h"
57 #include "arch/aarch64-insn.h"
58
59 #include "opcode/aarch64.h"
60 #include <algorithm>
61
62 #define submask(x) ((1L << ((x) + 1)) - 1)
63 #define bit(obj,st) (((obj) >> (st)) & 1)
64 #define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
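/* For example, bits (insn, 10, 21) evaluates to (insn >> 10) & 0xfff,
   i.e. it extracts the inclusive bit range [21:10] of INSN -- such as
   the imm12 field of an ADD (immediate) instruction.  */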
65
66 /* Pseudo register base numbers. */
67 #define AARCH64_Q0_REGNUM 0
68 #define AARCH64_D0_REGNUM (AARCH64_Q0_REGNUM + AARCH64_D_REGISTER_COUNT)
69 #define AARCH64_S0_REGNUM (AARCH64_D0_REGNUM + 32)
70 #define AARCH64_H0_REGNUM (AARCH64_S0_REGNUM + 32)
71 #define AARCH64_B0_REGNUM (AARCH64_H0_REGNUM + 32)
72 #define AARCH64_SVE_V0_REGNUM (AARCH64_B0_REGNUM + 32)
73
74 /* A Homogeneous Floating-Point or Short-Vector Aggregate may have at most
75 four members. */
76 #define HA_MAX_NUM_FLDS 4
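/* For example, a C structure such as

     struct quad { double x, y, z, w; };

   is a Homogeneous Floating-Point Aggregate: four members, all of the
   same floating-point fundamental type.  A fifth double member would
   disqualify it.  */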
77
78 /* All possible aarch64 target descriptors. */
79 struct target_desc *tdesc_aarch64_list[AARCH64_MAX_SVE_VQ + 1];
80
81 /* The standard register names, and all the valid aliases for them. */
82 static const struct
83 {
84 const char *const name;
85 int regnum;
86 } aarch64_register_aliases[] =
87 {
88 /* 64-bit register names. */
89 {"fp", AARCH64_FP_REGNUM},
90 {"lr", AARCH64_LR_REGNUM},
91 {"sp", AARCH64_SP_REGNUM},
92
93 /* 32-bit register names. */
94 {"w0", AARCH64_X0_REGNUM + 0},
95 {"w1", AARCH64_X0_REGNUM + 1},
96 {"w2", AARCH64_X0_REGNUM + 2},
97 {"w3", AARCH64_X0_REGNUM + 3},
98 {"w4", AARCH64_X0_REGNUM + 4},
99 {"w5", AARCH64_X0_REGNUM + 5},
100 {"w6", AARCH64_X0_REGNUM + 6},
101 {"w7", AARCH64_X0_REGNUM + 7},
102 {"w8", AARCH64_X0_REGNUM + 8},
103 {"w9", AARCH64_X0_REGNUM + 9},
104 {"w10", AARCH64_X0_REGNUM + 10},
105 {"w11", AARCH64_X0_REGNUM + 11},
106 {"w12", AARCH64_X0_REGNUM + 12},
107 {"w13", AARCH64_X0_REGNUM + 13},
108 {"w14", AARCH64_X0_REGNUM + 14},
109 {"w15", AARCH64_X0_REGNUM + 15},
110 {"w16", AARCH64_X0_REGNUM + 16},
111 {"w17", AARCH64_X0_REGNUM + 17},
112 {"w18", AARCH64_X0_REGNUM + 18},
113 {"w19", AARCH64_X0_REGNUM + 19},
114 {"w20", AARCH64_X0_REGNUM + 20},
115 {"w21", AARCH64_X0_REGNUM + 21},
116 {"w22", AARCH64_X0_REGNUM + 22},
117 {"w23", AARCH64_X0_REGNUM + 23},
118 {"w24", AARCH64_X0_REGNUM + 24},
119 {"w25", AARCH64_X0_REGNUM + 25},
120 {"w26", AARCH64_X0_REGNUM + 26},
121 {"w27", AARCH64_X0_REGNUM + 27},
122 {"w28", AARCH64_X0_REGNUM + 28},
123 {"w29", AARCH64_X0_REGNUM + 29},
124 {"w30", AARCH64_X0_REGNUM + 30},
125
126 /* specials */
127 {"ip0", AARCH64_X0_REGNUM + 16},
128 {"ip1", AARCH64_X0_REGNUM + 17}
129 };
130
131 /* The required core 'R' registers. */
132 static const char *const aarch64_r_register_names[] =
133 {
134 /* These registers must appear in consecutive RAW register number
135 order and they must begin with AARCH64_X0_REGNUM! */
136 "x0", "x1", "x2", "x3",
137 "x4", "x5", "x6", "x7",
138 "x8", "x9", "x10", "x11",
139 "x12", "x13", "x14", "x15",
140 "x16", "x17", "x18", "x19",
141 "x20", "x21", "x22", "x23",
142 "x24", "x25", "x26", "x27",
143 "x28", "x29", "x30", "sp",
144 "pc", "cpsr"
145 };
146
147 /* The FP/SIMD 'V' registers. */
148 static const char *const aarch64_v_register_names[] =
149 {
150 /* These registers must appear in consecutive RAW register number
151 order and they must begin with AARCH64_V0_REGNUM! */
152 "v0", "v1", "v2", "v3",
153 "v4", "v5", "v6", "v7",
154 "v8", "v9", "v10", "v11",
155 "v12", "v13", "v14", "v15",
156 "v16", "v17", "v18", "v19",
157 "v20", "v21", "v22", "v23",
158 "v24", "v25", "v26", "v27",
159 "v28", "v29", "v30", "v31",
160 "fpsr",
161 "fpcr"
162 };
163
164 /* The SVE 'Z' and 'P' registers. */
165 static const char *const aarch64_sve_register_names[] =
166 {
167 /* These registers must appear in consecutive RAW register number
168 order and they must begin with AARCH64_SVE_Z0_REGNUM! */
169 "z0", "z1", "z2", "z3",
170 "z4", "z5", "z6", "z7",
171 "z8", "z9", "z10", "z11",
172 "z12", "z13", "z14", "z15",
173 "z16", "z17", "z18", "z19",
174 "z20", "z21", "z22", "z23",
175 "z24", "z25", "z26", "z27",
176 "z28", "z29", "z30", "z31",
177 "fpsr", "fpcr",
178 "p0", "p1", "p2", "p3",
179 "p4", "p5", "p6", "p7",
180 "p8", "p9", "p10", "p11",
181 "p12", "p13", "p14", "p15",
182 "ffr", "vg"
183 };
184
185 /* AArch64 prologue cache structure. */
186 struct aarch64_prologue_cache
187 {
188 /* The program counter at the start of the function. It is used to
189 identify this frame as a prologue frame. */
190 CORE_ADDR func;
191
192 /* The program counter at the time this frame was created; i.e. where
193 this function was called from. It is used to identify this frame as a
194 stub frame. */
195 CORE_ADDR prev_pc;
196
197 /* The stack pointer at the time this frame was created; i.e. the
198 caller's stack pointer when this function was called. It is used
199 to identify this frame. */
200 CORE_ADDR prev_sp;
201
202 /* Is the target available to read from? */
203 int available_p;
204
205 /* The frame base for this frame is just prev_sp - frame size.
206 FRAMESIZE is the distance from the frame pointer to the
207 initial stack pointer. */
208 int framesize;
209
210 /* The register used to hold the frame pointer for this frame. */
211 int framereg;
212
213 /* Saved register offsets. */
214 struct trad_frame_saved_reg *saved_regs;
215 };
216
217 static void
218 show_aarch64_debug (struct ui_file *file, int from_tty,
219 struct cmd_list_element *c, const char *value)
220 {
221 fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
222 }
223
224 namespace {
225
226 /* Abstract instruction reader. */
227
228 class abstract_instruction_reader
229 {
230 public:
231 /* Read in one instruction. */
232 virtual ULONGEST read (CORE_ADDR memaddr, int len,
233 enum bfd_endian byte_order) = 0;
234 };
235
236 /* Instruction reader from real target. */
237
238 class instruction_reader : public abstract_instruction_reader
239 {
240 public:
241 ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
242 override
243 {
244 return read_code_unsigned_integer (memaddr, len, byte_order);
245 }
246 };
247
248 } // namespace
249
250 /* Analyze a prologue, looking for a recognizable stack frame
251 and frame pointer. Scan until we encounter a store that could
252 clobber the stack frame unexpectedly, or an unknown instruction. */
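/* As a concrete example (taken from the self tests below), analyzing the
   two-instruction prologue

     stp x29, x30, [sp, #-272]!   ; push FP/LR, SP -= 272
     mov x29, sp                  ; establish the frame pointer

   concludes that the frame register is x29, the frame size is 272, and
   that x29 and x30 were saved at offsets -272 and -264 from the caller's
   stack pointer.  */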
253
254 static CORE_ADDR
255 aarch64_analyze_prologue (struct gdbarch *gdbarch,
256 CORE_ADDR start, CORE_ADDR limit,
257 struct aarch64_prologue_cache *cache,
258 abstract_instruction_reader& reader)
259 {
260 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
261 int i;
262 /* Track X registers and D registers in prologue. */
263 pv_t regs[AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT];
264
265 for (i = 0; i < AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT; i++)
266 regs[i] = pv_register (i, 0);
267 pv_area stack (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));
268
269 for (; start < limit; start += 4)
270 {
271 uint32_t insn;
272 aarch64_inst inst;
273
274 insn = reader.read (start, 4, byte_order_for_code);
275
276 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
277 break;
278
279 if (inst.opcode->iclass == addsub_imm
280 && (inst.opcode->op == OP_ADD
281 || strcmp ("sub", inst.opcode->name) == 0))
282 {
283 unsigned rd = inst.operands[0].reg.regno;
284 unsigned rn = inst.operands[1].reg.regno;
285
286 gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
287 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
288 gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
289 gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM);
290
291 if (inst.opcode->op == OP_ADD)
292 {
293 regs[rd] = pv_add_constant (regs[rn],
294 inst.operands[2].imm.value);
295 }
296 else
297 {
298 regs[rd] = pv_add_constant (regs[rn],
299 -inst.operands[2].imm.value);
300 }
301 }
302 else if (inst.opcode->iclass == pcreladdr
303 && inst.operands[1].type == AARCH64_OPND_ADDR_ADRP)
304 {
305 gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
306 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
307
308 regs[inst.operands[0].reg.regno] = pv_unknown ();
309 }
310 else if (inst.opcode->iclass == branch_imm)
311 {
312 /* Stop analysis on branch. */
313 break;
314 }
315 else if (inst.opcode->iclass == condbranch)
316 {
317 /* Stop analysis on branch. */
318 break;
319 }
320 else if (inst.opcode->iclass == branch_reg)
321 {
322 /* Stop analysis on branch. */
323 break;
324 }
325 else if (inst.opcode->iclass == compbranch)
326 {
327 /* Stop analysis on branch. */
328 break;
329 }
330 else if (inst.opcode->op == OP_MOVZ)
331 {
332 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
333 regs[inst.operands[0].reg.regno] = pv_unknown ();
334 }
335 else if (inst.opcode->iclass == log_shift
336 && strcmp (inst.opcode->name, "orr") == 0)
337 {
338 unsigned rd = inst.operands[0].reg.regno;
339 unsigned rn = inst.operands[1].reg.regno;
340 unsigned rm = inst.operands[2].reg.regno;
341
342 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
343 gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn);
344 gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT);
345
346 if (inst.operands[2].shifter.amount == 0
347 && rn == AARCH64_SP_REGNUM)
348 regs[rd] = regs[rm];
349 else
350 {
351 if (aarch64_debug)
352 {
353 debug_printf ("aarch64: prologue analysis gave up "
354 "addr=%s opcode=0x%x (orr x register)\n",
355 core_addr_to_string_nz (start), insn);
356 }
357 break;
358 }
359 }
360 else if (inst.opcode->op == OP_STUR)
361 {
362 unsigned rt = inst.operands[0].reg.regno;
363 unsigned rn = inst.operands[1].addr.base_regno;
364 int is64
365 = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);
366
367 gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
368 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
369 gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
370 gdb_assert (!inst.operands[1].addr.offset.is_reg);
371
372 stack.store (pv_add_constant (regs[rn],
373 inst.operands[1].addr.offset.imm),
374 is64 ? 8 : 4, regs[rt]);
375 }
376 else if ((inst.opcode->iclass == ldstpair_off
377 || (inst.opcode->iclass == ldstpair_indexed
378 && inst.operands[2].addr.preind))
379 && strcmp ("stp", inst.opcode->name) == 0)
380 {
381 /* STP with addressing mode Pre-indexed and Base register. */
382 unsigned rt1;
383 unsigned rt2;
384 unsigned rn = inst.operands[2].addr.base_regno;
385 int32_t imm = inst.operands[2].addr.offset.imm;
386
387 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
388 || inst.operands[0].type == AARCH64_OPND_Ft);
389 gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2
390 || inst.operands[1].type == AARCH64_OPND_Ft2);
391 gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7);
392 gdb_assert (!inst.operands[2].addr.offset.is_reg);
393
394 /* If recording this store would invalidate the store area
395 (perhaps because rn is not known) then we should abandon
396 further prologue analysis. */
397 if (stack.store_would_trash (pv_add_constant (regs[rn], imm)))
398 break;
399
400 if (stack.store_would_trash (pv_add_constant (regs[rn], imm + 8)))
401 break;
402
403 rt1 = inst.operands[0].reg.regno;
404 rt2 = inst.operands[1].reg.regno;
405 if (inst.operands[0].type == AARCH64_OPND_Ft)
406 {
407 /* Only the bottom 64 bits of each V register (the D register) need
408 to be preserved. */
409 gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
410 rt1 += AARCH64_X_REGISTER_COUNT;
411 rt2 += AARCH64_X_REGISTER_COUNT;
412 }
413
414 stack.store (pv_add_constant (regs[rn], imm), 8,
415 regs[rt1]);
416 stack.store (pv_add_constant (regs[rn], imm + 8), 8,
417 regs[rt2]);
418
419 if (inst.operands[2].addr.writeback)
420 regs[rn] = pv_add_constant (regs[rn], imm);
421
422 }
423 else if ((inst.opcode->iclass == ldst_imm9 /* Signed immediate. */
424 || (inst.opcode->iclass == ldst_pos /* Unsigned immediate. */
425 && (inst.opcode->op == OP_STR_POS
426 || inst.opcode->op == OP_STRF_POS)))
427 && inst.operands[1].addr.base_regno == AARCH64_SP_REGNUM
428 && strcmp ("str", inst.opcode->name) == 0)
429 {
430 /* STR (immediate) */
431 unsigned int rt = inst.operands[0].reg.regno;
432 int32_t imm = inst.operands[1].addr.offset.imm;
433 unsigned int rn = inst.operands[1].addr.base_regno;
434 bool is64
435 = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);
436 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
437 || inst.operands[0].type == AARCH64_OPND_Ft);
438
439 if (inst.operands[0].type == AARCH64_OPND_Ft)
440 {
441 /* Only the bottom 64 bits of each V register (the D register) need
442 to be preserved. */
443 gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
444 rt += AARCH64_X_REGISTER_COUNT;
445 }
446
447 stack.store (pv_add_constant (regs[rn], imm),
448 is64 ? 8 : 4, regs[rt]);
449 if (inst.operands[1].addr.writeback)
450 regs[rn] = pv_add_constant (regs[rn], imm);
451 }
452 else if (inst.opcode->iclass == testbranch)
453 {
454 /* Stop analysis on branch. */
455 break;
456 }
457 else
458 {
459 if (aarch64_debug)
460 {
461 debug_printf ("aarch64: prologue analysis gave up addr=%s"
462 " opcode=0x%x\n",
463 core_addr_to_string_nz (start), insn);
464 }
465 break;
466 }
467 }
468
469 if (cache == NULL)
470 return start;
471
472 if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
473 {
474 /* Frame pointer is fp. Frame size is constant. */
475 cache->framereg = AARCH64_FP_REGNUM;
476 cache->framesize = -regs[AARCH64_FP_REGNUM].k;
477 }
478 else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
479 {
480 /* Try the stack pointer. */
481 cache->framesize = -regs[AARCH64_SP_REGNUM].k;
482 cache->framereg = AARCH64_SP_REGNUM;
483 }
484 else
485 {
486 /* We're just out of luck. We don't know where the frame is. */
487 cache->framereg = -1;
488 cache->framesize = 0;
489 }
490
491 for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
492 {
493 CORE_ADDR offset;
494
495 if (stack.find_reg (gdbarch, i, &offset))
496 cache->saved_regs[i].addr = offset;
497 }
498
499 for (i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
500 {
501 int regnum = gdbarch_num_regs (gdbarch);
502 CORE_ADDR offset;
503
504 if (stack.find_reg (gdbarch, i + AARCH64_X_REGISTER_COUNT,
505 &offset))
506 cache->saved_regs[i + regnum + AARCH64_D0_REGNUM].addr = offset;
507 }
508
509 return start;
510 }
511
512 static CORE_ADDR
513 aarch64_analyze_prologue (struct gdbarch *gdbarch,
514 CORE_ADDR start, CORE_ADDR limit,
515 struct aarch64_prologue_cache *cache)
516 {
517 instruction_reader reader;
518
519 return aarch64_analyze_prologue (gdbarch, start, limit, cache,
520 reader);
521 }
522
523 #if GDB_SELF_TEST
524
525 namespace selftests {
526
527 /* Instruction reader from manually cooked instruction sequences. */
528
529 class instruction_reader_test : public abstract_instruction_reader
530 {
531 public:
532 template<size_t SIZE>
533 explicit instruction_reader_test (const uint32_t (&insns)[SIZE])
534 : m_insns (insns), m_insns_size (SIZE)
535 {}
536
537 ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
538 override
539 {
540 SELF_CHECK (len == 4);
541 SELF_CHECK (memaddr % 4 == 0);
542 SELF_CHECK (memaddr / 4 < m_insns_size);
543
544 return m_insns[memaddr / 4];
545 }
546
547 private:
548 const uint32_t *m_insns;
549 size_t m_insns_size;
550 };
551
552 static void
553 aarch64_analyze_prologue_test (void)
554 {
555 struct gdbarch_info info;
556
557 gdbarch_info_init (&info);
558 info.bfd_arch_info = bfd_scan_arch ("aarch64");
559
560 struct gdbarch *gdbarch = gdbarch_find_by_info (info);
561 SELF_CHECK (gdbarch != NULL);
562
563 /* Test the simple prologue in which frame pointer is used. */
564 {
565 struct aarch64_prologue_cache cache;
566 cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);
567
568 static const uint32_t insns[] = {
569 0xa9af7bfd, /* stp x29, x30, [sp,#-272]! */
570 0x910003fd, /* mov x29, sp */
571 0x97ffffe6, /* bl 0x400580 */
572 };
573 instruction_reader_test reader (insns);
574
575 CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
576 SELF_CHECK (end == 4 * 2);
577
578 SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
579 SELF_CHECK (cache.framesize == 272);
580
581 for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
582 {
583 if (i == AARCH64_FP_REGNUM)
584 SELF_CHECK (cache.saved_regs[i].addr == -272);
585 else if (i == AARCH64_LR_REGNUM)
586 SELF_CHECK (cache.saved_regs[i].addr == -264);
587 else
588 SELF_CHECK (cache.saved_regs[i].addr == -1);
589 }
590
591 for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
592 {
593 int regnum = gdbarch_num_regs (gdbarch);
594
595 SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
596 == -1);
597 }
598 }
599
600 /* Test a prologue in which STR is used and frame pointer is not
601 used. */
602 {
603 struct aarch64_prologue_cache cache;
604 cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);
605
606 static const uint32_t insns[] = {
607 0xf81d0ff3, /* str x19, [sp, #-48]! */
608 0xb9002fe0, /* str w0, [sp, #44] */
609 0xf90013e1, /* str x1, [sp, #32]*/
610 0xfd000fe0, /* str d0, [sp, #24] */
611 0xaa0203f3, /* mov x19, x2 */
612 0xf94013e0, /* ldr x0, [sp, #32] */
613 };
614 instruction_reader_test reader (insns);
615
616 CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
617
618 SELF_CHECK (end == 4 * 5);
619
620 SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
621 SELF_CHECK (cache.framesize == 48);
622
623 for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
624 {
625 if (i == 1)
626 SELF_CHECK (cache.saved_regs[i].addr == -16);
627 else if (i == 19)
628 SELF_CHECK (cache.saved_regs[i].addr == -48);
629 else
630 SELF_CHECK (cache.saved_regs[i].addr == -1);
631 }
632
633 for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
634 {
635 int regnum = gdbarch_num_regs (gdbarch);
636
637 if (i == 0)
638 SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
639 == -24);
640 else
641 SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
642 == -1);
643 }
644 }
645 }
646 } // namespace selftests
647 #endif /* GDB_SELF_TEST */
648
649 /* Implement the "skip_prologue" gdbarch method. */
650
651 static CORE_ADDR
652 aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
653 {
654 CORE_ADDR func_addr, limit_pc;
655
656 /* See if we can determine the end of the prologue via the symbol
657 table. If so, then return either PC, or the PC after the
658 prologue, whichever is greater. */
659 if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
660 {
661 CORE_ADDR post_prologue_pc
662 = skip_prologue_using_sal (gdbarch, func_addr);
663
664 if (post_prologue_pc != 0)
665 return std::max (pc, post_prologue_pc);
666 }
667
668 /* Can't determine prologue from the symbol table, need to examine
669 instructions. */
670
671 /* Find an upper limit on the function prologue using the debug
672 information. If the debug information could not be used to
673 provide that bound, then use an arbitrary large number as the
674 upper bound. */
675 limit_pc = skip_prologue_using_sal (gdbarch, pc);
676 if (limit_pc == 0)
677 limit_pc = pc + 128; /* Magic. */
678
679 /* Try disassembling prologue. */
680 return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
681 }
682
683 /* Scan the function prologue for THIS_FRAME and populate the prologue
684 cache CACHE. */
685
686 static void
687 aarch64_scan_prologue (struct frame_info *this_frame,
688 struct aarch64_prologue_cache *cache)
689 {
690 CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
691 CORE_ADDR prologue_start;
692 CORE_ADDR prologue_end;
693 CORE_ADDR prev_pc = get_frame_pc (this_frame);
694 struct gdbarch *gdbarch = get_frame_arch (this_frame);
695
696 cache->prev_pc = prev_pc;
697
698 /* Assume we do not find a frame. */
699 cache->framereg = -1;
700 cache->framesize = 0;
701
702 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
703 &prologue_end))
704 {
705 struct symtab_and_line sal = find_pc_line (prologue_start, 0);
706
707 if (sal.line == 0)
708 {
709 /* No line info so use the current PC. */
710 prologue_end = prev_pc;
711 }
712 else if (sal.end < prologue_end)
713 {
714 /* The next line begins after the function end. */
715 prologue_end = sal.end;
716 }
717
718 prologue_end = std::min (prologue_end, prev_pc);
719 aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
720 }
721 else
722 {
723 CORE_ADDR frame_loc;
724
725 frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
726 if (frame_loc == 0)
727 return;
728
729 cache->framereg = AARCH64_FP_REGNUM;
730 cache->framesize = 16;
731 cache->saved_regs[29].addr = 0;
732 cache->saved_regs[30].addr = 8;
733 }
734 }
735
736 /* Fill in *CACHE with information about the prologue of *THIS_FRAME. This
737 function may throw an exception if the inferior's registers or memory is
738 not available. */
739
740 static void
741 aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
742 struct aarch64_prologue_cache *cache)
743 {
744 CORE_ADDR unwound_fp;
745 int reg;
746
747 aarch64_scan_prologue (this_frame, cache);
748
749 if (cache->framereg == -1)
750 return;
751
752 unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
753 if (unwound_fp == 0)
754 return;
755
756 cache->prev_sp = unwound_fp + cache->framesize;
757
758 /* Calculate actual addresses of saved registers using offsets
759 determined by aarch64_analyze_prologue. */
760 for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
761 if (trad_frame_addr_p (cache->saved_regs, reg))
762 cache->saved_regs[reg].addr += cache->prev_sp;
763
764 cache->func = get_frame_func (this_frame);
765
766 cache->available_p = 1;
767 }
768
769 /* Allocate and fill in *THIS_CACHE with information about the prologue of
770 *THIS_FRAME. Do not do this if *THIS_CACHE was already allocated.
771 Return a pointer to the current aarch64_prologue_cache in
772 *THIS_CACHE. */
773
774 static struct aarch64_prologue_cache *
775 aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
776 {
777 struct aarch64_prologue_cache *cache;
778
779 if (*this_cache != NULL)
780 return (struct aarch64_prologue_cache *) *this_cache;
781
782 cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
783 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
784 *this_cache = cache;
785
786 TRY
787 {
788 aarch64_make_prologue_cache_1 (this_frame, cache);
789 }
790 CATCH (ex, RETURN_MASK_ERROR)
791 {
792 if (ex.error != NOT_AVAILABLE_ERROR)
793 throw_exception (ex);
794 }
795 END_CATCH
796
797 return cache;
798 }
799
800 /* Implement the "stop_reason" frame_unwind method. */
801
802 static enum unwind_stop_reason
803 aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
804 void **this_cache)
805 {
806 struct aarch64_prologue_cache *cache
807 = aarch64_make_prologue_cache (this_frame, this_cache);
808
809 if (!cache->available_p)
810 return UNWIND_UNAVAILABLE;
811
812 /* Halt the backtrace at "_start". */
813 if (cache->prev_pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
814 return UNWIND_OUTERMOST;
815
816 /* We've hit a wall, stop. */
817 if (cache->prev_sp == 0)
818 return UNWIND_OUTERMOST;
819
820 return UNWIND_NO_REASON;
821 }
822
823 /* Our frame ID for a normal frame is the current function's starting
824 PC and the caller's SP when we were called. */
825
826 static void
827 aarch64_prologue_this_id (struct frame_info *this_frame,
828 void **this_cache, struct frame_id *this_id)
829 {
830 struct aarch64_prologue_cache *cache
831 = aarch64_make_prologue_cache (this_frame, this_cache);
832
833 if (!cache->available_p)
834 *this_id = frame_id_build_unavailable_stack (cache->func);
835 else
836 *this_id = frame_id_build (cache->prev_sp, cache->func);
837 }
838
839 /* Implement the "prev_register" frame_unwind method. */
840
841 static struct value *
842 aarch64_prologue_prev_register (struct frame_info *this_frame,
843 void **this_cache, int prev_regnum)
844 {
845 struct aarch64_prologue_cache *cache
846 = aarch64_make_prologue_cache (this_frame, this_cache);
847
848 /* If we are asked to unwind the PC, then we need to return the LR
849 instead. The prologue may save PC, but it will point into this
850 frame's prologue, not the next frame's resume location. */
851 if (prev_regnum == AARCH64_PC_REGNUM)
852 {
853 CORE_ADDR lr;
854
855 lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
856 return frame_unwind_got_constant (this_frame, prev_regnum, lr);
857 }
858
859 /* SP is generally not saved to the stack, but this frame is
860 identified by the next frame's stack pointer at the time of the
861 call. The value was already reconstructed into PREV_SP. */
862 /*
863 +----------+ ^
864 | saved lr | |
865 +->| saved fp |--+
866 | | |
867 | | | <- Previous SP
868 | +----------+
869 | | saved lr |
870 +--| saved fp |<- FP
871 | |
872 | |<- SP
873 +----------+ */
874 if (prev_regnum == AARCH64_SP_REGNUM)
875 return frame_unwind_got_constant (this_frame, prev_regnum,
876 cache->prev_sp);
877
878 return trad_frame_get_prev_register (this_frame, cache->saved_regs,
879 prev_regnum);
880 }
881
882 /* AArch64 prologue unwinder. */
883 struct frame_unwind aarch64_prologue_unwind =
884 {
885 NORMAL_FRAME,
886 aarch64_prologue_frame_unwind_stop_reason,
887 aarch64_prologue_this_id,
888 aarch64_prologue_prev_register,
889 NULL,
890 default_frame_sniffer
891 };
892
893 /* Allocate and fill in *THIS_CACHE with information about the prologue of
894 *THIS_FRAME. Do not do this if *THIS_CACHE was already allocated.
895 Return a pointer to the current aarch64_prologue_cache in
896 *THIS_CACHE. */
897
898 static struct aarch64_prologue_cache *
899 aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
900 {
901 struct aarch64_prologue_cache *cache;
902
903 if (*this_cache != NULL)
904 return (struct aarch64_prologue_cache *) *this_cache;
905
906 cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
907 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
908 *this_cache = cache;
909
910 TRY
911 {
912 cache->prev_sp = get_frame_register_unsigned (this_frame,
913 AARCH64_SP_REGNUM);
914 cache->prev_pc = get_frame_pc (this_frame);
915 cache->available_p = 1;
916 }
917 CATCH (ex, RETURN_MASK_ERROR)
918 {
919 if (ex.error != NOT_AVAILABLE_ERROR)
920 throw_exception (ex);
921 }
922 END_CATCH
923
924 return cache;
925 }
926
927 /* Implement the "stop_reason" frame_unwind method. */
928
929 static enum unwind_stop_reason
930 aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame,
931 void **this_cache)
932 {
933 struct aarch64_prologue_cache *cache
934 = aarch64_make_stub_cache (this_frame, this_cache);
935
936 if (!cache->available_p)
937 return UNWIND_UNAVAILABLE;
938
939 return UNWIND_NO_REASON;
940 }
941
942 /* Our frame ID for a stub frame is the current SP and LR. */
943
944 static void
945 aarch64_stub_this_id (struct frame_info *this_frame,
946 void **this_cache, struct frame_id *this_id)
947 {
948 struct aarch64_prologue_cache *cache
949 = aarch64_make_stub_cache (this_frame, this_cache);
950
951 if (cache->available_p)
952 *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
953 else
954 *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
955 }
956
957 /* Implement the "sniffer" frame_unwind method. */
958
959 static int
960 aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
961 struct frame_info *this_frame,
962 void **this_prologue_cache)
963 {
964 CORE_ADDR addr_in_block;
965 gdb_byte dummy[4];
966
967 addr_in_block = get_frame_address_in_block (this_frame);
968 if (in_plt_section (addr_in_block)
969 /* We also use the stub unwinder if the target memory is unreadable
970 to avoid having the prologue unwinder trying to read it. */
971 || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
972 return 1;
973
974 return 0;
975 }
976
977 /* AArch64 stub unwinder. */
978 struct frame_unwind aarch64_stub_unwind =
979 {
980 NORMAL_FRAME,
981 aarch64_stub_frame_unwind_stop_reason,
982 aarch64_stub_this_id,
983 aarch64_prologue_prev_register,
984 NULL,
985 aarch64_stub_unwind_sniffer
986 };
987
988 /* Return the frame base address of *THIS_FRAME. */
989
990 static CORE_ADDR
991 aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
992 {
993 struct aarch64_prologue_cache *cache
994 = aarch64_make_prologue_cache (this_frame, this_cache);
995
996 return cache->prev_sp - cache->framesize;
997 }
998
999 /* AArch64 default frame base information. */
1000 struct frame_base aarch64_normal_base =
1001 {
1002 &aarch64_prologue_unwind,
1003 aarch64_normal_frame_base,
1004 aarch64_normal_frame_base,
1005 aarch64_normal_frame_base
1006 };
1007
1008 /* Assuming THIS_FRAME is a dummy, return the frame ID of that
1009 dummy frame. The frame ID's base needs to match the TOS value
1010 saved by save_dummy_frame_tos () and returned from
1011 aarch64_push_dummy_call, and the PC needs to match the dummy
1012 frame's breakpoint. */
1013
1014 static struct frame_id
1015 aarch64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
1016 {
1017 return frame_id_build (get_frame_register_unsigned (this_frame,
1018 AARCH64_SP_REGNUM),
1019 get_frame_pc (this_frame));
1020 }
1021
1022 /* Implement the "unwind_pc" gdbarch method. */
1023
1024 static CORE_ADDR
1025 aarch64_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
1026 {
1027 CORE_ADDR pc
1028 = frame_unwind_register_unsigned (this_frame, AARCH64_PC_REGNUM);
1029
1030 return pc;
1031 }
1032
1033 /* Implement the "unwind_sp" gdbarch method. */
1034
1035 static CORE_ADDR
1036 aarch64_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
1037 {
1038 return frame_unwind_register_unsigned (this_frame, AARCH64_SP_REGNUM);
1039 }
1040
1041 /* Return the value of the REGNUM register in the previous frame of
1042 *THIS_FRAME. */
1043
1044 static struct value *
1045 aarch64_dwarf2_prev_register (struct frame_info *this_frame,
1046 void **this_cache, int regnum)
1047 {
1048 CORE_ADDR lr;
1049
1050 switch (regnum)
1051 {
1052 case AARCH64_PC_REGNUM:
1053 lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
1054 return frame_unwind_got_constant (this_frame, regnum, lr);
1055
1056 default:
1057 internal_error (__FILE__, __LINE__,
1058 _("Unexpected register %d"), regnum);
1059 }
1060 }
1061
1062 /* Implement the "init_reg" dwarf2_frame_ops method. */
1063
1064 static void
1065 aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
1066 struct dwarf2_frame_state_reg *reg,
1067 struct frame_info *this_frame)
1068 {
1069 switch (regnum)
1070 {
1071 case AARCH64_PC_REGNUM:
1072 reg->how = DWARF2_FRAME_REG_FN;
1073 reg->loc.fn = aarch64_dwarf2_prev_register;
1074 break;
1075 case AARCH64_SP_REGNUM:
1076 reg->how = DWARF2_FRAME_REG_CFA;
1077 break;
1078 }
1079 }
1080
1081 /* When arguments must be pushed onto the stack, they go on in reverse
1082 order. The code below implements a FILO (stack) to do this. */
1083
1084 typedef struct
1085 {
1086 /* Value to pass on stack. It can be NULL if this item is for stack
1087 padding. */
1088 const gdb_byte *data;
1089
1090 /* Size in bytes of value to pass on stack. */
1091 int len;
1092 } stack_item_t;
1093
1094 DEF_VEC_O (stack_item_t);
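/* To illustrate the FILO behaviour: if pass_on_stack () below queues
   item A and then item B, the drain loop in aarch64_push_dummy_call pops
   B first and A last while decrementing SP, so (ignoring padding items)
   A ends up at [sp] and B at [sp + len(A)] -- matching the NSAA offsets
   recorded when the items were queued.  */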
1095
1096 /* Return the alignment (in bytes) of the given type. */
1097
1098 static int
1099 aarch64_type_align (struct type *t)
1100 {
1101 int n;
1102 int align;
1103 int falign;
1104
1105 t = check_typedef (t);
1106 switch (TYPE_CODE (t))
1107 {
1108 default:
1109 /* Should never happen. */
1110 internal_error (__FILE__, __LINE__, _("unknown type alignment"));
1111 return 4;
1112
1113 case TYPE_CODE_PTR:
1114 case TYPE_CODE_ENUM:
1115 case TYPE_CODE_INT:
1116 case TYPE_CODE_FLT:
1117 case TYPE_CODE_SET:
1118 case TYPE_CODE_RANGE:
1119 case TYPE_CODE_BITSTRING:
1120 case TYPE_CODE_REF:
1121 case TYPE_CODE_RVALUE_REF:
1122 case TYPE_CODE_CHAR:
1123 case TYPE_CODE_BOOL:
1124 return TYPE_LENGTH (t);
1125
1126 case TYPE_CODE_ARRAY:
1127 if (TYPE_VECTOR (t))
1128 {
1129 /* Use the natural alignment for vector types (the same as for
1130 scalar types), but the maximum alignment is 128 bits. */
1131 if (TYPE_LENGTH (t) > 16)
1132 return 16;
1133 else
1134 return TYPE_LENGTH (t);
1135 }
1136 else
1137 return aarch64_type_align (TYPE_TARGET_TYPE (t));
1138 case TYPE_CODE_COMPLEX:
1139 return aarch64_type_align (TYPE_TARGET_TYPE (t));
1140
1141 case TYPE_CODE_STRUCT:
1142 case TYPE_CODE_UNION:
1143 align = 1;
1144 for (n = 0; n < TYPE_NFIELDS (t); n++)
1145 {
1146 falign = aarch64_type_align (TYPE_FIELD_TYPE (t, n));
1147 if (falign > align)
1148 align = falign;
1149 }
1150 return align;
1151 }
1152 }
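/* Worked example: for

     struct s { char c; double d; };

   the loop above returns max (1, 8) = 8, the alignment of the
   most-aligned member, while a 32-byte vector type would be capped at
   the 16-byte maximum.  */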
1153
1154 /* Worker function for aapcs_is_vfp_call_or_return_candidate.
1155
1156 Return the number of registers required, or -1 on failure.
1157
1158 When encountering a base element, if FUNDAMENTAL_TYPE is not set then set it
1159 to the element, else fail if the type of this element does not match the
1160 existing value. */
1161
1162 static int
1163 aapcs_is_vfp_call_or_return_candidate_1 (struct type *type,
1164 struct type **fundamental_type)
1165 {
1166 if (type == nullptr)
1167 return -1;
1168
1169 switch (TYPE_CODE (type))
1170 {
1171 case TYPE_CODE_FLT:
1172 if (TYPE_LENGTH (type) > 16)
1173 return -1;
1174
1175 if (*fundamental_type == nullptr)
1176 *fundamental_type = type;
1177 else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
1178 || TYPE_CODE (type) != TYPE_CODE (*fundamental_type))
1179 return -1;
1180
1181 return 1;
1182
1183 case TYPE_CODE_COMPLEX:
1184 {
1185 struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
1186 if (TYPE_LENGTH (target_type) > 16)
1187 return -1;
1188
1189 if (*fundamental_type == nullptr)
1190 *fundamental_type = target_type;
1191 else if (TYPE_LENGTH (target_type) != TYPE_LENGTH (*fundamental_type)
1192 || TYPE_CODE (target_type) != TYPE_CODE (*fundamental_type))
1193 return -1;
1194
1195 return 2;
1196 }
1197
1198 case TYPE_CODE_ARRAY:
1199 {
1200 if (TYPE_VECTOR (type))
1201 {
1202 if (TYPE_LENGTH (type) != 8 && TYPE_LENGTH (type) != 16)
1203 return -1;
1204
1205 if (*fundamental_type == nullptr)
1206 *fundamental_type = type;
1207 else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
1208 || TYPE_CODE (type) != TYPE_CODE (*fundamental_type))
1209 return -1;
1210
1211 return 1;
1212 }
1213 else
1214 {
1215 struct type *target_type = TYPE_TARGET_TYPE (type);
1216 int count = aapcs_is_vfp_call_or_return_candidate_1
1217 (target_type, fundamental_type);
1218
1219 if (count == -1)
1220 return count;
1221
1222 count *= (TYPE_LENGTH (type) / TYPE_LENGTH (target_type));
1223 return count;
1224 }
1225 }
1226
1227 case TYPE_CODE_STRUCT:
1228 case TYPE_CODE_UNION:
1229 {
1230 int count = 0;
1231
1232 for (int i = 0; i < TYPE_NFIELDS (type); i++)
1233 {
1234 struct type *member = check_typedef (TYPE_FIELD_TYPE (type, i));
1235
1236 int sub_count = aapcs_is_vfp_call_or_return_candidate_1
1237 (member, fundamental_type);
1238 if (sub_count == -1)
1239 return -1;
1240 count += sub_count;
1241 }
1242 return count;
1243 }
1244
1245 default:
1246 break;
1247 }
1248
1249 return -1;
1250 }
1251
1252 /* Return true if an argument, whose type is described by TYPE, can be passed or
1253 returned in simd/fp registers, providing enough parameter passing registers
1254 are available. This is as described in the AAPCS64.
1255
1256 Upon successful return, *COUNT returns the number of needed registers,
1257 *FUNDAMENTAL_TYPE contains the type of those registers.
1258
1259 Candidate as per the AAPCS64 5.4.2.C is either a:
1260 - float.
1261 - short-vector.
1262 - HFA (Homogeneous Floating-point Aggregate, 4.3.5.1). A Composite type where
1263 all the members are floats and has at most 4 members.
1264 - HVA (Homogeneous Short-vector Aggregate, 4.3.5.2). A Composite type where
1265 all the members are short vectors and has at most 4 members.
1266 - Complex (7.1.1)
1267
1268 Note that HFAs and HVAs can include nested structures and arrays. */
1269
1270 static bool
1271 aapcs_is_vfp_call_or_return_candidate (struct type *type, int *count,
1272 struct type **fundamental_type)
1273 {
1274 if (type == nullptr)
1275 return false;
1276
1277 *fundamental_type = nullptr;
1278
1279 int ag_count = aapcs_is_vfp_call_or_return_candidate_1 (type,
1280 fundamental_type);
1281
1282 if (ag_count > 0 && ag_count <= HA_MAX_NUM_FLDS)
1283 {
1284 *count = ag_count;
1285 return true;
1286 }
1287 else
1288 return false;
1289 }
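/* Some concrete cases, following the rules above:

     float                          -> candidate, 1 register
     _Complex double                -> candidate, 2 registers
     struct { float v[4]; }         -> HFA, 4 registers
     struct { float f; double d; }  -> rejected (mixed fundamental types)
     struct { float f[5]; }         -> rejected (more than 4 members)  */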
1290
1291 /* AArch64 function call information structure. */
1292 struct aarch64_call_info
1293 {
1294 /* The current argument number. */
1295 unsigned argnum;
1296
1297 /* The next general purpose register number, equivalent to NGRN as
1298 described in the AArch64 Procedure Call Standard. */
1299 unsigned ngrn;
1300
1301 /* The next SIMD and floating point register number, equivalent to
1302 NSRN as described in the AArch64 Procedure Call Standard. */
1303 unsigned nsrn;
1304
1305 /* The next stacked argument address, equivalent to NSAA as
1306 described in the AArch64 Procedure Call Standard. */
1307 unsigned nsaa;
1308
1309 /* Stack item vector. */
1310 VEC(stack_item_t) *si;
1311 };
1312
1313 /* Pass a value in a sequence of consecutive X registers. The caller
1314 is responsbile for ensuring sufficient registers are available. */
1315
1316 static void
1317 pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
1318 struct aarch64_call_info *info, struct type *type,
1319 struct value *arg)
1320 {
1321 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1322 int len = TYPE_LENGTH (type);
1323 enum type_code typecode = TYPE_CODE (type);
1324 int regnum = AARCH64_X0_REGNUM + info->ngrn;
1325 const bfd_byte *buf = value_contents (arg);
1326
1327 info->argnum++;
1328
1329 while (len > 0)
1330 {
1331 int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
1332 CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
1333 byte_order);
1334
1335
1336 /* Adjust sub-word struct/union args when big-endian. */
1337 if (byte_order == BFD_ENDIAN_BIG
1338 && partial_len < X_REGISTER_SIZE
1339 && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
1340 regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);
1341
1342 if (aarch64_debug)
1343 {
1344 debug_printf ("arg %d in %s = 0x%s\n", info->argnum,
1345 gdbarch_register_name (gdbarch, regnum),
1346 phex (regval, X_REGISTER_SIZE));
1347 }
1348 regcache_cooked_write_unsigned (regcache, regnum, regval);
1349 len -= partial_len;
1350 buf += partial_len;
1351 regnum++;
1352 }
1353 }
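/* For example, a 16-byte structure with INFO->ngrn == 0 is written to
   x0 and x1 in two 8-byte chunks.  On a big-endian target a trailing
   sub-word chunk of a struct or union is shifted into the most
   significant bits of its register, per the adjustment above.  */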
1354
1355 /* Attempt to marshall a value in a V register. Return 1 if
1356 successful, or 0 if insufficient registers are available. This
1357 function, unlike the equivalent pass_in_x () function, does not
1358 handle arguments spread across multiple registers. */
1359
1360 static int
1361 pass_in_v (struct gdbarch *gdbarch,
1362 struct regcache *regcache,
1363 struct aarch64_call_info *info,
1364 int len, const bfd_byte *buf)
1365 {
1366 if (info->nsrn < 8)
1367 {
1368 int regnum = AARCH64_V0_REGNUM + info->nsrn;
1369 /* Enough space for a full vector register. */
1370 gdb_byte reg[register_size (gdbarch, regnum)];
1371 gdb_assert (len <= sizeof (reg));
1372
1373 info->argnum++;
1374 info->nsrn++;
1375
1376 memset (reg, 0, sizeof (reg));
1377 /* PCS C.1, the argument is allocated to the least significant
1378 bits of the V register. */
1379 memcpy (reg, buf, len);
1380 regcache->cooked_write (regnum, reg);
1381
1382 if (aarch64_debug)
1383 {
1384 debug_printf ("arg %d in %s\n", info->argnum,
1385 gdbarch_register_name (gdbarch, regnum));
1386 }
1387 return 1;
1388 }
1389 info->nsrn = 8;
1390 return 0;
1391 }
1392
1393 /* Marshall an argument onto the stack. */
1394
1395 static void
1396 pass_on_stack (struct aarch64_call_info *info, struct type *type,
1397 struct value *arg)
1398 {
1399 const bfd_byte *buf = value_contents (arg);
1400 int len = TYPE_LENGTH (type);
1401 int align;
1402 stack_item_t item;
1403
1404 info->argnum++;
1405
1406 align = aarch64_type_align (type);
1407
1408 /* PCS C.17: the stack should be aligned to the larger of 8 bytes or the
1409 natural alignment of the argument's type. */
1410 align = align_up (align, 8);
1411
1412 /* The AArch64 PCS requires at most doubleword alignment. */
1413 if (align > 16)
1414 align = 16;
1415
1416 if (aarch64_debug)
1417 {
1418 debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
1419 info->nsaa);
1420 }
1421
1422 item.len = len;
1423 item.data = buf;
1424 VEC_safe_push (stack_item_t, info->si, &item);
1425
1426 info->nsaa += len;
1427 if (info->nsaa & (align - 1))
1428 {
1429 /* Push stack alignment padding. */
1430 int pad = align - (info->nsaa & (align - 1));
1431
1432 item.len = pad;
1433 item.data = NULL;
1434
1435 VEC_safe_push (stack_item_t, info->si, &item);
1436 info->nsaa += pad;
1437 }
1438 }
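/* Worked example: passing a single char pushes a 1-byte item, leaving
   NSAA == 1; the alignment rounds up to the 8-byte minimum, so a 7-byte
   padding item follows and NSAA becomes 8.  */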
1439
1440 /* Marshall an argument into a sequence of one or more consecutive X
1441 registers or, if insufficient X registers are available then onto
1442 the stack. */
1443
1444 static void
1445 pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
1446 struct aarch64_call_info *info, struct type *type,
1447 struct value *arg)
1448 {
1449 int len = TYPE_LENGTH (type);
1450 int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;
1451
1452 /* PCS C.13 - Pass in registers if we have enough spare. */
1453 if (info->ngrn + nregs <= 8)
1454 {
1455 pass_in_x (gdbarch, regcache, info, type, arg);
1456 info->ngrn += nregs;
1457 }
1458 else
1459 {
1460 info->ngrn = 8;
1461 pass_on_stack (info, type, arg);
1462 }
1463 }
1464
1465 /* Pass a value, which is of type arg_type, in a V register. Assumes the value
1466 is an aapcs_is_vfp_call_or_return_candidate and there are enough spare V
1467 registers. A return value of false is an error state as the value will have
1468 been partially passed to the stack. */
1469 static bool
1470 pass_in_v_vfp_candidate (struct gdbarch *gdbarch, struct regcache *regcache,
1471 struct aarch64_call_info *info, struct type *arg_type,
1472 struct value *arg)
1473 {
1474 switch (TYPE_CODE (arg_type))
1475 {
1476 case TYPE_CODE_FLT:
1477 return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type),
1478 value_contents (arg));
1479 break;
1480
1481 case TYPE_CODE_COMPLEX:
1482 {
1483 const bfd_byte *buf = value_contents (arg);
1484 struct type *target_type = check_typedef (TYPE_TARGET_TYPE (arg_type));
1485
1486 if (!pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type),
1487 buf))
1488 return false;
1489
1490 return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type),
1491 buf + TYPE_LENGTH (target_type));
1492 }
1493
1494 case TYPE_CODE_ARRAY:
1495 if (TYPE_VECTOR (arg_type))
1496 return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type),
1497 value_contents (arg));
1498 /* fall through. */
1499
1500 case TYPE_CODE_STRUCT:
1501 case TYPE_CODE_UNION:
1502 for (int i = 0; i < TYPE_NFIELDS (arg_type); i++)
1503 {
1504 struct value *field = value_primitive_field (arg, 0, i, arg_type);
1505 struct type *field_type = check_typedef (value_type (field));
1506
1507 if (!pass_in_v_vfp_candidate (gdbarch, regcache, info, field_type,
1508 field))
1509 return false;
1510 }
1511 return true;
1512
1513 default:
1514 return false;
1515 }
1516 }
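/* For example, passing struct { float x, y; } recurses into the two
   fields, placing X in the low 32 bits of v0 and Y in the low 32 bits
   of v1: each HFA member consumes one V register of its own.  */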
1517
1518 /* Implement the "push_dummy_call" gdbarch method. */
1519
1520 static CORE_ADDR
1521 aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
1522 struct regcache *regcache, CORE_ADDR bp_addr,
1523 int nargs,
1524 struct value **args, CORE_ADDR sp, int struct_return,
1525 CORE_ADDR struct_addr)
1526 {
1527 int argnum;
1528 struct aarch64_call_info info;
1529 struct type *func_type;
1530 struct type *return_type;
1531 int lang_struct_return;
1532
1533 memset (&info, 0, sizeof (info));
1534
1535 /* We need to know what the type of the called function is in order
1536 to determine the number of named/anonymous arguments for the
1537 actual argument placement, and the return type in order to handle
1538 return value correctly.
1539
1540 The generic code above us views the decision of return in memory
1541 or return in registers as a two stage processes. The language
1542 handler is consulted first and may decide to return in memory (eg
1543 class with copy constructor returned by value), this will cause
1544 the generic code to allocate space AND insert an initial leading
1545 argument.
1546
1547 If the language code does not decide to pass in memory then the
1548 target code is consulted.
1549
1550 If the language code decides to pass in memory we want to move
1551 the pointer inserted as the initial argument from the argument
1552 list and into X8, the conventional AArch64 struct return pointer
1553 register.
1554
1555 This is slightly awkward; ideally the flag "lang_struct_return"
1556 would be passed to the target's implementation of push_dummy_call.
1557 Rather than change the target interface, we call the language code
1558 directly ourselves. */
1559
1560 func_type = check_typedef (value_type (function));
1561
1562 /* Dereference function pointer types. */
1563 if (TYPE_CODE (func_type) == TYPE_CODE_PTR)
1564 func_type = TYPE_TARGET_TYPE (func_type);
1565
1566 gdb_assert (TYPE_CODE (func_type) == TYPE_CODE_FUNC
1567 || TYPE_CODE (func_type) == TYPE_CODE_METHOD);
1568
1569 /* If language_pass_by_reference () returned true we will have been
1570 given an additional initial argument, a hidden pointer to the
1571 return slot in memory. */
1572 return_type = TYPE_TARGET_TYPE (func_type);
1573 lang_struct_return = language_pass_by_reference (return_type);
1574
1575 /* Set the return address. For the AArch64, the return breakpoint
1576 is always at BP_ADDR. */
1577 regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);
1578
1579 /* If we were given an initial argument for the return slot because
1580 lang_struct_return was true, lose it. */
1581 if (lang_struct_return)
1582 {
1583 args++;
1584 nargs--;
1585 }
1586
1587 /* The struct_return pointer occupies X8. */
1588 if (struct_return || lang_struct_return)
1589 {
1590 if (aarch64_debug)
1591 {
1592 debug_printf ("struct return in %s = 0x%s\n",
1593 gdbarch_register_name (gdbarch,
1594 AARCH64_STRUCT_RETURN_REGNUM),
1595 paddress (gdbarch, struct_addr));
1596 }
1597 regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
1598 struct_addr);
1599 }
1600
1601 for (argnum = 0; argnum < nargs; argnum++)
1602 {
1603 struct value *arg = args[argnum];
1604 struct type *arg_type, *fundamental_type;
1605 int len, elements;
1606
1607 arg_type = check_typedef (value_type (arg));
1608 len = TYPE_LENGTH (arg_type);
1609
1610 /* If arg can be passed in v registers as per the AAPCS64, then do so
1611 if there are enough spare registers. */
1612 if (aapcs_is_vfp_call_or_return_candidate (arg_type, &elements,
1613 &fundamental_type))
1614 {
1615 if (info.nsrn + elements <= 8)
1616 {
1617 /* We know that we have sufficient registers available, therefore
1618 this will never need to fall back to the stack. */
1619 if (!pass_in_v_vfp_candidate (gdbarch, regcache, &info, arg_type,
1620 arg))
1621 gdb_assert_not_reached ("Failed to push args");
1622 }
1623 else
1624 {
1625 info.nsrn = 8;
1626 pass_on_stack (&info, arg_type, arg);
1627 }
1628 continue;
1629 }
1630
1631 switch (TYPE_CODE (arg_type))
1632 {
1633 case TYPE_CODE_INT:
1634 case TYPE_CODE_BOOL:
1635 case TYPE_CODE_CHAR:
1636 case TYPE_CODE_RANGE:
1637 case TYPE_CODE_ENUM:
1638 if (len < 4)
1639 {
1640 /* Promote to 32 bit integer. */
1641 if (TYPE_UNSIGNED (arg_type))
1642 arg_type = builtin_type (gdbarch)->builtin_uint32;
1643 else
1644 arg_type = builtin_type (gdbarch)->builtin_int32;
1645 arg = value_cast (arg_type, arg);
1646 }
1647 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1648 break;
1649
1650 case TYPE_CODE_STRUCT:
1651 case TYPE_CODE_ARRAY:
1652 case TYPE_CODE_UNION:
1653 if (len > 16)
1654 {
1655 /* PCS B.7 Aggregates larger than 16 bytes are passed by
1656 invisible reference. */
1657
1658 /* Allocate aligned storage. */
1659 sp = align_down (sp - len, 16);
1660
1661 /* Write the real data into the stack. */
1662 write_memory (sp, value_contents (arg), len);
1663
1664 /* Construct the indirection. */
1665 arg_type = lookup_pointer_type (arg_type);
1666 arg = value_from_pointer (arg_type, sp);
1667 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1668 }
1669 else
1670 /* PCS C.15 / C.18 multiple values pass. */
1671 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1672 break;
1673
1674 default:
1675 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1676 break;
1677 }
1678 }
1679
1680 /* Make sure stack retains 16 byte alignment. */
1681 if (info.nsaa & 15)
1682 sp -= 16 - (info.nsaa & 15);
1683
1684 while (!VEC_empty (stack_item_t, info.si))
1685 {
1686 stack_item_t *si = VEC_last (stack_item_t, info.si);
1687
1688 sp -= si->len;
1689 if (si->data != NULL)
1690 write_memory (sp, si->data, si->len);
1691 VEC_pop (stack_item_t, info.si);
1692 }
1693
1694 VEC_free (stack_item_t, info.si);
1695
1696 /* Finally, update the SP register. */
1697 regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);
1698
1699 return sp;
1700 }
1701
1702 /* Implement the "frame_align" gdbarch method. */
1703
1704 static CORE_ADDR
1705 aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
1706 {
1707 /* Align the stack to sixteen bytes. */
1708 return sp & ~(CORE_ADDR) 15;
1709 }
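/* E.g. an SP of 0x7ffffffff8 is aligned down to 0x7ffffffff0, while an
   already 16-byte-aligned SP is returned unchanged.  */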
1710
1711 /* Return the type for an AdvSIMD Q register. */
1712
1713 static struct type *
1714 aarch64_vnq_type (struct gdbarch *gdbarch)
1715 {
1716 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1717
1718 if (tdep->vnq_type == NULL)
1719 {
1720 struct type *t;
1721 struct type *elem;
1722
1723 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
1724 TYPE_CODE_UNION);
1725
1726 elem = builtin_type (gdbarch)->builtin_uint128;
1727 append_composite_type_field (t, "u", elem);
1728
1729 elem = builtin_type (gdbarch)->builtin_int128;
1730 append_composite_type_field (t, "s", elem);
1731
1732 tdep->vnq_type = t;
1733 }
1734
1735 return tdep->vnq_type;
1736 }
1737
1738 /* Return the type for an AdvSIMD D register. */
1739
1740 static struct type *
1741 aarch64_vnd_type (struct gdbarch *gdbarch)
1742 {
1743 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1744
1745 if (tdep->vnd_type == NULL)
1746 {
1747 struct type *t;
1748 struct type *elem;
1749
1750 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
1751 TYPE_CODE_UNION);
1752
1753 elem = builtin_type (gdbarch)->builtin_double;
1754 append_composite_type_field (t, "f", elem);
1755
1756 elem = builtin_type (gdbarch)->builtin_uint64;
1757 append_composite_type_field (t, "u", elem);
1758
1759 elem = builtin_type (gdbarch)->builtin_int64;
1760 append_composite_type_field (t, "s", elem);
1761
1762 tdep->vnd_type = t;
1763 }
1764
1765 return tdep->vnd_type;
1766 }
1767
1768 /* Return the type for an AdvSIMD S register. */
1769
1770 static struct type *
1771 aarch64_vns_type (struct gdbarch *gdbarch)
1772 {
1773 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1774
1775 if (tdep->vns_type == NULL)
1776 {
1777 struct type *t;
1778 struct type *elem;
1779
1780 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
1781 TYPE_CODE_UNION);
1782
1783 elem = builtin_type (gdbarch)->builtin_float;
1784 append_composite_type_field (t, "f", elem);
1785
1786 elem = builtin_type (gdbarch)->builtin_uint32;
1787 append_composite_type_field (t, "u", elem);
1788
1789 elem = builtin_type (gdbarch)->builtin_int32;
1790 append_composite_type_field (t, "s", elem);
1791
1792 tdep->vns_type = t;
1793 }
1794
1795 return tdep->vns_type;
1796 }
1797
1798 /* Return the type for an AdvSIMD H register. */
1799
1800 static struct type *
1801 aarch64_vnh_type (struct gdbarch *gdbarch)
1802 {
1803 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1804
1805 if (tdep->vnh_type == NULL)
1806 {
1807 struct type *t;
1808 struct type *elem;
1809
1810 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
1811 TYPE_CODE_UNION);
1812
1813 elem = builtin_type (gdbarch)->builtin_uint16;
1814 append_composite_type_field (t, "u", elem);
1815
1816 elem = builtin_type (gdbarch)->builtin_int16;
1817 append_composite_type_field (t, "s", elem);
1818
1819 tdep->vnh_type = t;
1820 }
1821
1822 return tdep->vnh_type;
1823 }
1824
1825 /* Return the type for an AdvSIMD B register. */
1826
1827 static struct type *
1828 aarch64_vnb_type (struct gdbarch *gdbarch)
1829 {
1830 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1831
1832 if (tdep->vnb_type == NULL)
1833 {
1834 struct type *t;
1835 struct type *elem;
1836
1837 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
1838 TYPE_CODE_UNION);
1839
1840 elem = builtin_type (gdbarch)->builtin_uint8;
1841 append_composite_type_field (t, "u", elem);
1842
1843 elem = builtin_type (gdbarch)->builtin_int8;
1844 append_composite_type_field (t, "s", elem);
1845
1846 tdep->vnb_type = t;
1847 }
1848
1849 return tdep->vnb_type;
1850 }
1851
1852 /* Return the type for an AdvSIMD V register. */
1853
1854 static struct type *
1855 aarch64_vnv_type (struct gdbarch *gdbarch)
1856 {
1857 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1858
1859 if (tdep->vnv_type == NULL)
1860 {
1861 struct type *t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnv",
1862 TYPE_CODE_UNION);
1863
1864 append_composite_type_field (t, "d", aarch64_vnd_type (gdbarch));
1865 append_composite_type_field (t, "s", aarch64_vns_type (gdbarch));
1866 append_composite_type_field (t, "h", aarch64_vnh_type (gdbarch));
1867 append_composite_type_field (t, "b", aarch64_vnb_type (gdbarch));
1868 append_composite_type_field (t, "q", aarch64_vnq_type (gdbarch));
1869
1870 tdep->vnv_type = t;
1871 }
1872
1873 return tdep->vnv_type;
1874 }
1875
1876 /* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
1877
1878 static int
1879 aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
1880 {
1881 if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
1882 return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;
1883
1884 if (reg == AARCH64_DWARF_SP)
1885 return AARCH64_SP_REGNUM;
1886
1887 if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
1888 return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;
1889
1890 if (reg == AARCH64_DWARF_SVE_VG)
1891 return AARCH64_SVE_VG_REGNUM;
1892
1893 if (reg == AARCH64_DWARF_SVE_FFR)
1894 return AARCH64_SVE_FFR_REGNUM;
1895
1896 if (reg >= AARCH64_DWARF_SVE_P0 && reg <= AARCH64_DWARF_SVE_P0 + 15)
1897 return AARCH64_SVE_P0_REGNUM + reg - AARCH64_DWARF_SVE_P0;
1898
1899 if (reg >= AARCH64_DWARF_SVE_Z0 && reg <= AARCH64_DWARF_SVE_Z0 + 15)
1900 return AARCH64_SVE_Z0_REGNUM + reg - AARCH64_DWARF_SVE_Z0;
1901
1902 return -1;
1903 }
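/* For instance, a DWARF number of AARCH64_DWARF_X0 + 5 maps to
   AARCH64_X0_REGNUM + 5 (x5) and AARCH64_DWARF_V0 + 3 maps to v3;
   any number outside the ranges handled above yields -1.  */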
1904
1905 /* Implement the "print_insn" gdbarch method. */
1906
1907 static int
1908 aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
1909 {
1910 info->symbols = NULL;
1911 return default_print_insn (memaddr, info);
1912 }
1913
1914 /* AArch64 BRK software debug mode instruction.
1915 Note that AArch64 code is always little-endian.
1916 1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000. */
1917 constexpr gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
1918
1919 typedef BP_MANIPULATION (aarch64_default_breakpoint) aarch64_breakpoint;
1920
1921 /* Extract from an array REGS containing the (raw) register state a
1922 function return value of type TYPE, and copy that, in virtual
1923 format, into VALBUF. */
1924
1925 static void
1926 aarch64_extract_return_value (struct type *type, struct regcache *regs,
1927 gdb_byte *valbuf)
1928 {
1929 struct gdbarch *gdbarch = regs->arch ();
1930 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1931 int elements;
1932 struct type *fundamental_type;
1933
1934 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
1935 &fundamental_type))
1936 {
1937 int len = TYPE_LENGTH (fundamental_type);
1938
1939 for (int i = 0; i < elements; i++)
1940 {
1941 int regno = AARCH64_V0_REGNUM + i;
1942 /* Enough space for a full vector register. */
1943 gdb_byte buf[register_size (gdbarch, regno)];
1944 gdb_assert (len <= sizeof (buf));
1945
1946 if (aarch64_debug)
1947 {
1948 debug_printf ("read HFA or HVA return value element %d from %s\n",
1949 i + 1,
1950 gdbarch_register_name (gdbarch, regno));
1951 }
1952 regs->cooked_read (regno, buf);
1953
1954 memcpy (valbuf, buf, len);
1955 valbuf += len;
1956 }
1957 }
1958 else if (TYPE_CODE (type) == TYPE_CODE_INT
1959 || TYPE_CODE (type) == TYPE_CODE_CHAR
1960 || TYPE_CODE (type) == TYPE_CODE_BOOL
1961 || TYPE_CODE (type) == TYPE_CODE_PTR
1962 || TYPE_IS_REFERENCE (type)
1963 || TYPE_CODE (type) == TYPE_CODE_ENUM)
1964 {
1965       /* If the type is a plain integer, then the access is
1966 straightforward.  Otherwise we have to play around a bit
1967 more. */
1968 int len = TYPE_LENGTH (type);
1969 int regno = AARCH64_X0_REGNUM;
1970 ULONGEST tmp;
1971
1972 while (len > 0)
1973 {
1974 /* By using store_unsigned_integer we avoid having to do
1975 anything special for small big-endian values. */
1976 regcache_cooked_read_unsigned (regs, regno++, &tmp);
1977 store_unsigned_integer (valbuf,
1978 (len > X_REGISTER_SIZE
1979 ? X_REGISTER_SIZE : len), byte_order, tmp);
1980 len -= X_REGISTER_SIZE;
1981 valbuf += X_REGISTER_SIZE;
1982 }
1983 }
1984 else
1985 {
1986 /* For a structure or union the behaviour is as if the value had
1987 been stored to word-aligned memory and then loaded into
1988 registers with 64-bit load instruction(s). */
1989 int len = TYPE_LENGTH (type);
1990 int regno = AARCH64_X0_REGNUM;
1991 bfd_byte buf[X_REGISTER_SIZE];
1992
1993 while (len > 0)
1994 {
1995 regs->cooked_read (regno++, buf);
1996 memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
1997 len -= X_REGISTER_SIZE;
1998 valbuf += X_REGISTER_SIZE;
1999 }
2000 }
2001 }
2002
2003
2004 /* Will a function return an aggregate type in memory or in a
2005 register? Return 0 if an aggregate type can be returned in a
2006 register, 1 if it must be returned in memory. */
2007
2008 static int
2009 aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
2010 {
2011 type = check_typedef (type);
2012 int elements;
2013 struct type *fundamental_type;
2014
2015 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2016 &fundamental_type))
2017 {
2018       /* v0-v7 are used to return values, and one register is allocated
2019 for each member.  However, an HFA or HVA has at most four members. */
2020 return 0;
2021 }
2022
2023 if (TYPE_LENGTH (type) > 16)
2024 {
2025       /* PCS B.6: Aggregates larger than 16 bytes are passed by
2026 invisible reference. */
2027
2028 return 1;
2029 }
2030
2031 return 0;
2032 }
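/* For example, a C type such as struct { double a, b, c, d; } is a
   Homogeneous Floating-point Aggregate with four members, so it stays
   in registers (V0-V3), while struct { char c[17]; } exceeds the
   16-byte limit and must be returned in memory per PCS B.6.  */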
2033
2034 /* Write into appropriate registers a function return value of type
2035 TYPE, given in virtual format. */
2036
2037 static void
2038 aarch64_store_return_value (struct type *type, struct regcache *regs,
2039 const gdb_byte *valbuf)
2040 {
2041 struct gdbarch *gdbarch = regs->arch ();
2042 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2043 int elements;
2044 struct type *fundamental_type;
2045
2046 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2047 &fundamental_type))
2048 {
2049 int len = TYPE_LENGTH (fundamental_type);
2050
2051 for (int i = 0; i < elements; i++)
2052 {
2053 int regno = AARCH64_V0_REGNUM + i;
2054 /* Enough space for a full vector register. */
2055 gdb_byte tmpbuf[register_size (gdbarch, regno)];
2056 gdb_assert (len <= sizeof (tmpbuf));
2057
2058 if (aarch64_debug)
2059 {
2060 debug_printf ("write HFA or HVA return value element %d to %s\n",
2061 i + 1,
2062 gdbarch_register_name (gdbarch, regno));
2063 }
2064
2065 memcpy (tmpbuf, valbuf,
2066 len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
2067 regs->cooked_write (regno, tmpbuf);
2068 valbuf += len;
2069 }
2070 }
2071 else if (TYPE_CODE (type) == TYPE_CODE_INT
2072 || TYPE_CODE (type) == TYPE_CODE_CHAR
2073 || TYPE_CODE (type) == TYPE_CODE_BOOL
2074 || TYPE_CODE (type) == TYPE_CODE_PTR
2075 || TYPE_IS_REFERENCE (type)
2076 || TYPE_CODE (type) == TYPE_CODE_ENUM)
2077 {
2078 if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
2079 {
2080 /* Values of one word or less are zero/sign-extended and
2081 returned in X0. */
2082 bfd_byte tmpbuf[X_REGISTER_SIZE];
2083 LONGEST val = unpack_long (type, valbuf);
2084
2085 store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
2086 regs->cooked_write (AARCH64_X0_REGNUM, tmpbuf);
2087 }
2088 else
2089 {
2090 /* Integral values greater than one word are stored in
2091 consecutive registers starting with X0.  This will always
2092 be a multiple of the register size. */
2093 int len = TYPE_LENGTH (type);
2094 int regno = AARCH64_X0_REGNUM;
2095
2096 while (len > 0)
2097 {
2098 regs->cooked_write (regno++, valbuf);
2099 len -= X_REGISTER_SIZE;
2100 valbuf += X_REGISTER_SIZE;
2101 }
2102 }
2103 }
2104 else
2105 {
2106 /* For a structure or union the behaviour is as if the value had
2107 been stored to word-aligned memory and then loaded into
2108 registers with 64-bit load instruction(s). */
2109 int len = TYPE_LENGTH (type);
2110 int regno = AARCH64_X0_REGNUM;
2111 bfd_byte tmpbuf[X_REGISTER_SIZE];
2112
2113 while (len > 0)
2114 {
2115 memcpy (tmpbuf, valbuf,
2116 len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2117 regs->cooked_write (regno++, tmpbuf);
2118 len -= X_REGISTER_SIZE;
2119 valbuf += X_REGISTER_SIZE;
2120 }
2121 }
2122 }
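/* For example, an unsigned __int128 result is 16 bytes, so the integer
   branch above stores it in two register-sized chunks into X0 and X1,
   consecutive general registers starting at X0.  */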
2123
2124 /* Implement the "return_value" gdbarch method. */
2125
2126 static enum return_value_convention
2127 aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
2128 struct type *valtype, struct regcache *regcache,
2129 gdb_byte *readbuf, const gdb_byte *writebuf)
2130 {
2131
2132 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
2133 || TYPE_CODE (valtype) == TYPE_CODE_UNION
2134 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
2135 {
2136 if (aarch64_return_in_memory (gdbarch, valtype))
2137 {
2138 if (aarch64_debug)
2139 debug_printf ("return value in memory\n");
2140 return RETURN_VALUE_STRUCT_CONVENTION;
2141 }
2142 }
2143
2144 if (writebuf)
2145 aarch64_store_return_value (valtype, regcache, writebuf);
2146
2147 if (readbuf)
2148 aarch64_extract_return_value (valtype, regcache, readbuf);
2149
2150 if (aarch64_debug)
2151 debug_printf ("return value in registers\n");
2152
2153 return RETURN_VALUE_REGISTER_CONVENTION;
2154 }
2155
2156 /* Implement the "get_longjmp_target" gdbarch method. */
2157
2158 static int
2159 aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2160 {
2161 CORE_ADDR jb_addr;
2162 gdb_byte buf[X_REGISTER_SIZE];
2163 struct gdbarch *gdbarch = get_frame_arch (frame);
2164 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2165 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2166
2167 jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
2168
2169 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
2170 X_REGISTER_SIZE))
2171 return 0;
2172
2173 *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
2174 return 1;
2175 }
2176
2177 /* Implement the "gen_return_address" gdbarch method. */
2178
2179 static void
2180 aarch64_gen_return_address (struct gdbarch *gdbarch,
2181 struct agent_expr *ax, struct axs_value *value,
2182 CORE_ADDR scope)
2183 {
2184 value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
2185 value->kind = axs_lvalue_register;
2186 value->u.reg = AARCH64_LR_REGNUM;
2187 }
2188 \f
2189
2190 /* Return the pseudo register name corresponding to register regnum. */
2191
2192 static const char *
2193 aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
2194 {
2195 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2196
2197 static const char *const q_name[] =
2198 {
2199 "q0", "q1", "q2", "q3",
2200 "q4", "q5", "q6", "q7",
2201 "q8", "q9", "q10", "q11",
2202 "q12", "q13", "q14", "q15",
2203 "q16", "q17", "q18", "q19",
2204 "q20", "q21", "q22", "q23",
2205 "q24", "q25", "q26", "q27",
2206 "q28", "q29", "q30", "q31",
2207 };
2208
2209 static const char *const d_name[] =
2210 {
2211 "d0", "d1", "d2", "d3",
2212 "d4", "d5", "d6", "d7",
2213 "d8", "d9", "d10", "d11",
2214 "d12", "d13", "d14", "d15",
2215 "d16", "d17", "d18", "d19",
2216 "d20", "d21", "d22", "d23",
2217 "d24", "d25", "d26", "d27",
2218 "d28", "d29", "d30", "d31",
2219 };
2220
2221 static const char *const s_name[] =
2222 {
2223 "s0", "s1", "s2", "s3",
2224 "s4", "s5", "s6", "s7",
2225 "s8", "s9", "s10", "s11",
2226 "s12", "s13", "s14", "s15",
2227 "s16", "s17", "s18", "s19",
2228 "s20", "s21", "s22", "s23",
2229 "s24", "s25", "s26", "s27",
2230 "s28", "s29", "s30", "s31",
2231 };
2232
2233 static const char *const h_name[] =
2234 {
2235 "h0", "h1", "h2", "h3",
2236 "h4", "h5", "h6", "h7",
2237 "h8", "h9", "h10", "h11",
2238 "h12", "h13", "h14", "h15",
2239 "h16", "h17", "h18", "h19",
2240 "h20", "h21", "h22", "h23",
2241 "h24", "h25", "h26", "h27",
2242 "h28", "h29", "h30", "h31",
2243 };
2244
2245 static const char *const b_name[] =
2246 {
2247 "b0", "b1", "b2", "b3",
2248 "b4", "b5", "b6", "b7",
2249 "b8", "b9", "b10", "b11",
2250 "b12", "b13", "b14", "b15",
2251 "b16", "b17", "b18", "b19",
2252 "b20", "b21", "b22", "b23",
2253 "b24", "b25", "b26", "b27",
2254 "b28", "b29", "b30", "b31",
2255 };
2256
2257 regnum -= gdbarch_num_regs (gdbarch);
2258
2259 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2260 return q_name[regnum - AARCH64_Q0_REGNUM];
2261
2262 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2263 return d_name[regnum - AARCH64_D0_REGNUM];
2264
2265 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2266 return s_name[regnum - AARCH64_S0_REGNUM];
2267
2268 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2269 return h_name[regnum - AARCH64_H0_REGNUM];
2270
2271 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2272 return b_name[regnum - AARCH64_B0_REGNUM];
2273
2274 if (tdep->has_sve ())
2275 {
2276 static const char *const sve_v_name[] =
2277 {
2278 "v0", "v1", "v2", "v3",
2279 "v4", "v5", "v6", "v7",
2280 "v8", "v9", "v10", "v11",
2281 "v12", "v13", "v14", "v15",
2282 "v16", "v17", "v18", "v19",
2283 "v20", "v21", "v22", "v23",
2284 "v24", "v25", "v26", "v27",
2285 "v28", "v29", "v30", "v31",
2286 };
2287
2288 if (regnum >= AARCH64_SVE_V0_REGNUM
2289 && regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2290 return sve_v_name[regnum - AARCH64_SVE_V0_REGNUM];
2291 }
2292
2293 internal_error (__FILE__, __LINE__,
2294 _("aarch64_pseudo_register_name: bad register number %d"),
2295 regnum);
2296 }
2297
2298 /* Implement the "pseudo_register_type" tdesc_arch_data method. */
2299
2300 static struct type *
2301 aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
2302 {
2303 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2304
2305 regnum -= gdbarch_num_regs (gdbarch);
2306
2307 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2308 return aarch64_vnq_type (gdbarch);
2309
2310 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2311 return aarch64_vnd_type (gdbarch);
2312
2313 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2314 return aarch64_vns_type (gdbarch);
2315
2316 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2317 return aarch64_vnh_type (gdbarch);
2318
2319 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2320 return aarch64_vnb_type (gdbarch);
2321
2322 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2323 && regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2324 return aarch64_vnv_type (gdbarch);
2325
2326 internal_error (__FILE__, __LINE__,
2327 _("aarch64_pseudo_register_type: bad register number %d"),
2328 regnum);
2329 }
2330
2331 /* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2332
2333 static int
2334 aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
2335 struct reggroup *group)
2336 {
2337 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2338
2339 regnum -= gdbarch_num_regs (gdbarch);
2340
2341 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2342 return group == all_reggroup || group == vector_reggroup;
2343 else if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2344 return (group == all_reggroup || group == vector_reggroup
2345 || group == float_reggroup);
2346 else if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2347 return (group == all_reggroup || group == vector_reggroup
2348 || group == float_reggroup);
2349 else if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2350 return group == all_reggroup || group == vector_reggroup;
2351 else if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2352 return group == all_reggroup || group == vector_reggroup;
2353 else if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2354 && regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2355 return group == all_reggroup || group == vector_reggroup;
2356
2357 return group == all_reggroup;
2358 }
2359
2360 /* Helper for aarch64_pseudo_read_value. */
2361
2362 static struct value *
2363 aarch64_pseudo_read_value_1 (struct gdbarch *gdbarch,
2364 readable_regcache *regcache, int regnum_offset,
2365 int regsize, struct value *result_value)
2366 {
2367 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
2368
2369 /* Enough space for a full vector register. */
2370 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2371 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2372
2373 if (regcache->raw_read (v_regnum, reg_buf) != REG_VALID)
2374 mark_value_bytes_unavailable (result_value, 0,
2375 TYPE_LENGTH (value_type (result_value)));
2376 else
2377 memcpy (value_contents_raw (result_value), reg_buf, regsize);
2378
2379 return result_value;
2380 }
2381
2382 /* Implement the "pseudo_register_read_value" gdbarch method. */
2383
2384 static struct value *
2385 aarch64_pseudo_read_value (struct gdbarch *gdbarch, readable_regcache *regcache,
2386 int regnum)
2387 {
2388 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2389 struct value *result_value = allocate_value (register_type (gdbarch, regnum));
2390
2391 VALUE_LVAL (result_value) = lval_register;
2392 VALUE_REGNUM (result_value) = regnum;
2393
2394 regnum -= gdbarch_num_regs (gdbarch);
2395
2396 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2397 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2398 regnum - AARCH64_Q0_REGNUM,
2399 Q_REGISTER_SIZE, result_value);
2400
2401 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2402 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2403 regnum - AARCH64_D0_REGNUM,
2404 D_REGISTER_SIZE, result_value);
2405
2406 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2407 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2408 regnum - AARCH64_S0_REGNUM,
2409 S_REGISTER_SIZE, result_value);
2410
2411 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2412 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2413 regnum - AARCH64_H0_REGNUM,
2414 H_REGISTER_SIZE, result_value);
2415
2416 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2417 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2418 regnum - AARCH64_B0_REGNUM,
2419 B_REGISTER_SIZE, result_value);
2420
2421 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2422 && regnum < AARCH64_SVE_V0_REGNUM + 32)
2423 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2424 regnum - AARCH64_SVE_V0_REGNUM,
2425 V_REGISTER_SIZE, result_value);
2426
2427   gdb_assert_not_reached ("regnum out of bounds");
2428 }
2429
2430 /* Helper for aarch64_pseudo_write. */
2431
2432 static void
2433 aarch64_pseudo_write_1 (struct gdbarch *gdbarch, struct regcache *regcache,
2434 int regnum_offset, int regsize, const gdb_byte *buf)
2435 {
2436 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
2437
2438 /* Enough space for a full vector register. */
2439 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2440 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2441
2442   /* Ensure the register buffer is zero.  We want GDB writes of the
2443 various 'scalar' pseudo registers to behave like architectural
2444 writes: register-width bytes are written, and the remainder is set
2445 to zero. */
2446 memset (reg_buf, 0, register_size (gdbarch, AARCH64_V0_REGNUM));
2447
2448 memcpy (reg_buf, buf, regsize);
2449 regcache->raw_write (v_regnum, reg_buf);
2450 }
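/* For example, writing the S0 pseudo register through this helper
   zeroes the whole underlying V0 (or Z0) raw register before copying in
   the four bytes, so the upper lanes read back as zero afterwards, just
   as they would after an architectural write of a 32-bit scalar.  */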
2451
2452 /* Implement the "pseudo_register_write" gdbarch method. */
2453
2454 static void
2455 aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2456 int regnum, const gdb_byte *buf)
2457 {
2458 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2459 regnum -= gdbarch_num_regs (gdbarch);
2460
2461 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2462 return aarch64_pseudo_write_1 (gdbarch, regcache,
2463 regnum - AARCH64_Q0_REGNUM, Q_REGISTER_SIZE,
2464 buf);
2465
2466 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2467 return aarch64_pseudo_write_1 (gdbarch, regcache,
2468 regnum - AARCH64_D0_REGNUM, D_REGISTER_SIZE,
2469 buf);
2470
2471 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2472 return aarch64_pseudo_write_1 (gdbarch, regcache,
2473 regnum - AARCH64_S0_REGNUM, S_REGISTER_SIZE,
2474 buf);
2475
2476 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2477 return aarch64_pseudo_write_1 (gdbarch, regcache,
2478 regnum - AARCH64_H0_REGNUM, H_REGISTER_SIZE,
2479 buf);
2480
2481 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2482 return aarch64_pseudo_write_1 (gdbarch, regcache,
2483 regnum - AARCH64_B0_REGNUM, B_REGISTER_SIZE,
2484 buf);
2485
2486 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2487 && regnum < AARCH64_SVE_V0_REGNUM + 32)
2488 return aarch64_pseudo_write_1 (gdbarch, regcache,
2489 regnum - AARCH64_SVE_V0_REGNUM,
2490 V_REGISTER_SIZE, buf);
2491
2492   gdb_assert_not_reached ("regnum out of bounds");
2493 }
2494
2495 /* Callback function for user_reg_add. */
2496
2497 static struct value *
2498 value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
2499 {
2500 const int *reg_p = (const int *) baton;
2501
2502 return value_of_register (*reg_p, frame);
2503 }
2504 \f
2505
2506 /* Implement the "software_single_step" gdbarch method, needed to
2507 single step through atomic sequences on AArch64. */
2508
2509 static std::vector<CORE_ADDR>
2510 aarch64_software_single_step (struct regcache *regcache)
2511 {
2512 struct gdbarch *gdbarch = regcache->arch ();
2513 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2514 const int insn_size = 4;
2515 const int atomic_sequence_length = 16; /* Instruction sequence length. */
2516 CORE_ADDR pc = regcache_read_pc (regcache);
2517 CORE_ADDR breaks[2] = { CORE_ADDR_MAX, CORE_ADDR_MAX };
2518 CORE_ADDR loc = pc;
2519 CORE_ADDR closing_insn = 0;
2520 uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
2521 byte_order_for_code);
2522 int index;
2523 int insn_count;
2524 int bc_insn_count = 0; /* Conditional branch instruction count. */
2525 int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed). */
2526 aarch64_inst inst;
2527
2528 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2529 return {};
2530
2531 /* Look for a Load Exclusive instruction which begins the sequence. */
2532 if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
2533 return {};
2534
2535 for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
2536 {
2537 loc += insn_size;
2538 insn = read_memory_unsigned_integer (loc, insn_size,
2539 byte_order_for_code);
2540
2541 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2542 return {};
2543 /* Check if the instruction is a conditional branch. */
2544 if (inst.opcode->iclass == condbranch)
2545 {
2546 gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);
2547
2548 if (bc_insn_count >= 1)
2549 return {};
2550
2551 /* It is, so we'll try to set a breakpoint at the destination. */
2552 breaks[1] = loc + inst.operands[0].imm.value;
2553
2554 bc_insn_count++;
2555 last_breakpoint++;
2556 }
2557
2558 /* Look for the Store Exclusive which closes the atomic sequence. */
2559 if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
2560 {
2561 closing_insn = loc;
2562 break;
2563 }
2564 }
2565
2566 /* We didn't find a closing Store Exclusive instruction, fall back. */
2567 if (!closing_insn)
2568 return {};
2569
2570 /* Insert breakpoint after the end of the atomic sequence. */
2571 breaks[0] = loc + insn_size;
2572
2573 /* Check for duplicated breakpoints, and also check that the second
2574 breakpoint is not within the atomic sequence. */
2575 if (last_breakpoint
2576 && (breaks[1] == breaks[0]
2577 || (breaks[1] >= pc && breaks[1] <= closing_insn)))
2578 last_breakpoint = 0;
2579
2580 std::vector<CORE_ADDR> next_pcs;
2581
2582 /* Insert the breakpoint at the end of the sequence, and one at the
2583 destination of the conditional branch, if it exists. */
2584 for (index = 0; index <= last_breakpoint; index++)
2585 next_pcs.push_back (breaks[index]);
2586
2587 return next_pcs;
2588 }
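/* As an illustration (a typical sequence, not taken from any particular
   binary), the function above treats

     ldaxr  w1, [x0]      <- load exclusive opens the sequence
     add    w1, w1, #1
     stlxr  w2, w1, [x0]  <- store exclusive closes it
     cbnz   w2, retry

   as a single unit, placing the breakpoint after the STLXR rather than
   inside the sequence, where trapping would clear the exclusive monitor
   and leave the retry loop spinning forever.  */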
2589
2590 struct aarch64_displaced_step_closure : public displaced_step_closure
2591 {
2592   /* True when a conditional instruction, such as B.COND, TBZ, etc.,
2593 is being displaced stepped. */
2594 int cond = 0;
2595
2596 /* PC adjustment offset after displaced stepping. */
2597 int32_t pc_adjust = 0;
2598 };
2599
2600 /* Data when visiting instructions for displaced stepping. */
2601
2602 struct aarch64_displaced_step_data
2603 {
2604 struct aarch64_insn_data base;
2605
2606   /* The address at which the instruction will be executed. */
2607 CORE_ADDR new_addr;
2608 /* Buffer of instructions to be copied to NEW_ADDR to execute. */
2609 uint32_t insn_buf[DISPLACED_MODIFIED_INSNS];
2610 /* Number of instructions in INSN_BUF. */
2611 unsigned insn_count;
2612 /* Registers when doing displaced stepping. */
2613 struct regcache *regs;
2614
2615 aarch64_displaced_step_closure *dsc;
2616 };
2617
2618 /* Implementation of aarch64_insn_visitor method "b". */
2619
2620 static void
2621 aarch64_displaced_step_b (const int is_bl, const int32_t offset,
2622 struct aarch64_insn_data *data)
2623 {
2624 struct aarch64_displaced_step_data *dsd
2625 = (struct aarch64_displaced_step_data *) data;
2626 int64_t new_offset = data->insn_addr - dsd->new_addr + offset;
2627
2628 if (can_encode_int32 (new_offset, 28))
2629 {
2630 /* Emit B rather than BL, because executing BL on a new address
2631 will get the wrong address into LR. In order to avoid this,
2632 we emit B, and update LR if the instruction is BL. */
2633 emit_b (dsd->insn_buf, 0, new_offset);
2634 dsd->insn_count++;
2635 }
2636 else
2637 {
2638 /* Write NOP. */
2639 emit_nop (dsd->insn_buf);
2640 dsd->insn_count++;
2641 dsd->dsc->pc_adjust = offset;
2642 }
2643
2644 if (is_bl)
2645 {
2646 /* Update LR. */
2647 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
2648 data->insn_addr + 4);
2649 }
2650 }
2651
2652 /* Implementation of aarch64_insn_visitor method "b_cond". */
2653
2654 static void
2655 aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
2656 struct aarch64_insn_data *data)
2657 {
2658 struct aarch64_displaced_step_data *dsd
2659 = (struct aarch64_displaced_step_data *) data;
2660
2661   /* GDB has to fix up the PC after displaced stepping this instruction
2662 differently, according to whether the condition is true or false.
2663 Instead of checking COND against the condition flags, we can emit
2664 the following instructions, and GDB can tell how to fix up the PC
2665 from the resulting PC value.
2666
2667 B.COND TAKEN ; If cond is true, then jump to TAKEN.
2668 INSN1 ;
2669 TAKEN:
2670 INSN2
2671 */
2672
2673 emit_bcond (dsd->insn_buf, cond, 8);
2674 dsd->dsc->cond = 1;
2675 dsd->dsc->pc_adjust = offset;
2676 dsd->insn_count = 1;
2677 }
2678
2679 /* Construct an aarch64_register value at run time.  If we know the
2680 register statically, we should make it a global, as above, instead of
2681 using this helper function. */
2682
2683 static struct aarch64_register
2684 aarch64_register (unsigned num, int is64)
2685 {
2686 return (struct aarch64_register) { num, is64 };
2687 }
2688
2689 /* Implementation of aarch64_insn_visitor method "cb". */
2690
2691 static void
2692 aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
2693 const unsigned rn, int is64,
2694 struct aarch64_insn_data *data)
2695 {
2696 struct aarch64_displaced_step_data *dsd
2697 = (struct aarch64_displaced_step_data *) data;
2698
2699 /* The offset is out of range for a compare and branch
2700 instruction. We can use the following instructions instead:
2701
2702 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
2703 INSN1 ;
2704 TAKEN:
2705 INSN2
2706 */
2707 emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
2708 dsd->insn_count = 1;
2709 dsd->dsc->cond = 1;
2710 dsd->dsc->pc_adjust = offset;
2711 }
2712
2713 /* Implementation of aarch64_insn_visitor method "tb". */
2714
2715 static void
2716 aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
2717 const unsigned rt, unsigned bit,
2718 struct aarch64_insn_data *data)
2719 {
2720 struct aarch64_displaced_step_data *dsd
2721 = (struct aarch64_displaced_step_data *) data;
2722
2723 /* The offset is out of range for a test bit and branch
2724 instruction.  We can use the following instructions instead:
2725
2726 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
2727 INSN1 ;
2728 TAKEN:
2729 INSN2
2730
2731 */
2732 emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
2733 dsd->insn_count = 1;
2734 dsd->dsc->cond = 1;
2735 dsd->dsc->pc_adjust = offset;
2736 }
2737
2738 /* Implementation of aarch64_insn_visitor method "adr". */
2739
2740 static void
2741 aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
2742 const int is_adrp, struct aarch64_insn_data *data)
2743 {
2744 struct aarch64_displaced_step_data *dsd
2745 = (struct aarch64_displaced_step_data *) data;
2746 /* We know exactly the address the ADR{P,} instruction will compute.
2747 We can just write it to the destination register. */
2748 CORE_ADDR address = data->insn_addr + offset;
2749
2750 if (is_adrp)
2751 {
2752       /* Clear the lower 12 bits of the address to get its 4K page. */
2753 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2754 address & ~0xfff);
2755 }
2756 else
2757 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2758 address);
2759
2760 dsd->dsc->pc_adjust = 4;
2761 emit_nop (dsd->insn_buf);
2762 dsd->insn_count = 1;
2763 }
2764
2765 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
2766
2767 static void
2768 aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
2769 const unsigned rt, const int is64,
2770 struct aarch64_insn_data *data)
2771 {
2772 struct aarch64_displaced_step_data *dsd
2773 = (struct aarch64_displaced_step_data *) data;
2774 CORE_ADDR address = data->insn_addr + offset;
2775 struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };
2776
2777 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
2778 address);
2779
2780 if (is_sw)
2781 dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
2782 aarch64_register (rt, 1), zero);
2783 else
2784 dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
2785 aarch64_register (rt, 1), zero);
2786
2787 dsd->dsc->pc_adjust = 4;
2788 }
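/* For example, "ldr x0, <literal>" is relocated by first writing the
   literal's absolute address into X0 and then executing
   "ldr x0, [x0, #0]" from the scratch pad, which loads the same value
   without any PC-relative addressing.  */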
2789
2790 /* Implementation of aarch64_insn_visitor method "others". */
2791
2792 static void
2793 aarch64_displaced_step_others (const uint32_t insn,
2794 struct aarch64_insn_data *data)
2795 {
2796 struct aarch64_displaced_step_data *dsd
2797 = (struct aarch64_displaced_step_data *) data;
2798
2799 aarch64_emit_insn (dsd->insn_buf, insn);
2800 dsd->insn_count = 1;
2801
2802 if ((insn & 0xfffffc1f) == 0xd65f0000)
2803 {
2804 /* RET */
2805 dsd->dsc->pc_adjust = 0;
2806 }
2807 else
2808 dsd->dsc->pc_adjust = 4;
2809 }
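/* The mask in the test above accepts RET with any operand register: the
   base encoding is 0xd65f0000 with Rn in bits [9:5], so clearing those
   bits with 0xfffffc1f matches RET, RET X1, and so on.  A RET computes
   its own new PC, hence the PC_ADJUST of zero.  */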
2810
2811 static const struct aarch64_insn_visitor visitor =
2812 {
2813 aarch64_displaced_step_b,
2814 aarch64_displaced_step_b_cond,
2815 aarch64_displaced_step_cb,
2816 aarch64_displaced_step_tb,
2817 aarch64_displaced_step_adr,
2818 aarch64_displaced_step_ldr_literal,
2819 aarch64_displaced_step_others,
2820 };
2821
2822 /* Implement the "displaced_step_copy_insn" gdbarch method. */
2823
2824 struct displaced_step_closure *
2825 aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
2826 CORE_ADDR from, CORE_ADDR to,
2827 struct regcache *regs)
2828 {
2829 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2830 uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
2831 struct aarch64_displaced_step_data dsd;
2832 aarch64_inst inst;
2833
2834 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2835 return NULL;
2836
2837 /* Look for a Load Exclusive instruction which begins the sequence. */
2838 if (inst.opcode->iclass == ldstexcl && bit (insn, 22))
2839 {
2840       /* We can't displaced-step atomic sequences. */
2841 return NULL;
2842 }
2843
2844 std::unique_ptr<aarch64_displaced_step_closure> dsc
2845 (new aarch64_displaced_step_closure);
2846 dsd.base.insn_addr = from;
2847 dsd.new_addr = to;
2848 dsd.regs = regs;
2849 dsd.dsc = dsc.get ();
2850 dsd.insn_count = 0;
2851 aarch64_relocate_instruction (insn, &visitor,
2852 (struct aarch64_insn_data *) &dsd);
2853 gdb_assert (dsd.insn_count <= DISPLACED_MODIFIED_INSNS);
2854
2855 if (dsd.insn_count != 0)
2856 {
2857 int i;
2858
2859       /* The instruction can be relocated to the scratch pad.  Copy the
2860 relocated instruction(s) there. */
2861 for (i = 0; i < dsd.insn_count; i++)
2862 {
2863 if (debug_displaced)
2864 {
2865 debug_printf ("displaced: writing insn ");
2866 debug_printf ("%.8x", dsd.insn_buf[i]);
2867 debug_printf (" at %s\n", paddress (gdbarch, to + i * 4));
2868 }
2869 write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
2870 (ULONGEST) dsd.insn_buf[i]);
2871 }
2872 }
2873 else
2874 {
2875 dsc = NULL;
2876 }
2877
2878 return dsc.release ();
2879 }
2880
2881 /* Implement the "displaced_step_fixup" gdbarch method. */
2882
2883 void
2884 aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
2885 struct displaced_step_closure *dsc_,
2886 CORE_ADDR from, CORE_ADDR to,
2887 struct regcache *regs)
2888 {
2889 aarch64_displaced_step_closure *dsc = (aarch64_displaced_step_closure *) dsc_;
2890
2891 if (dsc->cond)
2892 {
2893 ULONGEST pc;
2894
2895 regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc);
2896 if (pc - to == 8)
2897 {
2898 /* Condition is true. */
2899 }
2900 else if (pc - to == 4)
2901 {
2902 /* Condition is false. */
2903 dsc->pc_adjust = 4;
2904 }
2905 else
2906 gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
2907 }
2908
2909 if (dsc->pc_adjust != 0)
2910 {
2911 if (debug_displaced)
2912 {
2913 debug_printf ("displaced: fixup: set PC to %s:%d\n",
2914 paddress (gdbarch, from), dsc->pc_adjust);
2915 }
2916 regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
2917 from + dsc->pc_adjust);
2918 }
2919 }
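/* For example, after displaced stepping a taken B.EQ, the PC stops at
   TO + 8 (the TAKEN label in the scratch copy), so PC_ADJUST keeps the
   original branch offset and the write above lands on the real branch
   target; for a not-taken branch the PC stops at TO + 4 and PC_ADJUST
   is forced to 4, resuming at the instruction after FROM.  */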
2920
2921 /* Implement the "displaced_step_hw_singlestep" gdbarch method. */
2922
2923 int
2924 aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch,
2925 struct displaced_step_closure *closure)
2926 {
2927 return 1;
2928 }
2929
2930 /* Get the correct target description for the given VQ value.
2931 If VQ is zero then it is assumed SVE is not supported.
2932 (It is not possible to set VQ to zero on an SVE system). */
2933
2934 const target_desc *
2935 aarch64_read_description (uint64_t vq)
2936 {
2937 if (vq > AARCH64_MAX_SVE_VQ)
2938 error (_("VQ is %" PRIu64 ", maximum supported value is %d"), vq,
2939 AARCH64_MAX_SVE_VQ);
2940
2941 struct target_desc *tdesc = tdesc_aarch64_list[vq];
2942
2943 if (tdesc == NULL)
2944 {
2945 tdesc = aarch64_create_target_description (vq);
2946 tdesc_aarch64_list[vq] = tdesc;
2947 }
2948
2949 return tdesc;
2950 }
2951
2952 /* Return the VQ used when creating the target description TDESC. */
2953
2954 static uint64_t
2955 aarch64_get_tdesc_vq (const struct target_desc *tdesc)
2956 {
2957 const struct tdesc_feature *feature_sve;
2958
2959 if (!tdesc_has_registers (tdesc))
2960 return 0;
2961
2962 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
2963
2964 if (feature_sve == nullptr)
2965 return 0;
2966
2967 uint64_t vl = tdesc_register_bitsize (feature_sve,
2968 aarch64_sve_register_names[0]) / 8;
2969 return sve_vq_from_vl (vl);
2970 }
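/* VL here is the SVE vector length in bytes (the bitsize of the first Z
   register divided by 8), while VQ counts 128-bit quadwords, so a
   512-bit Z register gives VL = 64 and VQ = 4.  */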
2971
2972
2973 /* Initialize the current architecture based on INFO. If possible,
2974 re-use an architecture from ARCHES, which is a list of
2975 architectures already created during this debugging session.
2976
2977 Called e.g. at program startup, when reading a core file, and when
2978 reading a binary file. */
2979
2980 static struct gdbarch *
2981 aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
2982 {
2983 struct gdbarch_tdep *tdep;
2984 struct gdbarch *gdbarch;
2985 struct gdbarch_list *best_arch;
2986 struct tdesc_arch_data *tdesc_data = NULL;
2987 const struct target_desc *tdesc = info.target_desc;
2988 int i;
2989 int valid_p = 1;
2990 const struct tdesc_feature *feature_core;
2991 const struct tdesc_feature *feature_fpu;
2992 const struct tdesc_feature *feature_sve;
2993 int num_regs = 0;
2994 int num_pseudo_regs = 0;
2995
2996 /* Ensure we always have a target description. */
2997 if (!tdesc_has_registers (tdesc))
2998 tdesc = aarch64_read_description (0);
2999 gdb_assert (tdesc);
3000
3001 feature_core = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");
3002 feature_fpu = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
3003 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
3004
3005 if (feature_core == NULL)
3006 return NULL;
3007
3008 tdesc_data = tdesc_data_alloc ();
3009
3010 /* Validate the description provides the mandatory core R registers
3011 and allocate their numbers. */
3012 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
3013 valid_p &= tdesc_numbered_register (feature_core, tdesc_data,
3014 AARCH64_X0_REGNUM + i,
3015 aarch64_r_register_names[i]);
3016
3017 num_regs = AARCH64_X0_REGNUM + i;
3018
3019 /* Add the V registers. */
3020 if (feature_fpu != NULL)
3021 {
3022 if (feature_sve != NULL)
3023 error (_("Program contains both fpu and SVE features."));
3024
3025 /* Validate the description provides the mandatory V registers
3026 and allocate their numbers. */
3027 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
3028 valid_p &= tdesc_numbered_register (feature_fpu, tdesc_data,
3029 AARCH64_V0_REGNUM + i,
3030 aarch64_v_register_names[i]);
3031
3032 num_regs = AARCH64_V0_REGNUM + i;
3033 }
3034
3035 /* Add the SVE registers. */
3036 if (feature_sve != NULL)
3037 {
3038 /* Validate the description provides the mandatory SVE registers
3039 and allocate their numbers. */
3040 for (i = 0; i < ARRAY_SIZE (aarch64_sve_register_names); i++)
3041 valid_p &= tdesc_numbered_register (feature_sve, tdesc_data,
3042 AARCH64_SVE_Z0_REGNUM + i,
3043 aarch64_sve_register_names[i]);
3044
3045 num_regs = AARCH64_SVE_Z0_REGNUM + i;
3046       num_pseudo_regs += 32;	/* Add the Vn register pseudos.  */
3047 }
3048
3049 if (feature_fpu != NULL || feature_sve != NULL)
3050 {
3051       num_pseudo_regs += 32;	/* Add the Qn scalar register pseudos.  */
3052       num_pseudo_regs += 32;	/* Add the Dn scalar register pseudos.  */
3053       num_pseudo_regs += 32;	/* Add the Sn scalar register pseudos.  */
3054       num_pseudo_regs += 32;	/* Add the Hn scalar register pseudos.  */
3055       num_pseudo_regs += 32;	/* Add the Bn scalar register pseudos.  */
3056 }
3057
3058 if (!valid_p)
3059 {
3060 tdesc_data_cleanup (tdesc_data);
3061 return NULL;
3062 }
3063
3064 /* AArch64 code is always little-endian. */
3065 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
3066
3067 /* If there is already a candidate, use it. */
3068 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
3069 best_arch != NULL;
3070 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
3071 {
3072 /* Found a match. */
3073 break;
3074 }
3075
3076 if (best_arch != NULL)
3077 {
3078 if (tdesc_data != NULL)
3079 tdesc_data_cleanup (tdesc_data);
3080 return best_arch->gdbarch;
3081 }
3082
3083 tdep = XCNEW (struct gdbarch_tdep);
3084 gdbarch = gdbarch_alloc (&info, tdep);
3085
3086 /* This should be low enough for everything. */
3087 tdep->lowest_pc = 0x20;
3088 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
3089 tdep->jb_elt_size = 8;
3090 tdep->vq = aarch64_get_tdesc_vq (tdesc);
3091
3092 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
3093 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
3094
3095 /* Frame handling. */
3096 set_gdbarch_dummy_id (gdbarch, aarch64_dummy_id);
3097 set_gdbarch_unwind_pc (gdbarch, aarch64_unwind_pc);
3098 set_gdbarch_unwind_sp (gdbarch, aarch64_unwind_sp);
3099
3100 /* Advance PC across function entry code. */
3101 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
3102
3103 /* The stack grows downward. */
3104 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
3105
3106 /* Breakpoint manipulation. */
3107 set_gdbarch_breakpoint_kind_from_pc (gdbarch,
3108 aarch64_breakpoint::kind_from_pc);
3109 set_gdbarch_sw_breakpoint_from_kind (gdbarch,
3110 aarch64_breakpoint::bp_from_kind);
3111 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
3112 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
3113
3114 /* Information about registers, etc. */
3115 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
3116 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
3117 set_gdbarch_num_regs (gdbarch, num_regs);
3118
3119 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
3120 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
3121 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
3122 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
3123 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
3124 set_tdesc_pseudo_register_reggroup_p (gdbarch,
3125 aarch64_pseudo_register_reggroup_p);
3126
3127 /* ABI */
3128 set_gdbarch_short_bit (gdbarch, 16);
3129 set_gdbarch_int_bit (gdbarch, 32);
3130 set_gdbarch_float_bit (gdbarch, 32);
3131 set_gdbarch_double_bit (gdbarch, 64);
3132 set_gdbarch_long_double_bit (gdbarch, 128);
3133 set_gdbarch_long_bit (gdbarch, 64);
3134 set_gdbarch_long_long_bit (gdbarch, 64);
3135 set_gdbarch_ptr_bit (gdbarch, 64);
3136 set_gdbarch_char_signed (gdbarch, 0);
3137 set_gdbarch_wchar_signed (gdbarch, 0);
3138 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
3139 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
3140 set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);
3141
3142 /* Internal <-> external register number maps. */
3143 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
3144
3145 /* Returning results. */
3146 set_gdbarch_return_value (gdbarch, aarch64_return_value);
3147
3148 /* Disassembly. */
3149 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
3150
3151 /* Virtual tables. */
3152 set_gdbarch_vbit_in_delta (gdbarch, 1);
3153
3154 /* Hook in the ABI-specific overrides, if they have been registered. */
3155 info.target_desc = tdesc;
3156 info.tdesc_data = tdesc_data;
3157 gdbarch_init_osabi (info, gdbarch);
3158
3159 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
3160
3161 /* Add some default predicates. */
3162 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
3163 dwarf2_append_unwinders (gdbarch);
3164 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
3165
3166 frame_base_set_default (gdbarch, &aarch64_normal_base);
3167
3168   /* Now that we have tuned the configuration, set a few final things,
3169 based on what the OS ABI has told us. */
3170
3171 if (tdep->jb_pc >= 0)
3172 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
3173
3174 set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);
3175
3176 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
3177
3178 /* Add standard register aliases. */
3179 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
3180 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
3181 value_of_aarch64_user_reg,
3182 &aarch64_register_aliases[i].regnum);
3183
3184 return gdbarch;
3185 }
3186
3187 static void
3188 aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
3189 {
3190 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3191
3192 if (tdep == NULL)
3193 return;
3194
3195 fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
3196 paddress (gdbarch, tdep->lowest_pc));
3197 }
3198
3199 #if GDB_SELF_TEST
3200 namespace selftests
3201 {
3202 static void aarch64_process_record_test (void);
3203 }
3204 #endif
3205
3206 void
3207 _initialize_aarch64_tdep (void)
3208 {
3209 gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
3210 aarch64_dump_tdep);
3211
3212 /* Debug this file's internals. */
3213 add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
3214 Set AArch64 debugging."), _("\
3215 Show AArch64 debugging."), _("\
3216 When on, AArch64 specific debugging is enabled."),
3217 NULL,
3218 show_aarch64_debug,
3219 &setdebuglist, &showdebuglist);
3220
3221 #if GDB_SELF_TEST
3222 selftests::register_test ("aarch64-analyze-prologue",
3223 selftests::aarch64_analyze_prologue_test);
3224 selftests::register_test ("aarch64-process-record",
3225 selftests::aarch64_process_record_test);
3226 selftests::record_xml_tdesc ("aarch64.xml",
3227 aarch64_create_target_description (0));
3228 #endif
3229 }
3230
3231 /* AArch64 process record-replay related structures, defines, etc. */
3232
3233 #define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
3234 do \
3235 { \
3236 unsigned int reg_len = LENGTH; \
3237 if (reg_len) \
3238 { \
3239 REGS = XNEWVEC (uint32_t, reg_len); \
3240 memcpy (&REGS[0], &RECORD_BUF[0], sizeof (uint32_t) * LENGTH); \
3241 } \
3242 } \
3243 while (0)
3244
3245 #define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
3246 do \
3247 { \
3248 unsigned int mem_len = LENGTH; \
3249 if (mem_len) \
3250 { \
3251 MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
3252 memcpy (&MEMS->len, &RECORD_BUF[0], \
3253 sizeof (struct aarch64_mem_r) * LENGTH); \
3254 } \
3255 } \
3256 while (0)
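/* Typical usage: a record handler fills a small local record_buf[] with
   register numbers, sets reg_rec_count, and then calls
   REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
   record_buf) to copy the entries into a heap buffer owned by the
   insn_decode_record; MEM_ALLOC does the same for the length/address
   pairs that describe memory writes.  */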
3257
3258 /* AArch64 record/replay structures and enumerations. */
3259
3260 struct aarch64_mem_r
3261 {
3262 uint64_t len; /* Record length. */
3263 uint64_t addr; /* Memory address. */
3264 };
3265
3266 enum aarch64_record_result
3267 {
3268 AARCH64_RECORD_SUCCESS,
3269 AARCH64_RECORD_UNSUPPORTED,
3270 AARCH64_RECORD_UNKNOWN
3271 };
3272
3273 typedef struct insn_decode_record_t
3274 {
3275 struct gdbarch *gdbarch;
3276 struct regcache *regcache;
3277 CORE_ADDR this_addr; /* Address of insn to be recorded. */
3278 uint32_t aarch64_insn; /* Insn to be recorded. */
3279 uint32_t mem_rec_count; /* Count of memory records. */
3280 uint32_t reg_rec_count; /* Count of register records. */
3281 uint32_t *aarch64_regs; /* Registers to be recorded. */
3282 struct aarch64_mem_r *aarch64_mems; /* Memory locations to be recorded. */
3283 } insn_decode_record;
3284
3285 /* Record handler for data processing - register instructions. */
3286
3287 static unsigned int
3288 aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
3289 {
3290 uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
3291 uint32_t record_buf[4];
3292
3293 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3294 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3295 insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);
3296
3297 if (!bit (aarch64_insn_r->aarch64_insn, 28))
3298 {
3299 uint8_t setflags;
3300
3301 /* Logical (shifted register). */
3302 if (insn_bits24_27 == 0x0a)
3303 setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
3304 /* Add/subtract. */
3305 else if (insn_bits24_27 == 0x0b)
3306 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3307 else
3308 return AARCH64_RECORD_UNKNOWN;
3309
3310 record_buf[0] = reg_rd;
3311 aarch64_insn_r->reg_rec_count = 1;
3312 if (setflags)
3313 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3314 }
3315 else
3316 {
3317 if (insn_bits24_27 == 0x0b)
3318 {
3319 /* Data-processing (3 source). */
3320 record_buf[0] = reg_rd;
3321 aarch64_insn_r->reg_rec_count = 1;
3322 }
3323 else if (insn_bits24_27 == 0x0a)
3324 {
3325 if (insn_bits21_23 == 0x00)
3326 {
3327 /* Add/subtract (with carry). */
3328 record_buf[0] = reg_rd;
3329 aarch64_insn_r->reg_rec_count = 1;
3330 if (bit (aarch64_insn_r->aarch64_insn, 29))
3331 {
3332 record_buf[1] = AARCH64_CPSR_REGNUM;
3333 aarch64_insn_r->reg_rec_count = 2;
3334 }
3335 }
3336 else if (insn_bits21_23 == 0x02)
3337 {
3338 /* Conditional compare (register) and conditional compare
3339 (immediate) instructions. */
3340 record_buf[0] = AARCH64_CPSR_REGNUM;
3341 aarch64_insn_r->reg_rec_count = 1;
3342 }
3343 else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
3344 {
3345 	      /* Conditional select. */
3346 /* Data-processing (2 source). */
3347 /* Data-processing (1 source). */
3348 record_buf[0] = reg_rd;
3349 aarch64_insn_r->reg_rec_count = 1;
3350 }
3351 else
3352 return AARCH64_RECORD_UNKNOWN;
3353 }
3354 }
3355
3356 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3357 record_buf);
3358 return AARCH64_RECORD_SUCCESS;
3359 }
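/* For example, "adds x0, x1, x2" is an add/subtract (shifted register)
   instruction with the S bit (bit 29) set, so the handler above records
   both X0 and AARCH64_CPSR_REGNUM for the updated condition flags.  */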
3360
3361 /* Record handler for data processing - immediate instructions. */
3362
3363 static unsigned int
3364 aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
3365 {
3366 uint8_t reg_rd, insn_bit23, insn_bits24_27, setflags;
3367 uint32_t record_buf[4];
3368
3369 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3370 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3371 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3372
3373 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
3374 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
3375 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
3376 {
3377 record_buf[0] = reg_rd;
3378 aarch64_insn_r->reg_rec_count = 1;
3379 }
3380 else if (insn_bits24_27 == 0x01)
3381 {
3382 /* Add/Subtract (immediate). */
3383 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3384 record_buf[0] = reg_rd;
3385 aarch64_insn_r->reg_rec_count = 1;
3386 if (setflags)
3387 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3388 }
3389 else if (insn_bits24_27 == 0x02 && !insn_bit23)
3390 {
3391 /* Logical (immediate). */
3392 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
3393 record_buf[0] = reg_rd;
3394 aarch64_insn_r->reg_rec_count = 1;
3395 if (setflags)
3396 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3397 }
3398 else
3399 return AARCH64_RECORD_UNKNOWN;
3400
3401 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3402 record_buf);
3403 return AARCH64_RECORD_SUCCESS;
3404 }
3405
3406 /* Record handler for branch, exception generation and system instructions. */
3407
3408 static unsigned int
3409 aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
3410 {
3411 struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
3412 uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
3413 uint32_t record_buf[4];
3414
3415 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3416 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3417 insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3418
3419 if (insn_bits28_31 == 0x0d)
3420 {
3421 /* Exception generation instructions. */
3422 if (insn_bits24_27 == 0x04)
3423 {
3424 if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
3425 && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
3426 && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
3427 {
3428 ULONGEST svc_number;
3429
3430 regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
3431 &svc_number);
3432 return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
3433 svc_number);
3434 }
3435 else
3436 return AARCH64_RECORD_UNSUPPORTED;
3437 }
3438 /* System instructions. */
3439 else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
3440 {
3441 uint32_t reg_rt, reg_crn;
3442
3443 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3444 reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3445
3446 /* Record rt in case of sysl and mrs instructions. */
3447 if (bit (aarch64_insn_r->aarch64_insn, 21))
3448 {
3449 record_buf[0] = reg_rt;
3450 aarch64_insn_r->reg_rec_count = 1;
3451 }
3452 /* Record cpsr for hint and msr(immediate) instructions. */
3453 else if (reg_crn == 0x02 || reg_crn == 0x04)
3454 {
3455 record_buf[0] = AARCH64_CPSR_REGNUM;
3456 aarch64_insn_r->reg_rec_count = 1;
3457 }
3458 }
3459 /* Unconditional branch (register). */
3460       else if ((insn_bits24_27 & 0x0e) == 0x06)
3461 {
3462 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3463 if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
3464 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3465 }
3466 else
3467 return AARCH64_RECORD_UNKNOWN;
3468 }
3469 /* Unconditional branch (immediate). */
3470 else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
3471 {
3472 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3473 if (bit (aarch64_insn_r->aarch64_insn, 31))
3474 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3475 }
3476 else
3477 /* Compare & branch (immediate), Test & branch (immediate) and
3478 Conditional branch (immediate). */
3479 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3480
3481 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3482 record_buf);
3483 return AARCH64_RECORD_SUCCESS;
3484 }
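/* For example, "bl <target>" takes the unconditional branch (immediate)
   arm above with bit 31 set, so both AARCH64_PC_REGNUM and
   AARCH64_LR_REGNUM are recorded; a plain "b <target>" records only the
   PC.  */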
3485
3486 /* Record handler for advanced SIMD load and store instructions. */
3487
3488 static unsigned int
3489 aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
3490 {
3491 CORE_ADDR address;
3492 uint64_t addr_offset = 0;
3493 uint32_t record_buf[24];
3494 uint64_t record_buf_mem[24];
3495 uint32_t reg_rn, reg_rt;
3496 uint32_t reg_index = 0, mem_index = 0;
3497 uint8_t opcode_bits, size_bits;
3498
3499 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3500 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3501 size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3502 opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3503 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);
3504
3505 if (record_debug)
3506 debug_printf ("Process record: Advanced SIMD load/store\n");
3507
3508 /* Load/store single structure. */
3509 if (bit (aarch64_insn_r->aarch64_insn, 24))
3510 {
3511 uint8_t sindex, scale, selem, esize, replicate = 0;
3512 scale = opcode_bits >> 2;
3513 selem = ((opcode_bits & 0x02) |
3514 bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
3515 switch (scale)
3516 {
3517 case 1:
3518 if (size_bits & 0x01)
3519 return AARCH64_RECORD_UNKNOWN;
3520 break;
3521 case 2:
3522 if ((size_bits >> 1) & 0x01)
3523 return AARCH64_RECORD_UNKNOWN;
3524 if (size_bits & 0x01)
3525 {
3526 if (!((opcode_bits >> 1) & 0x01))
3527 scale = 3;
3528 else
3529 return AARCH64_RECORD_UNKNOWN;
3530 }
3531 break;
3532 case 3:
3533 if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
3534 {
3535 scale = size_bits;
3536 replicate = 1;
3537 break;
3538 }
3539 else
3540 return AARCH64_RECORD_UNKNOWN;
3541 default:
3542 break;
3543 }
3544 esize = 8 << scale;
3545 if (replicate)
3546 for (sindex = 0; sindex < selem; sindex++)
3547 {
3548 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3549 reg_rt = (reg_rt + 1) % 32;
3550 }
3551 else
3552 {
3553 for (sindex = 0; sindex < selem; sindex++)
3554 {
3555 if (bit (aarch64_insn_r->aarch64_insn, 22))
3556 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3557 else
3558 {
3559 record_buf_mem[mem_index++] = esize / 8;
3560 record_buf_mem[mem_index++] = address + addr_offset;
3561 }
3562 addr_offset = addr_offset + (esize / 8);
3563 reg_rt = (reg_rt + 1) % 32;
3564 }
3565 }
3566 }
3567 /* Load/store multiple structure. */
3568 else
3569 {
3570 uint8_t selem, esize, rpt, elements;
3571 uint8_t eindex, rindex;
3572
3573 esize = 8 << size_bits;
3574 if (bit (aarch64_insn_r->aarch64_insn, 30))
3575 elements = 128 / esize;
3576 else
3577 elements = 64 / esize;
3578
3579 switch (opcode_bits)
3580 {
3581 	/* LD/ST4 (4 Registers). */
3582 	case 0:
3583 	  rpt = 1;
3584 	  selem = 4;
3585 	  break;
3586 	/* LD/ST1 (4 Registers). */
3587 	case 2:
3588 	  rpt = 4;
3589 	  selem = 1;
3590 	  break;
3591 	/* LD/ST3 (3 Registers). */
3592 	case 4:
3593 	  rpt = 1;
3594 	  selem = 3;
3595 	  break;
3596 	/* LD/ST1 (3 Registers). */
3597 	case 6:
3598 	  rpt = 3;
3599 	  selem = 1;
3600 	  break;
3601 	/* LD/ST1 (1 Register). */
3602 	case 7:
3603 	  rpt = 1;
3604 	  selem = 1;
3605 	  break;
3606 	/* LD/ST2 (2 Registers). */
3607 	case 8:
3608 	  rpt = 1;
3609 	  selem = 2;
3610 	  break;
3611 	/* LD/ST1 (2 Registers). */
3612 case 10:
3613 rpt = 2;
3614 selem = 1;
3615 break;
3616 default:
3617 return AARCH64_RECORD_UNSUPPORTED;
3618 break;
3619 }
3620 for (rindex = 0; rindex < rpt; rindex++)
3621 for (eindex = 0; eindex < elements; eindex++)
3622 {
3623 uint8_t reg_tt, sindex;
3624 reg_tt = (reg_rt + rindex) % 32;
3625 for (sindex = 0; sindex < selem; sindex++)
3626 {
3627 if (bit (aarch64_insn_r->aarch64_insn, 22))
3628 record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
3629 else
3630 {
3631 record_buf_mem[mem_index++] = esize / 8;
3632 record_buf_mem[mem_index++] = address + addr_offset;
3633 }
3634 addr_offset = addr_offset + (esize / 8);
3635 reg_tt = (reg_tt + 1) % 32;
3636 }
3637 }
3638 }
3639
3640 if (bit (aarch64_insn_r->aarch64_insn, 23))
3641 record_buf[reg_index++] = reg_rn;
3642
3643 aarch64_insn_r->reg_rec_count = reg_index;
3644 aarch64_insn_r->mem_rec_count = mem_index / 2;
3645 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3646 record_buf_mem);
3647 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3648 record_buf);
3649 return AARCH64_RECORD_SUCCESS;
3650 }
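/* For example, "st4 {v0.4s-v3.4s}, [x0]" is a store of four structures
   (rpt = 1, selem = 4, elements = 4), so the handler above emits sixteen
   4-byte memory records starting at the address in X0; the
   corresponding LD4 records the destination V registers instead, and a
   post-indexed form additionally records the base register via bit 23.  */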
3651
3652 /* Record handler for load and store instructions. */
3653
3654 static unsigned int
3655 aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
3656 {
3657 uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
3658 uint8_t insn_bit23, insn_bit21;
3659 uint8_t opc, size_bits, ld_flag, vector_flag;
3660 uint32_t reg_rn, reg_rt, reg_rt2;
3661 uint64_t datasize, offset;
3662 uint32_t record_buf[8];
3663 uint64_t record_buf_mem[8];
3664 CORE_ADDR address;
3665
3666 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3667 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3668 insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
3669 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3670 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3671 ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
3672 vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
3673 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3674 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3675 reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
3676 size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);
3677
3678 /* Load/store exclusive. */
3679 if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
3680 {
3681 if (record_debug)
3682 debug_printf ("Process record: load/store exclusive\n");
3683
3684 if (ld_flag)
3685 {
3686 record_buf[0] = reg_rt;
3687 aarch64_insn_r->reg_rec_count = 1;
3688 if (insn_bit21)
3689 {
3690 record_buf[1] = reg_rt2;
3691 aarch64_insn_r->reg_rec_count = 2;
3692 }
3693 }
3694 else
3695 {
3696 if (insn_bit21)
3697 datasize = (8 << size_bits) * 2;
3698 else
3699 datasize = (8 << size_bits);
3700 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3701 &address);
3702 record_buf_mem[0] = datasize / 8;
3703 record_buf_mem[1] = address;
3704 aarch64_insn_r->mem_rec_count = 1;
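/* An exclusive store (bit 23 clear) also writes its success/failure
status to register Rs in bits 16-20, so that register must be
recorded as well. */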
3705 if (!insn_bit23)
3706 {
3707 /* Save register rs. */
3708 record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
3709 aarch64_insn_r->reg_rec_count = 1;
3710 }
3711 }
3712 }
3713 /* Decode load register (literal) instructions. */
3714 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
3715 {
3716 if (record_debug)
3717 debug_printf ("Process record: load register (literal)\n");
3718 if (vector_flag)
3719 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3720 else
3721 record_buf[0] = reg_rt;
3722 aarch64_insn_r->reg_rec_count = 1;
3723 }
3724 /* Decode all types of load/store pair instructions. */
3725 else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
3726 {
3727 if (record_debug)
3728 debug_printf ("Process record: load/store pair\n");
3729
3730 if (ld_flag)
3731 {
3732 if (vector_flag)
3733 {
3734 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3735 record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
3736 }
3737 else
3738 {
3739 record_buf[0] = reg_rt;
3740 record_buf[1] = reg_rt2;
3741 }
3742 aarch64_insn_r->reg_rec_count = 2;
3743 }
3744 else
3745 {
3746 uint16_t imm7_off;
3747 imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
3748 if (!vector_flag)
3749 size_bits = size_bits >> 1;
3750 datasize = 8 << (2 + size_bits);
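/* The imm7 field is a signed, scaled offset: when the sign bit
(0x40) is set, (~imm7 & 0x7f) + 1 gives its two's complement
magnitude, which is subtracted from the base address below. */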
3751 offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
3752 offset = offset << (2 + size_bits);
3753 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3754 &address);
3755 if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
3756 {
3757 if (imm7_off & 0x40)
3758 address = address - offset;
3759 else
3760 address = address + offset;
3761 }
3762
3763 record_buf_mem[0] = datasize / 8;
3764 record_buf_mem[1] = address;
3765 record_buf_mem[2] = datasize / 8;
3766 record_buf_mem[3] = address + (datasize / 8);
3767 aarch64_insn_r->mem_rec_count = 2;
3768 }
3769 if (bit (aarch64_insn_r->aarch64_insn, 23))
3770 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3771 }
3772 /* Load/store register (unsigned immediate) instructions. */
3773 else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
3774 {
3775 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3776 if (!(opc >> 1))
3777 {
3778 if (opc & 0x01)
3779 ld_flag = 0x01;
3780 else
3781 ld_flag = 0x0;
3782 }
3783 else
3784 {
3785 if (size_bits == 0x3 && vector_flag == 0x0 && opc == 0x2)
3786 {
3787 /* PRFM (immediate) */
3788 return AARCH64_RECORD_SUCCESS;
3789 }
3790 else if (size_bits == 0x2 && vector_flag == 0x0 && opc == 0x2)
3791 {
3792 /* LDRSW (immediate) */
3793 ld_flag = 0x1;
3794 }
3795 else
3796 {
3797 if (opc & 0x01)
3798 ld_flag = 0x01;
3799 else
3800 ld_flag = 0x0;
3801 }
3802 }
3803
3804 if (record_debug)
3805 {
3806 debug_printf ("Process record: load/store (unsigned immediate):"
3807 " size %x V %d opc %x\n", size_bits, vector_flag,
3808 opc);
3809 }
3810
3811 if (!ld_flag)
3812 {
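/* The 12-bit immediate is an unsigned offset scaled by the size of
the access, hence the shift by SIZE_BITS before adding it to the
base register. */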
3813 offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
3814 datasize = 8 << size_bits;
3815 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3816 &address);
3817 offset = offset << size_bits;
3818 address = address + offset;
3819
3820 record_buf_mem[0] = datasize >> 3;
3821 record_buf_mem[1] = address;
3822 aarch64_insn_r->mem_rec_count = 1;
3823 }
3824 else
3825 {
3826 if (vector_flag)
3827 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3828 else
3829 record_buf[0] = reg_rt;
3830 aarch64_insn_r->reg_rec_count = 1;
3831 }
3832 }
3833 /* Load/store register (register offset) instructions. */
3834 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3835 && insn_bits10_11 == 0x02 && insn_bit21)
3836 {
3837 if (record_debug)
3838 debug_printf ("Process record: load/store (register offset)\n");
3839 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3840 if (!(opc >> 1))
3841 if (opc & 0x01)
3842 ld_flag = 0x01;
3843 else
3844 ld_flag = 0x0;
3845 else
3846 if (size_bits != 0x03)
3847 ld_flag = 0x01;
3848 else
3849 return AARCH64_RECORD_UNKNOWN;
3850
3851 if (!ld_flag)
3852 {
3853 ULONGEST reg_rm_val;
3854
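/* Bit 12 (the S bit) says whether the offset register Rm is scaled
by the access size before being added to the base register. */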
3855 regcache_raw_read_unsigned (aarch64_insn_r->regcache,
3856 bits (aarch64_insn_r->aarch64_insn, 16, 20), &reg_rm_val);
3857 if (bit (aarch64_insn_r->aarch64_insn, 12))
3858 offset = reg_rm_val << size_bits;
3859 else
3860 offset = reg_rm_val;
3861 datasize = 8 << size_bits;
3862 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3863 &address);
3864 address = address + offset;
3865 record_buf_mem[0] = datasize >> 3;
3866 record_buf_mem[1] = address;
3867 aarch64_insn_r->mem_rec_count = 1;
3868 }
3869 else
3870 {
3871 if (vector_flag)
3872 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3873 else
3874 record_buf[0] = reg_rt;
3875 aarch64_insn_r->reg_rec_count = 1;
3876 }
3877 }
3878 /* Load/store register (immediate and unprivileged) instructions. */
3879 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3880 && !insn_bit21)
3881 {
3882 if (record_debug)
3883 {
3884 debug_printf ("Process record: load/store "
3885 "(immediate and unprivileged)\n");
3886 }
3887 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3888 if (!(opc >> 1))
3889 if (opc & 0x01)
3890 ld_flag = 0x01;
3891 else
3892 ld_flag = 0x0;
3893 else
3894 if (size_bits != 0x03)
3895 ld_flag = 0x01;
3896 else
3897 return AARCH64_RECORD_UNKNOWN;
3898
3899 if (!ld_flag)
3900 {
3901 uint16_t imm9_off;
3902 imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
3903 offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
3904 datasize = 8 << size_bits;
3905 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3906 &address);
3907 if (insn_bits10_11 != 0x01)
3908 {
3909 if (imm9_off & 0x0100)
3910 address = address - offset;
3911 else
3912 address = address + offset;
3913 }
3914 record_buf_mem[0] = datasize >> 3;
3915 record_buf_mem[1] = address;
3916 aarch64_insn_r->mem_rec_count = 1;
3917 }
3918 else
3919 {
3920 if (vector_flag)
3921 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3922 else
3923 record_buf[0] = reg_rt;
3924 aarch64_insn_r->reg_rec_count = 1;
3925 }
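/* Post-indexed (0x1) and pre-indexed (0x3) writeback forms also
update the base register, so record Rn for them. */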
3926 if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
3927 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3928 }
3929 /* Advanced SIMD load/store instructions. */
3930 else
3931 return aarch64_record_asimd_load_store (aarch64_insn_r);
3932
3933 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3934 record_buf_mem);
3935 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3936 record_buf);
3937 return AARCH64_RECORD_SUCCESS;
3938 }
3939
3940 /* Record handler for data processing SIMD and floating point instructions. */
3941
3942 static unsigned int
3943 aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
3944 {
3945 uint8_t insn_bit21, opcode, rmode, reg_rd;
3946 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
3947 uint8_t insn_bits11_14;
3948 uint32_t record_buf[2];
3949
3950 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3951 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3952 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3953 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3954 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
3955 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
3956 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
3957 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3958 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3959
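/* Each instruction handled here modifies exactly one destination: an
X register, a V register, or CPSR for the compare forms.
RECORD_BUF[0] is set accordingly below. */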
3960 if (record_debug)
3961 debug_printf ("Process record: data processing SIMD/FP: ");
3962
3963 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
3964 {
3965 /* Floating point - fixed point conversion instructions. */
3966 if (!insn_bit21)
3967 {
3968 if (record_debug)
3969 debug_printf ("FP - fixed point conversion");
3970
3971 if ((opcode >> 1) == 0x0 && rmode == 0x03)
3972 record_buf[0] = reg_rd;
3973 else
3974 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3975 }
3976 /* Floating point - conditional compare instructions. */
3977 else if (insn_bits10_11 == 0x01)
3978 {
3979 if (record_debug)
3980 debug_printf ("FP - conditional compare");
3981
3982 record_buf[0] = AARCH64_CPSR_REGNUM;
3983 }
3984 /* Floating point - data processing (2-source) and
3985 conditional select instructions. */
3986 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
3987 {
3988 if (record_debug)
3989 debug_printf ("FP - DP (2-source)");
3990
3991 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3992 }
3993 else if (insn_bits10_11 == 0x00)
3994 {
3995 /* Floating point - immediate instructions. */
3996 if ((insn_bits12_15 & 0x01) == 0x01
3997 || (insn_bits12_15 & 0x07) == 0x04)
3998 {
3999 if (record_debug)
4000 debug_printf ("FP - immediate");
4001 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4002 }
4003 /* Floating point - compare instructions. */
4004 else if ((insn_bits12_15 & 0x03) == 0x02)
4005 {
4006 if (record_debug)
4007 debug_printf ("FP - compare");
4008 record_buf[0] = AARCH64_CPSR_REGNUM;
4009 }
4010 /* Floating point - integer conversions instructions. */
4011 else if (insn_bits12_15 == 0x00)
4012 {
4013 /* Convert float to integer instruction. */
4014 if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
4015 {
4016 if (record_debug)
4017 debug_printf ("float to int conversion");
4018
4019 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4020 }
4021 /* Convert integer to float instruction. */
4022 else if ((opcode >> 1) == 0x01 && !rmode)
4023 {
4024 if (record_debug)
4025 debug_printf ("int to float conversion");
4026
4027 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4028 }
4029 /* Move float to integer instruction. */
4030 else if ((opcode >> 1) == 0x03)
4031 {
4032 if (record_debug)
4033 debug_printf ("move float to int");
4034
4035 if (!(opcode & 0x01))
4036 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4037 else
4038 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4039 }
4040 else
4041 return AARCH64_RECORD_UNKNOWN;
4042 }
4043 else
4044 return AARCH64_RECORD_UNKNOWN;
4045 }
4046 else
4047 return AARCH64_RECORD_UNKNOWN;
4048 }
4049 else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
4050 {
4051 if (record_debug)
4052 debug_printf ("SIMD copy");
4053
4054 /* Advanced SIMD copy instructions. */
4055 if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
4056 && !bit (aarch64_insn_r->aarch64_insn, 15)
4057 && bit (aarch64_insn_r->aarch64_insn, 10))
4058 {
4059 if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
4060 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4061 else
4062 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4063 }
4064 else
4065 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4066 }
4067 /* All remaining floating point or advanced SIMD instructions. */
4068 else
4069 {
4070 if (record_debug)
4071 debug_printf ("all remaining");
4072
4073 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4074 }
4075
4076 if (record_debug)
4077 debug_printf ("\n");
4078
4079 aarch64_insn_r->reg_rec_count++;
4080 gdb_assert (aarch64_insn_r->reg_rec_count == 1);
4081 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4082 record_buf);
4083 return AARCH64_RECORD_SUCCESS;
4084 }
4085
4086 /* Decode the instruction type and invoke the matching record handler. */
4087
4088 static unsigned int
4089 aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
4090 {
4091 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
4092
4093 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
4094 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
4095 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
4096 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
4097
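/* Bits 25 to 28 select the top-level encoding group of an A64
instruction, as in the "A64 instruction set encoding" table of the
ARMv8 ARM; dispatch to the record handler for that group. */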
4098 /* Data processing - immediate instructions. */
4099 if (!ins_bit26 && !ins_bit27 && ins_bit28)
4100 return aarch64_record_data_proc_imm (aarch64_insn_r);
4101
4102 /* Branch, exception generation and system instructions. */
4103 if (ins_bit26 && !ins_bit27 && ins_bit28)
4104 return aarch64_record_branch_except_sys (aarch64_insn_r);
4105
4106 /* Load and store instructions. */
4107 if (!ins_bit25 && ins_bit27)
4108 return aarch64_record_load_store (aarch64_insn_r);
4109
4110 /* Data processing - register instructions. */
4111 if (ins_bit25 && !ins_bit26 && ins_bit27)
4112 return aarch64_record_data_proc_reg (aarch64_insn_r);
4113
4114 /* Data processing - SIMD and floating point instructions. */
4115 if (ins_bit25 && ins_bit26 && ins_bit27)
4116 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
4117
4118 return AARCH64_RECORD_UNSUPPORTED;
4119 }
4120
4121 /* Clean up the register and memory allocations held by RECORD. */
4122
4123 static void
4124 deallocate_reg_mem (insn_decode_record *record)
4125 {
4126 xfree (record->aarch64_regs);
4127 xfree (record->aarch64_mems);
4128 }
4129
4130 #if GDB_SELF_TEST
4131 namespace selftests {
4132
4133 static void
4134 aarch64_process_record_test (void)
4135 {
4136 struct gdbarch_info info;
4137 uint32_t ret;
4138
4139 gdbarch_info_init (&info);
4140 info.bfd_arch_info = bfd_scan_arch ("aarch64");
4141
4142 struct gdbarch *gdbarch = gdbarch_find_by_info (info);
4143 SELF_CHECK (gdbarch != NULL);
4144
4145 insn_decode_record aarch64_record;
4146
4147 memset (&aarch64_record, 0, sizeof (insn_decode_record));
4148 aarch64_record.regcache = NULL;
4149 aarch64_record.this_addr = 0;
4150 aarch64_record.gdbarch = gdbarch;
4151
4152 /* 20 00 80 f9 prfm pldl1keep, [x1] */
4153 aarch64_record.aarch64_insn = 0xf9800020;
4154 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4155 SELF_CHECK (ret == AARCH64_RECORD_SUCCESS);
4156 SELF_CHECK (aarch64_record.reg_rec_count == 0);
4157 SELF_CHECK (aarch64_record.mem_rec_count == 0);
4158
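/* An extra sanity check, not part of the original test: decode a
plain load and verify that exactly the destination register is
recorded.
20 00 40 f9 ldr x0, [x1] */
aarch64_record.aarch64_insn = 0xf9400020;
ret = aarch64_record_decode_insn_handler (&aarch64_record);
SELF_CHECK (ret == AARCH64_RECORD_SUCCESS);
SELF_CHECK (aarch64_record.reg_rec_count == 1);
SELF_CHECK (aarch64_record.mem_rec_count == 0);
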
4159 deallocate_reg_mem (&aarch64_record);
4160 }
4161
4162 } // namespace selftests
4163 #endif /* GDB_SELF_TEST */
4164
4165 /* Parse the current instruction, and record the values of the registers
4166 and memory that will be changed by it to record_arch_list. Return -1
4167 if something goes wrong. */
4168
4169 int
4170 aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
4171 CORE_ADDR insn_addr)
4172 {
4173 uint32_t rec_no = 0;
4174 uint8_t insn_size = 4;
4175 uint32_t ret = 0;
4176 gdb_byte buf[insn_size];
4177 insn_decode_record aarch64_record;
4178
4179 memset (&buf[0], 0, insn_size);
4180 memset (&aarch64_record, 0, sizeof (insn_decode_record));
4181 target_read_memory (insn_addr, &buf[0], insn_size);
4182 aarch64_record.aarch64_insn
4183 = (uint32_t) extract_unsigned_integer (&buf[0],
4184 insn_size,
4185 gdbarch_byte_order (gdbarch));
4186 aarch64_record.regcache = regcache;
4187 aarch64_record.this_addr = insn_addr;
4188 aarch64_record.gdbarch = gdbarch;
4189
4190 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4191 if (ret == AARCH64_RECORD_UNSUPPORTED)
4192 {
4193 printf_unfiltered (_("Process record does not support instruction "
4194 "0x%x at address %s.\n"),
4195 aarch64_record.aarch64_insn,
4196 paddress (gdbarch, insn_addr));
4197 ret = -1;
4198 }
4199
4200 if (0 == ret)
4201 {
4202 /* Record registers. */
4203 record_full_arch_list_add_reg (aarch64_record.regcache,
4204 AARCH64_PC_REGNUM);
4205 /* Always record register CPSR. */
4206 record_full_arch_list_add_reg (aarch64_record.regcache,
4207 AARCH64_CPSR_REGNUM);
4208 if (aarch64_record.aarch64_regs)
4209 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
4210 if (record_full_arch_list_add_reg (aarch64_record.regcache,
4211 aarch64_record.aarch64_regs[rec_no]))
4212 ret = -1;
4213
4214 /* Record memories. */
4215 if (aarch64_record.aarch64_mems)
4216 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
4217 if (record_full_arch_list_add_mem
4218 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
4219 aarch64_record.aarch64_mems[rec_no].len))
4220 ret = -1;
4221
4222 if (record_full_arch_list_add_end ())
4223 ret = -1;
4224 }
4225
4226 deallocate_reg_mem (&aarch64_record);
4227 return ret;
4228 }