/* Common target dependent code for GDB on AArch64 systems.

   Copyright (C) 2009-2018 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"

#include "frame.h"
#include "inferior.h"
#include "gdbcmd.h"
#include "gdbcore.h"
#include "dis-asm.h"
#include "regcache.h"
#include "reggroups.h"
#include "value.h"
#include "arch-utils.h"
#include "osabi.h"
#include "frame-unwind.h"
#include "frame-base.h"
#include "trad-frame.h"
#include "objfiles.h"
#include "dwarf2-frame.h"
#include "gdbtypes.h"
#include "prologue-value.h"
#include "target-descriptions.h"
#include "user-regs.h"
#include "language.h"
#include "infcall.h"
#include "ax.h"
#include "ax-gdb.h"
#include "selftest.h"

#include "aarch64-tdep.h"

#include "elf-bfd.h"
#include "elf/aarch64.h"

#include "vec.h"

#include "record.h"
#include "record-full.h"
#include "arch/aarch64-insn.h"

#include "opcode/aarch64.h"
#include <algorithm>

#define submask(x) ((1L << ((x) + 1)) - 1)
#define bit(obj,st) (((obj) >> (st)) & 1)
#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
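
/* For example, bits (insn, 5, 9) extracts bits 5 to 9 (inclusive) of
   INSN, i.e. (INSN >> 5) & 0x1f, which is the Rn field of many A64
   instruction encodings; bit (insn, 31) tests the top bit alone.  */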

/* Pseudo register base numbers.  */
#define AARCH64_Q0_REGNUM 0
#define AARCH64_D0_REGNUM (AARCH64_Q0_REGNUM + AARCH64_D_REGISTER_COUNT)
#define AARCH64_S0_REGNUM (AARCH64_D0_REGNUM + 32)
#define AARCH64_H0_REGNUM (AARCH64_S0_REGNUM + 32)
#define AARCH64_B0_REGNUM (AARCH64_H0_REGNUM + 32)
#define AARCH64_SVE_V0_REGNUM (AARCH64_B0_REGNUM + 32)
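
/* With 32 registers in each bank, the pseudo-register numbers above run
   Q0-Q31 (0-31), D0-D31 (32-63), S0-S31 (64-95), H0-H31 (96-127) and
   B0-B31 (128-159), followed by the SVE V0-V31 views; at runtime these
   are all offset by gdbarch_num_regs so they follow the raw
   registers.  */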

/* A Homogeneous Floating-Point or Short-Vector Aggregate may have at most
   four members.  */
#define HA_MAX_NUM_FLDS 4

/* All possible aarch64 target descriptors.  */
struct target_desc *tdesc_aarch64_list[AARCH64_MAX_SVE_VQ + 1];

/* The standard register names, and all the valid aliases for them.  */
static const struct
{
  const char *const name;
  int regnum;
} aarch64_register_aliases[] =
{
  /* 64-bit register names.  */
  {"fp", AARCH64_FP_REGNUM},
  {"lr", AARCH64_LR_REGNUM},
  {"sp", AARCH64_SP_REGNUM},

  /* 32-bit register names.  */
  {"w0", AARCH64_X0_REGNUM + 0},
  {"w1", AARCH64_X0_REGNUM + 1},
  {"w2", AARCH64_X0_REGNUM + 2},
  {"w3", AARCH64_X0_REGNUM + 3},
  {"w4", AARCH64_X0_REGNUM + 4},
  {"w5", AARCH64_X0_REGNUM + 5},
  {"w6", AARCH64_X0_REGNUM + 6},
  {"w7", AARCH64_X0_REGNUM + 7},
  {"w8", AARCH64_X0_REGNUM + 8},
  {"w9", AARCH64_X0_REGNUM + 9},
  {"w10", AARCH64_X0_REGNUM + 10},
  {"w11", AARCH64_X0_REGNUM + 11},
  {"w12", AARCH64_X0_REGNUM + 12},
  {"w13", AARCH64_X0_REGNUM + 13},
  {"w14", AARCH64_X0_REGNUM + 14},
  {"w15", AARCH64_X0_REGNUM + 15},
  {"w16", AARCH64_X0_REGNUM + 16},
  {"w17", AARCH64_X0_REGNUM + 17},
  {"w18", AARCH64_X0_REGNUM + 18},
  {"w19", AARCH64_X0_REGNUM + 19},
  {"w20", AARCH64_X0_REGNUM + 20},
  {"w21", AARCH64_X0_REGNUM + 21},
  {"w22", AARCH64_X0_REGNUM + 22},
  {"w23", AARCH64_X0_REGNUM + 23},
  {"w24", AARCH64_X0_REGNUM + 24},
  {"w25", AARCH64_X0_REGNUM + 25},
  {"w26", AARCH64_X0_REGNUM + 26},
  {"w27", AARCH64_X0_REGNUM + 27},
  {"w28", AARCH64_X0_REGNUM + 28},
  {"w29", AARCH64_X0_REGNUM + 29},
  {"w30", AARCH64_X0_REGNUM + 30},

  /* specials */
  {"ip0", AARCH64_X0_REGNUM + 16},
  {"ip1", AARCH64_X0_REGNUM + 17}
};

/* The required core 'R' registers.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM!  */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  "pc", "cpsr"
};

/* The FP/SIMD 'V' registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM!  */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  "fpsr",
  "fpcr"
};

/* The SVE 'Z' and 'P' registers.  */
static const char *const aarch64_sve_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_SVE_Z0_REGNUM!  */
  "z0", "z1", "z2", "z3",
  "z4", "z5", "z6", "z7",
  "z8", "z9", "z10", "z11",
  "z12", "z13", "z14", "z15",
  "z16", "z17", "z18", "z19",
  "z20", "z21", "z22", "z23",
  "z24", "z25", "z26", "z27",
  "z28", "z29", "z30", "z31",
  "fpsr", "fpcr",
  "p0", "p1", "p2", "p3",
  "p4", "p5", "p6", "p7",
  "p8", "p9", "p10", "p11",
  "p12", "p13", "p14", "p15",
  "ffr", "vg"
};

/* AArch64 prologue cache structure.  */
struct aarch64_prologue_cache
{
  /* The program counter at the start of the function.  It is used to
     identify this frame as a prologue frame.  */
  CORE_ADDR func;

  /* The program counter at the time this frame was created; i.e. where
     this function was called from.  It is used to identify this frame as a
     stub frame.  */
  CORE_ADDR prev_pc;

  /* The stack pointer at the time this frame was created; i.e. the
     caller's stack pointer when this function was called.  It is used
     to identify this frame.  */
  CORE_ADDR prev_sp;

  /* Is the target available to read from?  */
  int available_p;

  /* The frame base for this frame is just prev_sp - frame size.
     FRAMESIZE is the distance from the frame pointer to the
     initial stack pointer.  */
  int framesize;

  /* The register used to hold the frame pointer for this frame.  */
  int framereg;

  /* Saved register offsets.  */
  struct trad_frame_saved_reg *saved_regs;
};

static void
show_aarch64_debug (struct ui_file *file, int from_tty,
                    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
}

namespace {

/* Abstract instruction reader.  */

class abstract_instruction_reader
{
public:
  /* Read in one instruction.  */
  virtual ULONGEST read (CORE_ADDR memaddr, int len,
                         enum bfd_endian byte_order) = 0;
};

/* Instruction reader from real target.  */

class instruction_reader : public abstract_instruction_reader
{
public:
  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    override
  {
    return read_code_unsigned_integer (memaddr, len, byte_order);
  }
};

} // namespace

/* Analyze a prologue, looking for a recognizable stack frame
   and frame pointer.  Scan until we encounter a store that could
   clobber the stack frame unexpectedly, or an unknown instruction.  */
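
/* For instance, a typical frame-setting prologue that this scan
   recognizes looks like:

     stp x29, x30, [sp, #-32]!   // Push FP/LR, allocating the frame.
     mov x29, sp                 // Establish the frame pointer.
     str x19, [sp, #16]          // Spill a callee-saved register.

   (Illustrative only; any mix of the instruction forms handled below
   is accepted until a branch or unrecognized instruction is seen.)  */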

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
                          CORE_ADDR start, CORE_ADDR limit,
                          struct aarch64_prologue_cache *cache,
                          abstract_instruction_reader& reader)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  int i;
  /* Track X registers and D registers in prologue.  */
  pv_t regs[AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT];

  for (i = 0; i < AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT; i++)
    regs[i] = pv_register (i, 0);
  pv_area stack (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));

  for (; start < limit; start += 4)
    {
      uint32_t insn;
      aarch64_inst inst;

      insn = reader.read (start, 4, byte_order_for_code);

      if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
        break;

      if (inst.opcode->iclass == addsub_imm
          && (inst.opcode->op == OP_ADD
              || strcmp ("sub", inst.opcode->name) == 0))
        {
          unsigned rd = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].reg.regno;

          gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM);

          if (inst.opcode->op == OP_ADD)
            {
              regs[rd] = pv_add_constant (regs[rn],
                                          inst.operands[2].imm.value);
            }
          else
            {
              regs[rd] = pv_add_constant (regs[rn],
                                          -inst.operands[2].imm.value);
            }
        }
      else if (inst.opcode->iclass == pcreladdr
               && inst.operands[1].type == AARCH64_OPND_ADDR_ADRP)
        {
          gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);

          regs[inst.operands[0].reg.regno] = pv_unknown ();
        }
      else if (inst.opcode->iclass == branch_imm)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == condbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == branch_reg)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == compbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->op == OP_MOVZ)
        {
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
          regs[inst.operands[0].reg.regno] = pv_unknown ();
        }
      else if (inst.opcode->iclass == log_shift
               && strcmp (inst.opcode->name, "orr") == 0)
        {
          unsigned rd = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].reg.regno;
          unsigned rm = inst.operands[2].reg.regno;

          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT);

          if (inst.operands[2].shifter.amount == 0
              && rn == AARCH64_SP_REGNUM)
            regs[rd] = regs[rm];
          else
            {
              if (aarch64_debug)
                {
                  debug_printf ("aarch64: prologue analysis gave up "
                                "addr=%s opcode=0x%x (orr x register)\n",
                                core_addr_to_string_nz (start), insn);
                }
              break;
            }
        }
      else if (inst.opcode->op == OP_STUR)
        {
          unsigned rt = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].addr.base_regno;
          int is64
            = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);

          gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
          gdb_assert (!inst.operands[1].addr.offset.is_reg);

          stack.store (pv_add_constant (regs[rn],
                                        inst.operands[1].addr.offset.imm),
                       is64 ? 8 : 4, regs[rt]);
        }
      else if ((inst.opcode->iclass == ldstpair_off
                || (inst.opcode->iclass == ldstpair_indexed
                    && inst.operands[2].addr.preind))
               && strcmp ("stp", inst.opcode->name) == 0)
        {
          /* STP with addressing mode Pre-indexed and Base register.  */
          unsigned rt1;
          unsigned rt2;
          unsigned rn = inst.operands[2].addr.base_regno;
          int32_t imm = inst.operands[2].addr.offset.imm;

          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
                      || inst.operands[0].type == AARCH64_OPND_Ft);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2
                      || inst.operands[1].type == AARCH64_OPND_Ft2);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7);
          gdb_assert (!inst.operands[2].addr.offset.is_reg);

          /* If recording this store would invalidate the store area
             (perhaps because rn is not known) then we should abandon
             further prologue analysis.  */
          if (stack.store_would_trash (pv_add_constant (regs[rn], imm)))
            break;

          if (stack.store_would_trash (pv_add_constant (regs[rn], imm + 8)))
            break;

          rt1 = inst.operands[0].reg.regno;
          rt2 = inst.operands[1].reg.regno;
          if (inst.operands[0].type == AARCH64_OPND_Ft)
            {
              /* Only the bottom 64 bits of each V register (the D
                 register) need to be preserved.  */
              gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
              rt1 += AARCH64_X_REGISTER_COUNT;
              rt2 += AARCH64_X_REGISTER_COUNT;
            }

          stack.store (pv_add_constant (regs[rn], imm), 8,
                       regs[rt1]);
          stack.store (pv_add_constant (regs[rn], imm + 8), 8,
                       regs[rt2]);

          if (inst.operands[2].addr.writeback)
            regs[rn] = pv_add_constant (regs[rn], imm);

        }
      else if ((inst.opcode->iclass == ldst_imm9 /* Signed immediate.  */
                || (inst.opcode->iclass == ldst_pos /* Unsigned immediate.  */
                    && (inst.opcode->op == OP_STR_POS
                        || inst.opcode->op == OP_STRF_POS)))
               && inst.operands[1].addr.base_regno == AARCH64_SP_REGNUM
               && strcmp ("str", inst.opcode->name) == 0)
        {
          /* STR (immediate) */
          unsigned int rt = inst.operands[0].reg.regno;
          int32_t imm = inst.operands[1].addr.offset.imm;
          unsigned int rn = inst.operands[1].addr.base_regno;
          bool is64
            = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
                      || inst.operands[0].type == AARCH64_OPND_Ft);

          if (inst.operands[0].type == AARCH64_OPND_Ft)
            {
              /* Only the bottom 64 bits of each V register (the D
                 register) need to be preserved.  */
              gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
              rt += AARCH64_X_REGISTER_COUNT;
            }

          stack.store (pv_add_constant (regs[rn], imm),
                       is64 ? 8 : 4, regs[rt]);
          if (inst.operands[1].addr.writeback)
            regs[rn] = pv_add_constant (regs[rn], imm);
        }
      else if (inst.opcode->iclass == testbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else
        {
          if (aarch64_debug)
            {
              debug_printf ("aarch64: prologue analysis gave up addr=%s"
                            " opcode=0x%x\n",
                            core_addr_to_string_nz (start), insn);
            }
          break;
        }
    }

  if (cache == NULL)
    return start;

  if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Frame pointer is fp.  Frame size is constant.  */
      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = -regs[AARCH64_FP_REGNUM].k;
    }
  else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Try the stack pointer.  */
      cache->framesize = -regs[AARCH64_SP_REGNUM].k;
      cache->framereg = AARCH64_SP_REGNUM;
    }
  else
    {
      /* We're just out of luck.  We don't know where the frame is.  */
      cache->framereg = -1;
      cache->framesize = 0;
    }

  for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
    {
      CORE_ADDR offset;

      if (stack.find_reg (gdbarch, i, &offset))
        cache->saved_regs[i].addr = offset;
    }

  for (i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
    {
      int regnum = gdbarch_num_regs (gdbarch);
      CORE_ADDR offset;

      if (stack.find_reg (gdbarch, i + AARCH64_X_REGISTER_COUNT,
                          &offset))
        cache->saved_regs[i + regnum + AARCH64_D0_REGNUM].addr = offset;
    }

  return start;
}

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
                          CORE_ADDR start, CORE_ADDR limit,
                          struct aarch64_prologue_cache *cache)
{
  instruction_reader reader;

  return aarch64_analyze_prologue (gdbarch, start, limit, cache,
                                   reader);
}

#if GDB_SELF_TEST

namespace selftests {

/* Instruction reader from manually cooked instruction sequences.  */

class instruction_reader_test : public abstract_instruction_reader
{
public:
  template<size_t SIZE>
  explicit instruction_reader_test (const uint32_t (&insns)[SIZE])
    : m_insns (insns), m_insns_size (SIZE)
  {}

  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    override
  {
    SELF_CHECK (len == 4);
    SELF_CHECK (memaddr % 4 == 0);
    SELF_CHECK (memaddr / 4 < m_insns_size);

    return m_insns[memaddr / 4];
  }

private:
  const uint32_t *m_insns;
  size_t m_insns_size;
};

static void
aarch64_analyze_prologue_test (void)
{
  struct gdbarch_info info;

  gdbarch_info_init (&info);
  info.bfd_arch_info = bfd_scan_arch ("aarch64");

  struct gdbarch *gdbarch = gdbarch_find_by_info (info);
  SELF_CHECK (gdbarch != NULL);

  /* Test the simple prologue in which the frame pointer is used.  */
  {
    struct aarch64_prologue_cache cache;
    cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);

    static const uint32_t insns[] = {
      0xa9af7bfd, /* stp x29, x30, [sp,#-272]! */
      0x910003fd, /* mov x29, sp */
      0x97ffffe6, /* bl 0x400580 */
    };
    instruction_reader_test reader (insns);

    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
    SELF_CHECK (end == 4 * 2);

    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 272);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
        if (i == AARCH64_FP_REGNUM)
          SELF_CHECK (cache.saved_regs[i].addr == -272);
        else if (i == AARCH64_LR_REGNUM)
          SELF_CHECK (cache.saved_regs[i].addr == -264);
        else
          SELF_CHECK (cache.saved_regs[i].addr == -1);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
        int regnum = gdbarch_num_regs (gdbarch);

        SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
                    == -1);
      }
  }

  /* Test a prologue in which STR is used and the frame pointer is not
     used.  */
  {
    struct aarch64_prologue_cache cache;
    cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);

    static const uint32_t insns[] = {
      0xf81d0ff3, /* str x19, [sp, #-48]! */
      0xb9002fe0, /* str w0, [sp, #44] */
      0xf90013e1, /* str x1, [sp, #32] */
      0xfd000fe0, /* str d0, [sp, #24] */
      0xaa0203f3, /* mov x19, x2 */
      0xf94013e0, /* ldr x0, [sp, #32] */
    };
    instruction_reader_test reader (insns);

    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    SELF_CHECK (end == 4 * 5);

    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 48);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
        if (i == 1)
          SELF_CHECK (cache.saved_regs[i].addr == -16);
        else if (i == 19)
          SELF_CHECK (cache.saved_regs[i].addr == -48);
        else
          SELF_CHECK (cache.saved_regs[i].addr == -1);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
        int regnum = gdbarch_num_regs (gdbarch);

        if (i == 0)
          SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
                      == -24);
        else
          SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
                      == -1);
      }
  }
}
} // namespace selftests
#endif /* GDB_SELF_TEST */

/* Implement the "skip_prologue" gdbarch method.  */

static CORE_ADDR
aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  CORE_ADDR func_addr, limit_pc;

  /* See if we can determine the end of the prologue via the symbol
     table.  If so, then return either PC, or the PC after the
     prologue, whichever is greater.  */
  if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
    {
      CORE_ADDR post_prologue_pc
        = skip_prologue_using_sal (gdbarch, func_addr);

      if (post_prologue_pc != 0)
        return std::max (pc, post_prologue_pc);
    }

  /* Can't determine prologue from the symbol table, need to examine
     instructions.  */

  /* Find an upper limit on the function prologue using the debug
     information.  If the debug information could not be used to
     provide that bound, then use an arbitrary large number as the
     upper bound.  */
  limit_pc = skip_prologue_using_sal (gdbarch, pc);
  if (limit_pc == 0)
    limit_pc = pc + 128;  /* Magic.  */

  /* Try disassembling prologue.  */
  return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
}

/* Scan the function prologue for THIS_FRAME and populate the prologue
   cache CACHE.  */

static void
aarch64_scan_prologue (struct frame_info *this_frame,
                       struct aarch64_prologue_cache *cache)
{
  CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
  CORE_ADDR prologue_start;
  CORE_ADDR prologue_end;
  CORE_ADDR prev_pc = get_frame_pc (this_frame);
  struct gdbarch *gdbarch = get_frame_arch (this_frame);

  cache->prev_pc = prev_pc;

  /* Assume we do not find a frame.  */
  cache->framereg = -1;
  cache->framesize = 0;

  if (find_pc_partial_function (block_addr, NULL, &prologue_start,
                                &prologue_end))
    {
      struct symtab_and_line sal = find_pc_line (prologue_start, 0);

      if (sal.line == 0)
        {
          /* No line info so use the current PC.  */
          prologue_end = prev_pc;
        }
      else if (sal.end < prologue_end)
        {
          /* The next line begins after the function end.  */
          prologue_end = sal.end;
        }

      prologue_end = std::min (prologue_end, prev_pc);
      aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
    }
  else
    {
      CORE_ADDR frame_loc;

      frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
      if (frame_loc == 0)
        return;

      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = 16;
      cache->saved_regs[29].addr = 0;
      cache->saved_regs[30].addr = 8;
    }
}

/* Fill in *CACHE with information about the prologue of *THIS_FRAME.  This
   function may throw an exception if the inferior's registers or memory is
   not available.  */

static void
aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
                               struct aarch64_prologue_cache *cache)
{
  CORE_ADDR unwound_fp;
  int reg;

  aarch64_scan_prologue (this_frame, cache);

  if (cache->framereg == -1)
    return;

  unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
  if (unwound_fp == 0)
    return;

  cache->prev_sp = unwound_fp + cache->framesize;

  /* Calculate actual addresses of saved registers using offsets
     determined by aarch64_analyze_prologue.  */
  for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
    if (trad_frame_addr_p (cache->saved_regs, reg))
      cache->saved_regs[reg].addr += cache->prev_sp;

  cache->func = get_frame_func (this_frame);

  cache->available_p = 1;
}

/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  TRY
    {
      aarch64_make_prologue_cache_1 (this_frame, cache);
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
        throw_exception (ex);
    }
  END_CATCH

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
                                           void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  /* Halt the backtrace at "_start".  */
  if (cache->prev_pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
    return UNWIND_OUTERMOST;

  /* We've hit a wall, stop.  */
  if (cache->prev_sp == 0)
    return UNWIND_OUTERMOST;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a normal frame is the current function's starting
   PC and the caller's SP when we were called.  */

static void
aarch64_prologue_this_id (struct frame_info *this_frame,
                          void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    *this_id = frame_id_build_unavailable_stack (cache->func);
  else
    *this_id = frame_id_build (cache->prev_sp, cache->func);
}

/* Implement the "prev_register" frame_unwind method.  */

static struct value *
aarch64_prologue_prev_register (struct frame_info *this_frame,
                                void **this_cache, int prev_regnum)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  /* If we are asked to unwind the PC, then we need to return the LR
     instead.  The prologue may save PC, but it will point into this
     frame's prologue, not the next frame's resume location.  */
  if (prev_regnum == AARCH64_PC_REGNUM)
    {
      CORE_ADDR lr;

      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      return frame_unwind_got_constant (this_frame, prev_regnum, lr);
    }

  /* SP is generally not saved to the stack, but this frame is
     identified by the next frame's stack pointer at the time of the
     call.  The value was already reconstructed into PREV_SP.  */
  /*
         +----------+  ^
         | saved lr |  |
      +->| saved fp |--+
      |  |          |
      |  |          |     <- Previous SP
      |  +----------+
      |  | saved lr |
      +--| saved fp |<- FP
         |          |
         |          |<- SP
         +----------+  */
  if (prev_regnum == AARCH64_SP_REGNUM)
    return frame_unwind_got_constant (this_frame, prev_regnum,
                                      cache->prev_sp);

  return trad_frame_get_prev_register (this_frame, cache->saved_regs,
                                       prev_regnum);
}

/* AArch64 prologue unwinder.  */
struct frame_unwind aarch64_prologue_unwind =
{
  NORMAL_FRAME,
  aarch64_prologue_frame_unwind_stop_reason,
  aarch64_prologue_this_id,
  aarch64_prologue_prev_register,
  NULL,
  default_frame_sniffer
};

/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  TRY
    {
      cache->prev_sp = get_frame_register_unsigned (this_frame,
                                                    AARCH64_SP_REGNUM);
      cache->prev_pc = get_frame_pc (this_frame);
      cache->available_p = 1;
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
        throw_exception (ex);
    }
  END_CATCH

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame,
                                       void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a stub frame is the current SP and LR.  */

static void
aarch64_stub_this_id (struct frame_info *this_frame,
                      void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (cache->available_p)
    *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
  else
    *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
}

/* Implement the "sniffer" frame_unwind method.  */

static int
aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
                             struct frame_info *this_frame,
                             void **this_prologue_cache)
{
  CORE_ADDR addr_in_block;
  gdb_byte dummy[4];

  addr_in_block = get_frame_address_in_block (this_frame);
  if (in_plt_section (addr_in_block)
      /* We also use the stub unwinder if the target memory is unreadable
         to avoid having the prologue unwinder try to read it.  */
      || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
    return 1;

  return 0;
}

/* AArch64 stub unwinder.  */
struct frame_unwind aarch64_stub_unwind =
{
  NORMAL_FRAME,
  aarch64_stub_frame_unwind_stop_reason,
  aarch64_stub_this_id,
  aarch64_prologue_prev_register,
  NULL,
  aarch64_stub_unwind_sniffer
};

/* Return the frame base address of *THIS_FRAME.  */

static CORE_ADDR
aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  return cache->prev_sp - cache->framesize;
}

/* AArch64 default frame base information.  */
struct frame_base aarch64_normal_base =
{
  &aarch64_prologue_unwind,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base
};

/* Assuming THIS_FRAME is a dummy, return the frame ID of that
   dummy frame.  The frame ID's base needs to match the TOS value
   saved by save_dummy_frame_tos () and returned from
   aarch64_push_dummy_call, and the PC needs to match the dummy
   frame's breakpoint.  */

static struct frame_id
aarch64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  return frame_id_build (get_frame_register_unsigned (this_frame,
                                                      AARCH64_SP_REGNUM),
                         get_frame_pc (this_frame));
}

/* Implement the "unwind_pc" gdbarch method.  */

static CORE_ADDR
aarch64_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  CORE_ADDR pc
    = frame_unwind_register_unsigned (this_frame, AARCH64_PC_REGNUM);

  return pc;
}

/* Implement the "unwind_sp" gdbarch method.  */

static CORE_ADDR
aarch64_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  return frame_unwind_register_unsigned (this_frame, AARCH64_SP_REGNUM);
}

/* Return the value of the REGNUM register in the previous frame of
   *THIS_FRAME.  */

static struct value *
aarch64_dwarf2_prev_register (struct frame_info *this_frame,
                              void **this_cache, int regnum)
{
  CORE_ADDR lr;

  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      return frame_unwind_got_constant (this_frame, regnum, lr);

    default:
      internal_error (__FILE__, __LINE__,
                      _("Unexpected register %d"), regnum);
    }
}

/* Implement the "init_reg" dwarf2_frame_ops method.  */

static void
aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
                               struct dwarf2_frame_state_reg *reg,
                               struct frame_info *this_frame)
{
  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      reg->how = DWARF2_FRAME_REG_FN;
      reg->loc.fn = aarch64_dwarf2_prev_register;
      break;
    case AARCH64_SP_REGNUM:
      reg->how = DWARF2_FRAME_REG_CFA;
      break;
    }
}

/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

typedef struct
{
  /* Value to pass on stack.  It can be NULL if this item is for stack
     padding.  */
  const gdb_byte *data;

  /* Size in bytes of value to pass on stack.  */
  int len;
} stack_item_t;

DEF_VEC_O (stack_item_t);

/* Return the alignment (in bytes) of the given type.  */

static int
aarch64_type_align (struct type *t)
{
  int n;
  int align;
  int falign;

  t = check_typedef (t);
  switch (TYPE_CODE (t))
    {
    default:
      /* Should never happen.  */
      internal_error (__FILE__, __LINE__, _("unknown type alignment"));
      return 4;

    case TYPE_CODE_PTR:
    case TYPE_CODE_ENUM:
    case TYPE_CODE_INT:
    case TYPE_CODE_FLT:
    case TYPE_CODE_SET:
    case TYPE_CODE_RANGE:
    case TYPE_CODE_BITSTRING:
    case TYPE_CODE_REF:
    case TYPE_CODE_RVALUE_REF:
    case TYPE_CODE_CHAR:
    case TYPE_CODE_BOOL:
      return TYPE_LENGTH (t);

    case TYPE_CODE_ARRAY:
      if (TYPE_VECTOR (t))
        {
          /* Use the natural alignment for vector types (the same as for
             scalar types), but the maximum alignment is 128 bits.  */
          if (TYPE_LENGTH (t) > 16)
            return 16;
          else
            return TYPE_LENGTH (t);
        }
      else
        return aarch64_type_align (TYPE_TARGET_TYPE (t));
    case TYPE_CODE_COMPLEX:
      return aarch64_type_align (TYPE_TARGET_TYPE (t));

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      align = 1;
      for (n = 0; n < TYPE_NFIELDS (t); n++)
        {
          falign = aarch64_type_align (TYPE_FIELD_TYPE (t, n));
          if (falign > align)
            align = falign;
        }
      return align;
    }
}

/* Return 1 if *TY is a homogeneous floating-point aggregate or
   homogeneous short-vector aggregate as defined in the AAPCS64 ABI
   document; otherwise return 0.  */
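
/* For example, struct { float x; float y; } is an HFA of two floats,
   and struct { double r; double i; } is an HFA of two doubles, while
   struct { float f; double d; } is neither, because its members do not
   share a single floating-point type.  */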

static int
is_hfa_or_hva (struct type *ty)
{
  switch (TYPE_CODE (ty))
    {
    case TYPE_CODE_ARRAY:
      {
        struct type *target_ty = TYPE_TARGET_TYPE (ty);

        if (TYPE_VECTOR (ty))
          return 0;

        if (TYPE_LENGTH (ty) <= 4 /* HFA or HVA has at most 4 members.  */
            && (TYPE_CODE (target_ty) == TYPE_CODE_FLT /* HFA */
                || (TYPE_CODE (target_ty) == TYPE_CODE_ARRAY /* HVA */
                    && TYPE_VECTOR (target_ty))))
          return 1;
        break;
      }

    case TYPE_CODE_UNION:
    case TYPE_CODE_STRUCT:
      {
        /* HFA or HVA has at most four members.  */
        if (TYPE_NFIELDS (ty) > 0 && TYPE_NFIELDS (ty) <= 4)
          {
            struct type *member0_type;

            member0_type = check_typedef (TYPE_FIELD_TYPE (ty, 0));
            if (TYPE_CODE (member0_type) == TYPE_CODE_FLT
                || (TYPE_CODE (member0_type) == TYPE_CODE_ARRAY
                    && TYPE_VECTOR (member0_type)))
              {
                int i;

                for (i = 0; i < TYPE_NFIELDS (ty); i++)
                  {
                    struct type *member1_type;

                    member1_type = check_typedef (TYPE_FIELD_TYPE (ty, i));
                    if (TYPE_CODE (member0_type) != TYPE_CODE (member1_type)
                        || (TYPE_LENGTH (member0_type)
                            != TYPE_LENGTH (member1_type)))
                      return 0;
                  }
                return 1;
              }
          }
        return 0;
      }

    default:
      break;
    }

  return 0;
}

/* Worker function for aapcs_is_vfp_call_or_return_candidate.

   Return the number of registers required, or -1 on failure.

   When encountering a base element, if FUNDAMENTAL_TYPE is not set then set it
   to the element, else fail if the type of this element does not match the
   existing value.  */

static int
aapcs_is_vfp_call_or_return_candidate_1 (struct type *type,
                                         struct type **fundamental_type)
{
  if (type == nullptr)
    return -1;

  switch (TYPE_CODE (type))
    {
    case TYPE_CODE_FLT:
      if (TYPE_LENGTH (type) > 16)
        return -1;

      if (*fundamental_type == nullptr)
        *fundamental_type = type;
      else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
               || TYPE_CODE (type) != TYPE_CODE (*fundamental_type))
        return -1;

      return 1;

    case TYPE_CODE_COMPLEX:
      {
        struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
        if (TYPE_LENGTH (target_type) > 16)
          return -1;

        if (*fundamental_type == nullptr)
          *fundamental_type = target_type;
        else if (TYPE_LENGTH (target_type) != TYPE_LENGTH (*fundamental_type)
                 || TYPE_CODE (target_type) != TYPE_CODE (*fundamental_type))
          return -1;

        return 2;
      }

    case TYPE_CODE_ARRAY:
      {
        if (TYPE_VECTOR (type))
          {
            if (TYPE_LENGTH (type) != 8 && TYPE_LENGTH (type) != 16)
              return -1;

            if (*fundamental_type == nullptr)
              *fundamental_type = type;
            else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
                     || TYPE_CODE (type) != TYPE_CODE (*fundamental_type))
              return -1;

            return 1;
          }
        else
          {
            struct type *target_type = TYPE_TARGET_TYPE (type);
            int count = aapcs_is_vfp_call_or_return_candidate_1
                          (target_type, fundamental_type);

            if (count == -1)
              return count;

            count *= (TYPE_LENGTH (type) / TYPE_LENGTH (target_type));
            return count;
          }
      }

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      {
        int count = 0;

        for (int i = 0; i < TYPE_NFIELDS (type); i++)
          {
            struct type *member = check_typedef (TYPE_FIELD_TYPE (type, i));

            int sub_count = aapcs_is_vfp_call_or_return_candidate_1
                              (member, fundamental_type);
            if (sub_count == -1)
              return -1;
            count += sub_count;
          }
        return count;
      }

    default:
      break;
    }

  return -1;
}

/* Return true if an argument, whose type is described by TYPE, can be passed or
   returned in simd/fp registers, providing enough parameter passing registers
   are available.  This is as described in the AAPCS64.

   Upon successful return, *COUNT returns the number of needed registers,
   *FUNDAMENTAL_TYPE contains the type of those registers.

   Candidate as per the AAPCS64 5.4.2.C is either a:
   - float.
   - short-vector.
   - HFA (Homogeneous Floating-point Aggregate, 4.3.5.1).  A Composite type
     where all the members are floats and has at most 4 members.
   - HVA (Homogeneous Short-vector Aggregate, 4.3.5.2).  A Composite type
     where all the members are short vectors and has at most 4 members.
   - Complex (7.1.1)

   Note that HFAs and HVAs can include nested structures and arrays.  */
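
/* For example, given struct hfa { double a; double b; double c; }, this
   returns true with *COUNT set to 3 and *FUNDAMENTAL_TYPE set to the
   double type, so such a value occupies three consecutive V registers
   when enough of them are still free.  */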

static bool
aapcs_is_vfp_call_or_return_candidate (struct type *type, int *count,
                                       struct type **fundamental_type)
{
  if (type == nullptr)
    return false;

  *fundamental_type = nullptr;

  int ag_count = aapcs_is_vfp_call_or_return_candidate_1 (type,
                                                          fundamental_type);

  if (ag_count > 0 && ag_count <= HA_MAX_NUM_FLDS)
    {
      *count = ag_count;
      return true;
    }
  else
    return false;
}

/* AArch64 function call information structure.  */
struct aarch64_call_info
{
  /* The current argument number.  */
  unsigned argnum;

  /* The next general purpose register number, equivalent to NGRN as
     described in the AArch64 Procedure Call Standard.  */
  unsigned ngrn;

  /* The next SIMD and floating point register number, equivalent to
     NSRN as described in the AArch64 Procedure Call Standard.  */
  unsigned nsrn;

  /* The next stacked argument address, equivalent to NSAA as
     described in the AArch64 Procedure Call Standard.  */
  unsigned nsaa;

  /* Stack item vector.  */
  VEC(stack_item_t) *si;
};

/* Pass a value in a sequence of consecutive X registers.  The caller
   is responsible for ensuring sufficient registers are available.  */

static void
pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
           struct aarch64_call_info *info, struct type *type,
           struct value *arg)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int len = TYPE_LENGTH (type);
  enum type_code typecode = TYPE_CODE (type);
  int regnum = AARCH64_X0_REGNUM + info->ngrn;
  const bfd_byte *buf = value_contents (arg);

  info->argnum++;

  while (len > 0)
    {
      int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
      CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
                                                   byte_order);

      /* Adjust sub-word struct/union args when big-endian.  */
      if (byte_order == BFD_ENDIAN_BIG
          && partial_len < X_REGISTER_SIZE
          && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
        regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);
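
      /* For example, a 3-byte struct extracted into the low-order bytes
         of REGVAL above must occupy the most significant bytes of the
         64-bit register on a big-endian target, hence the shift left by
         (8 - 3) * 8 = 40 bits in that case.  */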

      if (aarch64_debug)
        {
          debug_printf ("arg %d in %s = 0x%s\n", info->argnum,
                        gdbarch_register_name (gdbarch, regnum),
                        phex (regval, X_REGISTER_SIZE));
        }
      regcache_cooked_write_unsigned (regcache, regnum, regval);
      len -= partial_len;
      buf += partial_len;
      regnum++;
    }
}

/* Attempt to marshall a value in a V register.  Return 1 if
   successful, or 0 if insufficient registers are available.  This
   function, unlike the equivalent pass_in_x () function, does not
   handle arguments spread across multiple registers.  */

static int
pass_in_v (struct gdbarch *gdbarch,
           struct regcache *regcache,
           struct aarch64_call_info *info,
           int len, const bfd_byte *buf)
{
  if (info->nsrn < 8)
    {
      int regnum = AARCH64_V0_REGNUM + info->nsrn;
      gdb_byte reg[V_REGISTER_SIZE];

      info->argnum++;
      info->nsrn++;

      memset (reg, 0, sizeof (reg));
      /* PCS C.1, the argument is allocated to the least significant
         bits of the V register.  */
      memcpy (reg, buf, len);
      regcache->cooked_write (regnum, reg);

      if (aarch64_debug)
        {
          debug_printf ("arg %d in %s\n", info->argnum,
                        gdbarch_register_name (gdbarch, regnum));
        }
      return 1;
    }
  info->nsrn = 8;
  return 0;
}

/* Marshall an argument onto the stack.  */

static void
pass_on_stack (struct aarch64_call_info *info, struct type *type,
               struct value *arg)
{
  const bfd_byte *buf = value_contents (arg);
  int len = TYPE_LENGTH (type);
  int align;
  stack_item_t item;

  info->argnum++;

  align = aarch64_type_align (type);

  /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
     natural alignment of the argument's type.  */
  align = align_up (align, 8);

  /* The AArch64 PCS requires at most doubleword alignment.  */
  if (align > 16)
    align = 16;

  if (aarch64_debug)
    {
      debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
                    info->nsaa);
    }

  item.len = len;
  item.data = buf;
  VEC_safe_push (stack_item_t, info->si, &item);

  info->nsaa += len;
  if (info->nsaa & (align - 1))
    {
      /* Push stack alignment padding.  */
      int pad = align - (info->nsaa & (align - 1));

      item.len = pad;
      item.data = NULL;

      VEC_safe_push (stack_item_t, info->si, &item);
      info->nsaa += pad;
    }
}
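
/* For example, pushing a 12-byte struct with 4-byte natural alignment
   when the NSAA is 0 records a 12-byte data item and then a 4-byte
   padding item, leaving the NSAA at 16 and so 8-byte aligned for the
   next stacked argument.  */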

/* Marshall an argument into a sequence of one or more consecutive X
   registers or, if insufficient X registers are available then onto
   the stack.  */

static void
pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
                    struct aarch64_call_info *info, struct type *type,
                    struct value *arg)
{
  int len = TYPE_LENGTH (type);
  int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;

  /* PCS C.13 - Pass in registers if we have enough spare.  */
  if (info->ngrn + nregs <= 8)
    {
      pass_in_x (gdbarch, regcache, info, type, arg);
      info->ngrn += nregs;
    }
  else
    {
      info->ngrn = 8;
      pass_on_stack (info, type, arg);
    }
}

/* Pass a value, which is of type ARG_TYPE, in a V register.  Assumes the
   value is an aapcs_is_vfp_call_or_return_candidate and there are enough
   spare V registers.  A return value of false is an error state, as the
   value will have been partially passed to the stack.  */
static bool
pass_in_v_vfp_candidate (struct gdbarch *gdbarch, struct regcache *regcache,
                         struct aarch64_call_info *info, struct type *arg_type,
                         struct value *arg)
{
  switch (TYPE_CODE (arg_type))
    {
    case TYPE_CODE_FLT:
      return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type),
                        value_contents (arg));
      break;

    case TYPE_CODE_COMPLEX:
      {
        const bfd_byte *buf = value_contents (arg);
        struct type *target_type = check_typedef (TYPE_TARGET_TYPE (arg_type));

        if (!pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type),
                        buf))
          return false;

        return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type),
                          buf + TYPE_LENGTH (target_type));
      }

    case TYPE_CODE_ARRAY:
      if (TYPE_VECTOR (arg_type))
        return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type),
                          value_contents (arg));
      /* fall through.  */

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      for (int i = 0; i < TYPE_NFIELDS (arg_type); i++)
        {
          struct value *field = value_primitive_field (arg, 0, i, arg_type);
          struct type *field_type = check_typedef (value_type (field));

          if (!pass_in_v_vfp_candidate (gdbarch, regcache, info, field_type,
                                        field))
            return false;
        }
      return true;

    default:
      return false;
    }
}

/* Implement the "push_dummy_call" gdbarch method.  */

static CORE_ADDR
aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
                         struct regcache *regcache, CORE_ADDR bp_addr,
                         int nargs,
                         struct value **args, CORE_ADDR sp, int struct_return,
                         CORE_ADDR struct_addr)
{
  int argnum;
  struct aarch64_call_info info;
  struct type *func_type;
  struct type *return_type;
  int lang_struct_return;

  memset (&info, 0, sizeof (info));

  /* We need to know what the type of the called function is in order
     to determine the number of named/anonymous arguments for the
     actual argument placement, and the return type in order to handle
     return value correctly.

     The generic code above us views the decision of return in memory
     or return in registers as a two stage process.  The language
     handler is consulted first and may decide to return in memory (e.g.
     a class with a copy constructor returned by value); this will cause
     the generic code to allocate space AND insert an initial leading
     argument.

     If the language code does not decide to pass in memory then the
     target code is consulted.

     If the language code decides to pass in memory we want to move
     the pointer inserted as the initial argument from the argument
     list and into X8, the conventional AArch64 struct return pointer
     register.

     This is slightly awkward; ideally the flag "lang_struct_return"
     would be passed to the target's implementation of push_dummy_call.
     Rather than change the target interface we call the language code
     directly ourselves.  */

  func_type = check_typedef (value_type (function));

  /* Dereference function pointer types.  */
  if (TYPE_CODE (func_type) == TYPE_CODE_PTR)
    func_type = TYPE_TARGET_TYPE (func_type);

  gdb_assert (TYPE_CODE (func_type) == TYPE_CODE_FUNC
              || TYPE_CODE (func_type) == TYPE_CODE_METHOD);

  /* If language_pass_by_reference () returned true we will have been
     given an additional initial argument, a hidden pointer to the
     return slot in memory.  */
  return_type = TYPE_TARGET_TYPE (func_type);
  lang_struct_return = language_pass_by_reference (return_type);

  /* Set the return address.  For the AArch64, the return breakpoint
     is always at BP_ADDR.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);

  /* If we were given an initial argument for the return slot because
     lang_struct_return was true, lose it.  */
  if (lang_struct_return)
    {
      args++;
      nargs--;
    }

  /* The struct_return pointer occupies X8.  */
  if (struct_return || lang_struct_return)
    {
      if (aarch64_debug)
        {
          debug_printf ("struct return in %s = 0x%s\n",
                        gdbarch_register_name (gdbarch,
                                               AARCH64_STRUCT_RETURN_REGNUM),
                        paddress (gdbarch, struct_addr));
        }
      regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
                                      struct_addr);
    }

  for (argnum = 0; argnum < nargs; argnum++)
    {
      struct value *arg = args[argnum];
      struct type *arg_type, *fundamental_type;
      int len, elements;

      arg_type = check_typedef (value_type (arg));
      len = TYPE_LENGTH (arg_type);

      /* If ARG can be passed in V registers as per the AAPCS64, then do
         so if there are enough spare registers.  */
      if (aapcs_is_vfp_call_or_return_candidate (arg_type, &elements,
                                                 &fundamental_type))
        {
          if (info.nsrn + elements <= 8)
            {
              /* We know that we have sufficient registers available,
                 therefore this will never need to fall back to the
                 stack.  */
              if (!pass_in_v_vfp_candidate (gdbarch, regcache, &info, arg_type,
                                            arg))
                gdb_assert_not_reached ("Failed to push args");
            }
          else
            {
              info.nsrn = 8;
              pass_on_stack (&info, arg_type, arg);
            }
          continue;
        }

      switch (TYPE_CODE (arg_type))
        {
        case TYPE_CODE_INT:
        case TYPE_CODE_BOOL:
        case TYPE_CODE_CHAR:
        case TYPE_CODE_RANGE:
        case TYPE_CODE_ENUM:
          if (len < 4)
            {
              /* Promote to 32 bit integer.  */
              if (TYPE_UNSIGNED (arg_type))
                arg_type = builtin_type (gdbarch)->builtin_uint32;
              else
                arg_type = builtin_type (gdbarch)->builtin_int32;
              arg = value_cast (arg_type, arg);
            }
          pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;

        case TYPE_CODE_STRUCT:
        case TYPE_CODE_ARRAY:
        case TYPE_CODE_UNION:
          if (len > 16)
            {
              /* PCS B.7 Aggregates larger than 16 bytes are passed by
                 invisible reference.  */

              /* Allocate aligned storage.  */
              sp = align_down (sp - len, 16);

              /* Write the real data into the stack.  */
              write_memory (sp, value_contents (arg), len);

              /* Construct the indirection.  */
              arg_type = lookup_pointer_type (arg_type);
              arg = value_from_pointer (arg_type, sp);
              pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
            }
          else
            /* PCS C.15 / C.18 multiple values pass.  */
            pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;

        default:
          pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;
        }
    }

  /* Make sure stack retains 16 byte alignment.  */
  if (info.nsaa & 15)
    sp -= 16 - (info.nsaa & 15);

  while (!VEC_empty (stack_item_t, info.si))
    {
      stack_item_t *si = VEC_last (stack_item_t, info.si);

      sp -= si->len;
      if (si->data != NULL)
        write_memory (sp, si->data, si->len);
      VEC_pop (stack_item_t, info.si);
    }

  VEC_free (stack_item_t, info.si);

  /* Finally, update the SP register.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);

  return sp;
}

/* Implement the "frame_align" gdbarch method.  */

static CORE_ADDR
aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
{
  /* Align the stack to sixteen bytes.  */
  return sp & ~(CORE_ADDR) 15;
}

/* Return the type for an AdvSIMD Q register.  */

static struct type *
aarch64_vnq_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnq_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint128;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int128;
      append_composite_type_field (t, "s", elem);

      tdep->vnq_type = t;
    }

  return tdep->vnq_type;
}

/* Return the type for an AdvSIMD D register.  */

static struct type *
aarch64_vnd_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnd_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_double;
      append_composite_type_field (t, "f", elem);

      elem = builtin_type (gdbarch)->builtin_uint64;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int64;
      append_composite_type_field (t, "s", elem);

      tdep->vnd_type = t;
    }

  return tdep->vnd_type;
}

/* Return the type for an AdvSIMD S register.  */

static struct type *
aarch64_vns_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vns_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_float;
      append_composite_type_field (t, "f", elem);

      elem = builtin_type (gdbarch)->builtin_uint32;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int32;
      append_composite_type_field (t, "s", elem);

      tdep->vns_type = t;
    }

  return tdep->vns_type;
}

/* Return the type for an AdvSIMD H register.  */

static struct type *
aarch64_vnh_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnh_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint16;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int16;
      append_composite_type_field (t, "s", elem);

      tdep->vnh_type = t;
    }

  return tdep->vnh_type;
}

/* Return the type for an AdvSIMD B register.  */

static struct type *
aarch64_vnb_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnb_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint8;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int8;
      append_composite_type_field (t, "s", elem);

      tdep->vnb_type = t;
    }

  return tdep->vnb_type;
}

/* Return the type for an AdvSIMD V register.  */
1913
1914 static struct type *
1915 aarch64_vnv_type (struct gdbarch *gdbarch)
1916 {
1917 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1918
1919 if (tdep->vnv_type == NULL)
1920 {
1921 struct type *t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnv",
1922 TYPE_CODE_UNION);
1923
1924 append_composite_type_field (t, "d", aarch64_vnd_type (gdbarch));
1925 append_composite_type_field (t, "s", aarch64_vns_type (gdbarch));
1926 append_composite_type_field (t, "h", aarch64_vnh_type (gdbarch));
1927 append_composite_type_field (t, "b", aarch64_vnb_type (gdbarch));
1928 append_composite_type_field (t, "q", aarch64_vnq_type (gdbarch));
1929
1930 tdep->vnv_type = t;
1931 }
1932
1933 return tdep->vnv_type;
1934 }
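
/* As an illustration, on an SVE target the nested union above lets a
   user pick both a lane size and an interpretation when inspecting a
   V pseudo register, e.g.:

     (gdb) print $v0.d.f    (D-sized floating point view)
     (gdb) print $v0.b.u    (B-sized unsigned view)

   The field names follow the component types built above.  */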
1935
1936 /* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
1937
1938 static int
1939 aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
1940 {
1941 if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
1942 return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;
1943
1944 if (reg == AARCH64_DWARF_SP)
1945 return AARCH64_SP_REGNUM;
1946
1947 if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
1948 return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;
1949
1950 if (reg == AARCH64_DWARF_SVE_VG)
1951 return AARCH64_SVE_VG_REGNUM;
1952
1953 if (reg == AARCH64_DWARF_SVE_FFR)
1954 return AARCH64_SVE_FFR_REGNUM;
1955
1956 if (reg >= AARCH64_DWARF_SVE_P0 && reg <= AARCH64_DWARF_SVE_P0 + 15)
1957 return AARCH64_SVE_P0_REGNUM + reg - AARCH64_DWARF_SVE_P0;
1958
1959 if (reg >= AARCH64_DWARF_SVE_Z0 && reg <= AARCH64_DWARF_SVE_Z0 + 31)
1960 return AARCH64_SVE_Z0_REGNUM + reg - AARCH64_DWARF_SVE_Z0;
1961
1962 return -1;
1963 }
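
/* For example, per the AArch64 DWARF register numbering (X0 is 0 and
   V0 is 64), a debug-info reference to DWARF register 65 resolves to
   GDB's V1 register through the translation above.  */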
1964
1965 /* Implement the "print_insn" gdbarch method. */
1966
1967 static int
1968 aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
1969 {
1970 info->symbols = NULL;
1971 return default_print_insn (memaddr, info);
1972 }
1973
1974 /* AArch64 BRK software debug mode instruction.
1975 Note that AArch64 code is always little-endian.
1976 1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000. */
1977 constexpr gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
1978
1979 typedef BP_MANIPULATION (aarch64_default_breakpoint) aarch64_breakpoint;
1980
1981 /* Extract from an array REGS containing the (raw) register state a
1982 function return value of type TYPE, and copy that, in virtual
1983 format, into VALBUF. */
1984
1985 static void
1986 aarch64_extract_return_value (struct type *type, struct regcache *regs,
1987 gdb_byte *valbuf)
1988 {
1989 struct gdbarch *gdbarch = regs->arch ();
1990 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1991
1992 if (TYPE_CODE (type) == TYPE_CODE_FLT)
1993 {
1994 bfd_byte buf[V_REGISTER_SIZE];
1995 int len = TYPE_LENGTH (type);
1996
1997 regs->cooked_read (AARCH64_V0_REGNUM, buf);
1998 memcpy (valbuf, buf, len);
1999 }
2000 else if (TYPE_CODE (type) == TYPE_CODE_INT
2001 || TYPE_CODE (type) == TYPE_CODE_CHAR
2002 || TYPE_CODE (type) == TYPE_CODE_BOOL
2003 || TYPE_CODE (type) == TYPE_CODE_PTR
2004 || TYPE_IS_REFERENCE (type)
2005 || TYPE_CODE (type) == TYPE_CODE_ENUM)
2006 {
2007 /* If the type is a plain integer, then the access is
2008 straightforward.  Otherwise we have to play around a bit
2009 more.  */
2010 int len = TYPE_LENGTH (type);
2011 int regno = AARCH64_X0_REGNUM;
2012 ULONGEST tmp;
2013
2014 while (len > 0)
2015 {
2016 /* By using store_unsigned_integer we avoid having to do
2017 anything special for small big-endian values. */
2018 regcache_cooked_read_unsigned (regs, regno++, &tmp);
2019 store_unsigned_integer (valbuf,
2020 (len > X_REGISTER_SIZE
2021 ? X_REGISTER_SIZE : len), byte_order, tmp);
2022 len -= X_REGISTER_SIZE;
2023 valbuf += X_REGISTER_SIZE;
2024 }
2025 }
2026 else if (TYPE_CODE (type) == TYPE_CODE_COMPLEX)
2027 {
2028 int regno = AARCH64_V0_REGNUM;
2029 bfd_byte buf[V_REGISTER_SIZE];
2030 struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
2031 int len = TYPE_LENGTH (target_type);
2032
2033 regs->cooked_read (regno, buf);
2034 memcpy (valbuf, buf, len);
2035 valbuf += len;
2036 regs->cooked_read (regno + 1, buf);
2037 memcpy (valbuf, buf, len);
2038 valbuf += len;
2039 }
2040 else if (is_hfa_or_hva (type))
2041 {
2042 int elements = TYPE_NFIELDS (type);
2043 struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
2044 int len = TYPE_LENGTH (member_type);
2045 int i;
2046
2047 for (i = 0; i < elements; i++)
2048 {
2049 int regno = AARCH64_V0_REGNUM + i;
2050 bfd_byte buf[V_REGISTER_SIZE];
2051
2052 if (aarch64_debug)
2053 {
2054 debug_printf ("read HFA or HVA return value element %d from %s\n",
2055 i + 1,
2056 gdbarch_register_name (gdbarch, regno));
2057 }
2058 regs->cooked_read (regno, buf);
2059
2060 memcpy (valbuf, buf, len);
2061 valbuf += len;
2062 }
2063 }
2064 else if (TYPE_CODE (type) == TYPE_CODE_ARRAY && TYPE_VECTOR (type)
2065 && (TYPE_LENGTH (type) == 16 || TYPE_LENGTH (type) == 8))
2066 {
2067 /* A short vector is returned in a V register.  */
2068 gdb_byte buf[V_REGISTER_SIZE];
2069
2070 regs->cooked_read (AARCH64_V0_REGNUM, buf);
2071 memcpy (valbuf, buf, TYPE_LENGTH (type));
2072 }
2073 else
2074 {
2075 /* For a structure or union the behaviour is as if the value had
2076 been stored to word-aligned memory and then loaded into
2077 registers with 64-bit load instruction(s). */
2078 int len = TYPE_LENGTH (type);
2079 int regno = AARCH64_X0_REGNUM;
2080 bfd_byte buf[X_REGISTER_SIZE];
2081
2082 while (len > 0)
2083 {
2084 regs->cooked_read (regno++, buf);
2085 memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2086 len -= X_REGISTER_SIZE;
2087 valbuf += X_REGISTER_SIZE;
2088 }
2089 }
2090 }
2091
2092
2093 /* Will a function return an aggregate type in memory or in a
2094 register? Return 0 if an aggregate type can be returned in a
2095 register, 1 if it must be returned in memory. */
2096
2097 static int
2098 aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
2099 {
2100 type = check_typedef (type);
2101
2102 if (is_hfa_or_hva (type))
2103 {
2104 /* v0-v7 are used to return values, and one register is allocated
2105 per member.  However, an HFA or HVA has at most four members.  */
2106 return 0;
2107 }
2108
2109 if (TYPE_LENGTH (type) > 16)
2110 {
2111 /* PCS B.6 Aggregates larger than 16 bytes are passed by
2112 invisible reference. */
2113
2114 return 1;
2115 }
2116
2117 return 0;
2118 }
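
/* Two illustrative cases for the AAPCS64 rules above, assuming C
   source types:

     struct hfa { float a, b, c, d; };   HFA of four members, returned
                                         in registers V0-V3.
     struct big { int64_t x, y, z; };    24 bytes and not an HFA, so
                                         returned in memory.  */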
2119
2120 /* Write into appropriate registers a function return value of type
2121 TYPE, given in virtual format. */
2122
2123 static void
2124 aarch64_store_return_value (struct type *type, struct regcache *regs,
2125 const gdb_byte *valbuf)
2126 {
2127 struct gdbarch *gdbarch = regs->arch ();
2128 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2129
2130 if (TYPE_CODE (type) == TYPE_CODE_FLT)
2131 {
2132 bfd_byte buf[V_REGISTER_SIZE];
2133 int len = TYPE_LENGTH (type);
2134
2135 memcpy (buf, valbuf, len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
2136 regs->cooked_write (AARCH64_V0_REGNUM, buf);
2137 }
2138 else if (TYPE_CODE (type) == TYPE_CODE_INT
2139 || TYPE_CODE (type) == TYPE_CODE_CHAR
2140 || TYPE_CODE (type) == TYPE_CODE_BOOL
2141 || TYPE_CODE (type) == TYPE_CODE_PTR
2142 || TYPE_IS_REFERENCE (type)
2143 || TYPE_CODE (type) == TYPE_CODE_ENUM)
2144 {
2145 if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
2146 {
2147 /* Values of one word or less are zero/sign-extended and
2148 returned in X0.  */
2149 bfd_byte tmpbuf[X_REGISTER_SIZE];
2150 LONGEST val = unpack_long (type, valbuf);
2151
2152 store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
2153 regs->cooked_write (AARCH64_X0_REGNUM, tmpbuf);
2154 }
2155 else
2156 {
2157 /* Integral values greater than one word are stored in
2158 consecutive registers starting with X0.  This will always
2159 be a multiple of the register size.  */
2160 int len = TYPE_LENGTH (type);
2161 int regno = AARCH64_X0_REGNUM;
2162
2163 while (len > 0)
2164 {
2165 regs->cooked_write (regno++, valbuf);
2166 len -= X_REGISTER_SIZE;
2167 valbuf += X_REGISTER_SIZE;
2168 }
2169 }
2170 }
2171 else if (is_hfa_or_hva (type))
2172 {
2173 int elements = TYPE_NFIELDS (type);
2174 struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
2175 int len = TYPE_LENGTH (member_type);
2176 int i;
2177
2178 for (i = 0; i < elements; i++)
2179 {
2180 int regno = AARCH64_V0_REGNUM + i;
2181 bfd_byte tmpbuf[V_REGISTER_SIZE];
2182
2183 if (aarch64_debug)
2184 {
2185 debug_printf ("write HFA or HVA return value element %d to %s\n",
2186 i + 1,
2187 gdbarch_register_name (gdbarch, regno));
2188 }
2189
2190 memcpy (tmpbuf, valbuf, len);
2191 regs->cooked_write (regno, tmpbuf);
2192 valbuf += len;
2193 }
2194 }
2195 else if (TYPE_CODE (type) == TYPE_CODE_ARRAY && TYPE_VECTOR (type)
2196 && (TYPE_LENGTH (type) == 8 || TYPE_LENGTH (type) == 16))
2197 {
2198 /* Short vector. */
2199 gdb_byte buf[V_REGISTER_SIZE];
2200
2201 memcpy (buf, valbuf, TYPE_LENGTH (type));
2202 regs->cooked_write (AARCH64_V0_REGNUM, buf);
2203 }
2204 else
2205 {
2206 /* For a structure or union the behaviour is as if the value had
2207 been stored to word-aligned memory and then loaded into
2208 registers with 64-bit load instruction(s). */
2209 int len = TYPE_LENGTH (type);
2210 int regno = AARCH64_X0_REGNUM;
2211 bfd_byte tmpbuf[X_REGISTER_SIZE];
2212
2213 while (len > 0)
2214 {
2215 memcpy (tmpbuf, valbuf,
2216 len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2217 regs->cooked_write (regno++, tmpbuf);
2218 len -= X_REGISTER_SIZE;
2219 valbuf += X_REGISTER_SIZE;
2220 }
2221 }
2222 }
2223
2224 /* Implement the "return_value" gdbarch method. */
2225
2226 static enum return_value_convention
2227 aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
2228 struct type *valtype, struct regcache *regcache,
2229 gdb_byte *readbuf, const gdb_byte *writebuf)
2230 {
2231
2232 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
2233 || TYPE_CODE (valtype) == TYPE_CODE_UNION
2234 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
2235 {
2236 if (aarch64_return_in_memory (gdbarch, valtype))
2237 {
2238 if (aarch64_debug)
2239 debug_printf ("return value in memory\n");
2240 return RETURN_VALUE_STRUCT_CONVENTION;
2241 }
2242 }
2243
2244 if (writebuf)
2245 aarch64_store_return_value (valtype, regcache, writebuf);
2246
2247 if (readbuf)
2248 aarch64_extract_return_value (valtype, regcache, readbuf);
2249
2250 if (aarch64_debug)
2251 debug_printf ("return value in registers\n");
2252
2253 return RETURN_VALUE_REGISTER_CONVENTION;
2254 }
2255
2256 /* Implement the "get_longjmp_target" gdbarch method. */
2257
2258 static int
2259 aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2260 {
2261 CORE_ADDR jb_addr;
2262 gdb_byte buf[X_REGISTER_SIZE];
2263 struct gdbarch *gdbarch = get_frame_arch (frame);
2264 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2265 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2266
2267 jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
2268
2269 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
2270 X_REGISTER_SIZE))
2271 return 0;
2272
2273 *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
2274 return 1;
2275 }
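
/* As a sketch: with the default jb_elt_size of 8 and a hypothetical
   jb_pc of 11 (the real value comes from the OS ABI initialisation),
   the saved PC would be read from jb_addr + 88, where jb_addr is the
   jmp_buf pointer the inferior passed in X0.  */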
2276
2277 /* Implement the "gen_return_address" gdbarch method. */
2278
2279 static void
2280 aarch64_gen_return_address (struct gdbarch *gdbarch,
2281 struct agent_expr *ax, struct axs_value *value,
2282 CORE_ADDR scope)
2283 {
2284 value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
2285 value->kind = axs_lvalue_register;
2286 value->u.reg = AARCH64_LR_REGNUM;
2287 }
2288 \f
2289
2290 /* Return the pseudo register name corresponding to REGNUM.  */
2291
2292 static const char *
2293 aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
2294 {
2295 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2296
2297 static const char *const q_name[] =
2298 {
2299 "q0", "q1", "q2", "q3",
2300 "q4", "q5", "q6", "q7",
2301 "q8", "q9", "q10", "q11",
2302 "q12", "q13", "q14", "q15",
2303 "q16", "q17", "q18", "q19",
2304 "q20", "q21", "q22", "q23",
2305 "q24", "q25", "q26", "q27",
2306 "q28", "q29", "q30", "q31",
2307 };
2308
2309 static const char *const d_name[] =
2310 {
2311 "d0", "d1", "d2", "d3",
2312 "d4", "d5", "d6", "d7",
2313 "d8", "d9", "d10", "d11",
2314 "d12", "d13", "d14", "d15",
2315 "d16", "d17", "d18", "d19",
2316 "d20", "d21", "d22", "d23",
2317 "d24", "d25", "d26", "d27",
2318 "d28", "d29", "d30", "d31",
2319 };
2320
2321 static const char *const s_name[] =
2322 {
2323 "s0", "s1", "s2", "s3",
2324 "s4", "s5", "s6", "s7",
2325 "s8", "s9", "s10", "s11",
2326 "s12", "s13", "s14", "s15",
2327 "s16", "s17", "s18", "s19",
2328 "s20", "s21", "s22", "s23",
2329 "s24", "s25", "s26", "s27",
2330 "s28", "s29", "s30", "s31",
2331 };
2332
2333 static const char *const h_name[] =
2334 {
2335 "h0", "h1", "h2", "h3",
2336 "h4", "h5", "h6", "h7",
2337 "h8", "h9", "h10", "h11",
2338 "h12", "h13", "h14", "h15",
2339 "h16", "h17", "h18", "h19",
2340 "h20", "h21", "h22", "h23",
2341 "h24", "h25", "h26", "h27",
2342 "h28", "h29", "h30", "h31",
2343 };
2344
2345 static const char *const b_name[] =
2346 {
2347 "b0", "b1", "b2", "b3",
2348 "b4", "b5", "b6", "b7",
2349 "b8", "b9", "b10", "b11",
2350 "b12", "b13", "b14", "b15",
2351 "b16", "b17", "b18", "b19",
2352 "b20", "b21", "b22", "b23",
2353 "b24", "b25", "b26", "b27",
2354 "b28", "b29", "b30", "b31",
2355 };
2356
2357 regnum -= gdbarch_num_regs (gdbarch);
2358
2359 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2360 return q_name[regnum - AARCH64_Q0_REGNUM];
2361
2362 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2363 return d_name[regnum - AARCH64_D0_REGNUM];
2364
2365 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2366 return s_name[regnum - AARCH64_S0_REGNUM];
2367
2368 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2369 return h_name[regnum - AARCH64_H0_REGNUM];
2370
2371 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2372 return b_name[regnum - AARCH64_B0_REGNUM];
2373
2374 if (tdep->has_sve ())
2375 {
2376 static const char *const sve_v_name[] =
2377 {
2378 "v0", "v1", "v2", "v3",
2379 "v4", "v5", "v6", "v7",
2380 "v8", "v9", "v10", "v11",
2381 "v12", "v13", "v14", "v15",
2382 "v16", "v17", "v18", "v19",
2383 "v20", "v21", "v22", "v23",
2384 "v24", "v25", "v26", "v27",
2385 "v28", "v29", "v30", "v31",
2386 };
2387
2388 if (regnum >= AARCH64_SVE_V0_REGNUM
2389 && regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2390 return sve_v_name[regnum - AARCH64_SVE_V0_REGNUM];
2391 }
2392
2393 internal_error (__FILE__, __LINE__,
2394 _("aarch64_pseudo_register_name: bad register number %d"),
2395 regnum);
2396 }
2397
2398 /* Implement the "pseudo_register_type" tdesc_arch_data method. */
2399
2400 static struct type *
2401 aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
2402 {
2403 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2404
2405 regnum -= gdbarch_num_regs (gdbarch);
2406
2407 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2408 return aarch64_vnq_type (gdbarch);
2409
2410 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2411 return aarch64_vnd_type (gdbarch);
2412
2413 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2414 return aarch64_vns_type (gdbarch);
2415
2416 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2417 return aarch64_vnh_type (gdbarch);
2418
2419 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2420 return aarch64_vnb_type (gdbarch);
2421
2422 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2423 && regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2424 return aarch64_vnv_type (gdbarch);
2425
2426 internal_error (__FILE__, __LINE__,
2427 _("aarch64_pseudo_register_type: bad register number %d"),
2428 regnum);
2429 }
2430
2431 /* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2432
2433 static int
2434 aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
2435 struct reggroup *group)
2436 {
2437 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2438
2439 regnum -= gdbarch_num_regs (gdbarch);
2440
2441 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2442 return group == all_reggroup || group == vector_reggroup;
2443 else if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2444 return (group == all_reggroup || group == vector_reggroup
2445 || group == float_reggroup);
2446 else if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2447 return (group == all_reggroup || group == vector_reggroup
2448 || group == float_reggroup);
2449 else if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2450 return group == all_reggroup || group == vector_reggroup;
2451 else if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2452 return group == all_reggroup || group == vector_reggroup;
2453 else if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2454 && regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2455 return group == all_reggroup || group == vector_reggroup;
2456
2457 return group == all_reggroup;
2458 }
2459
2460 /* Helper for aarch64_pseudo_read_value. */
2461
2462 static struct value *
2463 aarch64_pseudo_read_value_1 (struct gdbarch *gdbarch,
2464 readable_regcache *regcache, int regnum_offset,
2465 int regsize, struct value *result_value)
2466 {
2467 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
2468
2469 /* Enough space for a full vector register. */
2470 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2471 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2472
2473 if (regcache->raw_read (v_regnum, reg_buf) != REG_VALID)
2474 mark_value_bytes_unavailable (result_value, 0,
2475 TYPE_LENGTH (value_type (result_value)));
2476 else
2477 memcpy (value_contents_raw (result_value), reg_buf, regsize);
2478
2479 return result_value;
2480 }
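
/* For example, a read of the pseudo register S5 reaches the helper
   above with a register offset of 5 and a regsize of 4: the whole raw
   V5 register is fetched and only its low four bytes are copied into
   the resulting value.  */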
2481
2482 /* Implement the "pseudo_register_read_value" gdbarch method. */
2483
2484 static struct value *
2485 aarch64_pseudo_read_value (struct gdbarch *gdbarch, readable_regcache *regcache,
2486 int regnum)
2487 {
2488 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2489 struct value *result_value = allocate_value (register_type (gdbarch, regnum));
2490
2491 VALUE_LVAL (result_value) = lval_register;
2492 VALUE_REGNUM (result_value) = regnum;
2493
2494 regnum -= gdbarch_num_regs (gdbarch);
2495
2496 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2497 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2498 regnum - AARCH64_Q0_REGNUM,
2499 Q_REGISTER_SIZE, result_value);
2500
2501 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2502 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2503 regnum - AARCH64_D0_REGNUM,
2504 D_REGISTER_SIZE, result_value);
2505
2506 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2507 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2508 regnum - AARCH64_S0_REGNUM,
2509 S_REGISTER_SIZE, result_value);
2510
2511 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2512 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2513 regnum - AARCH64_H0_REGNUM,
2514 H_REGISTER_SIZE, result_value);
2515
2516 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2517 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2518 regnum - AARCH64_B0_REGNUM,
2519 B_REGISTER_SIZE, result_value);
2520
2521 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2522 && regnum < AARCH64_SVE_V0_REGNUM + 32)
2523 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2524 regnum - AARCH64_SVE_V0_REGNUM,
2525 V_REGISTER_SIZE, result_value);
2526
2527 gdb_assert_not_reached ("regnum out of bounds");
2528 }
2529
2530 /* Helper for aarch64_pseudo_write. */
2531
2532 static void
2533 aarch64_pseudo_write_1 (struct gdbarch *gdbarch, struct regcache *regcache,
2534 int regnum_offset, int regsize, const gdb_byte *buf)
2535 {
2536 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
2537
2538 /* Enough space for a full vector register. */
2539 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2540 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2541
2542 /* Ensure the register buffer is zero.  We want GDB writes of the
2543 various 'scalar' pseudo registers to behave like architectural
2544 writes: register-width bytes are written and the remainder is
2545 set to zero.  */
2546 memset (reg_buf, 0, register_size (gdbarch, AARCH64_V0_REGNUM));
2547
2548 memcpy (reg_buf, buf, regsize);
2549 regcache->raw_write (v_regnum, reg_buf);
2550 }
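
/* Illustrative effect of the zero fill above: after something like

     (gdb) set $s0.f = 1

   the low four bytes of V0 hold the float and the remaining vector
   bytes read back as zero, roughly matching what an architectural
   write to S0 would do.  */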
2551
2552 /* Implement the "pseudo_register_write" gdbarch method. */
2553
2554 static void
2555 aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2556 int regnum, const gdb_byte *buf)
2557 {
2558 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2559 regnum -= gdbarch_num_regs (gdbarch);
2560
2561 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2562 return aarch64_pseudo_write_1 (gdbarch, regcache,
2563 regnum - AARCH64_Q0_REGNUM, Q_REGISTER_SIZE,
2564 buf);
2565
2566 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2567 return aarch64_pseudo_write_1 (gdbarch, regcache,
2568 regnum - AARCH64_D0_REGNUM, D_REGISTER_SIZE,
2569 buf);
2570
2571 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2572 return aarch64_pseudo_write_1 (gdbarch, regcache,
2573 regnum - AARCH64_S0_REGNUM, S_REGISTER_SIZE,
2574 buf);
2575
2576 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2577 return aarch64_pseudo_write_1 (gdbarch, regcache,
2578 regnum - AARCH64_H0_REGNUM, H_REGISTER_SIZE,
2579 buf);
2580
2581 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2582 return aarch64_pseudo_write_1 (gdbarch, regcache,
2583 regnum - AARCH64_B0_REGNUM, B_REGISTER_SIZE,
2584 buf);
2585
2586 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2587 && regnum < AARCH64_SVE_V0_REGNUM + 32)
2588 return aarch64_pseudo_write_1 (gdbarch, regcache,
2589 regnum - AARCH64_SVE_V0_REGNUM,
2590 V_REGISTER_SIZE, buf);
2591
2592 gdb_assert_not_reached ("regnum out of bounds");
2593 }
2594
2595 /* Callback function for user_reg_add. */
2596
2597 static struct value *
2598 value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
2599 {
2600 const int *reg_p = (const int *) baton;
2601
2602 return value_of_register (*reg_p, frame);
2603 }
2604 \f
2605
2606 /* Implement the "software_single_step" gdbarch method, needed to
2607 single step through atomic sequences on AArch64. */
2608
2609 static std::vector<CORE_ADDR>
2610 aarch64_software_single_step (struct regcache *regcache)
2611 {
2612 struct gdbarch *gdbarch = regcache->arch ();
2613 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2614 const int insn_size = 4;
2615 const int atomic_sequence_length = 16; /* Instruction sequence length. */
2616 CORE_ADDR pc = regcache_read_pc (regcache);
2617 CORE_ADDR breaks[2] = { CORE_ADDR_MAX, CORE_ADDR_MAX };
2618 CORE_ADDR loc = pc;
2619 CORE_ADDR closing_insn = 0;
2620 uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
2621 byte_order_for_code);
2622 int index;
2623 int insn_count;
2624 int bc_insn_count = 0; /* Conditional branch instruction count. */
2625 int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed). */
2626 aarch64_inst inst;
2627
2628 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2629 return {};
2630
2631 /* Look for a Load Exclusive instruction which begins the sequence. */
2632 if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
2633 return {};
2634
2635 for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
2636 {
2637 loc += insn_size;
2638 insn = read_memory_unsigned_integer (loc, insn_size,
2639 byte_order_for_code);
2640
2641 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2642 return {};
2643 /* Check if the instruction is a conditional branch. */
2644 if (inst.opcode->iclass == condbranch)
2645 {
2646 gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);
2647
2648 if (bc_insn_count >= 1)
2649 return {};
2650
2651 /* It is, so we'll try to set a breakpoint at the destination. */
2652 breaks[1] = loc + inst.operands[0].imm.value;
2653
2654 bc_insn_count++;
2655 last_breakpoint++;
2656 }
2657
2658 /* Look for the Store Exclusive which closes the atomic sequence. */
2659 if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
2660 {
2661 closing_insn = loc;
2662 break;
2663 }
2664 }
2665
2666 /* We didn't find a closing Store Exclusive instruction; fall back.  */
2667 if (!closing_insn)
2668 return {};
2669
2670 /* Insert breakpoint after the end of the atomic sequence. */
2671 breaks[0] = loc + insn_size;
2672
2673 /* Check for duplicated breakpoints, and also check that the second
2674 breakpoint is not within the atomic sequence. */
2675 if (last_breakpoint
2676 && (breaks[1] == breaks[0]
2677 || (breaks[1] >= pc && breaks[1] <= closing_insn)))
2678 last_breakpoint = 0;
2679
2680 std::vector<CORE_ADDR> next_pcs;
2681
2682 /* Insert the breakpoint at the end of the sequence, and one at the
2683 destination of the conditional branch, if it exists. */
2684 for (index = 0; index <= last_breakpoint; index++)
2685 next_pcs.push_back (breaks[index]);
2686
2687 return next_pcs;
2688 }
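
/* A sketch of the kind of sequence handled above, as emitted for an
   atomic increment:

     retry:
       ldaxr  x1, [x0]        load exclusive opens the sequence
       add    x1, x1, #1
       stlxr  w2, x1, [x0]    store exclusive closes it
       cbnz   w2, retry

   breaks[0] lands on the instruction after the store exclusive.
   Stepping through the middle of the sequence would clear the
   exclusive monitor, so the sequence could never succeed.  */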
2689
2690 struct aarch64_displaced_step_closure : public displaced_step_closure
2691 {
2692 /* It is nonzero when a conditional instruction, such as B.COND or
2693 TBZ, is being displaced stepped.  */
2694 int cond = 0;
2695
2696 /* PC adjustment offset after displaced stepping. */
2697 int32_t pc_adjust = 0;
2698 };
2699
2700 /* Data when visiting instructions for displaced stepping. */
2701
2702 struct aarch64_displaced_step_data
2703 {
2704 struct aarch64_insn_data base;
2705
2706 /* The address at which the instruction will be executed.  */
2707 CORE_ADDR new_addr;
2708 /* Buffer of instructions to be copied to NEW_ADDR to execute. */
2709 uint32_t insn_buf[DISPLACED_MODIFIED_INSNS];
2710 /* Number of instructions in INSN_BUF. */
2711 unsigned insn_count;
2712 /* Registers when doing displaced stepping. */
2713 struct regcache *regs;
2714
2715 aarch64_displaced_step_closure *dsc;
2716 };
2717
2718 /* Implementation of aarch64_insn_visitor method "b". */
2719
2720 static void
2721 aarch64_displaced_step_b (const int is_bl, const int32_t offset,
2722 struct aarch64_insn_data *data)
2723 {
2724 struct aarch64_displaced_step_data *dsd
2725 = (struct aarch64_displaced_step_data *) data;
2726 int64_t new_offset = data->insn_addr - dsd->new_addr + offset;
2727
2728 if (can_encode_int32 (new_offset, 28))
2729 {
2730 /* Emit B rather than BL, because executing BL on a new address
2731 will get the wrong address into LR. In order to avoid this,
2732 we emit B, and update LR if the instruction is BL. */
2733 emit_b (dsd->insn_buf, 0, new_offset);
2734 dsd->insn_count++;
2735 }
2736 else
2737 {
2738 /* Write NOP. */
2739 emit_nop (dsd->insn_buf);
2740 dsd->insn_count++;
2741 dsd->dsc->pc_adjust = offset;
2742 }
2743
2744 if (is_bl)
2745 {
2746 /* Update LR. */
2747 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
2748 data->insn_addr + 4);
2749 }
2750 }
2751
2752 /* Implementation of aarch64_insn_visitor method "b_cond". */
2753
2754 static void
2755 aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
2756 struct aarch64_insn_data *data)
2757 {
2758 struct aarch64_displaced_step_data *dsd
2759 = (struct aarch64_displaced_step_data *) data;
2760
2761 /* GDB has to fix up the PC after displaced stepping this instruction
2762 differently, according to whether the condition is true or false.
2763 Instead of checking COND against the conditional flags, we can
2764 use the following instructions, and GDB can then tell how to fix
2765 up the PC according to the PC value.
2766
2767 B.COND TAKEN ; If cond is true, then jump to TAKEN.
2768 INSN1 ;
2769 TAKEN:
2770 INSN2
2771 */
2772
2773 emit_bcond (dsd->insn_buf, cond, 8);
2774 dsd->dsc->cond = 1;
2775 dsd->dsc->pc_adjust = offset;
2776 dsd->insn_count = 1;
2777 }
2778
2779 /* Build an aarch64_register operand for register number NUM, which
2780 is a 64-bit register if IS64 is nonzero.  If the register were
2781 known statically it could simply be written as a constant.  */
2782
2783 static struct aarch64_register
2784 aarch64_register (unsigned num, int is64)
2785 {
2786 return (struct aarch64_register) { num, is64 };
2787 }
2788
2789 /* Implementation of aarch64_insn_visitor method "cb". */
2790
2791 static void
2792 aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
2793 const unsigned rn, int is64,
2794 struct aarch64_insn_data *data)
2795 {
2796 struct aarch64_displaced_step_data *dsd
2797 = (struct aarch64_displaced_step_data *) data;
2798
2799 /* The offset is out of range for a compare and branch
2800 instruction. We can use the following instructions instead:
2801
2802 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
2803 INSN1 ;
2804 TAKEN:
2805 INSN2
2806 */
2807 emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
2808 dsd->insn_count = 1;
2809 dsd->dsc->cond = 1;
2810 dsd->dsc->pc_adjust = offset;
2811 }
2812
2813 /* Implementation of aarch64_insn_visitor method "tb". */
2814
2815 static void
2816 aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
2817 const unsigned rt, unsigned bit,
2818 struct aarch64_insn_data *data)
2819 {
2820 struct aarch64_displaced_step_data *dsd
2821 = (struct aarch64_displaced_step_data *) data;
2822
2823 /* The offset is out of range for a test bit and branch
2824 instruction.  We can use the following instructions instead:
2825
2826 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
2827 INSN1 ;
2828 TAKEN:
2829 INSN2
2830
2831 */
2832 emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
2833 dsd->insn_count = 1;
2834 dsd->dsc->cond = 1;
2835 dsd->dsc->pc_adjust = offset;
2836 }
2837
2838 /* Implementation of aarch64_insn_visitor method "adr". */
2839
2840 static void
2841 aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
2842 const int is_adrp, struct aarch64_insn_data *data)
2843 {
2844 struct aarch64_displaced_step_data *dsd
2845 = (struct aarch64_displaced_step_data *) data;
2846 /* We know exactly the address the ADR{P,} instruction will compute.
2847 We can just write it to the destination register. */
2848 CORE_ADDR address = data->insn_addr + offset;
2849
2850 if (is_adrp)
2851 {
2852 /* Clear the lower 12 bits of the offset to get the 4K page. */
2853 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2854 address & ~0xfff);
2855 }
2856 else
2857 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2858 address);
2859
2860 dsd->dsc->pc_adjust = 4;
2861 emit_nop (dsd->insn_buf);
2862 dsd->insn_count = 1;
2863 }
2864
2865 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
2866
2867 static void
2868 aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
2869 const unsigned rt, const int is64,
2870 struct aarch64_insn_data *data)
2871 {
2872 struct aarch64_displaced_step_data *dsd
2873 = (struct aarch64_displaced_step_data *) data;
2874 CORE_ADDR address = data->insn_addr + offset;
2875 struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };
2876
2877 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
2878 address);
2879
2880 if (is_sw)
2881 dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
2882 aarch64_register (rt, 1), zero);
2883 else
2884 dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
2885 aarch64_register (rt, 1), zero);
2886
2887 dsd->dsc->pc_adjust = 4;
2888 }
2889
2890 /* Implementation of aarch64_insn_visitor method "others". */
2891
2892 static void
2893 aarch64_displaced_step_others (const uint32_t insn,
2894 struct aarch64_insn_data *data)
2895 {
2896 struct aarch64_displaced_step_data *dsd
2897 = (struct aarch64_displaced_step_data *) data;
2898
2899 aarch64_emit_insn (dsd->insn_buf, insn);
2900 dsd->insn_count = 1;
2901
2902 if ((insn & 0xfffffc1f) == 0xd65f0000)
2903 {
2904 /* RET */
2905 dsd->dsc->pc_adjust = 0;
2906 }
2907 else
2908 dsd->dsc->pc_adjust = 4;
2909 }
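
/* Note on the mask above: 0xd65f0000 is RET with Rn in bits [9:5],
   and 0xfffffc1f clears exactly that field, so RET X30, RET X1 and so
   on all match; for those, the new PC comes from the register value
   and no fixed adjustment is wanted.  */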
2910
2911 static const struct aarch64_insn_visitor visitor =
2912 {
2913 aarch64_displaced_step_b,
2914 aarch64_displaced_step_b_cond,
2915 aarch64_displaced_step_cb,
2916 aarch64_displaced_step_tb,
2917 aarch64_displaced_step_adr,
2918 aarch64_displaced_step_ldr_literal,
2919 aarch64_displaced_step_others,
2920 };
2921
2922 /* Implement the "displaced_step_copy_insn" gdbarch method. */
2923
2924 struct displaced_step_closure *
2925 aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
2926 CORE_ADDR from, CORE_ADDR to,
2927 struct regcache *regs)
2928 {
2929 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2930 uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
2931 struct aarch64_displaced_step_data dsd;
2932 aarch64_inst inst;
2933
2934 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2935 return NULL;
2936
2937 /* Look for a Load Exclusive instruction which begins the sequence. */
2938 if (inst.opcode->iclass == ldstexcl && bit (insn, 22))
2939 {
2940 /* We can't displaced step atomic sequences. */
2941 return NULL;
2942 }
2943
2944 std::unique_ptr<aarch64_displaced_step_closure> dsc
2945 (new aarch64_displaced_step_closure);
2946 dsd.base.insn_addr = from;
2947 dsd.new_addr = to;
2948 dsd.regs = regs;
2949 dsd.dsc = dsc.get ();
2950 dsd.insn_count = 0;
2951 aarch64_relocate_instruction (insn, &visitor,
2952 (struct aarch64_insn_data *) &dsd);
2953 gdb_assert (dsd.insn_count <= DISPLACED_MODIFIED_INSNS);
2954
2955 if (dsd.insn_count != 0)
2956 {
2957 int i;
2958
2959 /* The instruction can be relocated to the scratch pad.  Copy
2960 the relocated instruction(s) there.  */
2961 for (i = 0; i < dsd.insn_count; i++)
2962 {
2963 if (debug_displaced)
2964 {
2965 debug_printf ("displaced: writing insn ");
2966 debug_printf ("%.8x", dsd.insn_buf[i]);
2967 debug_printf (" at %s\n", paddress (gdbarch, to + i * 4));
2968 }
2969 write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
2970 (ULONGEST) dsd.insn_buf[i]);
2971 }
2972 }
2973 else
2974 {
2975 dsc = NULL;
2976 }
2977
2978 return dsc.release ();
2979 }
2980
2981 /* Implement the "displaced_step_fixup" gdbarch method. */
2982
2983 void
2984 aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
2985 struct displaced_step_closure *dsc_,
2986 CORE_ADDR from, CORE_ADDR to,
2987 struct regcache *regs)
2988 {
2989 aarch64_displaced_step_closure *dsc = (aarch64_displaced_step_closure *) dsc_;
2990
2991 if (dsc->cond)
2992 {
2993 ULONGEST pc;
2994
2995 regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc);
2996 if (pc - to == 8)
2997 {
2998 /* Condition is true. */
2999 }
3000 else if (pc - to == 4)
3001 {
3002 /* Condition is false. */
3003 dsc->pc_adjust = 4;
3004 }
3005 else
3006 gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
3007 }
3008
3009 if (dsc->pc_adjust != 0)
3010 {
3011 if (debug_displaced)
3012 {
3013 debug_printf ("displaced: fixup: set PC to %s:%d\n",
3014 paddress (gdbarch, from), dsc->pc_adjust);
3015 }
3016 regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
3017 from + dsc->pc_adjust);
3018 }
3019 }
3020
3021 /* Implement the "displaced_step_hw_singlestep" gdbarch method. */
3022
3023 int
3024 aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch,
3025 struct displaced_step_closure *closure)
3026 {
3027 return 1;
3028 }
3029
3030 /* Get the correct target description for the given VQ value.
3031 If VQ is zero then it is assumed SVE is not supported.
3032 (It is not possible to set VQ to zero on an SVE system). */
3033
3034 const target_desc *
3035 aarch64_read_description (uint64_t vq)
3036 {
3037 if (vq > AARCH64_MAX_SVE_VQ)
3038 error (_("VQ is %" PRIu64 ", maximum supported value is %d"), vq,
3039 AARCH64_MAX_SVE_VQ);
3040
3041 struct target_desc *tdesc = tdesc_aarch64_list[vq];
3042
3043 if (tdesc == NULL)
3044 {
3045 tdesc = aarch64_create_target_description (vq);
3046 tdesc_aarch64_list[vq] = tdesc;
3047 }
3048
3049 return tdesc;
3050 }
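
/* For example, a call with VQ of 2 describes SVE vectors of 2 * 128
   bits, i.e. 32-byte Z registers; the resulting description is cached
   in tdesc_aarch64_list[2] so later requests for the same vector
   length reuse it.  */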
3051
3052 /* Return the VQ used when creating the target description TDESC. */
3053
3054 static uint64_t
3055 aarch64_get_tdesc_vq (const struct target_desc *tdesc)
3056 {
3057 const struct tdesc_feature *feature_sve;
3058
3059 if (!tdesc_has_registers (tdesc))
3060 return 0;
3061
3062 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
3063
3064 if (feature_sve == nullptr)
3065 return 0;
3066
3067 uint64_t vl = tdesc_register_bitsize (feature_sve,
3068 aarch64_sve_register_names[0]) / 8;
3069 return sve_vq_from_vl (vl);
3070 }
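
/* Sketch of the arithmetic above: a description whose Z registers are
   2048 bits wide gives vl = 2048 / 8 = 256 bytes, and the number of
   128-bit quadwords is then VQ = 256 / 16 = 16.  */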
3071
3072
3073 /* Initialize the current architecture based on INFO. If possible,
3074 re-use an architecture from ARCHES, which is a list of
3075 architectures already created during this debugging session.
3076
3077 Called e.g. at program startup, when reading a core file, and when
3078 reading a binary file. */
3079
3080 static struct gdbarch *
3081 aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
3082 {
3083 struct gdbarch_tdep *tdep;
3084 struct gdbarch *gdbarch;
3085 struct gdbarch_list *best_arch;
3086 struct tdesc_arch_data *tdesc_data = NULL;
3087 const struct target_desc *tdesc = info.target_desc;
3088 int i;
3089 int valid_p = 1;
3090 const struct tdesc_feature *feature_core;
3091 const struct tdesc_feature *feature_fpu;
3092 const struct tdesc_feature *feature_sve;
3093 int num_regs = 0;
3094 int num_pseudo_regs = 0;
3095
3096 /* Ensure we always have a target description. */
3097 if (!tdesc_has_registers (tdesc))
3098 tdesc = aarch64_read_description (0);
3099 gdb_assert (tdesc);
3100
3101 feature_core = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");
3102 feature_fpu = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
3103 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
3104
3105 if (feature_core == NULL)
3106 return NULL;
3107
3108 tdesc_data = tdesc_data_alloc ();
3109
3110 /* Validate the description provides the mandatory core R registers
3111 and allocate their numbers. */
3112 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
3113 valid_p &= tdesc_numbered_register (feature_core, tdesc_data,
3114 AARCH64_X0_REGNUM + i,
3115 aarch64_r_register_names[i]);
3116
3117 num_regs = AARCH64_X0_REGNUM + i;
3118
3119 /* Add the V registers. */
3120 if (feature_fpu != NULL)
3121 {
3122 if (feature_sve != NULL)
3123 error (_("Program contains both fpu and SVE features."));
3124
3125 /* Validate the description provides the mandatory V registers
3126 and allocate their numbers. */
3127 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
3128 valid_p &= tdesc_numbered_register (feature_fpu, tdesc_data,
3129 AARCH64_V0_REGNUM + i,
3130 aarch64_v_register_names[i]);
3131
3132 num_regs = AARCH64_V0_REGNUM + i;
3133 }
3134
3135 /* Add the SVE registers. */
3136 if (feature_sve != NULL)
3137 {
3138 /* Validate the description provides the mandatory SVE registers
3139 and allocate their numbers. */
3140 for (i = 0; i < ARRAY_SIZE (aarch64_sve_register_names); i++)
3141 valid_p &= tdesc_numbered_register (feature_sve, tdesc_data,
3142 AARCH64_SVE_Z0_REGNUM + i,
3143 aarch64_sve_register_names[i]);
3144
3145 num_regs = AARCH64_SVE_Z0_REGNUM + i;
3146 num_pseudo_regs += 32;	/* Add the Vn register pseudos.  */
3147 }
3148
3149 if (feature_fpu != NULL || feature_sve != NULL)
3150 {
3151 num_pseudo_regs += 32;	/* Add the Qn scalar register pseudos.  */
3152 num_pseudo_regs += 32;	/* Add the Dn scalar register pseudos.  */
3153 num_pseudo_regs += 32;	/* Add the Sn scalar register pseudos.  */
3154 num_pseudo_regs += 32;	/* Add the Hn scalar register pseudos.  */
3155 num_pseudo_regs += 32;	/* Add the Bn scalar register pseudos.  */
3156 }
3157
3158 if (!valid_p)
3159 {
3160 tdesc_data_cleanup (tdesc_data);
3161 return NULL;
3162 }
3163
3164 /* AArch64 code is always little-endian. */
3165 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
3166
3167 /* If there is already a candidate, use it. */
3168 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
3169 best_arch != NULL;
3170 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
3171 {
3172 /* Found a match. */
3173 break;
3174 }
3175
3176 if (best_arch != NULL)
3177 {
3178 if (tdesc_data != NULL)
3179 tdesc_data_cleanup (tdesc_data);
3180 return best_arch->gdbarch;
3181 }
3182
3183 tdep = XCNEW (struct gdbarch_tdep);
3184 gdbarch = gdbarch_alloc (&info, tdep);
3185
3186 /* This should be low enough for everything. */
3187 tdep->lowest_pc = 0x20;
3188 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
3189 tdep->jb_elt_size = 8;
3190 tdep->vq = aarch64_get_tdesc_vq (tdesc);
3191
3192 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
3193 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
3194
3195 /* Frame handling. */
3196 set_gdbarch_dummy_id (gdbarch, aarch64_dummy_id);
3197 set_gdbarch_unwind_pc (gdbarch, aarch64_unwind_pc);
3198 set_gdbarch_unwind_sp (gdbarch, aarch64_unwind_sp);
3199
3200 /* Advance PC across function entry code. */
3201 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
3202
3203 /* The stack grows downward. */
3204 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
3205
3206 /* Breakpoint manipulation. */
3207 set_gdbarch_breakpoint_kind_from_pc (gdbarch,
3208 aarch64_breakpoint::kind_from_pc);
3209 set_gdbarch_sw_breakpoint_from_kind (gdbarch,
3210 aarch64_breakpoint::bp_from_kind);
3211 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
3212 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
3213
3214 /* Information about registers, etc. */
3215 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
3216 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
3217 set_gdbarch_num_regs (gdbarch, num_regs);
3218
3219 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
3220 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
3221 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
3222 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
3223 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
3224 set_tdesc_pseudo_register_reggroup_p (gdbarch,
3225 aarch64_pseudo_register_reggroup_p);
3226
3227 /* ABI */
3228 set_gdbarch_short_bit (gdbarch, 16);
3229 set_gdbarch_int_bit (gdbarch, 32);
3230 set_gdbarch_float_bit (gdbarch, 32);
3231 set_gdbarch_double_bit (gdbarch, 64);
3232 set_gdbarch_long_double_bit (gdbarch, 128);
3233 set_gdbarch_long_bit (gdbarch, 64);
3234 set_gdbarch_long_long_bit (gdbarch, 64);
3235 set_gdbarch_ptr_bit (gdbarch, 64);
3236 set_gdbarch_char_signed (gdbarch, 0);
3237 set_gdbarch_wchar_signed (gdbarch, 0);
3238 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
3239 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
3240 set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);
3241
3242 /* Internal <-> external register number maps. */
3243 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
3244
3245 /* Returning results. */
3246 set_gdbarch_return_value (gdbarch, aarch64_return_value);
3247
3248 /* Disassembly. */
3249 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
3250
3251 /* Virtual tables. */
3252 set_gdbarch_vbit_in_delta (gdbarch, 1);
3253
3254 /* Hook in the ABI-specific overrides, if they have been registered. */
3255 info.target_desc = tdesc;
3256 info.tdesc_data = tdesc_data;
3257 gdbarch_init_osabi (info, gdbarch);
3258
3259 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
3260
3261 /* Add some default predicates. */
3262 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
3263 dwarf2_append_unwinders (gdbarch);
3264 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
3265
3266 frame_base_set_default (gdbarch, &aarch64_normal_base);
3267
3268 /* Now we have tuned the configuration, set a few final things,
3269 based on what the OS ABI has told us. */
3270
3271 if (tdep->jb_pc >= 0)
3272 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
3273
3274 set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);
3275
3276 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
3277
3278 /* Add standard register aliases. */
3279 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
3280 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
3281 value_of_aarch64_user_reg,
3282 &aarch64_register_aliases[i].regnum);
3283
3284 return gdbarch;
3285 }
3286
3287 static void
3288 aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
3289 {
3290 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3291
3292 if (tdep == NULL)
3293 return;
3294
3295 fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
3296 paddress (gdbarch, tdep->lowest_pc));
3297 }
3298
3299 #if GDB_SELF_TEST
3300 namespace selftests
3301 {
3302 static void aarch64_process_record_test (void);
3303 }
3304 #endif
3305
3306 void
3307 _initialize_aarch64_tdep (void)
3308 {
3309 gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
3310 aarch64_dump_tdep);
3311
3312 /* Debug this file's internals. */
3313 add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
3314 Set AArch64 debugging."), _("\
3315 Show AArch64 debugging."), _("\
3316 When on, AArch64 specific debugging is enabled."),
3317 NULL,
3318 show_aarch64_debug,
3319 &setdebuglist, &showdebuglist);
3320
3321 #if GDB_SELF_TEST
3322 selftests::register_test ("aarch64-analyze-prologue",
3323 selftests::aarch64_analyze_prologue_test);
3324 selftests::register_test ("aarch64-process-record",
3325 selftests::aarch64_process_record_test);
3326 selftests::record_xml_tdesc ("aarch64.xml",
3327 aarch64_create_target_description (0));
3328 #endif
3329 }
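
/* The maintenance command registered above is used as, for example:

     (gdb) set debug aarch64 on
     (gdb) show debug aarch64

   which drives the aarch64_debug output scattered through this
   file.  */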
3330
3331 /* AArch64 process record-replay related structures, defines etc. */
3332
3333 #define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
3334 do \
3335 { \
3336 unsigned int reg_len = LENGTH; \
3337 if (reg_len) \
3338 { \
3339 REGS = XNEWVEC (uint32_t, reg_len); \
3340 memcpy (&REGS[0], &RECORD_BUF[0], sizeof (uint32_t) * LENGTH); \
3341 } \
3342 } \
3343 while (0)
3344
3345 #define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
3346 do \
3347 { \
3348 unsigned int mem_len = LENGTH; \
3349 if (mem_len) \
3350 { \
3351 MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
3352 memcpy (&MEMS->len, &RECORD_BUF[0], \
3353 sizeof (struct aarch64_mem_r) * LENGTH); \
3354 } \
3355 } \
3356 while (0)
3357
3358 /* AArch64 record/replay structures and enumerations. */
3359
3360 struct aarch64_mem_r
3361 {
3362 uint64_t len; /* Record length. */
3363 uint64_t addr; /* Memory address. */
3364 };
3365
3366 enum aarch64_record_result
3367 {
3368 AARCH64_RECORD_SUCCESS,
3369 AARCH64_RECORD_UNSUPPORTED,
3370 AARCH64_RECORD_UNKNOWN
3371 };
3372
3373 typedef struct insn_decode_record_t
3374 {
3375 struct gdbarch *gdbarch;
3376 struct regcache *regcache;
3377 CORE_ADDR this_addr; /* Address of insn to be recorded. */
3378 uint32_t aarch64_insn; /* Insn to be recorded. */
3379 uint32_t mem_rec_count; /* Count of memory records. */
3380 uint32_t reg_rec_count; /* Count of register records. */
3381 uint32_t *aarch64_regs; /* Registers to be recorded. */
3382 struct aarch64_mem_r *aarch64_mems; /* Memory locations to be recorded. */
3383 } insn_decode_record;
3384
3385 /* Record handler for data processing - register instructions. */
3386
3387 static unsigned int
3388 aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
3389 {
3390 uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
3391 uint32_t record_buf[4];
3392
3393 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3394 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3395 insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);
3396
3397 if (!bit (aarch64_insn_r->aarch64_insn, 28))
3398 {
3399 uint8_t setflags;
3400
3401 /* Logical (shifted register). */
3402 if (insn_bits24_27 == 0x0a)
3403 setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
3404 /* Add/subtract. */
3405 else if (insn_bits24_27 == 0x0b)
3406 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3407 else
3408 return AARCH64_RECORD_UNKNOWN;
3409
3410 record_buf[0] = reg_rd;
3411 aarch64_insn_r->reg_rec_count = 1;
3412 if (setflags)
3413 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3414 }
3415 else
3416 {
3417 if (insn_bits24_27 == 0x0b)
3418 {
3419 /* Data-processing (3 source). */
3420 record_buf[0] = reg_rd;
3421 aarch64_insn_r->reg_rec_count = 1;
3422 }
3423 else if (insn_bits24_27 == 0x0a)
3424 {
3425 if (insn_bits21_23 == 0x00)
3426 {
3427 /* Add/subtract (with carry). */
3428 record_buf[0] = reg_rd;
3429 aarch64_insn_r->reg_rec_count = 1;
3430 if (bit (aarch64_insn_r->aarch64_insn, 29))
3431 {
3432 record_buf[1] = AARCH64_CPSR_REGNUM;
3433 aarch64_insn_r->reg_rec_count = 2;
3434 }
3435 }
3436 else if (insn_bits21_23 == 0x02)
3437 {
3438 /* Conditional compare (register) and conditional compare
3439 (immediate) instructions. */
3440 record_buf[0] = AARCH64_CPSR_REGNUM;
3441 aarch64_insn_r->reg_rec_count = 1;
3442 }
3443 else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
3444 {
3445 /* Conditional select.  */
3446 /* Data-processing (2 source). */
3447 /* Data-processing (1 source). */
3448 record_buf[0] = reg_rd;
3449 aarch64_insn_r->reg_rec_count = 1;
3450 }
3451 else
3452 return AARCH64_RECORD_UNKNOWN;
3453 }
3454 }
3455
3456 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3457 record_buf);
3458 return AARCH64_RECORD_SUCCESS;
3459 }
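
/* As an example of the decoding above, ADDS X0, X1, X2 (add/subtract
   shifted register with the S bit set) records both the destination
   X0 and AARCH64_CPSR_REGNUM, since the instruction writes the
   condition flags as well as the result.  */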
3460
3461 /* Record handler for data processing - immediate instructions. */
3462
3463 static unsigned int
3464 aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
3465 {
3466 uint8_t reg_rd, insn_bit23, insn_bits24_27, setflags;
3467 uint32_t record_buf[4];
3468
3469 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3470 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3471 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3472
3473 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
3474 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
3475 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
3476 {
3477 record_buf[0] = reg_rd;
3478 aarch64_insn_r->reg_rec_count = 1;
3479 }
3480 else if (insn_bits24_27 == 0x01)
3481 {
3482 /* Add/Subtract (immediate). */
3483 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3484 record_buf[0] = reg_rd;
3485 aarch64_insn_r->reg_rec_count = 1;
3486 if (setflags)
3487 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3488 }
3489 else if (insn_bits24_27 == 0x02 && !insn_bit23)
3490 {
3491 /* Logical (immediate). */
3492 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
3493 record_buf[0] = reg_rd;
3494 aarch64_insn_r->reg_rec_count = 1;
3495 if (setflags)
3496 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3497 }
3498 else
3499 return AARCH64_RECORD_UNKNOWN;
3500
3501 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3502 record_buf);
3503 return AARCH64_RECORD_SUCCESS;
3504 }
3505
3506 /* Record handler for branch, exception generation and system instructions. */
3507
3508 static unsigned int
3509 aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
3510 {
3511 struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
3512 uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
3513 uint32_t record_buf[4];
3514
3515 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3516 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3517 insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3518
3519 if (insn_bits28_31 == 0x0d)
3520 {
3521 /* Exception generation instructions. */
3522 if (insn_bits24_27 == 0x04)
3523 {
3524 if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
3525 && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
3526 && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
3527 {
3528 ULONGEST svc_number;
3529
3530 regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
3531 &svc_number);
3532 return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
3533 svc_number);
3534 }
3535 else
3536 return AARCH64_RECORD_UNSUPPORTED;
3537 }
3538 /* System instructions. */
3539 else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
3540 {
3541 uint32_t reg_rt, reg_crn;
3542
3543 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3544 reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3545
3546 /* Record rt in case of sysl and mrs instructions. */
3547 if (bit (aarch64_insn_r->aarch64_insn, 21))
3548 {
3549 record_buf[0] = reg_rt;
3550 aarch64_insn_r->reg_rec_count = 1;
3551 }
3552 /* Record cpsr for hint and msr (immediate) instructions.  */
3553 else if (reg_crn == 0x02 || reg_crn == 0x04)
3554 {
3555 record_buf[0] = AARCH64_CPSR_REGNUM;
3556 aarch64_insn_r->reg_rec_count = 1;
3557 }
3558 }
3559 /* Unconditional branch (register). */
3560 else if ((insn_bits24_27 & 0x0e) == 0x06)
3561 {
3562 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3563 if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
3564 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3565 }
3566 else
3567 return AARCH64_RECORD_UNKNOWN;
3568 }
3569 /* Unconditional branch (immediate). */
3570 else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
3571 {
3572 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3573 if (bit (aarch64_insn_r->aarch64_insn, 31))
3574 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3575 }
3576 else
3577 /* Compare & branch (immediate), Test & branch (immediate) and
3578 Conditional branch (immediate). */
3579 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3580
3581 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3582 record_buf);
3583 return AARCH64_RECORD_SUCCESS;
3584 }
3585
3586 /* Record handler for advanced SIMD load and store instructions. */
3587
3588 static unsigned int
3589 aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
3590 {
3591 CORE_ADDR address;
3592 uint64_t addr_offset = 0;
3593 uint32_t record_buf[24];
3594 uint64_t record_buf_mem[24];
3595 uint32_t reg_rn, reg_rt;
3596 uint32_t reg_index = 0, mem_index = 0;
3597 uint8_t opcode_bits, size_bits;
3598
3599 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3600 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3601 size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3602 opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3603 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);
3604
3605 if (record_debug)
3606 debug_printf ("Process record: Advanced SIMD load/store\n");
3607
3608 /* Load/store single structure. */
3609 if (bit (aarch64_insn_r->aarch64_insn, 24))
3610 {
3611 uint8_t sindex, scale, selem, esize, replicate = 0;
3612 scale = opcode_bits >> 2;
3613 selem = ((opcode_bits & 0x02)
3614 | bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
3615 switch (scale)
3616 {
3617 case 1:
3618 if (size_bits & 0x01)
3619 return AARCH64_RECORD_UNKNOWN;
3620 break;
3621 case 2:
3622 if ((size_bits >> 1) & 0x01)
3623 return AARCH64_RECORD_UNKNOWN;
3624 if (size_bits & 0x01)
3625 {
3626 if (!((opcode_bits >> 1) & 0x01))
3627 scale = 3;
3628 else
3629 return AARCH64_RECORD_UNKNOWN;
3630 }
3631 break;
3632 case 3:
3633 if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
3634 {
3635 scale = size_bits;
3636 replicate = 1;
3637 break;
3638 }
3639 else
3640 return AARCH64_RECORD_UNKNOWN;
3641 default:
3642 break;
3643 }
3644 esize = 8 << scale;
3645 if (replicate)
3646 for (sindex = 0; sindex < selem; sindex++)
3647 {
3648 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3649 reg_rt = (reg_rt + 1) % 32;
3650 }
3651 else
3652 {
3653 for (sindex = 0; sindex < selem; sindex++)
3654 {
3655 if (bit (aarch64_insn_r->aarch64_insn, 22))
3656 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3657 else
3658 {
3659 record_buf_mem[mem_index++] = esize / 8;
3660 record_buf_mem[mem_index++] = address + addr_offset;
3661 }
3662 addr_offset = addr_offset + (esize / 8);
3663 reg_rt = (reg_rt + 1) % 32;
3664 }
3665 }
3666 }
3667 /* Load/store multiple structure. */
3668 else
3669 {
3670 uint8_t selem, esize, rpt, elements;
3671 uint8_t eindex, rindex;
3672
3673 esize = 8 << size_bits;
3674 if (bit (aarch64_insn_r->aarch64_insn, 30))
3675 elements = 128 / esize;
3676 else
3677 elements = 64 / esize;
3678
3679 switch (opcode_bits)
3680 {
3681 /* LD/ST4 (4 Registers).  */
3682 case 0:
3683 rpt = 1;
3684 selem = 4;
3685 break;
3686 /* LD/ST1 (4 Registers).  */
3687 case 2:
3688 rpt = 4;
3689 selem = 1;
3690 break;
3691 /* LD/ST3 (3 Registers).  */
3692 case 4:
3693 rpt = 1;
3694 selem = 3;
3695 break;
3696 /* LD/ST1 (3 Registers).  */
3697 case 6:
3698 rpt = 3;
3699 selem = 1;
3700 break;
3701 /* LD/ST1 (1 Register).  */
3702 case 7:
3703 rpt = 1;
3704 selem = 1;
3705 break;
3706 /* LD/ST2 (2 Registers).  */
3707 case 8:
3708 rpt = 1;
3709 selem = 2;
3710 break;
3711 /* LD/ST1 (2 Registers).  */
3712 case 10:
3713 rpt = 2;
3714 selem = 1;
3715 break;
3716 default:
3717 return AARCH64_RECORD_UNSUPPORTED;
3718 break;
3719 }
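/* Worked example: opcode_bits = 0 (LD/ST4) with bit 30 set and
   size_bits = 2 gives esize = 32 and elements = 128 / 32 = 4, so the
   loops below run rpt * elements * selem = 1 * 4 * 4 = 16 times,
   recording four distinct vector registers for a load or 64 bytes of
   memory for a store. */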
3720 for (rindex = 0; rindex < rpt; rindex++)
3721 for (eindex = 0; eindex < elements; eindex++)
3722 {
3723 uint8_t reg_tt, sindex;
3724 reg_tt = (reg_rt + rindex) % 32;
3725 for (sindex = 0; sindex < selem; sindex++)
3726 {
3727 if (bit (aarch64_insn_r->aarch64_insn, 22))
3728 record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
3729 else
3730 {
3731 record_buf_mem[mem_index++] = esize / 8;
3732 record_buf_mem[mem_index++] = address + addr_offset;
3733 }
3734 addr_offset = addr_offset + (esize / 8);
3735 reg_tt = (reg_tt + 1) % 32;
3736 }
3737 }
3738 }
3739
3740 if (bit (aarch64_insn_r->aarch64_insn, 23))
3741 record_buf[reg_index++] = reg_rn;
3742
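/* record_buf_mem was filled with (length, address) pairs above, hence
   the division by two when computing the memory record count. */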
3743 aarch64_insn_r->reg_rec_count = reg_index;
3744 aarch64_insn_r->mem_rec_count = mem_index / 2;
3745 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3746 record_buf_mem);
3747 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3748 record_buf);
3749 return AARCH64_RECORD_SUCCESS;
3750 }
3751
3752 /* Record handler for load and store instructions. */
3753
3754 static unsigned int
3755 aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
3756 {
3757 uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
3758 uint8_t insn_bit23, insn_bit21;
3759 uint8_t opc, size_bits, ld_flag, vector_flag;
3760 uint32_t reg_rn, reg_rt, reg_rt2;
3761 uint64_t datasize, offset;
3762 uint32_t record_buf[8];
3763 uint64_t record_buf_mem[8];
3764 CORE_ADDR address;
3765
3766 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3767 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3768 insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
3769 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3770 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3771 ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
3772 vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
3773 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3774 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3775 reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
3776 size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);
3777
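/* Throughout this function bit 22 (ld_flag) separates loads from
   stores: loads record the destination registers, stores record one
   (length, address) pair per access in record_buf_mem, and the
   writeback forms additionally record the base register Rn. */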
3778 /* Load/store exclusive. */
3779 if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
3780 {
3781 if (record_debug)
3782 debug_printf ("Process record: load/store exclusive\n");
3783
3784 if (ld_flag)
3785 {
3786 record_buf[0] = reg_rt;
3787 aarch64_insn_r->reg_rec_count = 1;
3788 if (insn_bit21)
3789 {
3790 record_buf[1] = reg_rt2;
3791 aarch64_insn_r->reg_rec_count = 2;
3792 }
3793 }
3794 else
3795 {
3796 if (insn_bit21)
3797 datasize = (8 << size_bits) * 2;
3798 else
3799 datasize = (8 << size_bits);
3800 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3801 &address);
3802 record_buf_mem[0] = datasize / 8;
3803 record_buf_mem[1] = address;
3804 aarch64_insn_r->mem_rec_count = 1;
3805 if (!insn_bit23)
3806 {
3807 /* Save register rs. */
3808 record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
3809 aarch64_insn_r->reg_rec_count = 1;
3810 }
3811 }
3812 }
3813 /* Load register (literal) instructions. */
3814 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
3815 {
3816 if (record_debug)
3817 debug_printf ("Process record: load register (literal)\n");
3818 if (vector_flag)
3819 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3820 else
3821 record_buf[0] = reg_rt;
3822 aarch64_insn_r->reg_rec_count = 1;
3823 }
3824 /* All types of load/store pair instructions. */
3825 else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
3826 {
3827 if (record_debug)
3828 debug_printf ("Process record: load/store pair\n");
3829
3830 if (ld_flag)
3831 {
3832 if (vector_flag)
3833 {
3834 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3835 record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
3836 }
3837 else
3838 {
3839 record_buf[0] = reg_rt;
3840 record_buf[1] = reg_rt2;
3841 }
3842 aarch64_insn_r->reg_rec_count = 2;
3843 }
3844 else
3845 {
3846 uint16_t imm7_off;
3847 imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
3848 if (!vector_flag)
3849 size_bits = size_bits >> 1;
3850 datasize = 8 << (2 + size_bits);
3851 offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
3852 offset = offset << (2 + size_bits);
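/* e.g. imm7_off = 0x41 (-63): (~0x41 & 0x7f) + 1 yields a magnitude
   of 63, scaled above and subtracted below because bit 6 of the
   immediate was set. */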
3853 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3854 &address);
3855 if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
3856 {
3857 if (imm7_off & 0x40)
3858 address = address - offset;
3859 else
3860 address = address + offset;
3861 }
3862
3863 record_buf_mem[0] = datasize / 8;
3864 record_buf_mem[1] = address;
3865 record_buf_mem[2] = datasize / 8;
3866 record_buf_mem[3] = address + (datasize / 8);
3867 aarch64_insn_r->mem_rec_count = 2;
3868 }
3869 if (bit (aarch64_insn_r->aarch64_insn, 23))
3870 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3871 }
3872 /* Load/store register (unsigned immediate) instructions. */
3873 else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
3874 {
3875 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3876 if (!(opc >> 1))
3877 {
3878 if (opc & 0x01)
3879 ld_flag = 0x01;
3880 else
3881 ld_flag = 0x0;
3882 }
3883 else
3884 {
3885 if (size_bits == 0x3 && vector_flag == 0x0 && opc == 0x2)
3886 {
3887 /* PRFM (immediate) */
3888 return AARCH64_RECORD_SUCCESS;
3889 }
3890 else if (size_bits == 0x2 && vector_flag == 0x0 && opc == 0x2)
3891 {
3892 /* LDRSW (immediate) */
3893 ld_flag = 0x1;
3894 }
3895 else
3896 {
3897 if (opc & 0x01)
3898 ld_flag = 0x01;
3899 else
3900 ld_flag = 0x0;
3901 }
3902 }
3903
3904 if (record_debug)
3905 {
3906 debug_printf ("Process record: load/store (unsigned immediate):"
3907 " size %x V %d opc %x\n", size_bits, vector_flag,
3908 opc);
3909 }
3910
3911 if (!ld_flag)
3912 {
3913 offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
3914 datasize = 8 << size_bits;
3915 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3916 &address);
3917 offset = offset << size_bits;
3918 address = address + offset;
3919
3920 record_buf_mem[0] = datasize >> 3;
3921 record_buf_mem[1] = address;
3922 aarch64_insn_r->mem_rec_count = 1;
3923 }
3924 else
3925 {
3926 if (vector_flag)
3927 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3928 else
3929 record_buf[0] = reg_rt;
3930 aarch64_insn_r->reg_rec_count = 1;
3931 }
3932 }
3933 /* Load/store register (register offset) instructions. */
3934 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3935 && insn_bits10_11 == 0x02 && insn_bit21)
3936 {
3937 if (record_debug)
3938 debug_printf ("Process record: load/store (register offset)\n");
3939 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3940 if (!(opc >> 1))
3941 if (opc & 0x01)
3942 ld_flag = 0x01;
3943 else
3944 ld_flag = 0x0;
3945 else
3946 if (size_bits != 0x03)
3947 ld_flag = 0x01;
3948 else
3949 return AARCH64_RECORD_UNKNOWN;
3950
3951 if (!ld_flag)
3952 {
3953 ULONGEST reg_rm_val;
3954
3955 regcache_raw_read_unsigned (aarch64_insn_r->regcache,
3956 bits (aarch64_insn_r->aarch64_insn, 16, 20), &reg_rm_val);
3957 if (bit (aarch64_insn_r->aarch64_insn, 12))
3958 offset = reg_rm_val << size_bits;
3959 else
3960 offset = reg_rm_val;
3961 datasize = 8 << size_bits;
3962 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3963 &address);
3964 address = address + offset;
3965 record_buf_mem[0] = datasize >> 3;
3966 record_buf_mem[1] = address;
3967 aarch64_insn_r->mem_rec_count = 1;
3968 }
3969 else
3970 {
3971 if (vector_flag)
3972 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3973 else
3974 record_buf[0] = reg_rt;
3975 aarch64_insn_r->reg_rec_count = 1;
3976 }
3977 }
3978 /* Load/store register (immediate and unprivileged) instructions. */
3979 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3980 && !insn_bit21)
3981 {
3982 if (record_debug)
3983 {
3984 debug_printf ("Process record: load/store "
3985 "(immediate and unprivileged)\n");
3986 }
3987 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3988 if (!(opc >> 1))
3989 if (opc & 0x01)
3990 ld_flag = 0x01;
3991 else
3992 ld_flag = 0x0;
3993 else
3994 if (size_bits != 0x03)
3995 ld_flag = 0x01;
3996 else
3997 return AARCH64_RECORD_UNKNOWN;
3998
3999 if (!ld_flag)
4000 {
4001 uint16_t imm9_off;
4002 imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
4003 offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
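/* e.g. imm9_off = 0x1ff (-1): (~0x1ff & 0x1ff) + 1 = 1, applied as a
   subtraction below because bit 8 of the immediate was set. */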
4004 datasize = 8 << size_bits;
4005 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4006 &address);
4007 if (insn_bits10_11 != 0x01)
4008 {
4009 if (imm9_off & 0x0100)
4010 address = address - offset;
4011 else
4012 address = address + offset;
4013 }
4014 record_buf_mem[0] = datasize >> 3;
4015 record_buf_mem[1] = address;
4016 aarch64_insn_r->mem_rec_count = 1;
4017 }
4018 else
4019 {
4020 if (vector_flag)
4021 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4022 else
4023 record_buf[0] = reg_rt;
4024 aarch64_insn_r->reg_rec_count = 1;
4025 }
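/* Values 0x01 and 0x03 in bits 10-11 appear to be the post- and
   pre-indexed writeback forms, so the base register is recorded as
   well. */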
4026 if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
4027 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
4028 }
4029 /* Advanced SIMD load/store instructions. */
4030 else
4031 return aarch64_record_asimd_load_store (aarch64_insn_r);
4032
4033 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
4034 record_buf_mem);
4035 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4036 record_buf);
4037 return AARCH64_RECORD_SUCCESS;
4038 }
4039
4040 /* Record handler for data processing SIMD and floating point instructions. */
4041
4042 static unsigned int
4043 aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
4044 {
4045 uint8_t insn_bit21, opcode, rmode, reg_rd;
4046 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
4047 uint8_t insn_bits11_14;
4048 uint32_t record_buf[2];
4049
4050 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
4051 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
4052 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
4053 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
4054 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
4055 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
4056 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
4057 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
4058 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
4059
4060 if (record_debug)
4061 debug_printf ("Process record: data processing SIMD/FP: ");
4062
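/* Bits 28-31 matching x0x1 with bits 24-27 equal to 0xe appear to
   select the scalar floating-point subgroup handled first; SIMD copy
   and the remaining vector forms are handled further below. */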
4063 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
4064 {
4065 /* Floating point - fixed point conversion instructions. */
4066 if (!insn_bit21)
4067 {
4068 if (record_debug)
4069 debug_printf ("FP - fixed point conversion");
4070
4071 if ((opcode >> 1) == 0x0 && rmode == 0x03)
4072 record_buf[0] = reg_rd;
4073 else
4074 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4075 }
4076 /* Floating point - conditional compare instructions. */
4077 else if (insn_bits10_11 == 0x01)
4078 {
4079 if (record_debug)
4080 debug_printf ("FP - conditional compare");
4081
4082 record_buf[0] = AARCH64_CPSR_REGNUM;
4083 }
4084 /* Floating point - data processing (2-source) and
4085 conditional select instructions. */
4086 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
4087 {
4088 if (record_debug)
4089 debug_printf ("FP - DP (2-source)");
4090
4091 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4092 }
4093 else if (insn_bits10_11 == 0x00)
4094 {
4095 /* Floating point - immediate instructions. */
4096 if ((insn_bits12_15 & 0x01) == 0x01
4097 || (insn_bits12_15 & 0x07) == 0x04)
4098 {
4099 if (record_debug)
4100 debug_printf ("FP - immediate");
4101 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4102 }
4103 /* Floating point - compare instructions. */
4104 else if ((insn_bits12_15 & 0x03) == 0x02)
4105 {
4106 if (record_debug)
4107 debug_printf ("FP - compare");
4108 record_buf[0] = AARCH64_CPSR_REGNUM;
4109 }
4110 /* Floating point - integer conversions instructions. */
4111 else if (insn_bits12_15 == 0x00)
4112 {
4113 /* Convert float to integer instruction. */
4114 if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
4115 {
4116 if (record_debug)
4117 debug_printf ("float to int conversion");
4118
4119 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4120 }
4121 /* Convert integer to float instruction. */
4122 else if ((opcode >> 1) == 0x01 && !rmode)
4123 {
4124 if (record_debug)
4125 debug_printf ("int to float conversion");
4126
4127 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4128 }
4129 /* Move float to integer instruction. */
4130 else if ((opcode >> 1) == 0x03)
4131 {
4132 if (record_debug)
4133 debug_printf ("move float to int");
4134
4135 if (!(opcode & 0x01))
4136 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4137 else
4138 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4139 }
4140 else
4141 return AARCH64_RECORD_UNKNOWN;
4142 }
4143 else
4144 return AARCH64_RECORD_UNKNOWN;
4145 }
4146 else
4147 return AARCH64_RECORD_UNKNOWN;
4148 }
4149 else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
4150 {
4151 if (record_debug)
4152 debug_printf ("SIMD copy");
4153
4154 /* Advanced SIMD copy instructions. */
4155 if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
4156 && !bit (aarch64_insn_r->aarch64_insn, 15)
4157 && bit (aarch64_insn_r->aarch64_insn, 10))
4158 {
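/* imm4 values of 0x5 and 0x7 look like the SMOV/UMOV element moves,
   which write a general-purpose register; every other copy form
   writes a vector register. */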
4159 if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
4160 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4161 else
4162 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4163 }
4164 else
4165 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4166 }
4167 /* All remaining floating point or advanced SIMD instructions. */
4168 else
4169 {
4170 if (record_debug)
4171 debug_printf ("all remaining FP/SIMD");
4172
4173 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4174 }
4175
4176 if (record_debug)
4177 debug_printf ("\n");
4178
4179 aarch64_insn_r->reg_rec_count++;
4180 gdb_assert (aarch64_insn_r->reg_rec_count == 1);
4181 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4182 record_buf);
4183 return AARCH64_RECORD_SUCCESS;
4184 }
4185
4186 /* Decode the instruction type and invoke the matching record handler. */
4187
4188 static unsigned int
4189 aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
4190 {
4191 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
4192
4193 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
4194 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
4195 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
4196 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
4197
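/* Major opcode groups selected by the tests below
   (bit 28, bit 27, bit 26, bit 25; x = don't care):

     1 0 0 x data processing - immediate
     1 0 1 x branch, exception generation and system
     x 1 x 0 load and store
     x 1 0 1 data processing - register
     x 1 1 1 data processing - SIMD and floating point */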
4198 /* Data processing - immediate instructions. */
4199 if (!ins_bit26 && !ins_bit27 && ins_bit28)
4200 return aarch64_record_data_proc_imm (aarch64_insn_r);
4201
4202 /* Branch, exception generation and system instructions. */
4203 if (ins_bit26 && !ins_bit27 && ins_bit28)
4204 return aarch64_record_branch_except_sys (aarch64_insn_r);
4205
4206 /* Load and store instructions. */
4207 if (!ins_bit25 && ins_bit27)
4208 return aarch64_record_load_store (aarch64_insn_r);
4209
4210 /* Data processing - register instructions. */
4211 if (ins_bit25 && !ins_bit26 && ins_bit27)
4212 return aarch64_record_data_proc_reg (aarch64_insn_r);
4213
4214 /* Data processing - SIMD and floating point instructions. */
4215 if (ins_bit25 && ins_bit26 && ins_bit27)
4216 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
4217
4218 return AARCH64_RECORD_UNSUPPORTED;
4219 }
4220
4221 /* Free the register and memory record buffers allocated for RECORD. */
4222
4223 static void
4224 deallocate_reg_mem (insn_decode_record *record)
4225 {
4226 xfree (record->aarch64_regs);
4227 xfree (record->aarch64_mems);
4228 }
4229
4230 #if GDB_SELF_TEST
4231 namespace selftests {
4232
4233 static void
4234 aarch64_process_record_test (void)
4235 {
4236 struct gdbarch_info info;
4237 uint32_t ret;
4238
4239 gdbarch_info_init (&info);
4240 info.bfd_arch_info = bfd_scan_arch ("aarch64");
4241
4242 struct gdbarch *gdbarch = gdbarch_find_by_info (info);
4243 SELF_CHECK (gdbarch != NULL);
4244
4245 insn_decode_record aarch64_record;
4246
4247 memset (&aarch64_record, 0, sizeof (insn_decode_record));
4248 aarch64_record.regcache = NULL;
4249 aarch64_record.this_addr = 0;
4250 aarch64_record.gdbarch = gdbarch;
4251
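/* PRFM is recorded as a no-op: the handler reports success without
   noting any register or memory side effects, which is what the
   checks below verify. */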
4252 /* 20 00 80 f9 prfm pldl1keep, [x1] */
4253 aarch64_record.aarch64_insn = 0xf9800020;
4254 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4255 SELF_CHECK (ret == AARCH64_RECORD_SUCCESS);
4256 SELF_CHECK (aarch64_record.reg_rec_count == 0);
4257 SELF_CHECK (aarch64_record.mem_rec_count == 0);
4258
4259 deallocate_reg_mem (&aarch64_record);
4260 }
4261
4262 } // namespace selftests
4263 #endif /* GDB_SELF_TEST */
4264
4265 /* Parse the current instruction and record the values of the registers and
4266 memory that will be changed by it to record_arch_list. Return -1 if
4267 something is wrong. */
4268
4269 int
4270 aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
4271 CORE_ADDR insn_addr)
4272 {
4273 uint32_t rec_no = 0;
4274 uint8_t insn_size = 4;
4275 uint32_t ret = 0;
4276 gdb_byte buf[insn_size];
4277 insn_decode_record aarch64_record;
4278
4279 memset (&buf[0], 0, insn_size);
4280 memset (&aarch64_record, 0, sizeof (insn_decode_record));
4281 target_read_memory (insn_addr, &buf[0], insn_size);
4282 aarch64_record.aarch64_insn
4283 = (uint32_t) extract_unsigned_integer (&buf[0],
4284 insn_size,
4285 gdbarch_byte_order (gdbarch));
4286 aarch64_record.regcache = regcache;
4287 aarch64_record.this_addr = insn_addr;
4288 aarch64_record.gdbarch = gdbarch;
4289
4290 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4291 if (ret == AARCH64_RECORD_UNSUPPORTED)
4292 {
4293 printf_unfiltered (_("Process record does not support instruction "
4294 "0x%x at address %s.\n"),
4295 aarch64_record.aarch64_insn,
4296 paddress (gdbarch, insn_addr));
4297 ret = -1;
4298 }
4299
4300 if (0 == ret)
4301 {
4302 /* Record registers. */
4303 record_full_arch_list_add_reg (aarch64_record.regcache,
4304 AARCH64_PC_REGNUM);
4305 /* Always record register CPSR. */
4306 record_full_arch_list_add_reg (aarch64_record.regcache,
4307 AARCH64_CPSR_REGNUM);
4308 if (aarch64_record.aarch64_regs)
4309 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
4310 if (record_full_arch_list_add_reg (aarch64_record.regcache,
4311 aarch64_record.aarch64_regs[rec_no]))
4312 ret = -1;
4313
4314 /* Record memories. */
4315 if (aarch64_record.aarch64_mems)
4316 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
4317 if (record_full_arch_list_add_mem
4318 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
4319 aarch64_record.aarch64_mems[rec_no].len))
4320 ret = -1;
4321
4322 if (record_full_arch_list_add_end ())
4323 ret = -1;
4324 }
4325
4326 deallocate_reg_mem (&aarch64_record);
4327 return ret;
4328 }