Change some void* to gdb_byte*
gdb/aarch64-tdep.c
/* Common target dependent code for GDB on AArch64 systems.

   Copyright (C) 2009-2015 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"

#include "frame.h"
#include "inferior.h"
#include "gdbcmd.h"
#include "gdbcore.h"
#include "dis-asm.h"
#include "regcache.h"
#include "reggroups.h"
#include "doublest.h"
#include "value.h"
#include "arch-utils.h"
#include "osabi.h"
#include "frame-unwind.h"
#include "frame-base.h"
#include "trad-frame.h"
#include "objfiles.h"
#include "dwarf2-frame.h"
#include "gdbtypes.h"
#include "prologue-value.h"
#include "target-descriptions.h"
#include "user-regs.h"
#include "language.h"
#include "infcall.h"
#include "ax.h"
#include "ax-gdb.h"

#include "aarch64-tdep.h"

#include "elf-bfd.h"
#include "elf/aarch64.h"

#include "vec.h"

#include "record.h"
#include "record-full.h"

#include "features/aarch64.c"

#include "arch/aarch64-insn.h"

#include "opcode/aarch64.h"

#define submask(x) ((1L << ((x) + 1)) - 1)
#define bit(obj,st) (((obj) >> (st)) & 1)
#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
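
/* Editorial worked example: bits (obj, st, fn) extracts bits ST..FN
   inclusive.  For instance, bits (0xd65f03c0, 5, 9) extracts the
   five-bit Rn field common to many AArch64 instructions and yields 30,
   the register operand of "ret x30".  */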

/* Pseudo register base numbers.  */
#define AARCH64_Q0_REGNUM 0
#define AARCH64_D0_REGNUM (AARCH64_Q0_REGNUM + 32)
#define AARCH64_S0_REGNUM (AARCH64_D0_REGNUM + 32)
#define AARCH64_H0_REGNUM (AARCH64_S0_REGNUM + 32)
#define AARCH64_B0_REGNUM (AARCH64_H0_REGNUM + 32)

/* The standard register names, and all the valid aliases for them.  */
static const struct
{
  const char *const name;
  int regnum;
} aarch64_register_aliases[] =
{
  /* 64-bit register names.  */
  {"fp", AARCH64_FP_REGNUM},
  {"lr", AARCH64_LR_REGNUM},
  {"sp", AARCH64_SP_REGNUM},

  /* 32-bit register names.  */
  {"w0", AARCH64_X0_REGNUM + 0},
  {"w1", AARCH64_X0_REGNUM + 1},
  {"w2", AARCH64_X0_REGNUM + 2},
  {"w3", AARCH64_X0_REGNUM + 3},
  {"w4", AARCH64_X0_REGNUM + 4},
  {"w5", AARCH64_X0_REGNUM + 5},
  {"w6", AARCH64_X0_REGNUM + 6},
  {"w7", AARCH64_X0_REGNUM + 7},
  {"w8", AARCH64_X0_REGNUM + 8},
  {"w9", AARCH64_X0_REGNUM + 9},
  {"w10", AARCH64_X0_REGNUM + 10},
  {"w11", AARCH64_X0_REGNUM + 11},
  {"w12", AARCH64_X0_REGNUM + 12},
  {"w13", AARCH64_X0_REGNUM + 13},
  {"w14", AARCH64_X0_REGNUM + 14},
  {"w15", AARCH64_X0_REGNUM + 15},
  {"w16", AARCH64_X0_REGNUM + 16},
  {"w17", AARCH64_X0_REGNUM + 17},
  {"w18", AARCH64_X0_REGNUM + 18},
  {"w19", AARCH64_X0_REGNUM + 19},
  {"w20", AARCH64_X0_REGNUM + 20},
  {"w21", AARCH64_X0_REGNUM + 21},
  {"w22", AARCH64_X0_REGNUM + 22},
  {"w23", AARCH64_X0_REGNUM + 23},
  {"w24", AARCH64_X0_REGNUM + 24},
  {"w25", AARCH64_X0_REGNUM + 25},
  {"w26", AARCH64_X0_REGNUM + 26},
  {"w27", AARCH64_X0_REGNUM + 27},
  {"w28", AARCH64_X0_REGNUM + 28},
  {"w29", AARCH64_X0_REGNUM + 29},
  {"w30", AARCH64_X0_REGNUM + 30},

  /* specials */
  {"ip0", AARCH64_X0_REGNUM + 16},
  {"ip1", AARCH64_X0_REGNUM + 17}
};

/* The required core 'R' registers.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM!  */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  "pc", "cpsr"
};

/* The FP/SIMD 'V' registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM!  */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  "fpsr",
  "fpcr"
};

/* AArch64 prologue cache structure.  */
struct aarch64_prologue_cache
{
  /* The program counter at the start of the function.  It is used to
     identify this frame as a prologue frame.  */
  CORE_ADDR func;

  /* The program counter at the time this frame was created; i.e. where
     this function was called from.  It is used to identify this frame as a
     stub frame.  */
  CORE_ADDR prev_pc;

  /* The stack pointer at the time this frame was created; i.e. the
     caller's stack pointer when this function was called.  It is used
     to identify this frame.  */
  CORE_ADDR prev_sp;

  /* Is the target available to read from?  */
  int available_p;

  /* The frame base for this frame is just prev_sp - frame size.
     FRAMESIZE is the distance from the frame pointer to the
     initial stack pointer.  */
  int framesize;

  /* The register used to hold the frame pointer for this frame.  */
  int framereg;

  /* Saved register offsets.  */
  struct trad_frame_saved_reg *saved_regs;
};

static void
show_aarch64_debug (struct ui_file *file, int from_tty,
                    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
}

/* Extract a signed value from a bit field within an instruction
   encoding.

   INSN is the instruction opcode.

   WIDTH specifies the width of the bit field to extract (in bits).

   OFFSET specifies the least significant bit of the field where bits
   are numbered zero counting from least to most significant.  */

static int32_t
extract_signed_bitfield (uint32_t insn, unsigned width, unsigned offset)
{
  unsigned shift_l = sizeof (int32_t) * 8 - (offset + width);
  unsigned shift_r = sizeof (int32_t) * 8 - width;

  return ((int32_t) insn << shift_l) >> shift_r;
}
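
/* Editorial worked example: the signed 7-bit imm7 field of an STP
   instruction lives at offset 15, so
   extract_signed_bitfield (0xa9be7bfd, 7, 15) shifts the field up to
   the sign bit and arithmetically back down, yielding -4.  */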

/* Determine if specified bits within an instruction opcode match a
   specific pattern.

   INSN is the instruction opcode.

   MASK specifies the bits within the opcode that are to be tested
   against PATTERN for a match.  */

static int
decode_masked_match (uint32_t insn, uint32_t mask, uint32_t pattern)
{
  return (insn & mask) == pattern;
}

/* Decode an opcode if it represents an immediate ADD or SUB instruction.

   ADDR specifies the address of the opcode.
   INSN specifies the opcode to test.
   RD receives the 'rd' field from the decoded instruction.
   RN receives the 'rn' field from the decoded instruction.
   IMM receives the decoded (shifted and sign-adjusted) immediate.

   Return 1 if the opcode matches and is decoded, otherwise 0.  */

static int
aarch64_decode_add_sub_imm (CORE_ADDR addr, uint32_t insn, unsigned *rd,
                            unsigned *rn, int32_t *imm)
{
  if ((insn & 0x9f000000) == 0x91000000)
    {
      unsigned shift;
      unsigned op_is_sub;

      *rd = (insn >> 0) & 0x1f;
      *rn = (insn >> 5) & 0x1f;
      *imm = (insn >> 10) & 0xfff;
      shift = (insn >> 22) & 0x3;
      op_is_sub = (insn >> 30) & 0x1;

      switch (shift)
        {
        case 0:
          break;
        case 1:
          *imm <<= 12;
          break;
        default:
          /* UNDEFINED */
          return 0;
        }

      if (op_is_sub)
        *imm = -*imm;

      if (aarch64_debug)
        {
          debug_printf ("decode: 0x%s 0x%x add x%u, x%u, #%d\n",
                        core_addr_to_string_nz (addr), insn, *rd, *rn,
                        *imm);
        }
      return 1;
    }
  return 0;
}
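
/* Editorial note: "mov xd, sp" is an alias of "add xd, sp, #0", so the
   test above also recognizes the "mov x29, sp" that typically
   establishes the frame pointer; 0x910003fd, for instance, decodes as
   add x29, sp, #0.  */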

/* Decode an opcode if it represents a branch via register instruction.

   ADDR specifies the address of the opcode.
   INSN specifies the opcode to test.
   IS_BLR receives the 'op' bit from the decoded instruction.
   RN receives the 'rn' field from the decoded instruction.

   Return 1 if the opcode matches and is decoded, otherwise 0.  */

static int
aarch64_decode_br (CORE_ADDR addr, uint32_t insn, int *is_blr,
                   unsigned *rn)
{
  /*         8   4   0   6   2   8   4   0 */
  /* blr  110101100011111100000000000rrrrr */
  /* br   110101100001111100000000000rrrrr */
  if (decode_masked_match (insn, 0xffdffc1f, 0xd61f0000))
    {
      *is_blr = (insn >> 21) & 1;
      *rn = (insn >> 5) & 0x1f;

      if (aarch64_debug)
        {
          debug_printf ("decode: 0x%s 0x%x %s 0x%x\n",
                        core_addr_to_string_nz (addr), insn,
                        *is_blr ? "blr" : "br", *rn);
        }

      return 1;
    }
  return 0;
}
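
/* Editorial worked example: 0xd61f0200 decodes as "br x16" and
   0xd63f0020 as "blr x1"; the two encodings differ only in bit 21,
   which the mask above leaves visible.  */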

/* Decode an opcode if it represents an ERET instruction.

   ADDR specifies the address of the opcode.
   INSN specifies the opcode to test.

   Return 1 if the opcode matches and is decoded, otherwise 0.  */

static int
aarch64_decode_eret (CORE_ADDR addr, uint32_t insn)
{
  /* eret 1101 0110 1001 1111 0000 0011 1110 0000 */
  if (insn == 0xd69f03e0)
    {
      if (aarch64_debug)
        {
          debug_printf ("decode: 0x%s 0x%x eret\n",
                        core_addr_to_string_nz (addr), insn);
        }
      return 1;
    }
  return 0;
}

/* Decode an opcode if it represents a MOVZ instruction.

   ADDR specifies the address of the opcode.
   INSN specifies the opcode to test.
   RD receives the 'rd' field from the decoded instruction.

   Return 1 if the opcode matches and is decoded, otherwise 0.  */

static int
aarch64_decode_movz (CORE_ADDR addr, uint32_t insn, unsigned *rd)
{
  if (decode_masked_match (insn, 0xff800000, 0x52800000))
    {
      *rd = (insn >> 0) & 0x1f;

      if (aarch64_debug)
        {
          debug_printf ("decode: 0x%s 0x%x movz x%u, #?\n",
                        core_addr_to_string_nz (addr), insn, *rd);
        }
      return 1;
    }
  return 0;
}

/* Decode an opcode if it represents an ORR (shifted register)
   instruction.

   ADDR specifies the address of the opcode.
   INSN specifies the opcode to test.
   RD receives the 'rd' field from the decoded instruction.
   RN receives the 'rn' field from the decoded instruction.
   RM receives the 'rm' field from the decoded instruction.
   IMM receives the 'imm6' field from the decoded instruction.

   Return 1 if the opcode matches and is decoded, otherwise 0.  */

static int
aarch64_decode_orr_shifted_register_x (CORE_ADDR addr, uint32_t insn,
                                       unsigned *rd, unsigned *rn,
                                       unsigned *rm, int32_t *imm)
{
  if (decode_masked_match (insn, 0xff200000, 0xaa000000))
    {
      *rd = (insn >> 0) & 0x1f;
      *rn = (insn >> 5) & 0x1f;
      *rm = (insn >> 16) & 0x1f;
      *imm = (insn >> 10) & 0x3f;

      if (aarch64_debug)
        {
          debug_printf ("decode: 0x%s 0x%x orr x%u, x%u, x%u, #%u\n",
                        core_addr_to_string_nz (addr), insn, *rd, *rn,
                        *rm, *imm);
        }
      return 1;
    }
  return 0;
}

/* Decode an opcode if it represents a RET instruction.

   ADDR specifies the address of the opcode.
   INSN specifies the opcode to test.
   RN receives the 'rn' field from the decoded instruction.

   Return 1 if the opcode matches and is decoded, otherwise 0.  */

static int
aarch64_decode_ret (CORE_ADDR addr, uint32_t insn, unsigned *rn)
{
  if (decode_masked_match (insn, 0xfffffc1f, 0xd65f0000))
    {
      *rn = (insn >> 5) & 0x1f;
      if (aarch64_debug)
        {
          debug_printf ("decode: 0x%s 0x%x ret x%u\n",
                        core_addr_to_string_nz (addr), insn, *rn);
        }
      return 1;
    }
  return 0;
}

/* Decode an opcode if it represents the following instruction:
   STP rt, rt2, [rn, #imm]

   ADDR specifies the address of the opcode.
   INSN specifies the opcode to test.
   RT1 receives the 'rt' field from the decoded instruction.
   RT2 receives the 'rt2' field from the decoded instruction.
   RN receives the 'rn' field from the decoded instruction.
   IMM receives the 'imm' field from the decoded instruction.

   Return 1 if the opcode matches and is decoded, otherwise 0.  */

static int
aarch64_decode_stp_offset (CORE_ADDR addr, uint32_t insn, unsigned *rt1,
                           unsigned *rt2, unsigned *rn, int32_t *imm)
{
  if (decode_masked_match (insn, 0xffc00000, 0xa9000000))
    {
      *rt1 = (insn >> 0) & 0x1f;
      *rn = (insn >> 5) & 0x1f;
      *rt2 = (insn >> 10) & 0x1f;
      *imm = extract_signed_bitfield (insn, 7, 15);
      *imm <<= 3;

      if (aarch64_debug)
        {
          debug_printf ("decode: 0x%s 0x%x stp x%u, x%u, [x%u + #%d]\n",
                        core_addr_to_string_nz (addr), insn, *rt1, *rt2,
                        *rn, *imm);
        }
      return 1;
    }
  return 0;
}
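
/* Editorial note: the imm7 field is scaled by the access size, hence
   the shift by 3 above.  In 0xa9027bfd (stp x29, x30, [sp, #32]), for
   example, imm7 holds 4, which scales to a byte offset of 32.  */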

/* Decode an opcode if it represents the following instruction:
   STP rt, rt2, [rn, #imm]!

   ADDR specifies the address of the opcode.
   INSN specifies the opcode to test.
   RT1 receives the 'rt' field from the decoded instruction.
   RT2 receives the 'rt2' field from the decoded instruction.
   RN receives the 'rn' field from the decoded instruction.
   IMM receives the 'imm' field from the decoded instruction.

   Return 1 if the opcode matches and is decoded, otherwise 0.  */

static int
aarch64_decode_stp_offset_wb (CORE_ADDR addr, uint32_t insn, unsigned *rt1,
                              unsigned *rt2, unsigned *rn, int32_t *imm)
{
  if (decode_masked_match (insn, 0xffc00000, 0xa9800000))
    {
      *rt1 = (insn >> 0) & 0x1f;
      *rn = (insn >> 5) & 0x1f;
      *rt2 = (insn >> 10) & 0x1f;
      *imm = extract_signed_bitfield (insn, 7, 15);
      *imm <<= 3;

      if (aarch64_debug)
        {
          debug_printf ("decode: 0x%s 0x%x stp x%u, x%u, [x%u + #%d]!\n",
                        core_addr_to_string_nz (addr), insn, *rt1, *rt2,
                        *rn, *imm);
        }
      return 1;
    }
  return 0;
}

/* Decode an opcode if it represents the following instruction:
   STUR rt, [rn, #imm]

   ADDR specifies the address of the opcode.
   INSN specifies the opcode to test.
   IS64 receives the size field from the decoded instruction.
   RT receives the 'rt' field from the decoded instruction.
   RN receives the 'rn' field from the decoded instruction.
   IMM receives the 'imm' field from the decoded instruction.

   Return 1 if the opcode matches and is decoded, otherwise 0.  */

static int
aarch64_decode_stur (CORE_ADDR addr, uint32_t insn, int *is64,
                     unsigned *rt, unsigned *rn, int32_t *imm)
{
  if (decode_masked_match (insn, 0xbfe00c00, 0xb8000000))
    {
      *is64 = (insn >> 30) & 1;
      *rt = (insn >> 0) & 0x1f;
      *rn = (insn >> 5) & 0x1f;
      *imm = extract_signed_bitfield (insn, 9, 12);

      if (aarch64_debug)
        {
          debug_printf ("decode: 0x%s 0x%x stur %c%u, [x%u + #%d]\n",
                        core_addr_to_string_nz (addr), insn,
                        *is64 ? 'x' : 'w', *rt, *rn, *imm);
        }
      return 1;
    }
  return 0;
}

/* Analyze a prologue, looking for a recognizable stack frame
   and frame pointer.  Scan until we encounter a store that could
   clobber the stack frame unexpectedly, or an unknown instruction.  */
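
/* Editorial example (a typical compiler-generated sequence that this
   scanner recognizes, assuming a 32-byte frame):

     0xa9be7bfd    stp x29, x30, [sp, #-32]!
     0x910003fd    mov x29, sp

   which saves the caller's FP/LR pair below the old stack pointer and
   then establishes x29 as the frame pointer.  */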

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
                          CORE_ADDR start, CORE_ADDR limit,
                          struct aarch64_prologue_cache *cache)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  int i;
  pv_t regs[AARCH64_X_REGISTER_COUNT];
  struct pv_area *stack;
  struct cleanup *back_to;

  for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
    regs[i] = pv_register (i, 0);
  stack = make_pv_area (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));
  back_to = make_cleanup_free_pv_area (stack);

  for (; start < limit; start += 4)
    {
      uint32_t insn;
      unsigned rd;
      unsigned rn;
      unsigned rm;
      unsigned rt;
      unsigned rt1;
      unsigned rt2;
      int op_is_sub;
      int32_t imm;
      unsigned cond;
      int is64;
      int is_link;
      int is_cbnz;
      int is_tbnz;
      unsigned bit;
      int is_adrp;
      int32_t offset;

      insn = read_memory_unsigned_integer (start, 4, byte_order_for_code);

      if (aarch64_decode_add_sub_imm (start, insn, &rd, &rn, &imm))
        regs[rd] = pv_add_constant (regs[rn], imm);
      else if (aarch64_decode_adr (start, insn, &is_adrp, &rd, &offset)
               && is_adrp)
        regs[rd] = pv_unknown ();
      else if (aarch64_decode_b (start, insn, &is_link, &offset))
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (aarch64_decode_bcond (start, insn, &cond, &offset))
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (aarch64_decode_br (start, insn, &is_link, &rn))
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (aarch64_decode_cb (start, insn, &is64, &is_cbnz, &rn,
                                  &offset))
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (aarch64_decode_eret (start, insn))
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (aarch64_decode_movz (start, insn, &rd))
        regs[rd] = pv_unknown ();
      else if (aarch64_decode_orr_shifted_register_x (start, insn, &rd,
                                                      &rn, &rm, &imm))
        {
          if (imm == 0 && rn == 31)
            regs[rd] = regs[rm];
          else
            {
              if (aarch64_debug)
                {
                  debug_printf ("aarch64: prologue analysis gave up "
                                "addr=0x%s opcode=0x%x (orr x register)\n",
                                core_addr_to_string_nz (start), insn);
                }
              break;
            }
        }
      else if (aarch64_decode_ret (start, insn, &rn))
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (aarch64_decode_stur (start, insn, &is64, &rt, &rn, &offset))
        {
          pv_area_store (stack, pv_add_constant (regs[rn], offset),
                         is64 ? 8 : 4, regs[rt]);
        }
      else if (aarch64_decode_stp_offset (start, insn, &rt1, &rt2, &rn,
                                          &imm))
        {
          /* If recording this store would invalidate the store area
             (perhaps because rn is not known) then we should abandon
             further prologue analysis.  */
          if (pv_area_store_would_trash (stack,
                                         pv_add_constant (regs[rn], imm)))
            break;

          if (pv_area_store_would_trash (stack,
                                         pv_add_constant (regs[rn], imm + 8)))
            break;

          pv_area_store (stack, pv_add_constant (regs[rn], imm), 8,
                         regs[rt1]);
          pv_area_store (stack, pv_add_constant (regs[rn], imm + 8), 8,
                         regs[rt2]);
        }
      else if (aarch64_decode_stp_offset_wb (start, insn, &rt1, &rt2, &rn,
                                             &imm))
        {
          /* If recording this store would invalidate the store area
             (perhaps because rn is not known) then we should abandon
             further prologue analysis.  */
          if (pv_area_store_would_trash (stack,
                                         pv_add_constant (regs[rn], imm)))
            break;

          if (pv_area_store_would_trash (stack,
                                         pv_add_constant (regs[rn], imm + 8)))
            break;

          pv_area_store (stack, pv_add_constant (regs[rn], imm), 8,
                         regs[rt1]);
          pv_area_store (stack, pv_add_constant (regs[rn], imm + 8), 8,
                         regs[rt2]);
          regs[rn] = pv_add_constant (regs[rn], imm);
        }
      else if (aarch64_decode_tb (start, insn, &is_tbnz, &bit, &rn,
                                  &offset))
        {
          /* Stop analysis on branch.  */
          break;
        }
      else
        {
          if (aarch64_debug)
            {
              debug_printf ("aarch64: prologue analysis gave up addr=0x%s"
                            " opcode=0x%x\n",
                            core_addr_to_string_nz (start), insn);
            }
          break;
        }
    }

  if (cache == NULL)
    {
      do_cleanups (back_to);
      return start;
    }

  if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Frame pointer is fp.  Frame size is constant.  */
      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = -regs[AARCH64_FP_REGNUM].k;
    }
  else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Try the stack pointer.  */
      cache->framesize = -regs[AARCH64_SP_REGNUM].k;
      cache->framereg = AARCH64_SP_REGNUM;
    }
  else
    {
      /* We're just out of luck.  We don't know where the frame is.  */
      cache->framereg = -1;
      cache->framesize = 0;
    }

  for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
    {
      CORE_ADDR offset;

      if (pv_area_find_reg (stack, gdbarch, i, &offset))
        cache->saved_regs[i].addr = offset;
    }

  do_cleanups (back_to);
  return start;
}

/* Implement the "skip_prologue" gdbarch method.  */

static CORE_ADDR
aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  unsigned long inst;
  CORE_ADDR skip_pc;
  CORE_ADDR func_addr, limit_pc;
  struct symtab_and_line sal;

  /* See if we can determine the end of the prologue via the symbol
     table.  If so, then return either PC, or the PC after the
     prologue, whichever is greater.  */
  if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
    {
      CORE_ADDR post_prologue_pc
        = skip_prologue_using_sal (gdbarch, func_addr);

      if (post_prologue_pc != 0)
        return max (pc, post_prologue_pc);
    }

  /* Can't determine prologue from the symbol table, need to examine
     instructions.  */

  /* Find an upper limit on the function prologue using the debug
     information.  If the debug information could not be used to
     provide that bound, then use an arbitrary large number as the
     upper bound.  */
  limit_pc = skip_prologue_using_sal (gdbarch, pc);
  if (limit_pc == 0)
    limit_pc = pc + 128;  /* Magic.  */

  /* Try disassembling prologue.  */
  return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
}

/* Scan the function prologue for THIS_FRAME and populate the prologue
   cache CACHE.  */

static void
aarch64_scan_prologue (struct frame_info *this_frame,
                       struct aarch64_prologue_cache *cache)
{
  CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
  CORE_ADDR prologue_start;
  CORE_ADDR prologue_end;
  CORE_ADDR prev_pc = get_frame_pc (this_frame);
  struct gdbarch *gdbarch = get_frame_arch (this_frame);

  cache->prev_pc = prev_pc;

  /* Assume we do not find a frame.  */
  cache->framereg = -1;
  cache->framesize = 0;

  if (find_pc_partial_function (block_addr, NULL, &prologue_start,
                                &prologue_end))
    {
      struct symtab_and_line sal = find_pc_line (prologue_start, 0);

      if (sal.line == 0)
        {
          /* No line info so use the current PC.  */
          prologue_end = prev_pc;
        }
      else if (sal.end < prologue_end)
        {
          /* The next line begins after the function end.  */
          prologue_end = sal.end;
        }

      prologue_end = min (prologue_end, prev_pc);
      aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
    }
  else
    {
      CORE_ADDR frame_loc;
      LONGEST saved_fp;
      LONGEST saved_lr;
      enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

      frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
      if (frame_loc == 0)
        return;

      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = 16;
      cache->saved_regs[29].addr = 0;
      cache->saved_regs[30].addr = 8;
    }
}

/* Fill in *CACHE with information about the prologue of *THIS_FRAME.  This
   function may throw an exception if the inferior's registers or memory is
   not available.  */

static void
aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
                               struct aarch64_prologue_cache *cache)
{
  CORE_ADDR unwound_fp;
  int reg;

  aarch64_scan_prologue (this_frame, cache);

  if (cache->framereg == -1)
    return;

  unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
  if (unwound_fp == 0)
    return;

  cache->prev_sp = unwound_fp + cache->framesize;

  /* Calculate actual addresses of saved registers using offsets
     determined by aarch64_analyze_prologue.  */
  for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
    if (trad_frame_addr_p (cache->saved_regs, reg))
      cache->saved_regs[reg].addr += cache->prev_sp;

  cache->func = get_frame_func (this_frame);

  cache->available_p = 1;
}

/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  TRY
    {
      aarch64_make_prologue_cache_1 (this_frame, cache);
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
        throw_exception (ex);
    }
  END_CATCH

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
                                           void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  /* Halt the backtrace at "_start".  */
  if (cache->prev_pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
    return UNWIND_OUTERMOST;

  /* We've hit a wall, stop.  */
  if (cache->prev_sp == 0)
    return UNWIND_OUTERMOST;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a normal frame is the current function's starting
   PC and the caller's SP when we were called.  */

static void
aarch64_prologue_this_id (struct frame_info *this_frame,
                          void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    *this_id = frame_id_build_unavailable_stack (cache->func);
  else
    *this_id = frame_id_build (cache->prev_sp, cache->func);
}

/* Implement the "prev_register" frame_unwind method.  */

static struct value *
aarch64_prologue_prev_register (struct frame_info *this_frame,
                                void **this_cache, int prev_regnum)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  /* If we are asked to unwind the PC, then we need to return the LR
     instead.  The prologue may save PC, but it will point into this
     frame's prologue, not the next frame's resume location.  */
  if (prev_regnum == AARCH64_PC_REGNUM)
    {
      CORE_ADDR lr;

      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      return frame_unwind_got_constant (this_frame, prev_regnum, lr);
    }

  /* SP is generally not saved to the stack, but this frame is
     identified by the next frame's stack pointer at the time of the
     call.  The value was already reconstructed into PREV_SP.  */
  /*
         +----------+  ^
         | saved lr |  |
      +->| saved fp |--+
      |  |          |
      |  |          | <- Previous SP
      |  +----------+
      |  | saved lr |
      +--| saved fp |<- FP
         |          |
         |          |<- SP
         +----------+  */
  if (prev_regnum == AARCH64_SP_REGNUM)
    return frame_unwind_got_constant (this_frame, prev_regnum,
                                      cache->prev_sp);

  return trad_frame_get_prev_register (this_frame, cache->saved_regs,
                                       prev_regnum);
}

/* AArch64 prologue unwinder.  */
struct frame_unwind aarch64_prologue_unwind =
{
  NORMAL_FRAME,
  aarch64_prologue_frame_unwind_stop_reason,
  aarch64_prologue_this_id,
  aarch64_prologue_prev_register,
  NULL,
  default_frame_sniffer
};

/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  TRY
    {
      cache->prev_sp = get_frame_register_unsigned (this_frame,
                                                    AARCH64_SP_REGNUM);
      cache->prev_pc = get_frame_pc (this_frame);
      cache->available_p = 1;
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
        throw_exception (ex);
    }
  END_CATCH

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame,
                                       void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a stub frame is the current SP and LR.  */

static void
aarch64_stub_this_id (struct frame_info *this_frame,
                      void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (cache->available_p)
    *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
  else
    *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
}

/* Implement the "sniffer" frame_unwind method.  */

static int
aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
                             struct frame_info *this_frame,
                             void **this_prologue_cache)
{
  CORE_ADDR addr_in_block;
  gdb_byte dummy[4];

  addr_in_block = get_frame_address_in_block (this_frame);
  if (in_plt_section (addr_in_block)
      /* We also use the stub unwinder if the target memory is unreadable
         to avoid having the prologue unwinder trying to read it.  */
      || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
    return 1;

  return 0;
}

/* AArch64 stub unwinder.  */
struct frame_unwind aarch64_stub_unwind =
{
  NORMAL_FRAME,
  aarch64_stub_frame_unwind_stop_reason,
  aarch64_stub_this_id,
  aarch64_prologue_prev_register,
  NULL,
  aarch64_stub_unwind_sniffer
};

/* Return the frame base address of *THIS_FRAME.  */

static CORE_ADDR
aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  return cache->prev_sp - cache->framesize;
}

/* AArch64 default frame base information.  */
struct frame_base aarch64_normal_base =
{
  &aarch64_prologue_unwind,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base
};

/* Assuming THIS_FRAME is a dummy, return the frame ID of that
   dummy frame.  The frame ID's base needs to match the TOS value
   saved by save_dummy_frame_tos () and returned from
   aarch64_push_dummy_call, and the PC needs to match the dummy
   frame's breakpoint.  */

static struct frame_id
aarch64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  return frame_id_build (get_frame_register_unsigned (this_frame,
                                                      AARCH64_SP_REGNUM),
                         get_frame_pc (this_frame));
}

/* Implement the "unwind_pc" gdbarch method.  */

static CORE_ADDR
aarch64_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  CORE_ADDR pc
    = frame_unwind_register_unsigned (this_frame, AARCH64_PC_REGNUM);

  return pc;
}

/* Implement the "unwind_sp" gdbarch method.  */

static CORE_ADDR
aarch64_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  return frame_unwind_register_unsigned (this_frame, AARCH64_SP_REGNUM);
}

/* Return the value of the REGNUM register in the previous frame of
   *THIS_FRAME.  */

static struct value *
aarch64_dwarf2_prev_register (struct frame_info *this_frame,
                              void **this_cache, int regnum)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  CORE_ADDR lr;

  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      return frame_unwind_got_constant (this_frame, regnum, lr);

    default:
      internal_error (__FILE__, __LINE__,
                      _("Unexpected register %d"), regnum);
    }
}

/* Implement the "init_reg" dwarf2_frame_ops method.  */

static void
aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
                               struct dwarf2_frame_state_reg *reg,
                               struct frame_info *this_frame)
{
  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      reg->how = DWARF2_FRAME_REG_FN;
      reg->loc.fn = aarch64_dwarf2_prev_register;
      break;
    case AARCH64_SP_REGNUM:
      reg->how = DWARF2_FRAME_REG_CFA;
      break;
    }
}

/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

typedef struct
{
  /* Value to pass on stack.  */
  const gdb_byte *data;

  /* Size in bytes of value to pass on stack.  */
  int len;
} stack_item_t;

DEF_VEC_O (stack_item_t);

/* Return the alignment (in bytes) of the given type.  */

static int
aarch64_type_align (struct type *t)
{
  int n;
  int align;
  int falign;

  t = check_typedef (t);
  switch (TYPE_CODE (t))
    {
    default:
      /* Should never happen.  */
      internal_error (__FILE__, __LINE__, _("unknown type alignment"));
      return 4;

    case TYPE_CODE_PTR:
    case TYPE_CODE_ENUM:
    case TYPE_CODE_INT:
    case TYPE_CODE_FLT:
    case TYPE_CODE_SET:
    case TYPE_CODE_RANGE:
    case TYPE_CODE_BITSTRING:
    case TYPE_CODE_REF:
    case TYPE_CODE_CHAR:
    case TYPE_CODE_BOOL:
      return TYPE_LENGTH (t);

    case TYPE_CODE_ARRAY:
    case TYPE_CODE_COMPLEX:
      return aarch64_type_align (TYPE_TARGET_TYPE (t));

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      align = 1;
      for (n = 0; n < TYPE_NFIELDS (t); n++)
        {
          falign = aarch64_type_align (TYPE_FIELD_TYPE (t, n));
          if (falign > align)
            align = falign;
        }
      return align;
    }
}
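
/* Editorial example: struct { char c; double d; } aligns to 8 bytes
   here, because the struct/union case takes the largest member
   alignment, 1 for the char and 8 for the double.  */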

/* Return 1 if *TY is a homogeneous floating-point aggregate as
   defined in the AAPCS64 ABI document; otherwise return 0.  */

static int
is_hfa (struct type *ty)
{
  switch (TYPE_CODE (ty))
    {
    case TYPE_CODE_ARRAY:
      {
        struct type *target_ty = TYPE_TARGET_TYPE (ty);
        if (TYPE_CODE (target_ty) == TYPE_CODE_FLT && TYPE_LENGTH (ty) <= 4)
          return 1;
        break;
      }

    case TYPE_CODE_UNION:
    case TYPE_CODE_STRUCT:
      {
        if (TYPE_NFIELDS (ty) > 0 && TYPE_NFIELDS (ty) <= 4)
          {
            struct type *member0_type;

            member0_type = check_typedef (TYPE_FIELD_TYPE (ty, 0));
            if (TYPE_CODE (member0_type) == TYPE_CODE_FLT)
              {
                int i;

                for (i = 0; i < TYPE_NFIELDS (ty); i++)
                  {
                    struct type *member1_type;

                    member1_type = check_typedef (TYPE_FIELD_TYPE (ty, i));
                    if (TYPE_CODE (member0_type) != TYPE_CODE (member1_type)
                        || (TYPE_LENGTH (member0_type)
                            != TYPE_LENGTH (member1_type)))
                      return 0;
                  }
                return 1;
              }
          }
        return 0;
      }

    default:
      break;
    }

  return 0;
}
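
/* Editorial example: struct { float x, y, z; } is an HFA here (at most
   four members, all of the same floating-point type), whereas
   struct { float f; double d; } is not, because the member types
   differ in length.  */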

/* AArch64 function call information structure.  */
struct aarch64_call_info
{
  /* The current argument number.  */
  unsigned argnum;

  /* The next general purpose register number, equivalent to NGRN as
     described in the AArch64 Procedure Call Standard.  */
  unsigned ngrn;

  /* The next SIMD and floating point register number, equivalent to
     NSRN as described in the AArch64 Procedure Call Standard.  */
  unsigned nsrn;

  /* The next stacked argument address, equivalent to NSAA as
     described in the AArch64 Procedure Call Standard.  */
  unsigned nsaa;

  /* Stack item vector.  */
  VEC(stack_item_t) *si;
};

/* Pass a value in a sequence of consecutive X registers.  The caller
   is responsible for ensuring sufficient registers are available.  */

static void
pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
           struct aarch64_call_info *info, struct type *type,
           const bfd_byte *buf)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int len = TYPE_LENGTH (type);
  enum type_code typecode = TYPE_CODE (type);
  int regnum = AARCH64_X0_REGNUM + info->ngrn;

  info->argnum++;

  while (len > 0)
    {
      int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
      CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
                                                   byte_order);

      /* Adjust sub-word struct/union args when big-endian.  */
      if (byte_order == BFD_ENDIAN_BIG
          && partial_len < X_REGISTER_SIZE
          && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
        regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);

      if (aarch64_debug)
        {
          debug_printf ("arg %d in %s = 0x%s\n", info->argnum,
                        gdbarch_register_name (gdbarch, regnum),
                        phex (regval, X_REGISTER_SIZE));
        }
      regcache_cooked_write_unsigned (regcache, regnum, regval);
      len -= partial_len;
      buf += partial_len;
      regnum++;
    }
}
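
/* Editorial note: the big-endian shift above places a sub-word struct
   or union in the most significant bytes of the X register, matching
   the AAPCS64 memory layout of such values on big-endian targets; a
   3-byte struct, for example, is shifted left by (8 - 3) * 8 = 40
   bits.  */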

/* Attempt to marshall a value in a V register.  Return 1 if
   successful, or 0 if insufficient registers are available.  This
   function, unlike the equivalent pass_in_x() function, does not
   handle arguments spread across multiple registers.  */

static int
pass_in_v (struct gdbarch *gdbarch,
           struct regcache *regcache,
           struct aarch64_call_info *info,
           const bfd_byte *buf)
{
  if (info->nsrn < 8)
    {
      enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
      int regnum = AARCH64_V0_REGNUM + info->nsrn;

      info->argnum++;
      info->nsrn++;

      regcache_cooked_write (regcache, regnum, buf);
      if (aarch64_debug)
        {
          debug_printf ("arg %d in %s\n", info->argnum,
                        gdbarch_register_name (gdbarch, regnum));
        }
      return 1;
    }
  info->nsrn = 8;
  return 0;
}

/* Marshall an argument onto the stack.  */

static void
pass_on_stack (struct aarch64_call_info *info, struct type *type,
               const bfd_byte *buf)
{
  int len = TYPE_LENGTH (type);
  int align;
  stack_item_t item;

  info->argnum++;

  align = aarch64_type_align (type);

  /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
     natural alignment of the argument's type.  */
  align = align_up (align, 8);

  /* The AArch64 PCS requires at most doubleword alignment.  */
  if (align > 16)
    align = 16;

  if (aarch64_debug)
    {
      debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
                    info->nsaa);
    }

  item.len = len;
  item.data = buf;
  VEC_safe_push (stack_item_t, info->si, &item);

  info->nsaa += len;
  if (info->nsaa & (align - 1))
    {
      /* Push stack alignment padding.  */
      int pad = align - (info->nsaa & (align - 1));

      item.len = pad;
      item.data = buf;

      VEC_safe_push (stack_item_t, info->si, &item);
      info->nsaa += pad;
    }
}
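
/* Editorial example: passing a single char on the stack pushes a
   1-byte item followed by 7 bytes of padding, keeping NSAA 8-byte
   aligned for the next argument.  */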

/* Marshall an argument into a sequence of one or more consecutive X
   registers or, if insufficient X registers are available, onto the
   stack.  */

static void
pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
                    struct aarch64_call_info *info, struct type *type,
                    const bfd_byte *buf)
{
  int len = TYPE_LENGTH (type);
  int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;

  /* PCS C.13 - Pass in registers if we have enough spare registers.  */
  if (info->ngrn + nregs <= 8)
    {
      pass_in_x (gdbarch, regcache, info, type, buf);
      info->ngrn += nregs;
    }
  else
    {
      info->ngrn = 8;
      pass_on_stack (info, type, buf);
    }
}

/* Pass a value in a V register, or on the stack if insufficient are
   available.  */

static void
pass_in_v_or_stack (struct gdbarch *gdbarch,
                    struct regcache *regcache,
                    struct aarch64_call_info *info,
                    struct type *type,
                    const bfd_byte *buf)
{
  if (!pass_in_v (gdbarch, regcache, info, buf))
    pass_on_stack (info, type, buf);
}

/* Implement the "push_dummy_call" gdbarch method.  */

static CORE_ADDR
aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
                         struct regcache *regcache, CORE_ADDR bp_addr,
                         int nargs,
                         struct value **args, CORE_ADDR sp, int struct_return,
                         CORE_ADDR struct_addr)
{
  int nstack = 0;
  int argnum;
  int x_argreg;
  int v_argreg;
  struct aarch64_call_info info;
  struct type *func_type;
  struct type *return_type;
  int lang_struct_return;

  memset (&info, 0, sizeof (info));

  /* We need to know what the type of the called function is in order
     to determine the number of named/anonymous arguments for the
     actual argument placement, and the return type in order to handle
     return value correctly.

     The generic code above us views the decision of return in memory
     or return in registers as a two-stage process.  The language
     handler is consulted first and may decide to return in memory (e.g.
     class with copy constructor returned by value), this will cause
     the generic code to allocate space AND insert an initial leading
     argument.

     If the language code does not decide to pass in memory then the
     target code is consulted.

     If the language code decides to pass in memory we want to move
     the pointer inserted as the initial argument from the argument
     list and into X8, the conventional AArch64 struct return pointer
     register.

     This is slightly awkward, ideally the flag "lang_struct_return"
     would be passed to the target's implementation of push_dummy_call.
     Rather than change the target interface we call the language code
     directly ourselves.  */

  func_type = check_typedef (value_type (function));

  /* Dereference function pointer types.  */
  if (TYPE_CODE (func_type) == TYPE_CODE_PTR)
    func_type = TYPE_TARGET_TYPE (func_type);

  gdb_assert (TYPE_CODE (func_type) == TYPE_CODE_FUNC
              || TYPE_CODE (func_type) == TYPE_CODE_METHOD);

  /* If language_pass_by_reference () returned true we will have been
     given an additional initial argument, a hidden pointer to the
     return slot in memory.  */
  return_type = TYPE_TARGET_TYPE (func_type);
  lang_struct_return = language_pass_by_reference (return_type);

  /* Set the return address.  For the AArch64, the return breakpoint
     is always at BP_ADDR.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);

  /* If we were given an initial argument for the return slot because
     lang_struct_return was true, lose it.  */
  if (lang_struct_return)
    {
      args++;
      nargs--;
    }

  /* The struct_return pointer occupies X8.  */
  if (struct_return || lang_struct_return)
    {
      if (aarch64_debug)
        {
          debug_printf ("struct return in %s = 0x%s\n",
                        gdbarch_register_name (gdbarch,
                                               AARCH64_STRUCT_RETURN_REGNUM),
                        paddress (gdbarch, struct_addr));
        }
      regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
                                      struct_addr);
    }

  for (argnum = 0; argnum < nargs; argnum++)
    {
      struct value *arg = args[argnum];
      struct type *arg_type;
      int len;

      arg_type = check_typedef (value_type (arg));
      len = TYPE_LENGTH (arg_type);

      switch (TYPE_CODE (arg_type))
        {
        case TYPE_CODE_INT:
        case TYPE_CODE_BOOL:
        case TYPE_CODE_CHAR:
        case TYPE_CODE_RANGE:
        case TYPE_CODE_ENUM:
          if (len < 4)
            {
              /* Promote to 32 bit integer.  */
              if (TYPE_UNSIGNED (arg_type))
                arg_type = builtin_type (gdbarch)->builtin_uint32;
              else
                arg_type = builtin_type (gdbarch)->builtin_int32;
              arg = value_cast (arg_type, arg);
            }
          pass_in_x_or_stack (gdbarch, regcache, &info, arg_type,
                              value_contents (arg));
          break;

        case TYPE_CODE_COMPLEX:
          if (info.nsrn <= 6)
            {
              const bfd_byte *buf = value_contents (arg);
              struct type *target_type =
                check_typedef (TYPE_TARGET_TYPE (arg_type));

              pass_in_v (gdbarch, regcache, &info, buf);
              pass_in_v (gdbarch, regcache, &info,
                         buf + TYPE_LENGTH (target_type));
            }
          else
            {
              info.nsrn = 8;
              pass_on_stack (&info, arg_type, value_contents (arg));
            }
          break;

        case TYPE_CODE_FLT:
          pass_in_v_or_stack (gdbarch, regcache, &info, arg_type,
                              value_contents (arg));
          break;

        case TYPE_CODE_STRUCT:
        case TYPE_CODE_ARRAY:
        case TYPE_CODE_UNION:
          if (is_hfa (arg_type))
            {
              int elements = TYPE_NFIELDS (arg_type);

              /* Homogeneous Aggregates */
              if (info.nsrn + elements < 8)
                {
                  int i;

                  for (i = 0; i < elements; i++)
                    {
                      /* We know that we have sufficient registers
                         available therefore this will never fall back
                         to the stack.  */
                      struct value *field =
                        value_primitive_field (arg, 0, i, arg_type);
                      struct type *field_type =
                        check_typedef (value_type (field));

                      pass_in_v_or_stack (gdbarch, regcache, &info, field_type,
                                          value_contents_writeable (field));
                    }
                }
              else
                {
                  info.nsrn = 8;
                  pass_on_stack (&info, arg_type, value_contents (arg));
                }
            }
          else if (len > 16)
            {
              /* PCS B.7 Aggregates larger than 16 bytes are passed by
                 invisible reference.  */

              /* Allocate aligned storage.  */
              sp = align_down (sp - len, 16);

              /* Write the real data into the stack.  */
              write_memory (sp, value_contents (arg), len);

              /* Construct the indirection.  */
              arg_type = lookup_pointer_type (arg_type);
              arg = value_from_pointer (arg_type, sp);
              pass_in_x_or_stack (gdbarch, regcache, &info, arg_type,
                                  value_contents (arg));
            }
          else
            /* PCS C.15 / C.18 multiple values pass.  */
            pass_in_x_or_stack (gdbarch, regcache, &info, arg_type,
                                value_contents (arg));
          break;

        default:
          pass_in_x_or_stack (gdbarch, regcache, &info, arg_type,
                              value_contents (arg));
          break;
        }
    }

  /* Make sure stack retains 16 byte alignment.  */
  if (info.nsaa & 15)
    sp -= 16 - (info.nsaa & 15);

  while (!VEC_empty (stack_item_t, info.si))
    {
      stack_item_t *si = VEC_last (stack_item_t, info.si);

      sp -= si->len;
      write_memory (sp, si->data, si->len);
      VEC_pop (stack_item_t, info.si);
    }

  VEC_free (stack_item_t, info.si);

  /* Finally, update the SP register.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);

  return sp;
}

/* Implement the "frame_align" gdbarch method.  */

static CORE_ADDR
aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
{
  /* Align the stack to sixteen bytes.  */
  return sp & ~(CORE_ADDR) 15;
}

/* Return the type for an AdvSIMD Q register.  */

static struct type *
aarch64_vnq_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnq_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint128;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int128;
      append_composite_type_field (t, "s", elem);

      tdep->vnq_type = t;
    }

  return tdep->vnq_type;
}

/* Return the type for an AdvSIMD D register.  */

static struct type *
aarch64_vnd_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnd_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_double;
      append_composite_type_field (t, "f", elem);

      elem = builtin_type (gdbarch)->builtin_uint64;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int64;
      append_composite_type_field (t, "s", elem);

      tdep->vnd_type = t;
    }

  return tdep->vnd_type;
}

/* Return the type for an AdvSIMD S register.  */

static struct type *
aarch64_vns_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vns_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_float;
      append_composite_type_field (t, "f", elem);

      elem = builtin_type (gdbarch)->builtin_uint32;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int32;
      append_composite_type_field (t, "s", elem);

      tdep->vns_type = t;
    }

  return tdep->vns_type;
}

/* Return the type for an AdvSIMD H register.  */

static struct type *
aarch64_vnh_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnh_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint16;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int16;
      append_composite_type_field (t, "s", elem);

      tdep->vnh_type = t;
    }

  return tdep->vnh_type;
}

/* Return the type for an AdvSIMD B register.  */

static struct type *
aarch64_vnb_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnb_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint8;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int8;
      append_composite_type_field (t, "s", elem);

      tdep->vnb_type = t;
    }

  return tdep->vnb_type;
}

/* Implement the "dwarf2_reg_to_regnum" gdbarch method.  */

static int
aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
{
  if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
    return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;

  if (reg == AARCH64_DWARF_SP)
    return AARCH64_SP_REGNUM;

  if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
    return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;

  return -1;
}
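
/* Editorial note: in the standard AArch64 DWARF numbering (which the
   AARCH64_DWARF_* constants follow), 0-30 map to x0-x30, 31 to sp and
   64-95 to v0-v31; any other number is rejected with -1.  */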
\f

/* Implement the "print_insn" gdbarch method.  */

static int
aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
{
  info->symbols = NULL;
  return print_insn_aarch64 (memaddr, info);
}

/* AArch64 BRK software debug mode instruction.
   Note that AArch64 code is always little-endian.
   1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000.  */
static const gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};

/* Implement the "breakpoint_from_pc" gdbarch method.  */

static const gdb_byte *
aarch64_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr,
                            int *lenptr)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  *lenptr = sizeof (aarch64_default_breakpoint);
  return aarch64_default_breakpoint;
}

/* Extract from an array REGS containing the (raw) register state a
   function return value of type TYPE, and copy that, in virtual
   format, into VALBUF.  */

static void
aarch64_extract_return_value (struct type *type, struct regcache *regs,
                              gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = get_regcache_arch (regs);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  if (TYPE_CODE (type) == TYPE_CODE_FLT)
    {
      bfd_byte buf[V_REGISTER_SIZE];
      int len = TYPE_LENGTH (type);

      regcache_cooked_read (regs, AARCH64_V0_REGNUM, buf);
      memcpy (valbuf, buf, len);
    }
  else if (TYPE_CODE (type) == TYPE_CODE_INT
           || TYPE_CODE (type) == TYPE_CODE_CHAR
           || TYPE_CODE (type) == TYPE_CODE_BOOL
           || TYPE_CODE (type) == TYPE_CODE_PTR
           || TYPE_CODE (type) == TYPE_CODE_REF
           || TYPE_CODE (type) == TYPE_CODE_ENUM)
    {
      /* If the type is a plain integer, then the access is
         straightforward.  Otherwise we have to play around a bit
         more.  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      ULONGEST tmp;

      while (len > 0)
        {
          /* By using store_unsigned_integer we avoid having to do
             anything special for small big-endian values.  */
          regcache_cooked_read_unsigned (regs, regno++, &tmp);
          store_unsigned_integer (valbuf,
                                  (len > X_REGISTER_SIZE
                                   ? X_REGISTER_SIZE : len), byte_order, tmp);
          len -= X_REGISTER_SIZE;
          valbuf += X_REGISTER_SIZE;
        }
    }
  else if (TYPE_CODE (type) == TYPE_CODE_COMPLEX)
    {
      int regno = AARCH64_V0_REGNUM;
      bfd_byte buf[V_REGISTER_SIZE];
      struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
      int len = TYPE_LENGTH (target_type);

      regcache_cooked_read (regs, regno, buf);
      memcpy (valbuf, buf, len);
      valbuf += len;
      regcache_cooked_read (regs, regno + 1, buf);
      memcpy (valbuf, buf, len);
      valbuf += len;
    }
  else if (is_hfa (type))
    {
      int elements = TYPE_NFIELDS (type);
      struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
      int len = TYPE_LENGTH (member_type);
      int i;

      for (i = 0; i < elements; i++)
        {
          int regno = AARCH64_V0_REGNUM + i;
          bfd_byte buf[X_REGISTER_SIZE];

          if (aarch64_debug)
            {
              debug_printf ("read HFA return value element %d from %s\n",
                            i + 1,
                            gdbarch_register_name (gdbarch, regno));
            }
          regcache_cooked_read (regs, regno, buf);

          memcpy (valbuf, buf, len);
          valbuf += len;
        }
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
         been stored to word-aligned memory and then loaded into
         registers with 64-bit load instruction(s).  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      bfd_byte buf[X_REGISTER_SIZE];

      while (len > 0)
        {
          regcache_cooked_read (regs, regno++, buf);
          memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
          len -= X_REGISTER_SIZE;
          valbuf += X_REGISTER_SIZE;
        }
    }
}
1963
1964
1965 /* Will a function return an aggregate type in memory or in a
1966 register? Return 0 if an aggregate type can be returned in a
1967 register, 1 if it must be returned in memory. */
1968
1969 static int
1970 aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
1971 {
1974
1975 type = check_typedef (type);
1976
1977 /* In the AArch64 ABI, "integer" like aggregate types are returned
1978 in registers. For an aggregate type to be integer like, its size
1979 must be less than or equal to 16 bytes, i.e. 2 * X_REGISTER_SIZE. */
1980
1981 if (is_hfa (type))
1982 {
1983 /* PCS B.5 If the argument is a Named HFA, then the argument is
1984 used unmodified. */
1985 return 0;
1986 }
1987
1988 if (TYPE_LENGTH (type) > 16)
1989 {
1990 /* PCS B.6 Aggregates larger than 16 bytes are passed by
1991 invisible reference. */
1992
1993 return 1;
1994 }
1995
1996 return 0;
1997 }
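/* Example, assuming the usual AAPCS64 treatment: a 24-byte

     struct big { uint64_t a, b, c; };

   (a hypothetical type) exceeds the 16 byte limit checked above, so
   this function returns 1 and the value is instead returned through
   memory, with the caller passing the result address in the
   indirect result register x8.  */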
1998
1999 /* Write into appropriate registers a function return value of type
2000 TYPE, given in virtual format. */
2001
2002 static void
2003 aarch64_store_return_value (struct type *type, struct regcache *regs,
2004 const gdb_byte *valbuf)
2005 {
2006 struct gdbarch *gdbarch = get_regcache_arch (regs);
2007 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2008
2009 if (TYPE_CODE (type) == TYPE_CODE_FLT)
2010 {
2011 bfd_byte buf[V_REGISTER_SIZE];
2012 int len = TYPE_LENGTH (type);
2013
2014 memcpy (buf, valbuf, len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
2015 regcache_cooked_write (regs, AARCH64_V0_REGNUM, buf);
2016 }
2017 else if (TYPE_CODE (type) == TYPE_CODE_INT
2018 || TYPE_CODE (type) == TYPE_CODE_CHAR
2019 || TYPE_CODE (type) == TYPE_CODE_BOOL
2020 || TYPE_CODE (type) == TYPE_CODE_PTR
2021 || TYPE_CODE (type) == TYPE_CODE_REF
2022 || TYPE_CODE (type) == TYPE_CODE_ENUM)
2023 {
2024 if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
2025 {
2026 /* Values of one word or less are zero/sign-extended and
2027 returned in x0. */
2028 bfd_byte tmpbuf[X_REGISTER_SIZE];
2029 LONGEST val = unpack_long (type, valbuf);
2030
2031 store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
2032 regcache_cooked_write (regs, AARCH64_X0_REGNUM, tmpbuf);
2033 }
2034 else
2035 {
2036 /* Integral values greater than one word are stored in
2037 consecutive registers starting with x0. This will always
2038 be a multiple of the register size. */
2039 int len = TYPE_LENGTH (type);
2040 int regno = AARCH64_X0_REGNUM;
2041
2042 while (len > 0)
2043 {
2044 regcache_cooked_write (regs, regno++, valbuf);
2045 len -= X_REGISTER_SIZE;
2046 valbuf += X_REGISTER_SIZE;
2047 }
2048 }
2049 }
2050 else if (is_hfa (type))
2051 {
2052 int elements = TYPE_NFIELDS (type);
2053 struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
2054 int len = TYPE_LENGTH (member_type);
2055 int i;
2056
2057 for (i = 0; i < elements; i++)
2058 {
2059 int regno = AARCH64_V0_REGNUM + i;
2060 bfd_byte tmpbuf[MAX_REGISTER_SIZE];
2061
2062 if (aarch64_debug)
2063 {
2064 debug_printf ("write HFA return value element %d to %s\n",
2065 i + 1,
2066 gdbarch_register_name (gdbarch, regno));
2067 }
2068
2069 memcpy (tmpbuf, valbuf, len);
2070 regcache_cooked_write (regs, regno, tmpbuf);
2071 valbuf += len;
2072 }
2073 }
2074 else
2075 {
2076 /* For a structure or union the behaviour is as if the value had
2077 been stored to word-aligned memory and then loaded into
2078 registers with 64-bit load instruction(s). */
2079 int len = TYPE_LENGTH (type);
2080 int regno = AARCH64_X0_REGNUM;
2081 bfd_byte tmpbuf[X_REGISTER_SIZE];
2082
2083 while (len > 0)
2084 {
2085 memcpy (tmpbuf, valbuf,
2086 len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2087 regcache_cooked_write (regs, regno++, tmpbuf);
2088 len -= X_REGISTER_SIZE;
2089 valbuf += X_REGISTER_SIZE;
2090 }
2091 }
2092 }
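/* A sketch of the scalar path above: returning a signed char of -1
   unpacks to the LONGEST value -1, which store_signed_integer then
   sign-extends across the full 8-byte X_REGISTER_SIZE before it is
   written to x0, mirroring what compiled code would leave in the
   register.  */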
2093
2094 /* Implement the "return_value" gdbarch method. */
2095
2096 static enum return_value_convention
2097 aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
2098 struct type *valtype, struct regcache *regcache,
2099 gdb_byte *readbuf, const gdb_byte *writebuf)
2100 {
2103 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
2104 || TYPE_CODE (valtype) == TYPE_CODE_UNION
2105 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
2106 {
2107 if (aarch64_return_in_memory (gdbarch, valtype))
2108 {
2109 if (aarch64_debug)
2110 debug_printf ("return value in memory\n");
2111 return RETURN_VALUE_STRUCT_CONVENTION;
2112 }
2113 }
2114
2115 if (writebuf)
2116 aarch64_store_return_value (valtype, regcache, writebuf);
2117
2118 if (readbuf)
2119 aarch64_extract_return_value (valtype, regcache, readbuf);
2120
2121 if (aarch64_debug)
2122 debug_printf ("return value in registers\n");
2123
2124 return RETURN_VALUE_REGISTER_CONVENTION;
2125 }
2126
2127 /* Implement the "get_longjmp_target" gdbarch method. */
2128
2129 static int
2130 aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2131 {
2132 CORE_ADDR jb_addr;
2133 gdb_byte buf[X_REGISTER_SIZE];
2134 struct gdbarch *gdbarch = get_frame_arch (frame);
2135 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2136 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2137
2138 jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
2139
2140 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
2141 X_REGISTER_SIZE))
2142 return 0;
2143
2144 *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
2145 return 1;
2146 }
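/* The address computed above depends on the OS ABI having filled in
   tdep->jb_pc and tdep->jb_elt_size.  For example, with
   jb_elt_size == 8 and a hypothetical jb_pc == 11, the saved PC is
   read from jb_addr + 88.  */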
2147
2148 /* Implement the "gen_return_address" gdbarch method. */
2149
2150 static void
2151 aarch64_gen_return_address (struct gdbarch *gdbarch,
2152 struct agent_expr *ax, struct axs_value *value,
2153 CORE_ADDR scope)
2154 {
2155 value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
2156 value->kind = axs_lvalue_register;
2157 value->u.reg = AARCH64_LR_REGNUM;
2158 }
2159 \f
2160
2161 /* Return the pseudo register name corresponding to register REGNUM. */
2162
2163 static const char *
2164 aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
2165 {
2166 static const char *const q_name[] =
2167 {
2168 "q0", "q1", "q2", "q3",
2169 "q4", "q5", "q6", "q7",
2170 "q8", "q9", "q10", "q11",
2171 "q12", "q13", "q14", "q15",
2172 "q16", "q17", "q18", "q19",
2173 "q20", "q21", "q22", "q23",
2174 "q24", "q25", "q26", "q27",
2175 "q28", "q29", "q30", "q31",
2176 };
2177
2178 static const char *const d_name[] =
2179 {
2180 "d0", "d1", "d2", "d3",
2181 "d4", "d5", "d6", "d7",
2182 "d8", "d9", "d10", "d11",
2183 "d12", "d13", "d14", "d15",
2184 "d16", "d17", "d18", "d19",
2185 "d20", "d21", "d22", "d23",
2186 "d24", "d25", "d26", "d27",
2187 "d28", "d29", "d30", "d31",
2188 };
2189
2190 static const char *const s_name[] =
2191 {
2192 "s0", "s1", "s2", "s3",
2193 "s4", "s5", "s6", "s7",
2194 "s8", "s9", "s10", "s11",
2195 "s12", "s13", "s14", "s15",
2196 "s16", "s17", "s18", "s19",
2197 "s20", "s21", "s22", "s23",
2198 "s24", "s25", "s26", "s27",
2199 "s28", "s29", "s30", "s31",
2200 };
2201
2202 static const char *const h_name[] =
2203 {
2204 "h0", "h1", "h2", "h3",
2205 "h4", "h5", "h6", "h7",
2206 "h8", "h9", "h10", "h11",
2207 "h12", "h13", "h14", "h15",
2208 "h16", "h17", "h18", "h19",
2209 "h20", "h21", "h22", "h23",
2210 "h24", "h25", "h26", "h27",
2211 "h28", "h29", "h30", "h31",
2212 };
2213
2214 static const char *const b_name[] =
2215 {
2216 "b0", "b1", "b2", "b3",
2217 "b4", "b5", "b6", "b7",
2218 "b8", "b9", "b10", "b11",
2219 "b12", "b13", "b14", "b15",
2220 "b16", "b17", "b18", "b19",
2221 "b20", "b21", "b22", "b23",
2222 "b24", "b25", "b26", "b27",
2223 "b28", "b29", "b30", "b31",
2224 };
2225
2226 regnum -= gdbarch_num_regs (gdbarch);
2227
2228 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2229 return q_name[regnum - AARCH64_Q0_REGNUM];
2230
2231 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2232 return d_name[regnum - AARCH64_D0_REGNUM];
2233
2234 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2235 return s_name[regnum - AARCH64_S0_REGNUM];
2236
2237 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2238 return h_name[regnum - AARCH64_H0_REGNUM];
2239
2240 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2241 return b_name[regnum - AARCH64_B0_REGNUM];
2242
2243 internal_error (__FILE__, __LINE__,
2244 _("aarch64_pseudo_register_name: bad register number %d"),
2245 regnum);
2246 }
2247
2248 /* Implement the "pseudo_register_type" tdesc_arch_data method. */
2249
2250 static struct type *
2251 aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
2252 {
2253 regnum -= gdbarch_num_regs (gdbarch);
2254
2255 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2256 return aarch64_vnq_type (gdbarch);
2257
2258 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2259 return aarch64_vnd_type (gdbarch);
2260
2261 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2262 return aarch64_vns_type (gdbarch);
2263
2264 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2265 return aarch64_vnh_type (gdbarch);
2266
2267 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2268 return aarch64_vnb_type (gdbarch);
2269
2270 internal_error (__FILE__, __LINE__,
2271 _("aarch64_pseudo_register_type: bad register number %d"),
2272 regnum);
2273 }
2274
2275 /* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2276
2277 static int
2278 aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
2279 struct reggroup *group)
2280 {
2281 regnum -= gdbarch_num_regs (gdbarch);
2282
2283 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2284 return group == all_reggroup || group == vector_reggroup;
2285 else if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2286 return (group == all_reggroup || group == vector_reggroup
2287 || group == float_reggroup);
2288 else if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2289 return (group == all_reggroup || group == vector_reggroup
2290 || group == float_reggroup);
2291 else if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2292 return group == all_reggroup || group == vector_reggroup;
2293 else if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2294 return group == all_reggroup || group == vector_reggroup;
2295
2296 return group == all_reggroup;
2297 }
2298
2299 /* Implement the "pseudo_register_read_value" gdbarch method. */
2300
2301 static struct value *
2302 aarch64_pseudo_read_value (struct gdbarch *gdbarch,
2303 struct regcache *regcache,
2304 int regnum)
2305 {
2306 gdb_byte reg_buf[MAX_REGISTER_SIZE];
2307 struct value *result_value;
2308 gdb_byte *buf;
2309
2310 result_value = allocate_value (register_type (gdbarch, regnum));
2311 VALUE_LVAL (result_value) = lval_register;
2312 VALUE_REGNUM (result_value) = regnum;
2313 buf = value_contents_raw (result_value);
2314
2315 regnum -= gdbarch_num_regs (gdbarch);
2316
2317 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2318 {
2319 enum register_status status;
2320 unsigned v_regnum;
2321
2322 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2323 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2324 if (status != REG_VALID)
2325 mark_value_bytes_unavailable (result_value, 0,
2326 TYPE_LENGTH (value_type (result_value)));
2327 else
2328 memcpy (buf, reg_buf, Q_REGISTER_SIZE);
2329 return result_value;
2330 }
2331
2332 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2333 {
2334 enum register_status status;
2335 unsigned v_regnum;
2336
2337 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2338 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2339 if (status != REG_VALID)
2340 mark_value_bytes_unavailable (result_value, 0,
2341 TYPE_LENGTH (value_type (result_value)));
2342 else
2343 memcpy (buf, reg_buf, D_REGISTER_SIZE);
2344 return result_value;
2345 }
2346
2347 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2348 {
2349 enum register_status status;
2350 unsigned v_regnum;
2351
2352 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2353 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2354 if (status != REG_VALID)
2355 mark_value_bytes_unavailable (result_value, 0,
2356 TYPE_LENGTH (value_type (result_value)));
2357 else
2358 memcpy (buf, reg_buf, S_REGISTER_SIZE);
2359 return result_value;
2360 }
2361
2362 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2363 {
2364 enum register_status status;
2365 unsigned v_regnum;
2366
2367 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2368 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2369 if (status != REG_VALID)
2370 mark_value_bytes_unavailable (result_value, 0,
2371 TYPE_LENGTH (value_type (result_value)));
2372 else
2373 memcpy (buf, reg_buf, H_REGISTER_SIZE);
2374 return result_value;
2375 }
2376
2377 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2378 {
2379 enum register_status status;
2380 unsigned v_regnum;
2381
2382 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2383 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2384 if (status != REG_VALID)
2385 mark_value_bytes_unavailable (result_value, 0,
2386 TYPE_LENGTH (value_type (result_value)));
2387 else
2388 memcpy (buf, reg_buf, B_REGISTER_SIZE);
2389 return result_value;
2390 }
2391
2392 gdb_assert_not_reached ("regnum out of bounds");
2393 }
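/* All the cases above rely on the architectural aliasing of the
   scalar views onto the V registers: reading pseudo register d5, for
   instance, becomes a raw read of v5 followed by a copy of its low
   D_REGISTER_SIZE (8) bytes, matching the hardware view in which Dn
   is the low half of Vn.  */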
2394
2395 /* Implement the "pseudo_register_write" gdbarch method. */
2396
2397 static void
2398 aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2399 int regnum, const gdb_byte *buf)
2400 {
2401 gdb_byte reg_buf[MAX_REGISTER_SIZE];
2402
2403 /* Ensure the register buffer is zero. We want GDB writes of the
2404 various 'scalar' pseudo registers to behave like architectural
2405 writes: register width bytes are written and the remainder is
2406 set to zero. */
2407 memset (reg_buf, 0, sizeof (reg_buf));
2408
2409 regnum -= gdbarch_num_regs (gdbarch);
2410
2411 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2412 {
2413 /* pseudo Q registers */
2414 unsigned v_regnum;
2415
2416 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2417 memcpy (reg_buf, buf, Q_REGISTER_SIZE);
2418 regcache_raw_write (regcache, v_regnum, reg_buf);
2419 return;
2420 }
2421
2422 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2423 {
2424 /* pseudo D registers */
2425 unsigned v_regnum;
2426
2427 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2428 memcpy (reg_buf, buf, D_REGISTER_SIZE);
2429 regcache_raw_write (regcache, v_regnum, reg_buf);
2430 return;
2431 }
2432
2433 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2434 {
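/* pseudo S registers */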
2435 unsigned v_regnum;
2436
2437 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2438 memcpy (reg_buf, buf, S_REGISTER_SIZE);
2439 regcache_raw_write (regcache, v_regnum, reg_buf);
2440 return;
2441 }
2442
2443 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2444 {
2445 /* pseudo H registers */
2446 unsigned v_regnum;
2447
2448 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2449 memcpy (reg_buf, buf, H_REGISTER_SIZE);
2450 regcache_raw_write (regcache, v_regnum, reg_buf);
2451 return;
2452 }
2453
2454 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2455 {
2456 /* pseudo B registers */
2457 unsigned v_regnum;
2458
2459 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2460 memcpy (reg_buf, buf, B_REGISTER_SIZE);
2461 regcache_raw_write (regcache, v_regnum, reg_buf);
2462 return;
2463 }
2464
2465 gdb_assert_not_reached ("regnum out of bounds");
2466 }
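/* A sketch of the zero-extension behaviour: writing the 4 bytes
   0x3f800000 to pseudo register s3 copies them into the zeroed
   16-byte buffer and then raw-writes all of v3, leaving the upper
   12 bytes of v3 zero, just as an architectural write to s3
   would.  */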
2467
2468 /* Callback function for user_reg_add. */
2469
2470 static struct value *
2471 value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
2472 {
2473 const int *reg_p = (const int *) baton;
2474
2475 return value_of_register (*reg_p, frame);
2476 }
2477 \f
2478
2479 /* Implement the "software_single_step" gdbarch method, needed to
2480 single step through atomic sequences on AArch64. */
2481
2482 static int
2483 aarch64_software_single_step (struct frame_info *frame)
2484 {
2485 struct gdbarch *gdbarch = get_frame_arch (frame);
2486 struct address_space *aspace = get_frame_address_space (frame);
2487 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2488 const int insn_size = 4;
2489 const int atomic_sequence_length = 16; /* Instruction sequence length. */
2490 CORE_ADDR pc = get_frame_pc (frame);
2491 CORE_ADDR breaks[2] = { -1, -1 };
2492 CORE_ADDR loc = pc;
2493 CORE_ADDR closing_insn = 0;
2494 uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
2495 byte_order_for_code);
2496 int index;
2497 int insn_count;
2498 int bc_insn_count = 0; /* Conditional branch instruction count. */
2499 int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed). */
2500 aarch64_inst inst;
2501
2502 if (aarch64_decode_insn (insn, &inst) != 0)
2503 return 0;
2504
2505 /* Look for a Load Exclusive instruction which begins the sequence. */
2506 if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
2507 return 0;
2508
2509 for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
2510 {
2511 loc += insn_size;
2512 insn = read_memory_unsigned_integer (loc, insn_size,
2513 byte_order_for_code);
2514
2515 if (aarch64_decode_insn (insn, &inst) != 0)
2516 return 0;
2517 /* Check if the instruction is a conditional branch. */
2518 if (inst.opcode->iclass == condbranch)
2519 {
2520 gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);
2521
2522 if (bc_insn_count >= 1)
2523 return 0;
2524
2525 /* It is, so we'll try to set a breakpoint at the destination. */
2526 breaks[1] = loc + inst.operands[0].imm.value;
2527
2528 bc_insn_count++;
2529 last_breakpoint++;
2530 }
2531
2532 /* Look for the Store Exclusive which closes the atomic sequence. */
2533 if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
2534 {
2535 closing_insn = loc;
2536 break;
2537 }
2538 }
2539
2540 /* We didn't find a closing Store Exclusive instruction; fall back. */
2541 if (!closing_insn)
2542 return 0;
2543
2544 /* Insert breakpoint after the end of the atomic sequence. */
2545 breaks[0] = loc + insn_size;
2546
2547 /* Check for duplicated breakpoints, and also check that the second
2548 breakpoint is not within the atomic sequence. */
2549 if (last_breakpoint
2550 && (breaks[1] == breaks[0]
2551 || (breaks[1] >= pc && breaks[1] <= closing_insn)))
2552 last_breakpoint = 0;
2553
2554 /* Insert the breakpoint at the end of the sequence, and one at the
2555 destination of the conditional branch, if it exists. */
2556 for (index = 0; index <= last_breakpoint; index++)
2557 insert_single_step_breakpoint (gdbarch, aspace, breaks[index]);
2558
2559 return 1;
2560 }
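/* For illustration, the kind of code this steps over is the classic
   exclusive-monitor retry loop (assumed compiler output, shown only
   as a sketch):

     1: ldaxr  w2, [x0]        // load exclusive opens the sequence
        add    w2, w2, #1
        stlxr  w3, w2, [x0]    // store exclusive closes it
        cmp    w3, #0
        b.ne   1b              // conditional branch back on failure

   Placing a breakpoint inside the sequence would clear the exclusive
   monitor and the loop might never exit, hence the breakpoints go
   after the store exclusive and at the branch destination.  */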
2561
2562 /* Initialize the current architecture based on INFO. If possible,
2563 re-use an architecture from ARCHES, which is a list of
2564 architectures already created during this debugging session.
2565
2566 Called e.g. at program startup, when reading a core file, and when
2567 reading a binary file. */
2568
2569 static struct gdbarch *
2570 aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
2571 {
2572 struct gdbarch_tdep *tdep;
2573 struct gdbarch *gdbarch;
2574 struct gdbarch_list *best_arch;
2575 struct tdesc_arch_data *tdesc_data = NULL;
2576 const struct target_desc *tdesc = info.target_desc;
2577 int i;
2579 int valid_p = 1;
2580 const struct tdesc_feature *feature;
2581 int num_regs = 0;
2582 int num_pseudo_regs = 0;
2583
2584 /* Ensure we always have a target descriptor. */
2585 if (!tdesc_has_registers (tdesc))
2586 tdesc = tdesc_aarch64;
2587
2588 gdb_assert (tdesc);
2589
2590 feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");
2591
2592 if (feature == NULL)
2593 return NULL;
2594
2595 tdesc_data = tdesc_data_alloc ();
2596
2597 /* Validate the descriptor provides the mandatory core R registers
2598 and allocate their numbers. */
2599 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
2600 valid_p &=
2601 tdesc_numbered_register (feature, tdesc_data, AARCH64_X0_REGNUM + i,
2602 aarch64_r_register_names[i]);
2603
2604 num_regs = AARCH64_X0_REGNUM + i;
2605
2606 /* Look for the V registers. */
2607 feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
2608 if (feature)
2609 {
2610 /* Validate the descriptor provides the mandatory V registers
2611 and allocate their numbers. */
2612 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
2613 valid_p &=
2614 tdesc_numbered_register (feature, tdesc_data, AARCH64_V0_REGNUM + i,
2615 aarch64_v_register_names[i]);
2616
2617 num_regs = AARCH64_V0_REGNUM + i;
2618
2619 num_pseudo_regs += 32; /* Add the Qn scalar register pseudos. */
2620 num_pseudo_regs += 32; /* Add the Dn scalar register pseudos. */
2621 num_pseudo_regs += 32; /* Add the Sn scalar register pseudos. */
2622 num_pseudo_regs += 32; /* Add the Hn scalar register pseudos. */
2623 num_pseudo_regs += 32; /* Add the Bn scalar register pseudos. */
2624 }
2625
2626 if (!valid_p)
2627 {
2628 tdesc_data_cleanup (tdesc_data);
2629 return NULL;
2630 }
2631
2632 /* AArch64 code is always little-endian. */
2633 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
2634
2635 /* If there is already a candidate, use it. */
2636 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
2637 best_arch != NULL;
2638 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
2639 {
2640 /* Found a match. */
2641 break;
2642 }
2643
2644 if (best_arch != NULL)
2645 {
2646 if (tdesc_data != NULL)
2647 tdesc_data_cleanup (tdesc_data);
2648 return best_arch->gdbarch;
2649 }
2650
2651 tdep = XCNEW (struct gdbarch_tdep);
2652 gdbarch = gdbarch_alloc (&info, tdep);
2653
2654 /* This should be low enough for everything. */
2655 tdep->lowest_pc = 0x20;
2656 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
2657 tdep->jb_elt_size = 8;
2658
2659 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
2660 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
2661
2662 /* Frame handling. */
2663 set_gdbarch_dummy_id (gdbarch, aarch64_dummy_id);
2664 set_gdbarch_unwind_pc (gdbarch, aarch64_unwind_pc);
2665 set_gdbarch_unwind_sp (gdbarch, aarch64_unwind_sp);
2666
2667 /* Advance PC across function entry code. */
2668 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
2669
2670 /* The stack grows downward. */
2671 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
2672
2673 /* Breakpoint manipulation. */
2674 set_gdbarch_breakpoint_from_pc (gdbarch, aarch64_breakpoint_from_pc);
2675 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
2676 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
2677
2678 /* Information about registers, etc. */
2679 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
2680 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
2681 set_gdbarch_num_regs (gdbarch, num_regs);
2682
2683 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
2684 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
2685 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
2686 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
2687 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
2688 set_tdesc_pseudo_register_reggroup_p (gdbarch,
2689 aarch64_pseudo_register_reggroup_p);
2690
2691 /* ABI */
2692 set_gdbarch_short_bit (gdbarch, 16);
2693 set_gdbarch_int_bit (gdbarch, 32);
2694 set_gdbarch_float_bit (gdbarch, 32);
2695 set_gdbarch_double_bit (gdbarch, 64);
2696 set_gdbarch_long_double_bit (gdbarch, 128);
2697 set_gdbarch_long_bit (gdbarch, 64);
2698 set_gdbarch_long_long_bit (gdbarch, 64);
2699 set_gdbarch_ptr_bit (gdbarch, 64);
2700 set_gdbarch_char_signed (gdbarch, 0);
2701 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
2702 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
2703 set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);
2704
2705 /* Internal <-> external register number maps. */
2706 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
2707
2708 /* Returning results. */
2709 set_gdbarch_return_value (gdbarch, aarch64_return_value);
2710
2711 /* Disassembly. */
2712 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
2713
2714 /* Virtual tables. */
2715 set_gdbarch_vbit_in_delta (gdbarch, 1);
2716
2717 /* Hook in the ABI-specific overrides, if they have been registered. */
2718 info.target_desc = tdesc;
2719 info.tdep_info = (void *) tdesc_data;
2720 gdbarch_init_osabi (info, gdbarch);
2721
2722 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
2723
2724 /* Add some default predicates. */
2725 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
2726 dwarf2_append_unwinders (gdbarch);
2727 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
2728
2729 frame_base_set_default (gdbarch, &aarch64_normal_base);
2730
2731 /* Now that we have tuned the configuration, set a few final
2732 things, based on what the OS ABI has told us. */
2733
2734 if (tdep->jb_pc >= 0)
2735 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
2736
2737 set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);
2738
2739 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
2740
2741 /* Add standard register aliases. */
2742 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
2743 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
2744 value_of_aarch64_user_reg,
2745 &aarch64_register_aliases[i].regnum);
2746
2747 return gdbarch;
2748 }
2749
2750 static void
2751 aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
2752 {
2753 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2754
2755 if (tdep == NULL)
2756 return;
2757
2758 fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
2759 paddress (gdbarch, tdep->lowest_pc));
2760 }
2761
2762 /* Suppress warning from -Wmissing-prototypes. */
2763 extern initialize_file_ftype _initialize_aarch64_tdep;
2764
2765 void
2766 _initialize_aarch64_tdep (void)
2767 {
2768 gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
2769 aarch64_dump_tdep);
2770
2771 initialize_tdesc_aarch64 ();
2772
2773 /* Debug this file's internals. */
2774 add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
2775 Set AArch64 debugging."), _("\
2776 Show AArch64 debugging."), _("\
2777 When on, AArch64 specific debugging is enabled."),
2778 NULL,
2779 show_aarch64_debug,
2780 &setdebuglist, &showdebuglist);
2781 }
2782
2783 /* AArch64 process record-replay related structures, defines, etc. */
2784
2785 #define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
2786 do \
2787 { \
2788 unsigned int reg_len = LENGTH; \
2789 if (reg_len) \
2790 { \
2791 REGS = XNEWVEC (uint32_t, reg_len); \
2792 memcpy (&REGS[0], &RECORD_BUF[0], sizeof (uint32_t) * LENGTH); \
2793 } \
2794 } \
2795 while (0)
2796
2797 #define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
2798 do \
2799 { \
2800 unsigned int mem_len = LENGTH; \
2801 if (mem_len) \
2802 { \
2803 MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
2804 memcpy (&MEMS->len, &RECORD_BUF[0], \
2805 sizeof (struct aarch64_mem_r) * LENGTH); \
2806 } \
2807 } \
2808 while (0)
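/* Typical use of these macros (a sketch): a record handler collects
   the registers an instruction will clobber in a local buffer and
   then hands a heap copy to the decode record, e.g.

     uint32_t record_buf[2];
     record_buf[0] = reg_rd;
     record_buf[1] = AARCH64_CPSR_REGNUM;
     aarch64_insn_r->reg_rec_count = 2;
     REG_ALLOC (aarch64_insn_r->aarch64_regs,
                aarch64_insn_r->reg_rec_count, record_buf);

   Both allocations are released later by deallocate_reg_mem.  */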
2809
2810 /* AArch64 record/replay structures and enumerations. */
2811
2812 struct aarch64_mem_r
2813 {
2814 uint64_t len; /* Record length. */
2815 uint64_t addr; /* Memory address. */
2816 };
2817
2818 enum aarch64_record_result
2819 {
2820 AARCH64_RECORD_SUCCESS,
2821 AARCH64_RECORD_FAILURE,
2822 AARCH64_RECORD_UNSUPPORTED,
2823 AARCH64_RECORD_UNKNOWN
2824 };
2825
2826 typedef struct insn_decode_record_t
2827 {
2828 struct gdbarch *gdbarch;
2829 struct regcache *regcache;
2830 CORE_ADDR this_addr; /* Address of insn to be recorded. */
2831 uint32_t aarch64_insn; /* Insn to be recorded. */
2832 uint32_t mem_rec_count; /* Count of memory records. */
2833 uint32_t reg_rec_count; /* Count of register records. */
2834 uint32_t *aarch64_regs; /* Registers to be recorded. */
2835 struct aarch64_mem_r *aarch64_mems; /* Memory locations to be recorded. */
2836 } insn_decode_record;
2837
2838 /* Record handler for data processing - register instructions. */
2839
2840 static unsigned int
2841 aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
2842 {
2843 uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
2844 uint32_t record_buf[4];
2845
2846 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
2847 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
2848 insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);
2849
2850 if (!bit (aarch64_insn_r->aarch64_insn, 28))
2851 {
2852 uint8_t setflags;
2853
2854 /* Logical (shifted register). */
2855 if (insn_bits24_27 == 0x0a)
2856 setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
2857 /* Add/subtract. */
2858 else if (insn_bits24_27 == 0x0b)
2859 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
2860 else
2861 return AARCH64_RECORD_UNKNOWN;
2862
2863 record_buf[0] = reg_rd;
2864 aarch64_insn_r->reg_rec_count = 1;
2865 if (setflags)
2866 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
2867 }
2868 else
2869 {
2870 if (insn_bits24_27 == 0x0b)
2871 {
2872 /* Data-processing (3 source). */
2873 record_buf[0] = reg_rd;
2874 aarch64_insn_r->reg_rec_count = 1;
2875 }
2876 else if (insn_bits24_27 == 0x0a)
2877 {
2878 if (insn_bits21_23 == 0x00)
2879 {
2880 /* Add/subtract (with carry). */
2881 record_buf[0] = reg_rd;
2882 aarch64_insn_r->reg_rec_count = 1;
2883 if (bit (aarch64_insn_r->aarch64_insn, 29))
2884 {
2885 record_buf[1] = AARCH64_CPSR_REGNUM;
2886 aarch64_insn_r->reg_rec_count = 2;
2887 }
2888 }
2889 else if (insn_bits21_23 == 0x02)
2890 {
2891 /* Conditional compare (register) and conditional compare
2892 (immediate) instructions. */
2893 record_buf[0] = AARCH64_CPSR_REGNUM;
2894 aarch64_insn_r->reg_rec_count = 1;
2895 }
2896 else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
2897 {
2898 /* Conditional select. */
2899 /* Data-processing (2 source). */
2900 /* Data-processing (1 source). */
2901 record_buf[0] = reg_rd;
2902 aarch64_insn_r->reg_rec_count = 1;
2903 }
2904 else
2905 return AARCH64_RECORD_UNKNOWN;
2906 }
2907 }
2908
2909 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
2910 record_buf);
2911 return AARCH64_RECORD_SUCCESS;
2912 }
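/* Worked example (encoding assumed from the architecture manual):
   "adds x0, x1, x2" is an add/subtract (shifted register) insn with
   bit 28 clear and the S bit (29) set, so the handler above records
   x0 (reg_rd) and AARCH64_CPSR_REGNUM as the two registers the
   instruction will overwrite.  */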
2913
2914 /* Record handler for data processing - immediate instructions. */
2915
2916 static unsigned int
2917 aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
2918 {
2919 uint8_t reg_rd, insn_bit28, insn_bit23, insn_bits24_27, setflags;
2920 uint32_t record_buf[4];
2921
2922 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
2923 insn_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
2924 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
2925 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
2926
2927 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
2928 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
2929 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
2930 {
2931 record_buf[0] = reg_rd;
2932 aarch64_insn_r->reg_rec_count = 1;
2933 }
2934 else if (insn_bits24_27 == 0x01)
2935 {
2936 /* Add/Subtract (immediate). */
2937 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
2938 record_buf[0] = reg_rd;
2939 aarch64_insn_r->reg_rec_count = 1;
2940 if (setflags)
2941 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
2942 }
2943 else if (insn_bits24_27 == 0x02 && !insn_bit23)
2944 {
2945 /* Logical (immediate). */
2946 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
2947 record_buf[0] = reg_rd;
2948 aarch64_insn_r->reg_rec_count = 1;
2949 if (setflags)
2950 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
2951 }
2952 else
2953 return AARCH64_RECORD_UNKNOWN;
2954
2955 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
2956 record_buf);
2957 return AARCH64_RECORD_SUCCESS;
2958 }
2959
2960 /* Record handler for branch, exception generation and system instructions. */
2961
2962 static unsigned int
2963 aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
2964 {
2965 struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
2966 uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
2967 uint32_t record_buf[4];
2968
2969 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
2970 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
2971 insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);
2972
2973 if (insn_bits28_31 == 0x0d)
2974 {
2975 /* Exception generation instructions. */
2976 if (insn_bits24_27 == 0x04)
2977 {
2978 if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
2979 && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
2980 && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
2981 {
2982 ULONGEST svc_number;
2983
2984 regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
2985 &svc_number);
2986 return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
2987 svc_number);
2988 }
2989 else
2990 return AARCH64_RECORD_UNSUPPORTED;
2991 }
2992 /* System instructions. */
2993 else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
2994 {
2995 uint32_t reg_rt, reg_crn;
2996
2997 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
2998 reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);
2999
3000 /* Record rt in case of sysl and mrs instructions. */
3001 if (bit (aarch64_insn_r->aarch64_insn, 21))
3002 {
3003 record_buf[0] = reg_rt;
3004 aarch64_insn_r->reg_rec_count = 1;
3005 }
3006 /* Record cpsr for hint and msr(immediate) instructions. */
3007 else if (reg_crn == 0x02 || reg_crn == 0x04)
3008 {
3009 record_buf[0] = AARCH64_CPSR_REGNUM;
3010 aarch64_insn_r->reg_rec_count = 1;
3011 }
3012 }
3013 /* Unconditional branch (register). */
3014 else if ((insn_bits24_27 & 0x0e) == 0x06)
3015 {
3016 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3017 if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
3018 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3019 }
3020 else
3021 return AARCH64_RECORD_UNKNOWN;
3022 }
3023 /* Unconditional branch (immediate). */
3024 else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
3025 {
3026 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3027 if (bit (aarch64_insn_r->aarch64_insn, 31))
3028 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3029 }
3030 else
3031 /* Compare & branch (immediate), Test & branch (immediate) and
3032 Conditional branch (immediate). */
3033 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3034
3035 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3036 record_buf);
3037 return AARCH64_RECORD_SUCCESS;
3038 }
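/* Worked example (encoding assumed from the architecture manual):
   "bl target" is an unconditional branch (immediate) with bit 31
   set, so both the PC and the link register are recorded above,
   while a plain "b target" records only the PC.  */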
3039
3040 /* Record handler for advanced SIMD load and store instructions. */
3041
3042 static unsigned int
3043 aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
3044 {
3045 CORE_ADDR address;
3046 uint64_t addr_offset = 0;
3047 uint32_t record_buf[24];
3048 uint64_t record_buf_mem[24];
3049 uint32_t reg_rn, reg_rt;
3050 uint32_t reg_index = 0, mem_index = 0;
3051 uint8_t opcode_bits, size_bits;
3052
3053 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3054 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3055 size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3056 opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3057 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);
3058
3059 if (record_debug)
3060 debug_printf ("Process record: Advanced SIMD load/store\n");
3061
3062 /* Load/store single structure. */
3063 if (bit (aarch64_insn_r->aarch64_insn, 24))
3064 {
3065 uint8_t sindex, scale, selem, esize, replicate = 0;
3066 scale = opcode_bits >> 2;
3067 selem = ((opcode_bits & 0x02)
3068 | bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
3069 switch (scale)
3070 {
3071 case 1:
3072 if (size_bits & 0x01)
3073 return AARCH64_RECORD_UNKNOWN;
3074 break;
3075 case 2:
3076 if ((size_bits >> 1) & 0x01)
3077 return AARCH64_RECORD_UNKNOWN;
3078 if (size_bits & 0x01)
3079 {
3080 if (!((opcode_bits >> 1) & 0x01))
3081 scale = 3;
3082 else
3083 return AARCH64_RECORD_UNKNOWN;
3084 }
3085 break;
3086 case 3:
3087 if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
3088 {
3089 scale = size_bits;
3090 replicate = 1;
3091 break;
3092 }
3093 else
3094 return AARCH64_RECORD_UNKNOWN;
3095 default:
3096 break;
3097 }
3098 esize = 8 << scale;
3099 if (replicate)
3100 for (sindex = 0; sindex < selem; sindex++)
3101 {
3102 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3103 reg_rt = (reg_rt + 1) % 32;
3104 }
3105 else
3106 {
3107 for (sindex = 0; sindex < selem; sindex++)
3108 if (bit (aarch64_insn_r->aarch64_insn, 22))
3109 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3110 else
3111 {
3112 record_buf_mem[mem_index++] = esize / 8;
3113 record_buf_mem[mem_index++] = address + addr_offset;
3114 }
3115 addr_offset = addr_offset + (esize / 8);
3116 reg_rt = (reg_rt + 1) % 32;
3117 }
3118 }
3119 /* Load/store multiple structure. */
3120 else
3121 {
3122 uint8_t selem, esize, rpt, elements;
3123 uint8_t eindex, rindex;
3124
3125 esize = 8 << size_bits;
3126 if (bit (aarch64_insn_r->aarch64_insn, 30))
3127 elements = 128 / esize;
3128 else
3129 elements = 64 / esize;
3130
3131 switch (opcode_bits)
3132 {
3133 /* LD/ST4 (4 Registers). */
3134 case 0:
3135 rpt = 1;
3136 selem = 4;
3137 break;
3138 /* LD/ST1 (4 Registers). */
3139 case 2:
3140 rpt = 4;
3141 selem = 1;
3142 break;
3143 /* LD/ST3 (3 Registers). */
3144 case 4:
3145 rpt = 1;
3146 selem = 3;
3147 break;
3148 /* LD/ST1 (3 Registers). */
3149 case 6:
3150 rpt = 3;
3151 selem = 1;
3152 break;
3153 /* LD/ST1 (1 Register). */
3154 case 7:
3155 rpt = 1;
3156 selem = 1;
3157 break;
3158 /* LD/ST2 (2 Registers). */
3159 case 8:
3160 rpt = 1;
3161 selem = 2;
3162 break;
3163 /* LD/ST1 (2 Registers). */
3164 case 10:
3165 rpt = 2;
3166 selem = 1;
3167 break;
3168 default:
3169 return AARCH64_RECORD_UNSUPPORTED;
3171 }
3172 for (rindex = 0; rindex < rpt; rindex++)
3173 for (eindex = 0; eindex < elements; eindex++)
3174 {
3175 uint8_t reg_tt, sindex;
3176 reg_tt = (reg_rt + rindex) % 32;
3177 for (sindex = 0; sindex < selem; sindex++)
3178 {
3179 if (bit (aarch64_insn_r->aarch64_insn, 22))
3180 record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
3181 else
3182 {
3183 record_buf_mem[mem_index++] = esize / 8;
3184 record_buf_mem[mem_index++] = address + addr_offset;
3185 }
3186 addr_offset = addr_offset + (esize / 8);
3187 reg_tt = (reg_tt + 1) % 32;
3188 }
3189 }
3190 }
3191
3192 if (bit (aarch64_insn_r->aarch64_insn, 23))
3193 record_buf[reg_index++] = reg_rn;
3194
3195 aarch64_insn_r->reg_rec_count = reg_index;
3196 aarch64_insn_r->mem_rec_count = mem_index / 2;
3197 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3198 record_buf_mem);
3199 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3200 record_buf);
3201 return AARCH64_RECORD_SUCCESS;
3202 }
3203
3204 /* Record handler for load and store instructions. */
3205
3206 static unsigned int
3207 aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
3208 {
3209 uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
3210 uint8_t insn_bit23, insn_bit21;
3211 uint8_t opc, size_bits, ld_flag, vector_flag;
3212 uint32_t reg_rn, reg_rt, reg_rt2;
3213 uint64_t datasize, offset;
3214 uint32_t record_buf[8];
3215 uint64_t record_buf_mem[8];
3216 CORE_ADDR address;
3217
3218 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3219 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3220 insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
3221 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3222 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3223 ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
3224 vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
3225 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3226 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3227 reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
3228 size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);
3229
3230 /* Load/store exclusive. */
3231 if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
3232 {
3233 if (record_debug)
3234 debug_printf ("Process record: load/store exclusive\n");
3235
3236 if (ld_flag)
3237 {
3238 record_buf[0] = reg_rt;
3239 aarch64_insn_r->reg_rec_count = 1;
3240 if (insn_bit21)
3241 {
3242 record_buf[1] = reg_rt2;
3243 aarch64_insn_r->reg_rec_count = 2;
3244 }
3245 }
3246 else
3247 {
3248 if (insn_bit21)
3249 datasize = (8 << size_bits) * 2;
3250 else
3251 datasize = (8 << size_bits);
3252 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3253 &address);
3254 record_buf_mem[0] = datasize / 8;
3255 record_buf_mem[1] = address;
3256 aarch64_insn_r->mem_rec_count = 1;
3257 if (!insn_bit23)
3258 {
3259 /* Save register rs. */
3260 record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
3261 aarch64_insn_r->reg_rec_count = 1;
3262 }
3263 }
3264 }
3265 /* Load register (literal) instructions decoding. */
3266 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
3267 {
3268 if (record_debug)
3269 debug_printf ("Process record: load register (literal)\n");
3270 if (vector_flag)
3271 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3272 else
3273 record_buf[0] = reg_rt;
3274 aarch64_insn_r->reg_rec_count = 1;
3275 }
3276 /* All types of load/store pair instructions decoding. */
3277 else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
3278 {
3279 if (record_debug)
3280 debug_printf ("Process record: load/store pair\n");
3281
3282 if (ld_flag)
3283 {
3284 if (vector_flag)
3285 {
3286 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3287 record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
3288 }
3289 else
3290 {
3291 record_buf[0] = reg_rt;
3292 record_buf[1] = reg_rt2;
3293 }
3294 aarch64_insn_r->reg_rec_count = 2;
3295 }
3296 else
3297 {
3298 uint16_t imm7_off;
3299 imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
3300 if (!vector_flag)
3301 size_bits = size_bits >> 1;
3302 datasize = 8 << (2 + size_bits);
3303 offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
3304 offset = offset << (2 + size_bits);
3305 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3306 &address);
3307 if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
3308 {
3309 if (imm7_off & 0x40)
3310 address = address - offset;
3311 else
3312 address = address + offset;
3313 }
3314
3315 record_buf_mem[0] = datasize / 8;
3316 record_buf_mem[1] = address;
3317 record_buf_mem[2] = datasize / 8;
3318 record_buf_mem[3] = address + (datasize / 8);
3319 aarch64_insn_r->mem_rec_count = 2;
3320 }
3321 if (bit (aarch64_insn_r->aarch64_insn, 23))
3322 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3323 }
3324 /* Load/store register (unsigned immediate) instructions. */
3325 else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
3326 {
3327 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3328 if (!(opc >> 1))
3329 if (opc & 0x01)
3330 ld_flag = 0x01;
3331 else
3332 ld_flag = 0x0;
3333 else
3334 if (size_bits != 0x03)
3335 ld_flag = 0x01;
3336 else
3337 return AARCH64_RECORD_UNKNOWN;
3338
3339 if (record_debug)
3340 {
3341 debug_printf ("Process record: load/store (unsigned immediate):"
3342 " size %x V %d opc %x\n", size_bits, vector_flag,
3343 opc);
3344 }
3345
3346 if (!ld_flag)
3347 {
3348 offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
3349 datasize = 8 << size_bits;
3350 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3351 &address);
3352 offset = offset << size_bits;
3353 address = address + offset;
3354
3355 record_buf_mem[0] = datasize >> 3;
3356 record_buf_mem[1] = address;
3357 aarch64_insn_r->mem_rec_count = 1;
3358 }
3359 else
3360 {
3361 if (vector_flag)
3362 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3363 else
3364 record_buf[0] = reg_rt;
3365 aarch64_insn_r->reg_rec_count = 1;
3366 }
3367 }
3368 /* Load/store register (register offset) instructions. */
3369 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3370 && insn_bits10_11 == 0x02 && insn_bit21)
3371 {
3372 if (record_debug)
3373 debug_printf ("Process record: load/store (register offset)\n");
3374 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3375 if (!(opc >> 1))
3376 if (opc & 0x01)
3377 ld_flag = 0x01;
3378 else
3379 ld_flag = 0x0;
3380 else
3381 if (size_bits != 0x03)
3382 ld_flag = 0x01;
3383 else
3384 return AARCH64_RECORD_UNKNOWN;
3385
3386 if (!ld_flag)
3387 {
3388 uint64_t reg_rm_val;
3389 regcache_raw_read_unsigned (aarch64_insn_r->regcache,
3390 bits (aarch64_insn_r->aarch64_insn, 16, 20), &reg_rm_val);
3391 if (bit (aarch64_insn_r->aarch64_insn, 12))
3392 offset = reg_rm_val << size_bits;
3393 else
3394 offset = reg_rm_val;
3395 datasize = 8 << size_bits;
3396 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3397 &address);
3398 address = address + offset;
3399 record_buf_mem[0] = datasize >> 3;
3400 record_buf_mem[1] = address;
3401 aarch64_insn_r->mem_rec_count = 1;
3402 }
3403 else
3404 {
3405 if (vector_flag)
3406 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3407 else
3408 record_buf[0] = reg_rt;
3409 aarch64_insn_r->reg_rec_count = 1;
3410 }
3411 }
3412 /* Load/store register (immediate and unprivileged) instructions. */
3413 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3414 && !insn_bit21)
3415 {
3416 if (record_debug)
3417 {
3418 debug_printf ("Process record: load/store "
3419 "(immediate and unprivileged)\n");
3420 }
3421 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3422 if (!(opc >> 1))
3423 if (opc & 0x01)
3424 ld_flag = 0x01;
3425 else
3426 ld_flag = 0x0;
3427 else
3428 if (size_bits != 0x03)
3429 ld_flag = 0x01;
3430 else
3431 return AARCH64_RECORD_UNKNOWN;
3432
3433 if (!ld_flag)
3434 {
3435 uint16_t imm9_off;
3436 imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
3437 offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
3438 datasize = 8 << size_bits;
3439 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3440 &address);
3441 if (insn_bits10_11 != 0x01)
3442 {
3443 if (imm9_off & 0x0100)
3444 address = address - offset;
3445 else
3446 address = address + offset;
3447 }
3448 record_buf_mem[0] = datasize >> 3;
3449 record_buf_mem[1] = address;
3450 aarch64_insn_r->mem_rec_count = 1;
3451 }
3452 else
3453 {
3454 if (vector_flag)
3455 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3456 else
3457 record_buf[0] = reg_rt;
3458 aarch64_insn_r->reg_rec_count = 1;
3459 }
3460 if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
3461 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3462 }
3463 /* Advanced SIMD load/store instructions. */
3464 else
3465 return aarch64_record_asimd_load_store (aarch64_insn_r);
3466
3467 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3468 record_buf_mem);
3469 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3470 record_buf);
3471 return AARCH64_RECORD_SUCCESS;
3472 }
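/* Worked example (a sketch; encoding assumed from the architecture
   manual): for "stp x0, x1, [sp, #-16]!" the load/store pair path
   computes the pre-decremented address from sp, records two 8-byte
   memory entries at that address and at address + 8, and, because
   the writeback bit (23) is set, also records sp itself.  */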
3473
3474 /* Record handler for data processing SIMD and floating point instructions. */
3475
3476 static unsigned int
3477 aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
3478 {
3479 uint8_t insn_bit21, opcode, rmode, reg_rd;
3480 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
3481 uint8_t insn_bits11_14;
3482 uint32_t record_buf[2];
3483
3484 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3485 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3486 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3487 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3488 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
3489 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
3490 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
3491 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3492 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3493
3494 if (record_debug)
3495 debug_printf ("Process record: data processing SIMD/FP: ");
3496
3497 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
3498 {
3499 /* Floating point - fixed point conversion instructions. */
3500 if (!insn_bit21)
3501 {
3502 if (record_debug)
3503 debug_printf ("FP - fixed point conversion");
3504
3505 if ((opcode >> 1) == 0x0 && rmode == 0x03)
3506 record_buf[0] = reg_rd;
3507 else
3508 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3509 }
3510 /* Floating point - conditional compare instructions. */
3511 else if (insn_bits10_11 == 0x01)
3512 {
3513 if (record_debug)
3514 debug_printf ("FP - conditional compare");
3515
3516 record_buf[0] = AARCH64_CPSR_REGNUM;
3517 }
3518 /* Floating point - data processing (2-source) and
3519 conditional select instructions. */
3520 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
3521 {
3522 if (record_debug)
3523 debug_printf ("FP - DP (2-source)");
3524
3525 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3526 }
3527 else if (insn_bits10_11 == 0x00)
3528 {
3529 /* Floating point - immediate instructions. */
3530 if ((insn_bits12_15 & 0x01) == 0x01
3531 || (insn_bits12_15 & 0x07) == 0x04)
3532 {
3533 if (record_debug)
3534 debug_printf ("FP - immediate");
3535 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3536 }
3537 /* Floating point - compare instructions. */
3538 else if ((insn_bits12_15 & 0x03) == 0x02)
3539 {
3540 if (record_debug)
3541 debug_printf ("FP - compare");
3542 record_buf[0] = AARCH64_CPSR_REGNUM;
3543 }
3544 /* Floating point - integer conversions instructions. */
3545 else if (insn_bits12_15 == 0x00)
3546 {
3547 /* Convert float to integer instruction. */
3548 if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
3549 {
3550 if (record_debug)
3551 debug_printf ("float to int conversion");
3552
3553 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3554 }
3555 /* Convert integer to float instruction. */
3556 else if ((opcode >> 1) == 0x01 && !rmode)
3557 {
3558 if (record_debug)
3559 debug_printf ("int to float conversion");
3560
3561 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3562 }
3563 /* Move float to integer instruction. */
3564 else if ((opcode >> 1) == 0x03)
3565 {
3566 if (record_debug)
3567 debug_printf ("move float to int");
3568
3569 if (!(opcode & 0x01))
3570 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3571 else
3572 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3573 }
3574 else
3575 return AARCH64_RECORD_UNKNOWN;
3576 }
3577 else
3578 return AARCH64_RECORD_UNKNOWN;
3579 }
3580 else
3581 return AARCH64_RECORD_UNKNOWN;
3582 }
3583 else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
3584 {
3585 if (record_debug)
3586 debug_printf ("SIMD copy");
3587
3588 /* Advanced SIMD copy instructions. */
3589 if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
3590 && !bit (aarch64_insn_r->aarch64_insn, 15)
3591 && bit (aarch64_insn_r->aarch64_insn, 10))
3592 {
3593 if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
3594 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3595 else
3596 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3597 }
3598 else
3599 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3600 }
3601 /* All remaining floating point or advanced SIMD instructions. */
3602 else
3603 {
3604 if (record_debug)
3605 debug_printf ("all remain");
3606
3607 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3608 }
3609
3610 if (record_debug)
3611 debug_printf ("\n");
3612
3613 aarch64_insn_r->reg_rec_count++;
3614 gdb_assert (aarch64_insn_r->reg_rec_count == 1);
3615 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3616 record_buf);
3617 return AARCH64_RECORD_SUCCESS;
3618 }
3619
3620 /* Decode the type of an instruction and invoke its record handler. */
3621
3622 static unsigned int
3623 aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
3624 {
3625 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
3626
3627 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
3628 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
3629 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
3630 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
3631
3632 /* Data processing - immediate instructions. */
3633 if (!ins_bit26 && !ins_bit27 && ins_bit28)
3634 return aarch64_record_data_proc_imm (aarch64_insn_r);
3635
3636 /* Branch, exception generation and system instructions. */
3637 if (ins_bit26 && !ins_bit27 && ins_bit28)
3638 return aarch64_record_branch_except_sys (aarch64_insn_r);
3639
3640 /* Load and store instructions. */
3641 if (!ins_bit25 && ins_bit27)
3642 return aarch64_record_load_store (aarch64_insn_r);
3643
3644 /* Data processing - register instructions. */
3645 if (ins_bit25 && !ins_bit26 && ins_bit27)
3646 return aarch64_record_data_proc_reg (aarch64_insn_r);
3647
3648 /* Data processing - SIMD and floating point instructions. */
3649 if (ins_bit25 && ins_bit26 && ins_bit27)
3650 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
3651
3652 return AARCH64_RECORD_UNSUPPORTED;
3653 }
3654
3655 /* Cleans up local record registers and memory allocations. */
3656
3657 static void
3658 deallocate_reg_mem (insn_decode_record *record)
3659 {
3660 xfree (record->aarch64_regs);
3661 xfree (record->aarch64_mems);
3662 }
3663
3664 /* Parse the current instruction and record the values of the registers
3665 and memory locations it will change in record_arch_list.
3666 Return -1 if something is wrong. */
3667
3668 int
3669 aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
3670 CORE_ADDR insn_addr)
3671 {
3672 uint32_t rec_no = 0;
3673 uint8_t insn_size = 4;
3674 uint32_t ret = 0;
3675 ULONGEST t_bit = 0, insn_id = 0;
3676 gdb_byte buf[insn_size];
3677 insn_decode_record aarch64_record;
3678
3679 memset (&buf[0], 0, insn_size);
3680 memset (&aarch64_record, 0, sizeof (insn_decode_record));
3681 target_read_memory (insn_addr, &buf[0], insn_size);
3682 aarch64_record.aarch64_insn
3683 = (uint32_t) extract_unsigned_integer (&buf[0],
3684 insn_size,
3685 gdbarch_byte_order (gdbarch));
3686 aarch64_record.regcache = regcache;
3687 aarch64_record.this_addr = insn_addr;
3688 aarch64_record.gdbarch = gdbarch;
3689
3690 ret = aarch64_record_decode_insn_handler (&aarch64_record);
3691 if (ret == AARCH64_RECORD_UNSUPPORTED)
3692 {
3693 printf_unfiltered (_("Process record does not support instruction "
3694 "0x%0x at address %s.\n"),
3695 aarch64_record.aarch64_insn,
3696 paddress (gdbarch, insn_addr));
3697 ret = -1;
3698 }
3699
3700 if (0 == ret)
3701 {
3702 /* Record registers. */
3703 record_full_arch_list_add_reg (aarch64_record.regcache,
3704 AARCH64_PC_REGNUM);
3705 /* Always record register CPSR. */
3706 record_full_arch_list_add_reg (aarch64_record.regcache,
3707 AARCH64_CPSR_REGNUM);
3708 if (aarch64_record.aarch64_regs)
3709 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
3710 if (record_full_arch_list_add_reg (aarch64_record.regcache,
3711 aarch64_record.aarch64_regs[rec_no]))
3712 ret = -1;
3713
3714 /* Record memories. */
3715 if (aarch64_record.aarch64_mems)
3716 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
3717 if (record_full_arch_list_add_mem
3718 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
3719 aarch64_record.aarch64_mems[rec_no].len))
3720 ret = -1;
3721
3722 if (record_full_arch_list_add_end ())
3723 ret = -1;
3724 }
3725
3726 deallocate_reg_mem (&aarch64_record);
3727 return ret;
3728 }