1 /* Common target dependent code for GDB on AArch64 systems.
3 Copyright (C) 2009-2015 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
6 This file is part of GDB.
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
29 #include "reggroups.h"
32 #include "arch-utils.h"
34 #include "frame-unwind.h"
35 #include "frame-base.h"
36 #include "trad-frame.h"
38 #include "dwarf2-frame.h"
40 #include "prologue-value.h"
41 #include "target-descriptions.h"
42 #include "user-regs.h"
48 #include "aarch64-tdep.h"
51 #include "elf/aarch64.h"
56 #include "record-full.h"
58 #include "features/aarch64.c"
60 /* Pseudo register base numbers. */
61 #define AARCH64_Q0_REGNUM 0
62 #define AARCH64_D0_REGNUM (AARCH64_Q0_REGNUM + 32)
63 #define AARCH64_S0_REGNUM (AARCH64_D0_REGNUM + 32)
64 #define AARCH64_H0_REGNUM (AARCH64_S0_REGNUM + 32)
65 #define AARCH64_B0_REGNUM (AARCH64_H0_REGNUM + 32)
67 /* The standard register names, and all the valid aliases for them. */
70 const char *const name
;
72 } aarch64_register_aliases
[] =
74 /* 64-bit register names. */
75 {"fp", AARCH64_FP_REGNUM
},
76 {"lr", AARCH64_LR_REGNUM
},
77 {"sp", AARCH64_SP_REGNUM
},
79 /* 32-bit register names. */
80 {"w0", AARCH64_X0_REGNUM
+ 0},
81 {"w1", AARCH64_X0_REGNUM
+ 1},
82 {"w2", AARCH64_X0_REGNUM
+ 2},
83 {"w3", AARCH64_X0_REGNUM
+ 3},
84 {"w4", AARCH64_X0_REGNUM
+ 4},
85 {"w5", AARCH64_X0_REGNUM
+ 5},
86 {"w6", AARCH64_X0_REGNUM
+ 6},
87 {"w7", AARCH64_X0_REGNUM
+ 7},
88 {"w8", AARCH64_X0_REGNUM
+ 8},
89 {"w9", AARCH64_X0_REGNUM
+ 9},
90 {"w10", AARCH64_X0_REGNUM
+ 10},
91 {"w11", AARCH64_X0_REGNUM
+ 11},
92 {"w12", AARCH64_X0_REGNUM
+ 12},
93 {"w13", AARCH64_X0_REGNUM
+ 13},
94 {"w14", AARCH64_X0_REGNUM
+ 14},
95 {"w15", AARCH64_X0_REGNUM
+ 15},
96 {"w16", AARCH64_X0_REGNUM
+ 16},
97 {"w17", AARCH64_X0_REGNUM
+ 17},
98 {"w18", AARCH64_X0_REGNUM
+ 18},
99 {"w19", AARCH64_X0_REGNUM
+ 19},
100 {"w20", AARCH64_X0_REGNUM
+ 20},
101 {"w21", AARCH64_X0_REGNUM
+ 21},
102 {"w22", AARCH64_X0_REGNUM
+ 22},
103 {"w23", AARCH64_X0_REGNUM
+ 23},
104 {"w24", AARCH64_X0_REGNUM
+ 24},
105 {"w25", AARCH64_X0_REGNUM
+ 25},
106 {"w26", AARCH64_X0_REGNUM
+ 26},
107 {"w27", AARCH64_X0_REGNUM
+ 27},
108 {"w28", AARCH64_X0_REGNUM
+ 28},
109 {"w29", AARCH64_X0_REGNUM
+ 29},
110 {"w30", AARCH64_X0_REGNUM
+ 30},
113 {"ip0", AARCH64_X0_REGNUM
+ 16},
114 {"ip1", AARCH64_X0_REGNUM
+ 17}
117 /* The required core 'R' registers. */
118 static const char *const aarch64_r_register_names
[] =
120 /* These registers must appear in consecutive RAW register number
121 order and they must begin with AARCH64_X0_REGNUM! */
122 "x0", "x1", "x2", "x3",
123 "x4", "x5", "x6", "x7",
124 "x8", "x9", "x10", "x11",
125 "x12", "x13", "x14", "x15",
126 "x16", "x17", "x18", "x19",
127 "x20", "x21", "x22", "x23",
128 "x24", "x25", "x26", "x27",
129 "x28", "x29", "x30", "sp",
133 /* The FP/SIMD 'V' registers. */
134 static const char *const aarch64_v_register_names
[] =
136 /* These registers must appear in consecutive RAW register number
137 order and they must begin with AARCH64_V0_REGNUM! */
138 "v0", "v1", "v2", "v3",
139 "v4", "v5", "v6", "v7",
140 "v8", "v9", "v10", "v11",
141 "v12", "v13", "v14", "v15",
142 "v16", "v17", "v18", "v19",
143 "v20", "v21", "v22", "v23",
144 "v24", "v25", "v26", "v27",
145 "v28", "v29", "v30", "v31",
150 /* AArch64 prologue cache structure. */
151 struct aarch64_prologue_cache
153 /* The program counter at the start of the function. It is used to
154 identify this frame as a prologue frame. */
157 /* The program counter at the time this frame was created; i.e. where
158 this function was called from. It is used to identify this frame as a
162 /* The stack pointer at the time this frame was created; i.e. the
163 caller's stack pointer when this function was called. It is used
164 to identify this frame. */
167 /* Is the target available to read from? */
170 /* The frame base for this frame is just prev_sp - frame size.
171 FRAMESIZE is the distance from the frame pointer to the
172 initial stack pointer. */
175 /* The register used to hold the frame pointer for this frame. */
178 /* Saved register offsets. */
179 struct trad_frame_saved_reg
*saved_regs
;
182 /* Toggle this file's internal debugging dump. */
183 static int aarch64_debug
;
186 show_aarch64_debug (struct ui_file
*file
, int from_tty
,
187 struct cmd_list_element
*c
, const char *value
)
189 fprintf_filtered (file
, _("AArch64 debugging is %s.\n"), value
);
192 /* Extract a signed value from a bit field within an instruction
195 INSN is the instruction opcode.
197 WIDTH specifies the width of the bit field to extract (in bits).
199 OFFSET specifies the least significant bit of the field where bits
200 are numbered zero counting from least to most significant. */
203 extract_signed_bitfield (uint32_t insn
, unsigned width
, unsigned offset
)
205 unsigned shift_l
= sizeof (int32_t) * 8 - (offset
+ width
);
206 unsigned shift_r
= sizeof (int32_t) * 8 - width
;
208 return ((int32_t) insn
<< shift_l
) >> shift_r
;
211 /* Determine if specified bits within an instruction opcode matches a
214 INSN is the instruction opcode.
216 MASK specifies the bits within the opcode that are to be tested
217 agsinst for a match with PATTERN. */
220 decode_masked_match (uint32_t insn
, uint32_t mask
, uint32_t pattern
)
222 return (insn
& mask
) == pattern
;
225 /* Decode an opcode if it represents an immediate ADD or SUB instruction.
227 ADDR specifies the address of the opcode.
228 INSN specifies the opcode to test.
229 RD receives the 'rd' field from the decoded instruction.
230 RN receives the 'rn' field from the decoded instruction.
232 Return 1 if the opcodes matches and is decoded, otherwise 0. */
234 decode_add_sub_imm (CORE_ADDR addr
, uint32_t insn
, unsigned *rd
, unsigned *rn
,
237 if ((insn
& 0x9f000000) == 0x91000000)
242 *rd
= (insn
>> 0) & 0x1f;
243 *rn
= (insn
>> 5) & 0x1f;
244 *imm
= (insn
>> 10) & 0xfff;
245 shift
= (insn
>> 22) & 0x3;
246 op_is_sub
= (insn
>> 30) & 0x1;
264 fprintf_unfiltered (gdb_stdlog
,
265 "decode: 0x%s 0x%x add x%u, x%u, #%d\n",
266 core_addr_to_string_nz (addr
), insn
, *rd
, *rn
,
273 /* Decode an opcode if it represents an ADRP instruction.
275 ADDR specifies the address of the opcode.
276 INSN specifies the opcode to test.
277 RD receives the 'rd' field from the decoded instruction.
279 Return 1 if the opcodes matches and is decoded, otherwise 0. */
282 decode_adrp (CORE_ADDR addr
, uint32_t insn
, unsigned *rd
)
284 if (decode_masked_match (insn
, 0x9f000000, 0x90000000))
286 *rd
= (insn
>> 0) & 0x1f;
289 fprintf_unfiltered (gdb_stdlog
,
290 "decode: 0x%s 0x%x adrp x%u, #?\n",
291 core_addr_to_string_nz (addr
), insn
, *rd
);
297 /* Decode an opcode if it represents an branch immediate or branch
298 and link immediate instruction.
300 ADDR specifies the address of the opcode.
301 INSN specifies the opcode to test.
302 LINK receives the 'link' bit from the decoded instruction.
303 OFFSET receives the immediate offset from the decoded instruction.
305 Return 1 if the opcodes matches and is decoded, otherwise 0. */
308 decode_b (CORE_ADDR addr
, uint32_t insn
, unsigned *link
, int32_t *offset
)
310 /* b 0001 01ii iiii iiii iiii iiii iiii iiii */
311 /* bl 1001 01ii iiii iiii iiii iiii iiii iiii */
312 if (decode_masked_match (insn
, 0x7c000000, 0x14000000))
315 *offset
= extract_signed_bitfield (insn
, 26, 0) << 2;
318 fprintf_unfiltered (gdb_stdlog
,
319 "decode: 0x%s 0x%x %s 0x%s\n",
320 core_addr_to_string_nz (addr
), insn
,
322 core_addr_to_string_nz (addr
+ *offset
));
329 /* Decode an opcode if it represents a conditional branch instruction.
331 ADDR specifies the address of the opcode.
332 INSN specifies the opcode to test.
333 COND receives the branch condition field from the decoded
335 OFFSET receives the immediate offset from the decoded instruction.
337 Return 1 if the opcodes matches and is decoded, otherwise 0. */
340 decode_bcond (CORE_ADDR addr
, uint32_t insn
, unsigned *cond
, int32_t *offset
)
342 if (decode_masked_match (insn
, 0xfe000000, 0x54000000))
344 *cond
= (insn
>> 0) & 0xf;
345 *offset
= extract_signed_bitfield (insn
, 19, 5) << 2;
348 fprintf_unfiltered (gdb_stdlog
,
349 "decode: 0x%s 0x%x b<%u> 0x%s\n",
350 core_addr_to_string_nz (addr
), insn
, *cond
,
351 core_addr_to_string_nz (addr
+ *offset
));
357 /* Decode an opcode if it represents a branch via register instruction.
359 ADDR specifies the address of the opcode.
360 INSN specifies the opcode to test.
361 LINK receives the 'link' bit from the decoded instruction.
362 RN receives the 'rn' field from the decoded instruction.
364 Return 1 if the opcodes matches and is decoded, otherwise 0. */
367 decode_br (CORE_ADDR addr
, uint32_t insn
, unsigned *link
, unsigned *rn
)
369 /* 8 4 0 6 2 8 4 0 */
370 /* blr 110101100011111100000000000rrrrr */
371 /* br 110101100001111100000000000rrrrr */
372 if (decode_masked_match (insn
, 0xffdffc1f, 0xd61f0000))
374 *link
= (insn
>> 21) & 1;
375 *rn
= (insn
>> 5) & 0x1f;
378 fprintf_unfiltered (gdb_stdlog
,
379 "decode: 0x%s 0x%x %s 0x%x\n",
380 core_addr_to_string_nz (addr
), insn
,
381 *link
? "blr" : "br", *rn
);
388 /* Decode an opcode if it represents a CBZ or CBNZ instruction.
390 ADDR specifies the address of the opcode.
391 INSN specifies the opcode to test.
392 IS64 receives the 'sf' field from the decoded instruction.
393 OP receives the 'op' field from the decoded instruction.
394 RN receives the 'rn' field from the decoded instruction.
395 OFFSET receives the 'imm19' field from the decoded instruction.
397 Return 1 if the opcodes matches and is decoded, otherwise 0. */
400 decode_cb (CORE_ADDR addr
,
401 uint32_t insn
, int *is64
, unsigned *op
, unsigned *rn
,
404 if (decode_masked_match (insn
, 0x7e000000, 0x34000000))
406 /* cbz T011 010o iiii iiii iiii iiii iiir rrrr */
407 /* cbnz T011 010o iiii iiii iiii iiii iiir rrrr */
409 *rn
= (insn
>> 0) & 0x1f;
410 *is64
= (insn
>> 31) & 0x1;
411 *op
= (insn
>> 24) & 0x1;
412 *offset
= extract_signed_bitfield (insn
, 19, 5) << 2;
415 fprintf_unfiltered (gdb_stdlog
,
416 "decode: 0x%s 0x%x %s 0x%s\n",
417 core_addr_to_string_nz (addr
), insn
,
418 *op
? "cbnz" : "cbz",
419 core_addr_to_string_nz (addr
+ *offset
));
425 /* Decode an opcode if it represents a ERET instruction.
427 ADDR specifies the address of the opcode.
428 INSN specifies the opcode to test.
430 Return 1 if the opcodes matches and is decoded, otherwise 0. */
433 decode_eret (CORE_ADDR addr
, uint32_t insn
)
435 /* eret 1101 0110 1001 1111 0000 0011 1110 0000 */
436 if (insn
== 0xd69f03e0)
439 fprintf_unfiltered (gdb_stdlog
, "decode: 0x%s 0x%x eret\n",
440 core_addr_to_string_nz (addr
), insn
);
446 /* Decode an opcode if it represents a MOVZ instruction.
448 ADDR specifies the address of the opcode.
449 INSN specifies the opcode to test.
450 RD receives the 'rd' field from the decoded instruction.
452 Return 1 if the opcodes matches and is decoded, otherwise 0. */
455 decode_movz (CORE_ADDR addr
, uint32_t insn
, unsigned *rd
)
457 if (decode_masked_match (insn
, 0xff800000, 0x52800000))
459 *rd
= (insn
>> 0) & 0x1f;
462 fprintf_unfiltered (gdb_stdlog
,
463 "decode: 0x%s 0x%x movz x%u, #?\n",
464 core_addr_to_string_nz (addr
), insn
, *rd
);
470 /* Decode an opcode if it represents a ORR (shifted register)
473 ADDR specifies the address of the opcode.
474 INSN specifies the opcode to test.
475 RD receives the 'rd' field from the decoded instruction.
476 RN receives the 'rn' field from the decoded instruction.
477 RM receives the 'rm' field from the decoded instruction.
478 IMM receives the 'imm6' field from the decoded instruction.
480 Return 1 if the opcodes matches and is decoded, otherwise 0. */
483 decode_orr_shifted_register_x (CORE_ADDR addr
,
484 uint32_t insn
, unsigned *rd
, unsigned *rn
,
485 unsigned *rm
, int32_t *imm
)
487 if (decode_masked_match (insn
, 0xff200000, 0xaa000000))
489 *rd
= (insn
>> 0) & 0x1f;
490 *rn
= (insn
>> 5) & 0x1f;
491 *rm
= (insn
>> 16) & 0x1f;
492 *imm
= (insn
>> 10) & 0x3f;
495 fprintf_unfiltered (gdb_stdlog
,
496 "decode: 0x%s 0x%x orr x%u, x%u, x%u, #%u\n",
497 core_addr_to_string_nz (addr
), insn
, *rd
,
504 /* Decode an opcode if it represents a RET instruction.
506 ADDR specifies the address of the opcode.
507 INSN specifies the opcode to test.
508 RN receives the 'rn' field from the decoded instruction.
510 Return 1 if the opcodes matches and is decoded, otherwise 0. */
513 decode_ret (CORE_ADDR addr
, uint32_t insn
, unsigned *rn
)
515 if (decode_masked_match (insn
, 0xfffffc1f, 0xd65f0000))
517 *rn
= (insn
>> 5) & 0x1f;
519 fprintf_unfiltered (gdb_stdlog
,
520 "decode: 0x%s 0x%x ret x%u\n",
521 core_addr_to_string_nz (addr
), insn
, *rn
);
527 /* Decode an opcode if it represents the following instruction:
528 STP rt, rt2, [rn, #imm]
530 ADDR specifies the address of the opcode.
531 INSN specifies the opcode to test.
532 RT1 receives the 'rt' field from the decoded instruction.
533 RT2 receives the 'rt2' field from the decoded instruction.
534 RN receives the 'rn' field from the decoded instruction.
535 IMM receives the 'imm' field from the decoded instruction.
537 Return 1 if the opcodes matches and is decoded, otherwise 0. */
540 decode_stp_offset (CORE_ADDR addr
,
542 unsigned *rt1
, unsigned *rt2
, unsigned *rn
, int32_t *imm
)
544 if (decode_masked_match (insn
, 0xffc00000, 0xa9000000))
546 *rt1
= (insn
>> 0) & 0x1f;
547 *rn
= (insn
>> 5) & 0x1f;
548 *rt2
= (insn
>> 10) & 0x1f;
549 *imm
= extract_signed_bitfield (insn
, 7, 15);
553 fprintf_unfiltered (gdb_stdlog
,
554 "decode: 0x%s 0x%x stp x%u, x%u, [x%u + #%d]\n",
555 core_addr_to_string_nz (addr
), insn
,
556 *rt1
, *rt2
, *rn
, *imm
);
562 /* Decode an opcode if it represents the following instruction:
563 STP rt, rt2, [rn, #imm]!
565 ADDR specifies the address of the opcode.
566 INSN specifies the opcode to test.
567 RT1 receives the 'rt' field from the decoded instruction.
568 RT2 receives the 'rt2' field from the decoded instruction.
569 RN receives the 'rn' field from the decoded instruction.
570 IMM receives the 'imm' field from the decoded instruction.
572 Return 1 if the opcodes matches and is decoded, otherwise 0. */
575 decode_stp_offset_wb (CORE_ADDR addr
,
577 unsigned *rt1
, unsigned *rt2
, unsigned *rn
,
580 if (decode_masked_match (insn
, 0xffc00000, 0xa9800000))
582 *rt1
= (insn
>> 0) & 0x1f;
583 *rn
= (insn
>> 5) & 0x1f;
584 *rt2
= (insn
>> 10) & 0x1f;
585 *imm
= extract_signed_bitfield (insn
, 7, 15);
589 fprintf_unfiltered (gdb_stdlog
,
590 "decode: 0x%s 0x%x stp x%u, x%u, [x%u + #%d]!\n",
591 core_addr_to_string_nz (addr
), insn
,
592 *rt1
, *rt2
, *rn
, *imm
);
598 /* Decode an opcode if it represents the following instruction:
601 ADDR specifies the address of the opcode.
602 INSN specifies the opcode to test.
603 IS64 receives size field from the decoded instruction.
604 RT receives the 'rt' field from the decoded instruction.
605 RN receives the 'rn' field from the decoded instruction.
606 IMM receives the 'imm' field from the decoded instruction.
608 Return 1 if the opcodes matches and is decoded, otherwise 0. */
611 decode_stur (CORE_ADDR addr
, uint32_t insn
, int *is64
, unsigned *rt
,
612 unsigned *rn
, int32_t *imm
)
614 if (decode_masked_match (insn
, 0xbfe00c00, 0xb8000000))
616 *is64
= (insn
>> 30) & 1;
617 *rt
= (insn
>> 0) & 0x1f;
618 *rn
= (insn
>> 5) & 0x1f;
619 *imm
= extract_signed_bitfield (insn
, 9, 12);
622 fprintf_unfiltered (gdb_stdlog
,
623 "decode: 0x%s 0x%x stur %c%u, [x%u + #%d]\n",
624 core_addr_to_string_nz (addr
), insn
,
625 *is64
? 'x' : 'w', *rt
, *rn
, *imm
);
631 /* Decode an opcode if it represents a TB or TBNZ instruction.
633 ADDR specifies the address of the opcode.
634 INSN specifies the opcode to test.
635 OP receives the 'op' field from the decoded instruction.
636 BIT receives the bit position field from the decoded instruction.
637 RT receives 'rt' field from the decoded instruction.
638 IMM receives 'imm' field from the decoded instruction.
640 Return 1 if the opcodes matches and is decoded, otherwise 0. */
643 decode_tb (CORE_ADDR addr
,
644 uint32_t insn
, unsigned *op
, unsigned *bit
, unsigned *rt
,
647 if (decode_masked_match (insn
, 0x7e000000, 0x36000000))
649 /* tbz b011 0110 bbbb biii iiii iiii iiir rrrr */
650 /* tbnz B011 0111 bbbb biii iiii iiii iiir rrrr */
652 *rt
= (insn
>> 0) & 0x1f;
653 *op
= insn
& (1 << 24);
654 *bit
= ((insn
>> (31 - 4)) & 0x20) | ((insn
>> 19) & 0x1f);
655 *imm
= extract_signed_bitfield (insn
, 14, 5) << 2;
658 fprintf_unfiltered (gdb_stdlog
,
659 "decode: 0x%s 0x%x %s x%u, #%u, 0x%s\n",
660 core_addr_to_string_nz (addr
), insn
,
661 *op
? "tbnz" : "tbz", *rt
, *bit
,
662 core_addr_to_string_nz (addr
+ *imm
));
668 /* Analyze a prologue, looking for a recognizable stack frame
669 and frame pointer. Scan until we encounter a store that could
670 clobber the stack frame unexpectedly, or an unknown instruction. */
673 aarch64_analyze_prologue (struct gdbarch
*gdbarch
,
674 CORE_ADDR start
, CORE_ADDR limit
,
675 struct aarch64_prologue_cache
*cache
)
677 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
679 pv_t regs
[AARCH64_X_REGISTER_COUNT
];
680 struct pv_area
*stack
;
681 struct cleanup
*back_to
;
683 for (i
= 0; i
< AARCH64_X_REGISTER_COUNT
; i
++)
684 regs
[i
] = pv_register (i
, 0);
685 stack
= make_pv_area (AARCH64_SP_REGNUM
, gdbarch_addr_bit (gdbarch
));
686 back_to
= make_cleanup_free_pv_area (stack
);
688 for (; start
< limit
; start
+= 4)
706 insn
= read_memory_unsigned_integer (start
, 4, byte_order_for_code
);
708 if (decode_add_sub_imm (start
, insn
, &rd
, &rn
, &imm
))
709 regs
[rd
] = pv_add_constant (regs
[rn
], imm
);
710 else if (decode_adrp (start
, insn
, &rd
))
711 regs
[rd
] = pv_unknown ();
712 else if (decode_b (start
, insn
, &is_link
, &offset
))
714 /* Stop analysis on branch. */
717 else if (decode_bcond (start
, insn
, &cond
, &offset
))
719 /* Stop analysis on branch. */
722 else if (decode_br (start
, insn
, &is_link
, &rn
))
724 /* Stop analysis on branch. */
727 else if (decode_cb (start
, insn
, &is64
, &op
, &rn
, &offset
))
729 /* Stop analysis on branch. */
732 else if (decode_eret (start
, insn
))
734 /* Stop analysis on branch. */
737 else if (decode_movz (start
, insn
, &rd
))
738 regs
[rd
] = pv_unknown ();
740 if (decode_orr_shifted_register_x (start
, insn
, &rd
, &rn
, &rm
, &imm
))
742 if (imm
== 0 && rn
== 31)
749 "aarch64: prologue analysis gave up addr=0x%s "
750 "opcode=0x%x (orr x register)\n",
751 core_addr_to_string_nz (start
),
756 else if (decode_ret (start
, insn
, &rn
))
758 /* Stop analysis on branch. */
761 else if (decode_stur (start
, insn
, &is64
, &rt
, &rn
, &offset
))
763 pv_area_store (stack
, pv_add_constant (regs
[rn
], offset
),
764 is64
? 8 : 4, regs
[rt
]);
766 else if (decode_stp_offset (start
, insn
, &rt1
, &rt2
, &rn
, &imm
))
768 /* If recording this store would invalidate the store area
769 (perhaps because rn is not known) then we should abandon
770 further prologue analysis. */
771 if (pv_area_store_would_trash (stack
,
772 pv_add_constant (regs
[rn
], imm
)))
775 if (pv_area_store_would_trash (stack
,
776 pv_add_constant (regs
[rn
], imm
+ 8)))
779 pv_area_store (stack
, pv_add_constant (regs
[rn
], imm
), 8,
781 pv_area_store (stack
, pv_add_constant (regs
[rn
], imm
+ 8), 8,
784 else if (decode_stp_offset_wb (start
, insn
, &rt1
, &rt2
, &rn
, &imm
))
786 /* If recording this store would invalidate the store area
787 (perhaps because rn is not known) then we should abandon
788 further prologue analysis. */
789 if (pv_area_store_would_trash (stack
,
790 pv_add_constant (regs
[rn
], imm
)))
793 if (pv_area_store_would_trash (stack
,
794 pv_add_constant (regs
[rn
], imm
+ 8)))
797 pv_area_store (stack
, pv_add_constant (regs
[rn
], imm
), 8,
799 pv_area_store (stack
, pv_add_constant (regs
[rn
], imm
+ 8), 8,
801 regs
[rn
] = pv_add_constant (regs
[rn
], imm
);
803 else if (decode_tb (start
, insn
, &op
, &bit
, &rn
, &offset
))
805 /* Stop analysis on branch. */
811 fprintf_unfiltered (gdb_stdlog
,
812 "aarch64: prologue analysis gave up addr=0x%s"
814 core_addr_to_string_nz (start
), insn
);
821 do_cleanups (back_to
);
825 if (pv_is_register (regs
[AARCH64_FP_REGNUM
], AARCH64_SP_REGNUM
))
827 /* Frame pointer is fp. Frame size is constant. */
828 cache
->framereg
= AARCH64_FP_REGNUM
;
829 cache
->framesize
= -regs
[AARCH64_FP_REGNUM
].k
;
831 else if (pv_is_register (regs
[AARCH64_SP_REGNUM
], AARCH64_SP_REGNUM
))
833 /* Try the stack pointer. */
834 cache
->framesize
= -regs
[AARCH64_SP_REGNUM
].k
;
835 cache
->framereg
= AARCH64_SP_REGNUM
;
839 /* We're just out of luck. We don't know where the frame is. */
840 cache
->framereg
= -1;
841 cache
->framesize
= 0;
844 for (i
= 0; i
< AARCH64_X_REGISTER_COUNT
; i
++)
848 if (pv_area_find_reg (stack
, gdbarch
, i
, &offset
))
849 cache
->saved_regs
[i
].addr
= offset
;
852 do_cleanups (back_to
);
856 /* Implement the "skip_prologue" gdbarch method. */
859 aarch64_skip_prologue (struct gdbarch
*gdbarch
, CORE_ADDR pc
)
863 CORE_ADDR func_addr
, limit_pc
;
864 struct symtab_and_line sal
;
866 /* See if we can determine the end of the prologue via the symbol
867 table. If so, then return either PC, or the PC after the
868 prologue, whichever is greater. */
869 if (find_pc_partial_function (pc
, NULL
, &func_addr
, NULL
))
871 CORE_ADDR post_prologue_pc
872 = skip_prologue_using_sal (gdbarch
, func_addr
);
874 if (post_prologue_pc
!= 0)
875 return max (pc
, post_prologue_pc
);
878 /* Can't determine prologue from the symbol table, need to examine
881 /* Find an upper limit on the function prologue using the debug
882 information. If the debug information could not be used to
883 provide that bound, then use an arbitrary large number as the
885 limit_pc
= skip_prologue_using_sal (gdbarch
, pc
);
887 limit_pc
= pc
+ 128; /* Magic. */
889 /* Try disassembling prologue. */
890 return aarch64_analyze_prologue (gdbarch
, pc
, limit_pc
, NULL
);
893 /* Scan the function prologue for THIS_FRAME and populate the prologue
897 aarch64_scan_prologue (struct frame_info
*this_frame
,
898 struct aarch64_prologue_cache
*cache
)
900 CORE_ADDR block_addr
= get_frame_address_in_block (this_frame
);
901 CORE_ADDR prologue_start
;
902 CORE_ADDR prologue_end
;
903 CORE_ADDR prev_pc
= get_frame_pc (this_frame
);
904 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
906 cache
->prev_pc
= prev_pc
;
908 /* Assume we do not find a frame. */
909 cache
->framereg
= -1;
910 cache
->framesize
= 0;
912 if (find_pc_partial_function (block_addr
, NULL
, &prologue_start
,
915 struct symtab_and_line sal
= find_pc_line (prologue_start
, 0);
919 /* No line info so use the current PC. */
920 prologue_end
= prev_pc
;
922 else if (sal
.end
< prologue_end
)
924 /* The next line begins after the function end. */
925 prologue_end
= sal
.end
;
928 prologue_end
= min (prologue_end
, prev_pc
);
929 aarch64_analyze_prologue (gdbarch
, prologue_start
, prologue_end
, cache
);
936 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
938 frame_loc
= get_frame_register_unsigned (this_frame
, AARCH64_FP_REGNUM
);
942 cache
->framereg
= AARCH64_FP_REGNUM
;
943 cache
->framesize
= 16;
944 cache
->saved_regs
[29].addr
= 0;
945 cache
->saved_regs
[30].addr
= 8;
949 /* Fill in *CACHE with information about the prologue of *THIS_FRAME. This
950 function may throw an exception if the inferior's registers or memory is
954 aarch64_make_prologue_cache_1 (struct frame_info
*this_frame
,
955 struct aarch64_prologue_cache
*cache
)
957 CORE_ADDR unwound_fp
;
960 aarch64_scan_prologue (this_frame
, cache
);
962 if (cache
->framereg
== -1)
965 unwound_fp
= get_frame_register_unsigned (this_frame
, cache
->framereg
);
969 cache
->prev_sp
= unwound_fp
+ cache
->framesize
;
971 /* Calculate actual addresses of saved registers using offsets
972 determined by aarch64_analyze_prologue. */
973 for (reg
= 0; reg
< gdbarch_num_regs (get_frame_arch (this_frame
)); reg
++)
974 if (trad_frame_addr_p (cache
->saved_regs
, reg
))
975 cache
->saved_regs
[reg
].addr
+= cache
->prev_sp
;
977 cache
->func
= get_frame_func (this_frame
);
979 cache
->available_p
= 1;
982 /* Allocate and fill in *THIS_CACHE with information about the prologue of
983 *THIS_FRAME. Do not do this is if *THIS_CACHE was already allocated.
984 Return a pointer to the current aarch64_prologue_cache in
987 static struct aarch64_prologue_cache
*
988 aarch64_make_prologue_cache (struct frame_info
*this_frame
, void **this_cache
)
990 struct aarch64_prologue_cache
*cache
;
992 if (*this_cache
!= NULL
)
995 cache
= FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache
);
996 cache
->saved_regs
= trad_frame_alloc_saved_regs (this_frame
);
1001 aarch64_make_prologue_cache_1 (this_frame
, cache
);
1003 CATCH (ex
, RETURN_MASK_ERROR
)
1005 if (ex
.error
!= NOT_AVAILABLE_ERROR
)
1006 throw_exception (ex
);
1013 /* Implement the "stop_reason" frame_unwind method. */
1015 static enum unwind_stop_reason
1016 aarch64_prologue_frame_unwind_stop_reason (struct frame_info
*this_frame
,
1019 struct aarch64_prologue_cache
*cache
1020 = aarch64_make_prologue_cache (this_frame
, this_cache
);
1022 if (!cache
->available_p
)
1023 return UNWIND_UNAVAILABLE
;
1025 /* Halt the backtrace at "_start". */
1026 if (cache
->prev_pc
<= gdbarch_tdep (get_frame_arch (this_frame
))->lowest_pc
)
1027 return UNWIND_OUTERMOST
;
1029 /* We've hit a wall, stop. */
1030 if (cache
->prev_sp
== 0)
1031 return UNWIND_OUTERMOST
;
1033 return UNWIND_NO_REASON
;
1036 /* Our frame ID for a normal frame is the current function's starting
1037 PC and the caller's SP when we were called. */
1040 aarch64_prologue_this_id (struct frame_info
*this_frame
,
1041 void **this_cache
, struct frame_id
*this_id
)
1043 struct aarch64_prologue_cache
*cache
1044 = aarch64_make_prologue_cache (this_frame
, this_cache
);
1046 if (!cache
->available_p
)
1047 *this_id
= frame_id_build_unavailable_stack (cache
->func
);
1049 *this_id
= frame_id_build (cache
->prev_sp
, cache
->func
);
1052 /* Implement the "prev_register" frame_unwind method. */
1054 static struct value
*
1055 aarch64_prologue_prev_register (struct frame_info
*this_frame
,
1056 void **this_cache
, int prev_regnum
)
1058 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
1059 struct aarch64_prologue_cache
*cache
1060 = aarch64_make_prologue_cache (this_frame
, this_cache
);
1062 /* If we are asked to unwind the PC, then we need to return the LR
1063 instead. The prologue may save PC, but it will point into this
1064 frame's prologue, not the next frame's resume location. */
1065 if (prev_regnum
== AARCH64_PC_REGNUM
)
1069 lr
= frame_unwind_register_unsigned (this_frame
, AARCH64_LR_REGNUM
);
1070 return frame_unwind_got_constant (this_frame
, prev_regnum
, lr
);
1073 /* SP is generally not saved to the stack, but this frame is
1074 identified by the next frame's stack pointer at the time of the
1075 call. The value was already reconstructed into PREV_SP. */
1081 | | | <- Previous SP
1084 +--| saved fp |<- FP
1088 if (prev_regnum
== AARCH64_SP_REGNUM
)
1089 return frame_unwind_got_constant (this_frame
, prev_regnum
,
1092 return trad_frame_get_prev_register (this_frame
, cache
->saved_regs
,
1096 /* AArch64 prologue unwinder. */
1097 struct frame_unwind aarch64_prologue_unwind
=
1100 aarch64_prologue_frame_unwind_stop_reason
,
1101 aarch64_prologue_this_id
,
1102 aarch64_prologue_prev_register
,
1104 default_frame_sniffer
1107 /* Allocate and fill in *THIS_CACHE with information about the prologue of
1108 *THIS_FRAME. Do not do this is if *THIS_CACHE was already allocated.
1109 Return a pointer to the current aarch64_prologue_cache in
1112 static struct aarch64_prologue_cache
*
1113 aarch64_make_stub_cache (struct frame_info
*this_frame
, void **this_cache
)
1115 struct aarch64_prologue_cache
*cache
;
1117 if (*this_cache
!= NULL
)
1120 cache
= FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache
);
1121 cache
->saved_regs
= trad_frame_alloc_saved_regs (this_frame
);
1122 *this_cache
= cache
;
1126 cache
->prev_sp
= get_frame_register_unsigned (this_frame
,
1128 cache
->prev_pc
= get_frame_pc (this_frame
);
1129 cache
->available_p
= 1;
1131 CATCH (ex
, RETURN_MASK_ERROR
)
1133 if (ex
.error
!= NOT_AVAILABLE_ERROR
)
1134 throw_exception (ex
);
1141 /* Implement the "stop_reason" frame_unwind method. */
1143 static enum unwind_stop_reason
1144 aarch64_stub_frame_unwind_stop_reason (struct frame_info
*this_frame
,
1147 struct aarch64_prologue_cache
*cache
1148 = aarch64_make_stub_cache (this_frame
, this_cache
);
1150 if (!cache
->available_p
)
1151 return UNWIND_UNAVAILABLE
;
1153 return UNWIND_NO_REASON
;
1156 /* Our frame ID for a stub frame is the current SP and LR. */
1159 aarch64_stub_this_id (struct frame_info
*this_frame
,
1160 void **this_cache
, struct frame_id
*this_id
)
1162 struct aarch64_prologue_cache
*cache
1163 = aarch64_make_stub_cache (this_frame
, this_cache
);
1165 if (cache
->available_p
)
1166 *this_id
= frame_id_build (cache
->prev_sp
, cache
->prev_pc
);
1168 *this_id
= frame_id_build_unavailable_stack (cache
->prev_pc
);
1171 /* Implement the "sniffer" frame_unwind method. */
1174 aarch64_stub_unwind_sniffer (const struct frame_unwind
*self
,
1175 struct frame_info
*this_frame
,
1176 void **this_prologue_cache
)
1178 CORE_ADDR addr_in_block
;
1181 addr_in_block
= get_frame_address_in_block (this_frame
);
1182 if (in_plt_section (addr_in_block
)
1183 /* We also use the stub winder if the target memory is unreadable
1184 to avoid having the prologue unwinder trying to read it. */
1185 || target_read_memory (get_frame_pc (this_frame
), dummy
, 4) != 0)
1191 /* AArch64 stub unwinder. */
1192 struct frame_unwind aarch64_stub_unwind
=
1195 aarch64_stub_frame_unwind_stop_reason
,
1196 aarch64_stub_this_id
,
1197 aarch64_prologue_prev_register
,
1199 aarch64_stub_unwind_sniffer
1202 /* Return the frame base address of *THIS_FRAME. */
1205 aarch64_normal_frame_base (struct frame_info
*this_frame
, void **this_cache
)
1207 struct aarch64_prologue_cache
*cache
1208 = aarch64_make_prologue_cache (this_frame
, this_cache
);
1210 return cache
->prev_sp
- cache
->framesize
;
1213 /* AArch64 default frame base information. */
1214 struct frame_base aarch64_normal_base
=
1216 &aarch64_prologue_unwind
,
1217 aarch64_normal_frame_base
,
1218 aarch64_normal_frame_base
,
1219 aarch64_normal_frame_base
1222 /* Assuming THIS_FRAME is a dummy, return the frame ID of that
1223 dummy frame. The frame ID's base needs to match the TOS value
1224 saved by save_dummy_frame_tos () and returned from
1225 aarch64_push_dummy_call, and the PC needs to match the dummy
1226 frame's breakpoint. */
1228 static struct frame_id
1229 aarch64_dummy_id (struct gdbarch
*gdbarch
, struct frame_info
*this_frame
)
1231 return frame_id_build (get_frame_register_unsigned (this_frame
,
1233 get_frame_pc (this_frame
));
1236 /* Implement the "unwind_pc" gdbarch method. */
1239 aarch64_unwind_pc (struct gdbarch
*gdbarch
, struct frame_info
*this_frame
)
1242 = frame_unwind_register_unsigned (this_frame
, AARCH64_PC_REGNUM
);
1247 /* Implement the "unwind_sp" gdbarch method. */
1250 aarch64_unwind_sp (struct gdbarch
*gdbarch
, struct frame_info
*this_frame
)
1252 return frame_unwind_register_unsigned (this_frame
, AARCH64_SP_REGNUM
);
1255 /* Return the value of the REGNUM register in the previous frame of
1258 static struct value
*
1259 aarch64_dwarf2_prev_register (struct frame_info
*this_frame
,
1260 void **this_cache
, int regnum
)
1262 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
1267 case AARCH64_PC_REGNUM
:
1268 lr
= frame_unwind_register_unsigned (this_frame
, AARCH64_LR_REGNUM
);
1269 return frame_unwind_got_constant (this_frame
, regnum
, lr
);
1272 internal_error (__FILE__
, __LINE__
,
1273 _("Unexpected register %d"), regnum
);
1277 /* Implement the "init_reg" dwarf2_frame_ops method. */
1280 aarch64_dwarf2_frame_init_reg (struct gdbarch
*gdbarch
, int regnum
,
1281 struct dwarf2_frame_state_reg
*reg
,
1282 struct frame_info
*this_frame
)
1286 case AARCH64_PC_REGNUM
:
1287 reg
->how
= DWARF2_FRAME_REG_FN
;
1288 reg
->loc
.fn
= aarch64_dwarf2_prev_register
;
1290 case AARCH64_SP_REGNUM
:
1291 reg
->how
= DWARF2_FRAME_REG_CFA
;
/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

typedef struct
{
  /* Value to pass on stack.  */
  const void *data;

  /* Size in bytes of value to pass on stack.  */
  int len;
} stack_item_t;

DEF_VEC_O (stack_item_t);
1310 /* Return the alignment (in bytes) of the given type. */
1313 aarch64_type_align (struct type
*t
)
1319 t
= check_typedef (t
);
1320 switch (TYPE_CODE (t
))
1323 /* Should never happen. */
1324 internal_error (__FILE__
, __LINE__
, _("unknown type alignment"));
1328 case TYPE_CODE_ENUM
:
1332 case TYPE_CODE_RANGE
:
1333 case TYPE_CODE_BITSTRING
:
1335 case TYPE_CODE_CHAR
:
1336 case TYPE_CODE_BOOL
:
1337 return TYPE_LENGTH (t
);
1339 case TYPE_CODE_ARRAY
:
1340 case TYPE_CODE_COMPLEX
:
1341 return aarch64_type_align (TYPE_TARGET_TYPE (t
));
1343 case TYPE_CODE_STRUCT
:
1344 case TYPE_CODE_UNION
:
1346 for (n
= 0; n
< TYPE_NFIELDS (t
); n
++)
1348 falign
= aarch64_type_align (TYPE_FIELD_TYPE (t
, n
));
1356 /* Return 1 if *TY is a homogeneous floating-point aggregate as
1357 defined in the AAPCS64 ABI document; otherwise return 0. */
1360 is_hfa (struct type
*ty
)
1362 switch (TYPE_CODE (ty
))
1364 case TYPE_CODE_ARRAY
:
1366 struct type
*target_ty
= TYPE_TARGET_TYPE (ty
);
1367 if (TYPE_CODE (target_ty
) == TYPE_CODE_FLT
&& TYPE_LENGTH (ty
) <= 4)
1372 case TYPE_CODE_UNION
:
1373 case TYPE_CODE_STRUCT
:
1375 if (TYPE_NFIELDS (ty
) > 0 && TYPE_NFIELDS (ty
) <= 4)
1377 struct type
*member0_type
;
1379 member0_type
= check_typedef (TYPE_FIELD_TYPE (ty
, 0));
1380 if (TYPE_CODE (member0_type
) == TYPE_CODE_FLT
)
1384 for (i
= 0; i
< TYPE_NFIELDS (ty
); i
++)
1386 struct type
*member1_type
;
1388 member1_type
= check_typedef (TYPE_FIELD_TYPE (ty
, i
));
1389 if (TYPE_CODE (member0_type
) != TYPE_CODE (member1_type
)
1390 || (TYPE_LENGTH (member0_type
)
1391 != TYPE_LENGTH (member1_type
)))
1407 /* AArch64 function call information structure. */
1408 struct aarch64_call_info
1410 /* the current argument number. */
1413 /* The next general purpose register number, equivalent to NGRN as
1414 described in the AArch64 Procedure Call Standard. */
1417 /* The next SIMD and floating point register number, equivalent to
1418 NSRN as described in the AArch64 Procedure Call Standard. */
1421 /* The next stacked argument address, equivalent to NSAA as
1422 described in the AArch64 Procedure Call Standard. */
1425 /* Stack item vector. */
1426 VEC(stack_item_t
) *si
;
1429 /* Pass a value in a sequence of consecutive X registers. The caller
1430 is responsbile for ensuring sufficient registers are available. */
1433 pass_in_x (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
1434 struct aarch64_call_info
*info
, struct type
*type
,
1435 const bfd_byte
*buf
)
1437 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1438 int len
= TYPE_LENGTH (type
);
1439 enum type_code typecode
= TYPE_CODE (type
);
1440 int regnum
= AARCH64_X0_REGNUM
+ info
->ngrn
;
1446 int partial_len
= len
< X_REGISTER_SIZE
? len
: X_REGISTER_SIZE
;
1447 CORE_ADDR regval
= extract_unsigned_integer (buf
, partial_len
,
1451 /* Adjust sub-word struct/union args when big-endian. */
1452 if (byte_order
== BFD_ENDIAN_BIG
1453 && partial_len
< X_REGISTER_SIZE
1454 && (typecode
== TYPE_CODE_STRUCT
|| typecode
== TYPE_CODE_UNION
))
1455 regval
<<= ((X_REGISTER_SIZE
- partial_len
) * TARGET_CHAR_BIT
);
1458 fprintf_unfiltered (gdb_stdlog
, "arg %d in %s = 0x%s\n",
1460 gdbarch_register_name (gdbarch
, regnum
),
1461 phex (regval
, X_REGISTER_SIZE
));
1462 regcache_cooked_write_unsigned (regcache
, regnum
, regval
);
1469 /* Attempt to marshall a value in a V register. Return 1 if
1470 successful, or 0 if insufficient registers are available. This
1471 function, unlike the equivalent pass_in_x() function does not
1472 handle arguments spread across multiple registers. */
1475 pass_in_v (struct gdbarch
*gdbarch
,
1476 struct regcache
*regcache
,
1477 struct aarch64_call_info
*info
,
1478 const bfd_byte
*buf
)
1482 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1483 int regnum
= AARCH64_V0_REGNUM
+ info
->nsrn
;
1488 regcache_cooked_write (regcache
, regnum
, buf
);
1490 fprintf_unfiltered (gdb_stdlog
, "arg %d in %s\n",
1492 gdbarch_register_name (gdbarch
, regnum
));
1499 /* Marshall an argument onto the stack. */
1502 pass_on_stack (struct aarch64_call_info
*info
, struct type
*type
,
1503 const bfd_byte
*buf
)
1505 int len
= TYPE_LENGTH (type
);
1511 align
= aarch64_type_align (type
);
1513 /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
1514 Natural alignment of the argument's type. */
1515 align
= align_up (align
, 8);
1517 /* The AArch64 PCS requires at most doubleword alignment. */
1522 fprintf_unfiltered (gdb_stdlog
, "arg %d len=%d @ sp + %d\n",
1523 info
->argnum
, len
, info
->nsaa
);
1527 VEC_safe_push (stack_item_t
, info
->si
, &item
);
1530 if (info
->nsaa
& (align
- 1))
1532 /* Push stack alignment padding. */
1533 int pad
= align
- (info
->nsaa
& (align
- 1));
1538 VEC_safe_push (stack_item_t
, info
->si
, &item
);
1543 /* Marshall an argument into a sequence of one or more consecutive X
1544 registers or, if insufficient X registers are available then onto
1548 pass_in_x_or_stack (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
1549 struct aarch64_call_info
*info
, struct type
*type
,
1550 const bfd_byte
*buf
)
1552 int len
= TYPE_LENGTH (type
);
1553 int nregs
= (len
+ X_REGISTER_SIZE
- 1) / X_REGISTER_SIZE
;
1555 /* PCS C.13 - Pass in registers if we have enough spare */
1556 if (info
->ngrn
+ nregs
<= 8)
1558 pass_in_x (gdbarch
, regcache
, info
, type
, buf
);
1559 info
->ngrn
+= nregs
;
1564 pass_on_stack (info
, type
, buf
);
1568 /* Pass a value in a V register, or on the stack if insufficient are
1572 pass_in_v_or_stack (struct gdbarch
*gdbarch
,
1573 struct regcache
*regcache
,
1574 struct aarch64_call_info
*info
,
1576 const bfd_byte
*buf
)
1578 if (!pass_in_v (gdbarch
, regcache
, info
, buf
))
1579 pass_on_stack (info
, type
, buf
);
1582 /* Implement the "push_dummy_call" gdbarch method. */
1585 aarch64_push_dummy_call (struct gdbarch
*gdbarch
, struct value
*function
,
1586 struct regcache
*regcache
, CORE_ADDR bp_addr
,
1588 struct value
**args
, CORE_ADDR sp
, int struct_return
,
1589 CORE_ADDR struct_addr
)
1595 struct aarch64_call_info info
;
1596 struct type
*func_type
;
1597 struct type
*return_type
;
1598 int lang_struct_return
;
1600 memset (&info
, 0, sizeof (info
));
1602 /* We need to know what the type of the called function is in order
1603 to determine the number of named/anonymous arguments for the
1604 actual argument placement, and the return type in order to handle
1605 return value correctly.
1607 The generic code above us views the decision of return in memory
1608 or return in registers as a two stage processes. The language
1609 handler is consulted first and may decide to return in memory (eg
1610 class with copy constructor returned by value), this will cause
1611 the generic code to allocate space AND insert an initial leading
1614 If the language code does not decide to pass in memory then the
1615 target code is consulted.
1617 If the language code decides to pass in memory we want to move
1618 the pointer inserted as the initial argument from the argument
1619 list and into X8, the conventional AArch64 struct return pointer
1622 This is slightly awkward, ideally the flag "lang_struct_return"
1623 would be passed to the targets implementation of push_dummy_call.
1624 Rather that change the target interface we call the language code
1625 directly ourselves. */
1627 func_type
= check_typedef (value_type (function
));
1629 /* Dereference function pointer types. */
1630 if (TYPE_CODE (func_type
) == TYPE_CODE_PTR
)
1631 func_type
= TYPE_TARGET_TYPE (func_type
);
1633 gdb_assert (TYPE_CODE (func_type
) == TYPE_CODE_FUNC
1634 || TYPE_CODE (func_type
) == TYPE_CODE_METHOD
);
1636 /* If language_pass_by_reference () returned true we will have been
1637 given an additional initial argument, a hidden pointer to the
1638 return slot in memory. */
1639 return_type
= TYPE_TARGET_TYPE (func_type
);
1640 lang_struct_return
= language_pass_by_reference (return_type
);
1642 /* Set the return address. For the AArch64, the return breakpoint
1643 is always at BP_ADDR. */
1644 regcache_cooked_write_unsigned (regcache
, AARCH64_LR_REGNUM
, bp_addr
);
1646 /* If we were given an initial argument for the return slot because
1647 lang_struct_return was true, lose it. */
1648 if (lang_struct_return
)
1654 /* The struct_return pointer occupies X8. */
1655 if (struct_return
|| lang_struct_return
)
1658 fprintf_unfiltered (gdb_stdlog
, "struct return in %s = 0x%s\n",
1659 gdbarch_register_name
1661 AARCH64_STRUCT_RETURN_REGNUM
),
1662 paddress (gdbarch
, struct_addr
));
1663 regcache_cooked_write_unsigned (regcache
, AARCH64_STRUCT_RETURN_REGNUM
,
1667 for (argnum
= 0; argnum
< nargs
; argnum
++)
1669 struct value
*arg
= args
[argnum
];
1670 struct type
*arg_type
;
1673 arg_type
= check_typedef (value_type (arg
));
1674 len
= TYPE_LENGTH (arg_type
);
1676 switch (TYPE_CODE (arg_type
))
1679 case TYPE_CODE_BOOL
:
1680 case TYPE_CODE_CHAR
:
1681 case TYPE_CODE_RANGE
:
1682 case TYPE_CODE_ENUM
:
1685 /* Promote to 32 bit integer. */
1686 if (TYPE_UNSIGNED (arg_type
))
1687 arg_type
= builtin_type (gdbarch
)->builtin_uint32
;
1689 arg_type
= builtin_type (gdbarch
)->builtin_int32
;
1690 arg
= value_cast (arg_type
, arg
);
1692 pass_in_x_or_stack (gdbarch
, regcache
, &info
, arg_type
,
1693 value_contents (arg
));
1696 case TYPE_CODE_COMPLEX
:
1699 const bfd_byte
*buf
= value_contents (arg
);
1700 struct type
*target_type
=
1701 check_typedef (TYPE_TARGET_TYPE (arg_type
));
1703 pass_in_v (gdbarch
, regcache
, &info
, buf
);
1704 pass_in_v (gdbarch
, regcache
, &info
,
1705 buf
+ TYPE_LENGTH (target_type
));
1710 pass_on_stack (&info
, arg_type
, value_contents (arg
));
1714 pass_in_v_or_stack (gdbarch
, regcache
, &info
, arg_type
,
1715 value_contents (arg
));
1718 case TYPE_CODE_STRUCT
:
1719 case TYPE_CODE_ARRAY
:
1720 case TYPE_CODE_UNION
:
1721 if (is_hfa (arg_type
))
1723 int elements
= TYPE_NFIELDS (arg_type
);
1725 /* Homogeneous Aggregates */
1726 if (info
.nsrn
+ elements
< 8)
1730 for (i
= 0; i
< elements
; i
++)
1732 /* We know that we have sufficient registers
1733 available therefore this will never fallback
1735 struct value
*field
=
1736 value_primitive_field (arg
, 0, i
, arg_type
);
1737 struct type
*field_type
=
1738 check_typedef (value_type (field
));
1740 pass_in_v_or_stack (gdbarch
, regcache
, &info
, field_type
,
1741 value_contents_writeable (field
));
1747 pass_on_stack (&info
, arg_type
, value_contents (arg
));
1752 /* PCS B.7 Aggregates larger than 16 bytes are passed by
1753 invisible reference. */
1755 /* Allocate aligned storage. */
1756 sp
= align_down (sp
- len
, 16);
1758 /* Write the real data into the stack. */
1759 write_memory (sp
, value_contents (arg
), len
);
1761 /* Construct the indirection. */
1762 arg_type
= lookup_pointer_type (arg_type
);
1763 arg
= value_from_pointer (arg_type
, sp
);
1764 pass_in_x_or_stack (gdbarch
, regcache
, &info
, arg_type
,
1765 value_contents (arg
));
1768 /* PCS C.15 / C.18 multiple values pass. */
1769 pass_in_x_or_stack (gdbarch
, regcache
, &info
, arg_type
,
1770 value_contents (arg
));
1774 pass_in_x_or_stack (gdbarch
, regcache
, &info
, arg_type
,
1775 value_contents (arg
));
1780 /* Make sure stack retains 16 byte alignment. */
1782 sp
-= 16 - (info
.nsaa
& 15);
1784 while (!VEC_empty (stack_item_t
, info
.si
))
1786 stack_item_t
*si
= VEC_last (stack_item_t
, info
.si
);
1789 write_memory (sp
, si
->data
, si
->len
);
1790 VEC_pop (stack_item_t
, info
.si
);
1793 VEC_free (stack_item_t
, info
.si
);
1795 /* Finally, update the SP register. */
1796 regcache_cooked_write_unsigned (regcache
, AARCH64_SP_REGNUM
, sp
);
1801 /* Implement the "frame_align" gdbarch method. */
1804 aarch64_frame_align (struct gdbarch
*gdbarch
, CORE_ADDR sp
)
1806 /* Align the stack to sixteen bytes. */
1807 return sp
& ~(CORE_ADDR
) 15;
1810 /* Return the type for an AdvSISD Q register. */
1812 static struct type
*
1813 aarch64_vnq_type (struct gdbarch
*gdbarch
)
1815 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1817 if (tdep
->vnq_type
== NULL
)
1822 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnq",
1825 elem
= builtin_type (gdbarch
)->builtin_uint128
;
1826 append_composite_type_field (t
, "u", elem
);
1828 elem
= builtin_type (gdbarch
)->builtin_int128
;
1829 append_composite_type_field (t
, "s", elem
);
1834 return tdep
->vnq_type
;
1837 /* Return the type for an AdvSISD D register. */
1839 static struct type
*
1840 aarch64_vnd_type (struct gdbarch
*gdbarch
)
1842 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1844 if (tdep
->vnd_type
== NULL
)
1849 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnd",
1852 elem
= builtin_type (gdbarch
)->builtin_double
;
1853 append_composite_type_field (t
, "f", elem
);
1855 elem
= builtin_type (gdbarch
)->builtin_uint64
;
1856 append_composite_type_field (t
, "u", elem
);
1858 elem
= builtin_type (gdbarch
)->builtin_int64
;
1859 append_composite_type_field (t
, "s", elem
);
1864 return tdep
->vnd_type
;
1867 /* Return the type for an AdvSISD S register. */
1869 static struct type
*
1870 aarch64_vns_type (struct gdbarch
*gdbarch
)
1872 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1874 if (tdep
->vns_type
== NULL
)
1879 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vns",
1882 elem
= builtin_type (gdbarch
)->builtin_float
;
1883 append_composite_type_field (t
, "f", elem
);
1885 elem
= builtin_type (gdbarch
)->builtin_uint32
;
1886 append_composite_type_field (t
, "u", elem
);
1888 elem
= builtin_type (gdbarch
)->builtin_int32
;
1889 append_composite_type_field (t
, "s", elem
);
1894 return tdep
->vns_type
;
1897 /* Return the type for an AdvSISD H register. */
1899 static struct type
*
1900 aarch64_vnh_type (struct gdbarch
*gdbarch
)
1902 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1904 if (tdep
->vnh_type
== NULL
)
1909 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnh",
1912 elem
= builtin_type (gdbarch
)->builtin_uint16
;
1913 append_composite_type_field (t
, "u", elem
);
1915 elem
= builtin_type (gdbarch
)->builtin_int16
;
1916 append_composite_type_field (t
, "s", elem
);
1921 return tdep
->vnh_type
;
1924 /* Return the type for an AdvSISD B register. */
1926 static struct type
*
1927 aarch64_vnb_type (struct gdbarch
*gdbarch
)
1929 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1931 if (tdep
->vnb_type
== NULL
)
1936 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnb",
1939 elem
= builtin_type (gdbarch
)->builtin_uint8
;
1940 append_composite_type_field (t
, "u", elem
);
1942 elem
= builtin_type (gdbarch
)->builtin_int8
;
1943 append_composite_type_field (t
, "s", elem
);
1948 return tdep
->vnb_type
;
1951 /* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
1954 aarch64_dwarf_reg_to_regnum (struct gdbarch
*gdbarch
, int reg
)
1956 if (reg
>= AARCH64_DWARF_X0
&& reg
<= AARCH64_DWARF_X0
+ 30)
1957 return AARCH64_X0_REGNUM
+ reg
- AARCH64_DWARF_X0
;
1959 if (reg
== AARCH64_DWARF_SP
)
1960 return AARCH64_SP_REGNUM
;
1962 if (reg
>= AARCH64_DWARF_V0
&& reg
<= AARCH64_DWARF_V0
+ 31)
1963 return AARCH64_V0_REGNUM
+ reg
- AARCH64_DWARF_V0
;
1969 /* Implement the "print_insn" gdbarch method. */
1972 aarch64_gdb_print_insn (bfd_vma memaddr
, disassemble_info
*info
)
1974 info
->symbols
= NULL
;
1975 return print_insn_aarch64 (memaddr
, info
);
1978 /* AArch64 BRK software debug mode instruction.
1979 Note that AArch64 code is always little-endian.
1980 1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000. */
1981 static const gdb_byte aarch64_default_breakpoint
[] = {0x00, 0x00, 0x20, 0xd4};
1983 /* Implement the "breakpoint_from_pc" gdbarch method. */
1985 static const gdb_byte
*
1986 aarch64_breakpoint_from_pc (struct gdbarch
*gdbarch
, CORE_ADDR
*pcptr
,
1989 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1991 *lenptr
= sizeof (aarch64_default_breakpoint
);
1992 return aarch64_default_breakpoint
;
1995 /* Extract from an array REGS containing the (raw) register state a
1996 function return value of type TYPE, and copy that, in virtual
1997 format, into VALBUF. */
2000 aarch64_extract_return_value (struct type
*type
, struct regcache
*regs
,
2003 struct gdbarch
*gdbarch
= get_regcache_arch (regs
);
2004 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
2006 if (TYPE_CODE (type
) == TYPE_CODE_FLT
)
2008 bfd_byte buf
[V_REGISTER_SIZE
];
2009 int len
= TYPE_LENGTH (type
);
2011 regcache_cooked_read (regs
, AARCH64_V0_REGNUM
, buf
);
2012 memcpy (valbuf
, buf
, len
);
2014 else if (TYPE_CODE (type
) == TYPE_CODE_INT
2015 || TYPE_CODE (type
) == TYPE_CODE_CHAR
2016 || TYPE_CODE (type
) == TYPE_CODE_BOOL
2017 || TYPE_CODE (type
) == TYPE_CODE_PTR
2018 || TYPE_CODE (type
) == TYPE_CODE_REF
2019 || TYPE_CODE (type
) == TYPE_CODE_ENUM
)
2021 /* If the the type is a plain integer, then the access is
2022 straight-forward. Otherwise we have to play around a bit
2024 int len
= TYPE_LENGTH (type
);
2025 int regno
= AARCH64_X0_REGNUM
;
2030 /* By using store_unsigned_integer we avoid having to do
2031 anything special for small big-endian values. */
2032 regcache_cooked_read_unsigned (regs
, regno
++, &tmp
);
2033 store_unsigned_integer (valbuf
,
2034 (len
> X_REGISTER_SIZE
2035 ? X_REGISTER_SIZE
: len
), byte_order
, tmp
);
2036 len
-= X_REGISTER_SIZE
;
2037 valbuf
+= X_REGISTER_SIZE
;
2040 else if (TYPE_CODE (type
) == TYPE_CODE_COMPLEX
)
2042 int regno
= AARCH64_V0_REGNUM
;
2043 bfd_byte buf
[V_REGISTER_SIZE
];
2044 struct type
*target_type
= check_typedef (TYPE_TARGET_TYPE (type
));
2045 int len
= TYPE_LENGTH (target_type
);
2047 regcache_cooked_read (regs
, regno
, buf
);
2048 memcpy (valbuf
, buf
, len
);
2050 regcache_cooked_read (regs
, regno
+ 1, buf
);
2051 memcpy (valbuf
, buf
, len
);
2054 else if (is_hfa (type
))
2056 int elements
= TYPE_NFIELDS (type
);
2057 struct type
*member_type
= check_typedef (TYPE_FIELD_TYPE (type
, 0));
2058 int len
= TYPE_LENGTH (member_type
);
2061 for (i
= 0; i
< elements
; i
++)
2063 int regno
= AARCH64_V0_REGNUM
+ i
;
2064 bfd_byte buf
[X_REGISTER_SIZE
];
2067 fprintf_unfiltered (gdb_stdlog
,
2068 "read HFA return value element %d from %s\n",
2070 gdbarch_register_name (gdbarch
, regno
));
2071 regcache_cooked_read (regs
, regno
, buf
);
2073 memcpy (valbuf
, buf
, len
);
2079 /* For a structure or union the behaviour is as if the value had
2080 been stored to word-aligned memory and then loaded into
2081 registers with 64-bit load instruction(s). */
2082 int len
= TYPE_LENGTH (type
);
2083 int regno
= AARCH64_X0_REGNUM
;
2084 bfd_byte buf
[X_REGISTER_SIZE
];
2088 regcache_cooked_read (regs
, regno
++, buf
);
2089 memcpy (valbuf
, buf
, len
> X_REGISTER_SIZE
? X_REGISTER_SIZE
: len
);
2090 len
-= X_REGISTER_SIZE
;
2091 valbuf
+= X_REGISTER_SIZE
;
/* Will a function return an aggregate type in memory or in a
   register?  Return 0 if an aggregate type can be returned in a
   register, 1 if it must be returned in memory.  */

static int
aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
{
  CHECK_TYPEDEF (type);

  /* In the AArch64 ABI, "integer" like aggregate types are returned
     in registers.  For an aggregate type to be integer like, its size
     must be less than or equal to 4 * X_REGISTER_SIZE.  */

  if (is_hfa (type))
    {
      /* PCS B.5 If the argument is a Named HFA, then the argument is
	 used unmodified.  */
      return 0;
    }

  if (TYPE_LENGTH (type) > 16)
    {
      /* PCS B.6 Aggregates larger than 16 bytes are passed by
	 invisible reference.  */

      return 1;
    }

  return 0;
}
2131 /* Write into appropriate registers a function return value of type
2132 TYPE, given in virtual format. */
2135 aarch64_store_return_value (struct type
*type
, struct regcache
*regs
,
2136 const gdb_byte
*valbuf
)
2138 struct gdbarch
*gdbarch
= get_regcache_arch (regs
);
2139 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
2141 if (TYPE_CODE (type
) == TYPE_CODE_FLT
)
2143 bfd_byte buf
[V_REGISTER_SIZE
];
2144 int len
= TYPE_LENGTH (type
);
2146 memcpy (buf
, valbuf
, len
> V_REGISTER_SIZE
? V_REGISTER_SIZE
: len
);
2147 regcache_cooked_write (regs
, AARCH64_V0_REGNUM
, buf
);
2149 else if (TYPE_CODE (type
) == TYPE_CODE_INT
2150 || TYPE_CODE (type
) == TYPE_CODE_CHAR
2151 || TYPE_CODE (type
) == TYPE_CODE_BOOL
2152 || TYPE_CODE (type
) == TYPE_CODE_PTR
2153 || TYPE_CODE (type
) == TYPE_CODE_REF
2154 || TYPE_CODE (type
) == TYPE_CODE_ENUM
)
2156 if (TYPE_LENGTH (type
) <= X_REGISTER_SIZE
)
2158 /* Values of one word or less are zero/sign-extended and
2160 bfd_byte tmpbuf
[X_REGISTER_SIZE
];
2161 LONGEST val
= unpack_long (type
, valbuf
);
2163 store_signed_integer (tmpbuf
, X_REGISTER_SIZE
, byte_order
, val
);
2164 regcache_cooked_write (regs
, AARCH64_X0_REGNUM
, tmpbuf
);
2168 /* Integral values greater than one word are stored in
2169 consecutive registers starting with r0. This will always
2170 be a multiple of the regiser size. */
2171 int len
= TYPE_LENGTH (type
);
2172 int regno
= AARCH64_X0_REGNUM
;
2176 regcache_cooked_write (regs
, regno
++, valbuf
);
2177 len
-= X_REGISTER_SIZE
;
2178 valbuf
+= X_REGISTER_SIZE
;
2182 else if (is_hfa (type
))
2184 int elements
= TYPE_NFIELDS (type
);
2185 struct type
*member_type
= check_typedef (TYPE_FIELD_TYPE (type
, 0));
2186 int len
= TYPE_LENGTH (member_type
);
2189 for (i
= 0; i
< elements
; i
++)
2191 int regno
= AARCH64_V0_REGNUM
+ i
;
2192 bfd_byte tmpbuf
[MAX_REGISTER_SIZE
];
2195 fprintf_unfiltered (gdb_stdlog
,
2196 "write HFA return value element %d to %s\n",
2198 gdbarch_register_name (gdbarch
, regno
));
2200 memcpy (tmpbuf
, valbuf
, len
);
2201 regcache_cooked_write (regs
, regno
, tmpbuf
);
2207 /* For a structure or union the behaviour is as if the value had
2208 been stored to word-aligned memory and then loaded into
2209 registers with 64-bit load instruction(s). */
2210 int len
= TYPE_LENGTH (type
);
2211 int regno
= AARCH64_X0_REGNUM
;
2212 bfd_byte tmpbuf
[X_REGISTER_SIZE
];
2216 memcpy (tmpbuf
, valbuf
,
2217 len
> X_REGISTER_SIZE
? X_REGISTER_SIZE
: len
);
2218 regcache_cooked_write (regs
, regno
++, tmpbuf
);
2219 len
-= X_REGISTER_SIZE
;
2220 valbuf
+= X_REGISTER_SIZE
;
2225 /* Implement the "return_value" gdbarch method. */
2227 static enum return_value_convention
2228 aarch64_return_value (struct gdbarch
*gdbarch
, struct value
*func_value
,
2229 struct type
*valtype
, struct regcache
*regcache
,
2230 gdb_byte
*readbuf
, const gdb_byte
*writebuf
)
2232 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2234 if (TYPE_CODE (valtype
) == TYPE_CODE_STRUCT
2235 || TYPE_CODE (valtype
) == TYPE_CODE_UNION
2236 || TYPE_CODE (valtype
) == TYPE_CODE_ARRAY
)
2238 if (aarch64_return_in_memory (gdbarch
, valtype
))
2241 fprintf_unfiltered (gdb_stdlog
, "return value in memory\n");
2242 return RETURN_VALUE_STRUCT_CONVENTION
;
2247 aarch64_store_return_value (valtype
, regcache
, writebuf
);
2250 aarch64_extract_return_value (valtype
, regcache
, readbuf
);
2253 fprintf_unfiltered (gdb_stdlog
, "return value in registers\n");
2255 return RETURN_VALUE_REGISTER_CONVENTION
;
2258 /* Implement the "get_longjmp_target" gdbarch method. */
2261 aarch64_get_longjmp_target (struct frame_info
*frame
, CORE_ADDR
*pc
)
2264 gdb_byte buf
[X_REGISTER_SIZE
];
2265 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
2266 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2267 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
2269 jb_addr
= get_frame_register_unsigned (frame
, AARCH64_X0_REGNUM
);
2271 if (target_read_memory (jb_addr
+ tdep
->jb_pc
* tdep
->jb_elt_size
, buf
,
2275 *pc
= extract_unsigned_integer (buf
, X_REGISTER_SIZE
, byte_order
);
2279 /* Implement the "gen_return_address" gdbarch method. */
2282 aarch64_gen_return_address (struct gdbarch
*gdbarch
,
2283 struct agent_expr
*ax
, struct axs_value
*value
,
2286 value
->type
= register_type (gdbarch
, AARCH64_LR_REGNUM
);
2287 value
->kind
= axs_lvalue_register
;
2288 value
->u
.reg
= AARCH64_LR_REGNUM
;
2292 /* Return the pseudo register name corresponding to register regnum. */
2295 aarch64_pseudo_register_name (struct gdbarch
*gdbarch
, int regnum
)
2297 static const char *const q_name
[] =
2299 "q0", "q1", "q2", "q3",
2300 "q4", "q5", "q6", "q7",
2301 "q8", "q9", "q10", "q11",
2302 "q12", "q13", "q14", "q15",
2303 "q16", "q17", "q18", "q19",
2304 "q20", "q21", "q22", "q23",
2305 "q24", "q25", "q26", "q27",
2306 "q28", "q29", "q30", "q31",
2309 static const char *const d_name
[] =
2311 "d0", "d1", "d2", "d3",
2312 "d4", "d5", "d6", "d7",
2313 "d8", "d9", "d10", "d11",
2314 "d12", "d13", "d14", "d15",
2315 "d16", "d17", "d18", "d19",
2316 "d20", "d21", "d22", "d23",
2317 "d24", "d25", "d26", "d27",
2318 "d28", "d29", "d30", "d31",
2321 static const char *const s_name
[] =
2323 "s0", "s1", "s2", "s3",
2324 "s4", "s5", "s6", "s7",
2325 "s8", "s9", "s10", "s11",
2326 "s12", "s13", "s14", "s15",
2327 "s16", "s17", "s18", "s19",
2328 "s20", "s21", "s22", "s23",
2329 "s24", "s25", "s26", "s27",
2330 "s28", "s29", "s30", "s31",
2333 static const char *const h_name
[] =
2335 "h0", "h1", "h2", "h3",
2336 "h4", "h5", "h6", "h7",
2337 "h8", "h9", "h10", "h11",
2338 "h12", "h13", "h14", "h15",
2339 "h16", "h17", "h18", "h19",
2340 "h20", "h21", "h22", "h23",
2341 "h24", "h25", "h26", "h27",
2342 "h28", "h29", "h30", "h31",
2345 static const char *const b_name
[] =
2347 "b0", "b1", "b2", "b3",
2348 "b4", "b5", "b6", "b7",
2349 "b8", "b9", "b10", "b11",
2350 "b12", "b13", "b14", "b15",
2351 "b16", "b17", "b18", "b19",
2352 "b20", "b21", "b22", "b23",
2353 "b24", "b25", "b26", "b27",
2354 "b28", "b29", "b30", "b31",
2357 regnum
-= gdbarch_num_regs (gdbarch
);
2359 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
2360 return q_name
[regnum
- AARCH64_Q0_REGNUM
];
2362 if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
2363 return d_name
[regnum
- AARCH64_D0_REGNUM
];
2365 if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
2366 return s_name
[regnum
- AARCH64_S0_REGNUM
];
2368 if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
2369 return h_name
[regnum
- AARCH64_H0_REGNUM
];
2371 if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
2372 return b_name
[regnum
- AARCH64_B0_REGNUM
];
2374 internal_error (__FILE__
, __LINE__
,
2375 _("aarch64_pseudo_register_name: bad register number %d"),
2379 /* Implement the "pseudo_register_type" tdesc_arch_data method. */
2381 static struct type
*
2382 aarch64_pseudo_register_type (struct gdbarch
*gdbarch
, int regnum
)
2384 regnum
-= gdbarch_num_regs (gdbarch
);
2386 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
2387 return aarch64_vnq_type (gdbarch
);
2389 if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
2390 return aarch64_vnd_type (gdbarch
);
2392 if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
2393 return aarch64_vns_type (gdbarch
);
2395 if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
2396 return aarch64_vnh_type (gdbarch
);
2398 if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
2399 return aarch64_vnb_type (gdbarch
);
2401 internal_error (__FILE__
, __LINE__
,
2402 _("aarch64_pseudo_register_type: bad register number %d"),
2406 /* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2409 aarch64_pseudo_register_reggroup_p (struct gdbarch
*gdbarch
, int regnum
,
2410 struct reggroup
*group
)
2412 regnum
-= gdbarch_num_regs (gdbarch
);
2414 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
2415 return group
== all_reggroup
|| group
== vector_reggroup
;
2416 else if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
2417 return (group
== all_reggroup
|| group
== vector_reggroup
2418 || group
== float_reggroup
);
2419 else if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
2420 return (group
== all_reggroup
|| group
== vector_reggroup
2421 || group
== float_reggroup
);
2422 else if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
2423 return group
== all_reggroup
|| group
== vector_reggroup
;
2424 else if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
2425 return group
== all_reggroup
|| group
== vector_reggroup
;
2427 return group
== all_reggroup
;
2430 /* Implement the "pseudo_register_read_value" gdbarch method. */
2432 static struct value
*
2433 aarch64_pseudo_read_value (struct gdbarch
*gdbarch
,
2434 struct regcache
*regcache
,
2437 gdb_byte reg_buf
[MAX_REGISTER_SIZE
];
2438 struct value
*result_value
;
2441 result_value
= allocate_value (register_type (gdbarch
, regnum
));
2442 VALUE_LVAL (result_value
) = lval_register
;
2443 VALUE_REGNUM (result_value
) = regnum
;
2444 buf
= value_contents_raw (result_value
);
2446 regnum
-= gdbarch_num_regs (gdbarch
);
2448 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
2450 enum register_status status
;
2453 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_Q0_REGNUM
;
2454 status
= regcache_raw_read (regcache
, v_regnum
, reg_buf
);
2455 if (status
!= REG_VALID
)
2456 mark_value_bytes_unavailable (result_value
, 0,
2457 TYPE_LENGTH (value_type (result_value
)));
2459 memcpy (buf
, reg_buf
, Q_REGISTER_SIZE
);
2460 return result_value
;
2463 if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
2465 enum register_status status
;
2468 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_D0_REGNUM
;
2469 status
= regcache_raw_read (regcache
, v_regnum
, reg_buf
);
2470 if (status
!= REG_VALID
)
2471 mark_value_bytes_unavailable (result_value
, 0,
2472 TYPE_LENGTH (value_type (result_value
)));
2474 memcpy (buf
, reg_buf
, D_REGISTER_SIZE
);
2475 return result_value
;
2478 if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
2480 enum register_status status
;
2483 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_S0_REGNUM
;
2484 status
= regcache_raw_read (regcache
, v_regnum
, reg_buf
);
2485 memcpy (buf
, reg_buf
, S_REGISTER_SIZE
);
2486 return result_value
;
2489 if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
2491 enum register_status status
;
2494 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_H0_REGNUM
;
2495 status
= regcache_raw_read (regcache
, v_regnum
, reg_buf
);
2496 if (status
!= REG_VALID
)
2497 mark_value_bytes_unavailable (result_value
, 0,
2498 TYPE_LENGTH (value_type (result_value
)));
2500 memcpy (buf
, reg_buf
, H_REGISTER_SIZE
);
2501 return result_value
;
2504 if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
2506 enum register_status status
;
2509 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_B0_REGNUM
;
2510 status
= regcache_raw_read (regcache
, v_regnum
, reg_buf
);
2511 if (status
!= REG_VALID
)
2512 mark_value_bytes_unavailable (result_value
, 0,
2513 TYPE_LENGTH (value_type (result_value
)));
2515 memcpy (buf
, reg_buf
, B_REGISTER_SIZE
);
2516 return result_value
;
2519 gdb_assert_not_reached ("regnum out of bound");
2522 /* Implement the "pseudo_register_write" gdbarch method. */
2525 aarch64_pseudo_write (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
2526 int regnum
, const gdb_byte
*buf
)
2528 gdb_byte reg_buf
[MAX_REGISTER_SIZE
];
2530 /* Ensure the register buffer is zero, we want gdb writes of the
2531 various 'scalar' pseudo registers to behavior like architectural
2532 writes, register width bytes are written the remainder are set to
2534 memset (reg_buf
, 0, sizeof (reg_buf
));
2536 regnum
-= gdbarch_num_regs (gdbarch
);
2538 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
2540 /* pseudo Q registers */
2543 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_Q0_REGNUM
;
2544 memcpy (reg_buf
, buf
, Q_REGISTER_SIZE
);
2545 regcache_raw_write (regcache
, v_regnum
, reg_buf
);
2549 if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
2551 /* pseudo D registers */
2554 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_D0_REGNUM
;
2555 memcpy (reg_buf
, buf
, D_REGISTER_SIZE
);
2556 regcache_raw_write (regcache
, v_regnum
, reg_buf
);
2560 if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
2564 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_S0_REGNUM
;
2565 memcpy (reg_buf
, buf
, S_REGISTER_SIZE
);
2566 regcache_raw_write (regcache
, v_regnum
, reg_buf
);
2570 if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
2572 /* pseudo H registers */
2575 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_H0_REGNUM
;
2576 memcpy (reg_buf
, buf
, H_REGISTER_SIZE
);
2577 regcache_raw_write (regcache
, v_regnum
, reg_buf
);
2581 if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
2583 /* pseudo B registers */
2586 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_B0_REGNUM
;
2587 memcpy (reg_buf
, buf
, B_REGISTER_SIZE
);
2588 regcache_raw_write (regcache
, v_regnum
, reg_buf
);
2592 gdb_assert_not_reached ("regnum out of bound");
/* Callback function for user_reg_add.  BATON is a pointer to the raw
   register number of the alias (e.g. "fp" -> AARCH64_FP_REGNUM); return
   that register's value in FRAME.  */

static struct value *
value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
{
  const int *reg_p = baton;

  return value_of_register (*reg_p, frame);
}
2606 /* Implement the "software_single_step" gdbarch method, needed to
2607 single step through atomic sequences on AArch64. */
2610 aarch64_software_single_step (struct frame_info
*frame
)
2612 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
2613 struct address_space
*aspace
= get_frame_address_space (frame
);
2614 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
2615 const int insn_size
= 4;
2616 const int atomic_sequence_length
= 16; /* Instruction sequence length. */
2617 CORE_ADDR pc
= get_frame_pc (frame
);
2618 CORE_ADDR breaks
[2] = { -1, -1 };
2620 CORE_ADDR closing_insn
= 0;
2621 uint32_t insn
= read_memory_unsigned_integer (loc
, insn_size
,
2622 byte_order_for_code
);
2625 int bc_insn_count
= 0; /* Conditional branch instruction count. */
2626 int last_breakpoint
= 0; /* Defaults to 0 (no breakpoints placed). */
2628 /* Look for a Load Exclusive instruction which begins the sequence. */
2629 if (!decode_masked_match (insn
, 0x3fc00000, 0x08400000))
2632 for (insn_count
= 0; insn_count
< atomic_sequence_length
; ++insn_count
)
2638 insn
= read_memory_unsigned_integer (loc
, insn_size
,
2639 byte_order_for_code
);
2641 /* Check if the instruction is a conditional branch. */
2642 if (decode_bcond (loc
, insn
, &cond
, &offset
))
2644 if (bc_insn_count
>= 1)
2647 /* It is, so we'll try to set a breakpoint at the destination. */
2648 breaks
[1] = loc
+ offset
;
2654 /* Look for the Store Exclusive which closes the atomic sequence. */
2655 if (decode_masked_match (insn
, 0x3fc00000, 0x08000000))
2662 /* We didn't find a closing Store Exclusive instruction, fall back. */
2666 /* Insert breakpoint after the end of the atomic sequence. */
2667 breaks
[0] = loc
+ insn_size
;
2669 /* Check for duplicated breakpoints, and also check that the second
2670 breakpoint is not within the atomic sequence. */
2672 && (breaks
[1] == breaks
[0]
2673 || (breaks
[1] >= pc
&& breaks
[1] <= closing_insn
)))
2674 last_breakpoint
= 0;
2676 /* Insert the breakpoint at the end of the sequence, and one at the
2677 destination of the conditional branch, if it exists. */
2678 for (index
= 0; index
<= last_breakpoint
; index
++)
2679 insert_single_step_breakpoint (gdbarch
, aspace
, breaks
[index
]);
2684 /* Initialize the current architecture based on INFO. If possible,
2685 re-use an architecture from ARCHES, which is a list of
2686 architectures already created during this debugging session.
2688 Called e.g. at program startup, when reading a core file, and when
2689 reading a binary file. */
2691 static struct gdbarch
*
2692 aarch64_gdbarch_init (struct gdbarch_info info
, struct gdbarch_list
*arches
)
2694 struct gdbarch_tdep
*tdep
;
2695 struct gdbarch
*gdbarch
;
2696 struct gdbarch_list
*best_arch
;
2697 struct tdesc_arch_data
*tdesc_data
= NULL
;
2698 const struct target_desc
*tdesc
= info
.target_desc
;
2700 int have_fpa_registers
= 1;
2702 const struct tdesc_feature
*feature
;
2704 int num_pseudo_regs
= 0;
2706 /* Ensure we always have a target descriptor. */
2707 if (!tdesc_has_registers (tdesc
))
2708 tdesc
= tdesc_aarch64
;
2712 feature
= tdesc_find_feature (tdesc
, "org.gnu.gdb.aarch64.core");
2714 if (feature
== NULL
)
2717 tdesc_data
= tdesc_data_alloc ();
2719 /* Validate the descriptor provides the mandatory core R registers
2720 and allocate their numbers. */
2721 for (i
= 0; i
< ARRAY_SIZE (aarch64_r_register_names
); i
++)
2723 tdesc_numbered_register (feature
, tdesc_data
, AARCH64_X0_REGNUM
+ i
,
2724 aarch64_r_register_names
[i
]);
2726 num_regs
= AARCH64_X0_REGNUM
+ i
;
2728 /* Look for the V registers. */
2729 feature
= tdesc_find_feature (tdesc
, "org.gnu.gdb.aarch64.fpu");
2732 /* Validate the descriptor provides the mandatory V registers
2733 and allocate their numbers. */
2734 for (i
= 0; i
< ARRAY_SIZE (aarch64_v_register_names
); i
++)
2736 tdesc_numbered_register (feature
, tdesc_data
, AARCH64_V0_REGNUM
+ i
,
2737 aarch64_v_register_names
[i
]);
2739 num_regs
= AARCH64_V0_REGNUM
+ i
;
2741 num_pseudo_regs
+= 32; /* add the Qn scalar register pseudos */
2742 num_pseudo_regs
+= 32; /* add the Dn scalar register pseudos */
2743 num_pseudo_regs
+= 32; /* add the Sn scalar register pseudos */
2744 num_pseudo_regs
+= 32; /* add the Hn scalar register pseudos */
2745 num_pseudo_regs
+= 32; /* add the Bn scalar register pseudos */
2750 tdesc_data_cleanup (tdesc_data
);
2754 /* AArch64 code is always little-endian. */
2755 info
.byte_order_for_code
= BFD_ENDIAN_LITTLE
;
2757 /* If there is already a candidate, use it. */
2758 for (best_arch
= gdbarch_list_lookup_by_info (arches
, &info
);
2760 best_arch
= gdbarch_list_lookup_by_info (best_arch
->next
, &info
))
2762 /* Found a match. */
2766 if (best_arch
!= NULL
)
2768 if (tdesc_data
!= NULL
)
2769 tdesc_data_cleanup (tdesc_data
);
2770 return best_arch
->gdbarch
;
2773 tdep
= xcalloc (1, sizeof (struct gdbarch_tdep
));
2774 gdbarch
= gdbarch_alloc (&info
, tdep
);
2776 /* This should be low enough for everything. */
2777 tdep
->lowest_pc
= 0x20;
2778 tdep
->jb_pc
= -1; /* Longjump support not enabled by default. */
2779 tdep
->jb_elt_size
= 8;
2781 set_gdbarch_push_dummy_call (gdbarch
, aarch64_push_dummy_call
);
2782 set_gdbarch_frame_align (gdbarch
, aarch64_frame_align
);
2784 /* Frame handling. */
2785 set_gdbarch_dummy_id (gdbarch
, aarch64_dummy_id
);
2786 set_gdbarch_unwind_pc (gdbarch
, aarch64_unwind_pc
);
2787 set_gdbarch_unwind_sp (gdbarch
, aarch64_unwind_sp
);
2789 /* Advance PC across function entry code. */
2790 set_gdbarch_skip_prologue (gdbarch
, aarch64_skip_prologue
);
2792 /* The stack grows downward. */
2793 set_gdbarch_inner_than (gdbarch
, core_addr_lessthan
);
2795 /* Breakpoint manipulation. */
2796 set_gdbarch_breakpoint_from_pc (gdbarch
, aarch64_breakpoint_from_pc
);
2797 set_gdbarch_have_nonsteppable_watchpoint (gdbarch
, 1);
2798 set_gdbarch_software_single_step (gdbarch
, aarch64_software_single_step
);
2800 /* Information about registers, etc. */
2801 set_gdbarch_sp_regnum (gdbarch
, AARCH64_SP_REGNUM
);
2802 set_gdbarch_pc_regnum (gdbarch
, AARCH64_PC_REGNUM
);
2803 set_gdbarch_num_regs (gdbarch
, num_regs
);
2805 set_gdbarch_num_pseudo_regs (gdbarch
, num_pseudo_regs
);
2806 set_gdbarch_pseudo_register_read_value (gdbarch
, aarch64_pseudo_read_value
);
2807 set_gdbarch_pseudo_register_write (gdbarch
, aarch64_pseudo_write
);
2808 set_tdesc_pseudo_register_name (gdbarch
, aarch64_pseudo_register_name
);
2809 set_tdesc_pseudo_register_type (gdbarch
, aarch64_pseudo_register_type
);
2810 set_tdesc_pseudo_register_reggroup_p (gdbarch
,
2811 aarch64_pseudo_register_reggroup_p
);
2814 set_gdbarch_short_bit (gdbarch
, 16);
2815 set_gdbarch_int_bit (gdbarch
, 32);
2816 set_gdbarch_float_bit (gdbarch
, 32);
2817 set_gdbarch_double_bit (gdbarch
, 64);
2818 set_gdbarch_long_double_bit (gdbarch
, 128);
2819 set_gdbarch_long_bit (gdbarch
, 64);
2820 set_gdbarch_long_long_bit (gdbarch
, 64);
2821 set_gdbarch_ptr_bit (gdbarch
, 64);
2822 set_gdbarch_char_signed (gdbarch
, 0);
2823 set_gdbarch_float_format (gdbarch
, floatformats_ieee_single
);
2824 set_gdbarch_double_format (gdbarch
, floatformats_ieee_double
);
2825 set_gdbarch_long_double_format (gdbarch
, floatformats_ia64_quad
);
2827 /* Internal <-> external register number maps. */
2828 set_gdbarch_dwarf2_reg_to_regnum (gdbarch
, aarch64_dwarf_reg_to_regnum
);
2830 /* Returning results. */
2831 set_gdbarch_return_value (gdbarch
, aarch64_return_value
);
2834 set_gdbarch_print_insn (gdbarch
, aarch64_gdb_print_insn
);
2836 /* Virtual tables. */
2837 set_gdbarch_vbit_in_delta (gdbarch
, 1);
2839 /* Hook in the ABI-specific overrides, if they have been registered. */
2840 info
.target_desc
= tdesc
;
2841 info
.tdep_info
= (void *) tdesc_data
;
2842 gdbarch_init_osabi (info
, gdbarch
);
2844 dwarf2_frame_set_init_reg (gdbarch
, aarch64_dwarf2_frame_init_reg
);
2846 /* Add some default predicates. */
2847 frame_unwind_append_unwinder (gdbarch
, &aarch64_stub_unwind
);
2848 dwarf2_append_unwinders (gdbarch
);
2849 frame_unwind_append_unwinder (gdbarch
, &aarch64_prologue_unwind
);
2851 frame_base_set_default (gdbarch
, &aarch64_normal_base
);
2853 /* Now we have tuned the configuration, set a few final things,
2854 based on what the OS ABI has told us. */
2856 if (tdep
->jb_pc
>= 0)
2857 set_gdbarch_get_longjmp_target (gdbarch
, aarch64_get_longjmp_target
);
2859 set_gdbarch_gen_return_address (gdbarch
, aarch64_gen_return_address
);
2861 tdesc_use_registers (gdbarch
, tdesc
, tdesc_data
);
2863 /* Add standard register aliases. */
2864 for (i
= 0; i
< ARRAY_SIZE (aarch64_register_aliases
); i
++)
2865 user_reg_add (gdbarch
, aarch64_register_aliases
[i
].name
,
2866 value_of_aarch64_user_reg
,
2867 &aarch64_register_aliases
[i
].regnum
);
2873 aarch64_dump_tdep (struct gdbarch
*gdbarch
, struct ui_file
*file
)
2875 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2880 fprintf_unfiltered (file
, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
2881 paddress (gdbarch
, tdep
->lowest_pc
));
2884 /* Suppress warning from -Wmissing-prototypes. */
2885 extern initialize_file_ftype _initialize_aarch64_tdep
;
/* Module initializer: registers the AArch64 architecture with GDB's
   gdbarch framework, registers the built-in target description, and
   adds the "set/show debug aarch64" maintenance commands.

   NOTE(review): the extraction dropped some lines of this function
   (function header, the trailing argument of gdbarch_register — likely
   aarch64_dump_tdep — and the set/show callback arguments of
   add_setshow_boolean_cmd); confirm against upstream gdb/aarch64-tdep.c
   before editing.  */
2888 _initialize_aarch64_tdep (void)
2890 gdbarch_register (bfd_arch_aarch64
, aarch64_gdbarch_init
,
/* Make the XML-generated description available to tdesc lookup.  */
2893 initialize_tdesc_aarch64 ();
2895 /* Debug this file's internals. */
2896 add_setshow_boolean_cmd ("aarch64", class_maintenance
, &aarch64_debug
, _("\
2897 Set AArch64 debugging."), _("\
2898 Show AArch64 debugging."), _("\
2899 When on, AArch64 specific debugging is enabled."),
2902 &setdebuglist
, &showdebuglist
);
2905 /* AArch64 process record-replay related structures, defines etc. */
2907 #define submask(x) ((1L << ((x) + 1)) - 1)
2908 #define bit(obj,st) (((obj) >> (st)) & 1)
2909 #define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
2911 #define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
2914 unsigned int reg_len = LENGTH; \
2917 REGS = XNEWVEC (uint32_t, reg_len); \
2918 memcpy(®S[0], &RECORD_BUF[0], sizeof(uint32_t)*LENGTH); \
2923 #define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
2926 unsigned int mem_len = LENGTH; \
2929 MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
2930 memcpy(&MEMS->len, &RECORD_BUF[0], \
2931 sizeof(struct aarch64_mem_r) * LENGTH); \
2936 /* AArch64 record/replay structures and enumerations. */
2938 struct aarch64_mem_r
2940 uint64_t len
; /* Record length. */
2941 uint64_t addr
; /* Memory address. */
2944 enum aarch64_record_result
2946 AARCH64_RECORD_SUCCESS
,
2947 AARCH64_RECORD_FAILURE
,
2948 AARCH64_RECORD_UNSUPPORTED
,
2949 AARCH64_RECORD_UNKNOWN
2952 typedef struct insn_decode_record_t
2954 struct gdbarch
*gdbarch
;
2955 struct regcache
*regcache
;
2956 CORE_ADDR this_addr
; /* Address of insn to be recorded. */
2957 uint32_t aarch64_insn
; /* Insn to be recorded. */
2958 uint32_t mem_rec_count
; /* Count of memory records. */
2959 uint32_t reg_rec_count
; /* Count of register records. */
2960 uint32_t *aarch64_regs
; /* Registers to be recorded. */
2961 struct aarch64_mem_r
*aarch64_mems
; /* Memory locations to be recorded. */
2962 } insn_decode_record
;
2964 /* Record handler for data processing - register instructions. */
2967 aarch64_record_data_proc_reg (insn_decode_record
*aarch64_insn_r
)
2969 uint8_t reg_rd
, insn_bits24_27
, insn_bits21_23
;
2970 uint32_t record_buf
[4];
2972 reg_rd
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
2973 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
2974 insn_bits21_23
= bits (aarch64_insn_r
->aarch64_insn
, 21, 23);
2976 if (!bit (aarch64_insn_r
->aarch64_insn
, 28))
2980 /* Logical (shifted register). */
2981 if (insn_bits24_27
== 0x0a)
2982 setflags
= (bits (aarch64_insn_r
->aarch64_insn
, 29, 30) == 0x03);
2984 else if (insn_bits24_27
== 0x0b)
2985 setflags
= bit (aarch64_insn_r
->aarch64_insn
, 29);
2987 return AARCH64_RECORD_UNKNOWN
;
2989 record_buf
[0] = reg_rd
;
2990 aarch64_insn_r
->reg_rec_count
= 1;
2992 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_CPSR_REGNUM
;
2996 if (insn_bits24_27
== 0x0b)
2998 /* Data-processing (3 source). */
2999 record_buf
[0] = reg_rd
;
3000 aarch64_insn_r
->reg_rec_count
= 1;
3002 else if (insn_bits24_27
== 0x0a)
3004 if (insn_bits21_23
== 0x00)
3006 /* Add/subtract (with carry). */
3007 record_buf
[0] = reg_rd
;
3008 aarch64_insn_r
->reg_rec_count
= 1;
3009 if (bit (aarch64_insn_r
->aarch64_insn
, 29))
3011 record_buf
[1] = AARCH64_CPSR_REGNUM
;
3012 aarch64_insn_r
->reg_rec_count
= 2;
3015 else if (insn_bits21_23
== 0x02)
3017 /* Conditional compare (register) and conditional compare
3018 (immediate) instructions. */
3019 record_buf
[0] = AARCH64_CPSR_REGNUM
;
3020 aarch64_insn_r
->reg_rec_count
= 1;
3022 else if (insn_bits21_23
== 0x04 || insn_bits21_23
== 0x06)
3024 /* CConditional select. */
3025 /* Data-processing (2 source). */
3026 /* Data-processing (1 source). */
3027 record_buf
[0] = reg_rd
;
3028 aarch64_insn_r
->reg_rec_count
= 1;
3031 return AARCH64_RECORD_UNKNOWN
;
3035 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3037 return AARCH64_RECORD_SUCCESS
;
3040 /* Record handler for data processing - immediate instructions. */
3043 aarch64_record_data_proc_imm (insn_decode_record
*aarch64_insn_r
)
3045 uint8_t reg_rd
, insn_bit28
, insn_bit23
, insn_bits24_27
, setflags
;
3046 uint32_t record_buf
[4];
3048 reg_rd
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3049 insn_bit28
= bit (aarch64_insn_r
->aarch64_insn
, 28);
3050 insn_bit23
= bit (aarch64_insn_r
->aarch64_insn
, 23);
3051 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
3053 if (insn_bits24_27
== 0x00 /* PC rel addressing. */
3054 || insn_bits24_27
== 0x03 /* Bitfield and Extract. */
3055 || (insn_bits24_27
== 0x02 && insn_bit23
)) /* Move wide (immediate). */
3057 record_buf
[0] = reg_rd
;
3058 aarch64_insn_r
->reg_rec_count
= 1;
3060 else if (insn_bits24_27
== 0x01)
3062 /* Add/Subtract (immediate). */
3063 setflags
= bit (aarch64_insn_r
->aarch64_insn
, 29);
3064 record_buf
[0] = reg_rd
;
3065 aarch64_insn_r
->reg_rec_count
= 1;
3067 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_CPSR_REGNUM
;
3069 else if (insn_bits24_27
== 0x02 && !insn_bit23
)
3071 /* Logical (immediate). */
3072 setflags
= bits (aarch64_insn_r
->aarch64_insn
, 29, 30) == 0x03;
3073 record_buf
[0] = reg_rd
;
3074 aarch64_insn_r
->reg_rec_count
= 1;
3076 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_CPSR_REGNUM
;
3079 return AARCH64_RECORD_UNKNOWN
;
3081 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3083 return AARCH64_RECORD_SUCCESS
;
3086 /* Record handler for branch, exception generation and system instructions. */
3089 aarch64_record_branch_except_sys (insn_decode_record
*aarch64_insn_r
)
3091 struct gdbarch_tdep
*tdep
= gdbarch_tdep (aarch64_insn_r
->gdbarch
);
3092 uint8_t insn_bits24_27
, insn_bits28_31
, insn_bits22_23
;
3093 uint32_t record_buf
[4];
3095 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
3096 insn_bits28_31
= bits (aarch64_insn_r
->aarch64_insn
, 28, 31);
3097 insn_bits22_23
= bits (aarch64_insn_r
->aarch64_insn
, 22, 23);
3099 if (insn_bits28_31
== 0x0d)
3101 /* Exception generation instructions. */
3102 if (insn_bits24_27
== 0x04)
3104 if (!bits (aarch64_insn_r
->aarch64_insn
, 2, 4)
3105 && !bits (aarch64_insn_r
->aarch64_insn
, 21, 23)
3106 && bits (aarch64_insn_r
->aarch64_insn
, 0, 1) == 0x01)
3108 ULONGEST svc_number
;
3110 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, 8,
3112 return tdep
->aarch64_syscall_record (aarch64_insn_r
->regcache
,
3116 return AARCH64_RECORD_UNSUPPORTED
;
3118 /* System instructions. */
3119 else if (insn_bits24_27
== 0x05 && insn_bits22_23
== 0x00)
3121 uint32_t reg_rt
, reg_crn
;
3123 reg_rt
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3124 reg_crn
= bits (aarch64_insn_r
->aarch64_insn
, 12, 15);
3126 /* Record rt in case of sysl and mrs instructions. */
3127 if (bit (aarch64_insn_r
->aarch64_insn
, 21))
3129 record_buf
[0] = reg_rt
;
3130 aarch64_insn_r
->reg_rec_count
= 1;
3132 /* Record cpsr for hint and msr(immediate) instructions. */
3133 else if (reg_crn
== 0x02 || reg_crn
== 0x04)
3135 record_buf
[0] = AARCH64_CPSR_REGNUM
;
3136 aarch64_insn_r
->reg_rec_count
= 1;
3139 /* Unconditional branch (register). */
3140 else if((insn_bits24_27
& 0x0e) == 0x06)
3142 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_PC_REGNUM
;
3143 if (bits (aarch64_insn_r
->aarch64_insn
, 21, 22) == 0x01)
3144 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_LR_REGNUM
;
3147 return AARCH64_RECORD_UNKNOWN
;
3149 /* Unconditional branch (immediate). */
3150 else if ((insn_bits28_31
& 0x07) == 0x01 && (insn_bits24_27
& 0x0c) == 0x04)
3152 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_PC_REGNUM
;
3153 if (bit (aarch64_insn_r
->aarch64_insn
, 31))
3154 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_LR_REGNUM
;
3157 /* Compare & branch (immediate), Test & branch (immediate) and
3158 Conditional branch (immediate). */
3159 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_PC_REGNUM
;
3161 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3163 return AARCH64_RECORD_SUCCESS
;
3166 /* Record handler for advanced SIMD load and store instructions. */
3169 aarch64_record_asimd_load_store (insn_decode_record
*aarch64_insn_r
)
3172 uint64_t addr_offset
= 0;
3173 uint32_t record_buf
[24];
3174 uint64_t record_buf_mem
[24];
3175 uint32_t reg_rn
, reg_rt
;
3176 uint32_t reg_index
= 0, mem_index
= 0;
3177 uint8_t opcode_bits
, size_bits
;
3179 reg_rt
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3180 reg_rn
= bits (aarch64_insn_r
->aarch64_insn
, 5, 9);
3181 size_bits
= bits (aarch64_insn_r
->aarch64_insn
, 10, 11);
3182 opcode_bits
= bits (aarch64_insn_r
->aarch64_insn
, 12, 15);
3183 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
, &address
);
3187 fprintf_unfiltered (gdb_stdlog
,
3188 "Process record: Advanced SIMD load/store\n");
3191 /* Load/store single structure. */
3192 if (bit (aarch64_insn_r
->aarch64_insn
, 24))
3194 uint8_t sindex
, scale
, selem
, esize
, replicate
= 0;
3195 scale
= opcode_bits
>> 2;
3196 selem
= ((opcode_bits
& 0x02) |
3197 bit (aarch64_insn_r
->aarch64_insn
, 21)) + 1;
3201 if (size_bits
& 0x01)
3202 return AARCH64_RECORD_UNKNOWN
;
3205 if ((size_bits
>> 1) & 0x01)
3206 return AARCH64_RECORD_UNKNOWN
;
3207 if (size_bits
& 0x01)
3209 if (!((opcode_bits
>> 1) & 0x01))
3212 return AARCH64_RECORD_UNKNOWN
;
3216 if (bit (aarch64_insn_r
->aarch64_insn
, 22) && !(opcode_bits
& 0x01))
3223 return AARCH64_RECORD_UNKNOWN
;
3229 for (sindex
= 0; sindex
< selem
; sindex
++)
3231 record_buf
[reg_index
++] = reg_rt
+ AARCH64_V0_REGNUM
;
3232 reg_rt
= (reg_rt
+ 1) % 32;
3236 for (sindex
= 0; sindex
< selem
; sindex
++)
3237 if (bit (aarch64_insn_r
->aarch64_insn
, 22))
3238 record_buf
[reg_index
++] = reg_rt
+ AARCH64_V0_REGNUM
;
3241 record_buf_mem
[mem_index
++] = esize
/ 8;
3242 record_buf_mem
[mem_index
++] = address
+ addr_offset
;
3244 addr_offset
= addr_offset
+ (esize
/ 8);
3245 reg_rt
= (reg_rt
+ 1) % 32;
3248 /* Load/store multiple structure. */
3251 uint8_t selem
, esize
, rpt
, elements
;
3252 uint8_t eindex
, rindex
;
3254 esize
= 8 << size_bits
;
3255 if (bit (aarch64_insn_r
->aarch64_insn
, 30))
3256 elements
= 128 / esize
;
3258 elements
= 64 / esize
;
3260 switch (opcode_bits
)
3262 /*LD/ST4 (4 Registers). */
3267 /*LD/ST1 (4 Registers). */
3272 /*LD/ST3 (3 Registers). */
3277 /*LD/ST1 (3 Registers). */
3282 /*LD/ST1 (1 Register). */
3287 /*LD/ST2 (2 Registers). */
3292 /*LD/ST1 (2 Registers). */
3298 return AARCH64_RECORD_UNSUPPORTED
;
3301 for (rindex
= 0; rindex
< rpt
; rindex
++)
3302 for (eindex
= 0; eindex
< elements
; eindex
++)
3304 uint8_t reg_tt
, sindex
;
3305 reg_tt
= (reg_rt
+ rindex
) % 32;
3306 for (sindex
= 0; sindex
< selem
; sindex
++)
3308 if (bit (aarch64_insn_r
->aarch64_insn
, 22))
3309 record_buf
[reg_index
++] = reg_tt
+ AARCH64_V0_REGNUM
;
3312 record_buf_mem
[mem_index
++] = esize
/ 8;
3313 record_buf_mem
[mem_index
++] = address
+ addr_offset
;
3315 addr_offset
= addr_offset
+ (esize
/ 8);
3316 reg_tt
= (reg_tt
+ 1) % 32;
3321 if (bit (aarch64_insn_r
->aarch64_insn
, 23))
3322 record_buf
[reg_index
++] = reg_rn
;
3324 aarch64_insn_r
->reg_rec_count
= reg_index
;
3325 aarch64_insn_r
->mem_rec_count
= mem_index
/ 2;
3326 MEM_ALLOC (aarch64_insn_r
->aarch64_mems
, aarch64_insn_r
->mem_rec_count
,
3328 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3330 return AARCH64_RECORD_SUCCESS
;
3333 /* Record handler for load and store instructions. */
3336 aarch64_record_load_store (insn_decode_record
*aarch64_insn_r
)
3338 uint8_t insn_bits24_27
, insn_bits28_29
, insn_bits10_11
;
3339 uint8_t insn_bit23
, insn_bit21
;
3340 uint8_t opc
, size_bits
, ld_flag
, vector_flag
;
3341 uint32_t reg_rn
, reg_rt
, reg_rt2
;
3342 uint64_t datasize
, offset
;
3343 uint32_t record_buf
[8];
3344 uint64_t record_buf_mem
[8];
3347 insn_bits10_11
= bits (aarch64_insn_r
->aarch64_insn
, 10, 11);
3348 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
3349 insn_bits28_29
= bits (aarch64_insn_r
->aarch64_insn
, 28, 29);
3350 insn_bit21
= bit (aarch64_insn_r
->aarch64_insn
, 21);
3351 insn_bit23
= bit (aarch64_insn_r
->aarch64_insn
, 23);
3352 ld_flag
= bit (aarch64_insn_r
->aarch64_insn
, 22);
3353 vector_flag
= bit (aarch64_insn_r
->aarch64_insn
, 26);
3354 reg_rt
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3355 reg_rn
= bits (aarch64_insn_r
->aarch64_insn
, 5, 9);
3356 reg_rt2
= bits (aarch64_insn_r
->aarch64_insn
, 10, 14);
3357 size_bits
= bits (aarch64_insn_r
->aarch64_insn
, 30, 31);
3359 /* Load/store exclusive. */
3360 if (insn_bits24_27
== 0x08 && insn_bits28_29
== 0x00)
3364 fprintf_unfiltered (gdb_stdlog
,
3365 "Process record: load/store exclusive\n");
3370 record_buf
[0] = reg_rt
;
3371 aarch64_insn_r
->reg_rec_count
= 1;
3374 record_buf
[1] = reg_rt2
;
3375 aarch64_insn_r
->reg_rec_count
= 2;
3381 datasize
= (8 << size_bits
) * 2;
3383 datasize
= (8 << size_bits
);
3384 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
3386 record_buf_mem
[0] = datasize
/ 8;
3387 record_buf_mem
[1] = address
;
3388 aarch64_insn_r
->mem_rec_count
= 1;
3391 /* Save register rs. */
3392 record_buf
[0] = bits (aarch64_insn_r
->aarch64_insn
, 16, 20);
3393 aarch64_insn_r
->reg_rec_count
= 1;
3397 /* Load register (literal) instructions decoding. */
3398 else if ((insn_bits24_27
& 0x0b) == 0x08 && insn_bits28_29
== 0x01)
3402 fprintf_unfiltered (gdb_stdlog
,
3403 "Process record: load register (literal)\n");
3406 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
3408 record_buf
[0] = reg_rt
;
3409 aarch64_insn_r
->reg_rec_count
= 1;
3411 /* All types of load/store pair instructions decoding. */
3412 else if ((insn_bits24_27
& 0x0a) == 0x08 && insn_bits28_29
== 0x02)
3416 fprintf_unfiltered (gdb_stdlog
,
3417 "Process record: load/store pair\n");
3424 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
3425 record_buf
[1] = reg_rt2
+ AARCH64_V0_REGNUM
;
3429 record_buf
[0] = reg_rt
;
3430 record_buf
[1] = reg_rt2
;
3432 aarch64_insn_r
->reg_rec_count
= 2;
3437 imm7_off
= bits (aarch64_insn_r
->aarch64_insn
, 15, 21);
3439 size_bits
= size_bits
>> 1;
3440 datasize
= 8 << (2 + size_bits
);
3441 offset
= (imm7_off
& 0x40) ? (~imm7_off
& 0x007f) + 1 : imm7_off
;
3442 offset
= offset
<< (2 + size_bits
);
3443 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
3445 if (!((insn_bits24_27
& 0x0b) == 0x08 && insn_bit23
))
3447 if (imm7_off
& 0x40)
3448 address
= address
- offset
;
3450 address
= address
+ offset
;
3453 record_buf_mem
[0] = datasize
/ 8;
3454 record_buf_mem
[1] = address
;
3455 record_buf_mem
[2] = datasize
/ 8;
3456 record_buf_mem
[3] = address
+ (datasize
/ 8);
3457 aarch64_insn_r
->mem_rec_count
= 2;
3459 if (bit (aarch64_insn_r
->aarch64_insn
, 23))
3460 record_buf
[aarch64_insn_r
->reg_rec_count
++] = reg_rn
;
3462 /* Load/store register (unsigned immediate) instructions. */
3463 else if ((insn_bits24_27
& 0x0b) == 0x09 && insn_bits28_29
== 0x03)
3465 opc
= bits (aarch64_insn_r
->aarch64_insn
, 22, 23);
3472 if (size_bits
!= 0x03)
3475 return AARCH64_RECORD_UNKNOWN
;
3479 fprintf_unfiltered (gdb_stdlog
,
3480 "Process record: load/store (unsigned immediate):"
3481 " size %x V %d opc %x\n", size_bits
, vector_flag
,
3487 offset
= bits (aarch64_insn_r
->aarch64_insn
, 10, 21);
3488 datasize
= 8 << size_bits
;
3489 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
3491 offset
= offset
<< size_bits
;
3492 address
= address
+ offset
;
3494 record_buf_mem
[0] = datasize
>> 3;
3495 record_buf_mem
[1] = address
;
3496 aarch64_insn_r
->mem_rec_count
= 1;
3501 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
3503 record_buf
[0] = reg_rt
;
3504 aarch64_insn_r
->reg_rec_count
= 1;
3507 /* Load/store register (register offset) instructions. */
3508 else if ((insn_bits24_27
& 0x0b) == 0x08 && insn_bits28_29
== 0x03
3509 && insn_bits10_11
== 0x02 && insn_bit21
)
3513 fprintf_unfiltered (gdb_stdlog
,
3514 "Process record: load/store (register offset)\n");
3516 opc
= bits (aarch64_insn_r
->aarch64_insn
, 22, 23);
3523 if (size_bits
!= 0x03)
3526 return AARCH64_RECORD_UNKNOWN
;
3530 uint64_t reg_rm_val
;
3531 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
,
3532 bits (aarch64_insn_r
->aarch64_insn
, 16, 20), ®_rm_val
);
3533 if (bit (aarch64_insn_r
->aarch64_insn
, 12))
3534 offset
= reg_rm_val
<< size_bits
;
3536 offset
= reg_rm_val
;
3537 datasize
= 8 << size_bits
;
3538 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
3540 address
= address
+ offset
;
3541 record_buf_mem
[0] = datasize
>> 3;
3542 record_buf_mem
[1] = address
;
3543 aarch64_insn_r
->mem_rec_count
= 1;
3548 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
3550 record_buf
[0] = reg_rt
;
3551 aarch64_insn_r
->reg_rec_count
= 1;
3554 /* Load/store register (immediate and unprivileged) instructions. */
3555 else if ((insn_bits24_27
& 0x0b) == 0x08 && insn_bits28_29
== 0x03
3560 fprintf_unfiltered (gdb_stdlog
,
3561 "Process record: load/store (immediate and unprivileged)\n");
3563 opc
= bits (aarch64_insn_r
->aarch64_insn
, 22, 23);
3570 if (size_bits
!= 0x03)
3573 return AARCH64_RECORD_UNKNOWN
;
3578 imm9_off
= bits (aarch64_insn_r
->aarch64_insn
, 12, 20);
3579 offset
= (imm9_off
& 0x0100) ? (((~imm9_off
) & 0x01ff) + 1) : imm9_off
;
3580 datasize
= 8 << size_bits
;
3581 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
3583 if (insn_bits10_11
!= 0x01)
3585 if (imm9_off
& 0x0100)
3586 address
= address
- offset
;
3588 address
= address
+ offset
;
3590 record_buf_mem
[0] = datasize
>> 3;
3591 record_buf_mem
[1] = address
;
3592 aarch64_insn_r
->mem_rec_count
= 1;
3597 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
3599 record_buf
[0] = reg_rt
;
3600 aarch64_insn_r
->reg_rec_count
= 1;
3602 if (insn_bits10_11
== 0x01 || insn_bits10_11
== 0x03)
3603 record_buf
[aarch64_insn_r
->reg_rec_count
++] = reg_rn
;
3605 /* Advanced SIMD load/store instructions. */
3607 return aarch64_record_asimd_load_store (aarch64_insn_r
);
3609 MEM_ALLOC (aarch64_insn_r
->aarch64_mems
, aarch64_insn_r
->mem_rec_count
,
3611 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3613 return AARCH64_RECORD_SUCCESS
;
3616 /* Record handler for data processing SIMD and floating point instructions. */
3619 aarch64_record_data_proc_simd_fp (insn_decode_record
*aarch64_insn_r
)
3621 uint8_t insn_bit21
, opcode
, rmode
, reg_rd
;
3622 uint8_t insn_bits24_27
, insn_bits28_31
, insn_bits10_11
, insn_bits12_15
;
3623 uint8_t insn_bits11_14
;
3624 uint32_t record_buf
[2];
3626 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
3627 insn_bits28_31
= bits (aarch64_insn_r
->aarch64_insn
, 28, 31);
3628 insn_bits10_11
= bits (aarch64_insn_r
->aarch64_insn
, 10, 11);
3629 insn_bits12_15
= bits (aarch64_insn_r
->aarch64_insn
, 12, 15);
3630 insn_bits11_14
= bits (aarch64_insn_r
->aarch64_insn
, 11, 14);
3631 opcode
= bits (aarch64_insn_r
->aarch64_insn
, 16, 18);
3632 rmode
= bits (aarch64_insn_r
->aarch64_insn
, 19, 20);
3633 reg_rd
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3634 insn_bit21
= bit (aarch64_insn_r
->aarch64_insn
, 21);
3638 fprintf_unfiltered (gdb_stdlog
,
3639 "Process record: data processing SIMD/FP: ");
3642 if ((insn_bits28_31
& 0x05) == 0x01 && insn_bits24_27
== 0x0e)
3644 /* Floating point - fixed point conversion instructions. */
3648 fprintf_unfiltered (gdb_stdlog
, "FP - fixed point conversion");
3650 if ((opcode
>> 1) == 0x0 && rmode
== 0x03)
3651 record_buf
[0] = reg_rd
;
3653 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3655 /* Floating point - conditional compare instructions. */
3656 else if (insn_bits10_11
== 0x01)
3659 fprintf_unfiltered (gdb_stdlog
, "FP - conditional compare");
3661 record_buf
[0] = AARCH64_CPSR_REGNUM
;
3663 /* Floating point - data processing (2-source) and
3664 conditional select instructions. */
3665 else if (insn_bits10_11
== 0x02 || insn_bits10_11
== 0x03)
3668 fprintf_unfiltered (gdb_stdlog
, "FP - DP (2-source)");
3670 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3672 else if (insn_bits10_11
== 0x00)
3674 /* Floating point - immediate instructions. */
3675 if ((insn_bits12_15
& 0x01) == 0x01
3676 || (insn_bits12_15
& 0x07) == 0x04)
3679 fprintf_unfiltered (gdb_stdlog
, "FP - immediate");
3680 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3682 /* Floating point - compare instructions. */
3683 else if ((insn_bits12_15
& 0x03) == 0x02)
3686 fprintf_unfiltered (gdb_stdlog
, "FP - immediate");
3687 record_buf
[0] = AARCH64_CPSR_REGNUM
;
3689 /* Floating point - integer conversions instructions. */
3690 else if (insn_bits12_15
== 0x00)
3692 /* Convert float to integer instruction. */
3693 if (!(opcode
>> 1) || ((opcode
>> 1) == 0x02 && !rmode
))
3696 fprintf_unfiltered (gdb_stdlog
, "float to int conversion");
3698 record_buf
[0] = reg_rd
+ AARCH64_X0_REGNUM
;
3700 /* Convert integer to float instruction. */
3701 else if ((opcode
>> 1) == 0x01 && !rmode
)
3704 fprintf_unfiltered (gdb_stdlog
, "int to float conversion");
3706 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3708 /* Move float to integer instruction. */
3709 else if ((opcode
>> 1) == 0x03)
3712 fprintf_unfiltered (gdb_stdlog
, "move float to int");
3714 if (!(opcode
& 0x01))
3715 record_buf
[0] = reg_rd
+ AARCH64_X0_REGNUM
;
3717 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3720 return AARCH64_RECORD_UNKNOWN
;
3723 return AARCH64_RECORD_UNKNOWN
;
3726 return AARCH64_RECORD_UNKNOWN
;
3728 else if ((insn_bits28_31
& 0x09) == 0x00 && insn_bits24_27
== 0x0e)
3731 fprintf_unfiltered (gdb_stdlog
, "SIMD copy");
3733 /* Advanced SIMD copy instructions. */
3734 if (!bits (aarch64_insn_r
->aarch64_insn
, 21, 23)
3735 && !bit (aarch64_insn_r
->aarch64_insn
, 15)
3736 && bit (aarch64_insn_r
->aarch64_insn
, 10))
3738 if (insn_bits11_14
== 0x05 || insn_bits11_14
== 0x07)
3739 record_buf
[0] = reg_rd
+ AARCH64_X0_REGNUM
;
3741 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3744 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3746 /* All remaining floating point or advanced SIMD instructions. */
3750 fprintf_unfiltered (gdb_stdlog
, "all remain");
3752 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3756 fprintf_unfiltered (gdb_stdlog
, "\n");
3758 aarch64_insn_r
->reg_rec_count
++;
3759 gdb_assert (aarch64_insn_r
->reg_rec_count
== 1);
3760 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3762 return AARCH64_RECORD_SUCCESS
;
3765 /* Decodes insns type and invokes its record handler. */
3768 aarch64_record_decode_insn_handler (insn_decode_record
*aarch64_insn_r
)
3770 uint32_t ins_bit25
, ins_bit26
, ins_bit27
, ins_bit28
;
3772 ins_bit25
= bit (aarch64_insn_r
->aarch64_insn
, 25);
3773 ins_bit26
= bit (aarch64_insn_r
->aarch64_insn
, 26);
3774 ins_bit27
= bit (aarch64_insn_r
->aarch64_insn
, 27);
3775 ins_bit28
= bit (aarch64_insn_r
->aarch64_insn
, 28);
3777 /* Data processing - immediate instructions. */
3778 if (!ins_bit26
&& !ins_bit27
&& ins_bit28
)
3779 return aarch64_record_data_proc_imm (aarch64_insn_r
);
3781 /* Branch, exception generation and system instructions. */
3782 if (ins_bit26
&& !ins_bit27
&& ins_bit28
)
3783 return aarch64_record_branch_except_sys (aarch64_insn_r
);
3785 /* Load and store instructions. */
3786 if (!ins_bit25
&& ins_bit27
)
3787 return aarch64_record_load_store (aarch64_insn_r
);
3789 /* Data processing - register instructions. */
3790 if (ins_bit25
&& !ins_bit26
&& ins_bit27
)
3791 return aarch64_record_data_proc_reg (aarch64_insn_r
);
3793 /* Data processing - SIMD and floating point instructions. */
3794 if (ins_bit25
&& ins_bit26
&& ins_bit27
)
3795 return aarch64_record_data_proc_simd_fp (aarch64_insn_r
);
3797 return AARCH64_RECORD_UNSUPPORTED
;
3800 /* Cleans up local record registers and memory allocations. */
3803 deallocate_reg_mem (insn_decode_record
*record
)
3805 xfree (record
->aarch64_regs
);
3806 xfree (record
->aarch64_mems
);
3809 /* Parse the current instruction and record the values of the registers and
3810 memory that will be changed in current instruction to record_arch_list
3811 return -1 if something is wrong. */
3814 aarch64_process_record (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
3815 CORE_ADDR insn_addr
)
3817 uint32_t rec_no
= 0;
3818 uint8_t insn_size
= 4;
3820 ULONGEST t_bit
= 0, insn_id
= 0;
3821 gdb_byte buf
[insn_size
];
3822 insn_decode_record aarch64_record
;
3824 memset (&buf
[0], 0, insn_size
);
3825 memset (&aarch64_record
, 0, sizeof (insn_decode_record
));
3826 target_read_memory (insn_addr
, &buf
[0], insn_size
);
3827 aarch64_record
.aarch64_insn
3828 = (uint32_t) extract_unsigned_integer (&buf
[0],
3830 gdbarch_byte_order (gdbarch
));
3831 aarch64_record
.regcache
= regcache
;
3832 aarch64_record
.this_addr
= insn_addr
;
3833 aarch64_record
.gdbarch
= gdbarch
;
3835 ret
= aarch64_record_decode_insn_handler (&aarch64_record
);
3836 if (ret
== AARCH64_RECORD_UNSUPPORTED
)
3838 printf_unfiltered (_("Process record does not support instruction "
3839 "0x%0x at address %s.\n"),
3840 aarch64_record
.aarch64_insn
,
3841 paddress (gdbarch
, insn_addr
));
3847 /* Record registers. */
3848 record_full_arch_list_add_reg (aarch64_record
.regcache
,
3850 /* Always record register CPSR. */
3851 record_full_arch_list_add_reg (aarch64_record
.regcache
,
3852 AARCH64_CPSR_REGNUM
);
3853 if (aarch64_record
.aarch64_regs
)
3854 for (rec_no
= 0; rec_no
< aarch64_record
.reg_rec_count
; rec_no
++)
3855 if (record_full_arch_list_add_reg (aarch64_record
.regcache
,
3856 aarch64_record
.aarch64_regs
[rec_no
]))
3859 /* Record memories. */
3860 if (aarch64_record
.aarch64_mems
)
3861 for (rec_no
= 0; rec_no
< aarch64_record
.mem_rec_count
; rec_no
++)
3862 if (record_full_arch_list_add_mem
3863 ((CORE_ADDR
)aarch64_record
.aarch64_mems
[rec_no
].addr
,
3864 aarch64_record
.aarch64_mems
[rec_no
].len
))
3867 if (record_full_arch_list_add_end ())
3871 deallocate_reg_mem (&aarch64_record
);