1 /* Common target dependent code for GDB on AArch64 systems.
3 Copyright (C) 2009-2015 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
6 This file is part of GDB.
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
29 #include "reggroups.h"
32 #include "arch-utils.h"
34 #include "frame-unwind.h"
35 #include "frame-base.h"
36 #include "trad-frame.h"
38 #include "dwarf2-frame.h"
40 #include "prologue-value.h"
41 #include "target-descriptions.h"
42 #include "user-regs.h"
48 #include "aarch64-tdep.h"
51 #include "elf/aarch64.h"
56 #include "record-full.h"
58 #include "features/aarch64.c"
/* Pseudo register base numbers.  Each bank below holds 32 registers;
   the D/S/H/B views alias slices of the V registers.  */
#define AARCH64_Q0_REGNUM 0
#define AARCH64_D0_REGNUM (AARCH64_Q0_REGNUM + 32)
#define AARCH64_S0_REGNUM (AARCH64_D0_REGNUM + 32)
#define AARCH64_H0_REGNUM (AARCH64_S0_REGNUM + 32)
#define AARCH64_B0_REGNUM (AARCH64_H0_REGNUM + 32)
67 /* The standard register names, and all the valid aliases for them. */
70 const char *const name
;
72 } aarch64_register_aliases
[] =
74 /* 64-bit register names. */
75 {"fp", AARCH64_FP_REGNUM
},
76 {"lr", AARCH64_LR_REGNUM
},
77 {"sp", AARCH64_SP_REGNUM
},
79 /* 32-bit register names. */
80 {"w0", AARCH64_X0_REGNUM
+ 0},
81 {"w1", AARCH64_X0_REGNUM
+ 1},
82 {"w2", AARCH64_X0_REGNUM
+ 2},
83 {"w3", AARCH64_X0_REGNUM
+ 3},
84 {"w4", AARCH64_X0_REGNUM
+ 4},
85 {"w5", AARCH64_X0_REGNUM
+ 5},
86 {"w6", AARCH64_X0_REGNUM
+ 6},
87 {"w7", AARCH64_X0_REGNUM
+ 7},
88 {"w8", AARCH64_X0_REGNUM
+ 8},
89 {"w9", AARCH64_X0_REGNUM
+ 9},
90 {"w10", AARCH64_X0_REGNUM
+ 10},
91 {"w11", AARCH64_X0_REGNUM
+ 11},
92 {"w12", AARCH64_X0_REGNUM
+ 12},
93 {"w13", AARCH64_X0_REGNUM
+ 13},
94 {"w14", AARCH64_X0_REGNUM
+ 14},
95 {"w15", AARCH64_X0_REGNUM
+ 15},
96 {"w16", AARCH64_X0_REGNUM
+ 16},
97 {"w17", AARCH64_X0_REGNUM
+ 17},
98 {"w18", AARCH64_X0_REGNUM
+ 18},
99 {"w19", AARCH64_X0_REGNUM
+ 19},
100 {"w20", AARCH64_X0_REGNUM
+ 20},
101 {"w21", AARCH64_X0_REGNUM
+ 21},
102 {"w22", AARCH64_X0_REGNUM
+ 22},
103 {"w23", AARCH64_X0_REGNUM
+ 23},
104 {"w24", AARCH64_X0_REGNUM
+ 24},
105 {"w25", AARCH64_X0_REGNUM
+ 25},
106 {"w26", AARCH64_X0_REGNUM
+ 26},
107 {"w27", AARCH64_X0_REGNUM
+ 27},
108 {"w28", AARCH64_X0_REGNUM
+ 28},
109 {"w29", AARCH64_X0_REGNUM
+ 29},
110 {"w30", AARCH64_X0_REGNUM
+ 30},
113 {"ip0", AARCH64_X0_REGNUM
+ 16},
114 {"ip1", AARCH64_X0_REGNUM
+ 17}
/* The required core 'R' registers.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM!  */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  /* NOTE(review): the trailing "pc"/"cpsr" entries follow the AArch64
     raw register numbering (PC and CPSR come after SP) — confirm
     against the unmangled original.  */
  "pc", "cpsr"
};
/* The FP/SIMD 'V' registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM!  */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  /* NOTE(review): trailing FP status/control registers follow V31 in
     the raw numbering — confirm against the unmangled original.  */
  "fpsr",
  "fpcr"
};
150 /* AArch64 prologue cache structure. */
151 struct aarch64_prologue_cache
153 /* The program counter at the start of the function. It is used to
154 identify this frame as a prologue frame. */
157 /* The program counter at the time this frame was created; i.e. where
158 this function was called from. It is used to identify this frame as a
162 /* The stack pointer at the time this frame was created; i.e. the
163 caller's stack pointer when this function was called. It is used
164 to identify this frame. */
167 /* Is the target available to read from? */
170 /* The frame base for this frame is just prev_sp - frame size.
171 FRAMESIZE is the distance from the frame pointer to the
172 initial stack pointer. */
175 /* The register used to hold the frame pointer for this frame. */
178 /* Saved register offsets. */
179 struct trad_frame_saved_reg
*saved_regs
;
/* Toggle this file's internal debugging dump.  Controlled by the
   "set debug aarch64" command; zero-initialized (off) by default.  */
static int aarch64_debug;
/* Print the current setting of the aarch64 debug flag ("show debug
   aarch64" command callback).  */

static void
show_aarch64_debug (struct ui_file *file, int from_tty,
                    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
}
/* Extract a signed value from a bit field within an instruction
   encoding.

   INSN is the instruction opcode.

   WIDTH specifies the width of the bit field to extract (in bits),
   1 <= WIDTH <= 32.

   OFFSET specifies the least significant bit of the field where bits
   are numbered zero counting from least to most significant.  */

static int32_t
extract_signed_bitfield (uint32_t insn, unsigned width, unsigned offset)
{
  /* Mask of WIDTH ones, computed as a right shift of all-ones so that
     WIDTH == 32 does not require an undefined "1 << 32".  */
  uint32_t mask = 0xffffffffu >> (32 - width);
  uint32_t field = (insn >> offset) & mask;
  uint32_t sign = 1u << (width - 1);

  /* Sign-extend FIELD.  The historical "((int32_t) insn << shift_l) >>
     shift_r" idiom left-shifts a signed value into the sign bit, which
     is undefined behavior in C; the xor/subtract form below is fully
     defined (done in 64-bit arithmetic to avoid unsigned wraparound in
     the subtraction).  */
  return (int32_t) (((int64_t) (field ^ sign)) - (int64_t) sign);
}
/* Determine if specified bits within an instruction opcode matches a
   particular pattern.

   INSN is the instruction opcode.

   MASK specifies the bits within the opcode that are to be tested
   against for a match with PATTERN.  */

static int
decode_masked_match (uint32_t insn, uint32_t mask, uint32_t pattern)
{
  uint32_t significant_bits = insn & mask;

  return significant_bits == pattern;
}
225 /* Decode an opcode if it represents an immediate ADD or SUB instruction.
227 ADDR specifies the address of the opcode.
228 INSN specifies the opcode to test.
229 RD receives the 'rd' field from the decoded instruction.
230 RN receives the 'rn' field from the decoded instruction.
232 Return 1 if the opcodes matches and is decoded, otherwise 0. */
234 decode_add_sub_imm (CORE_ADDR addr
, uint32_t insn
, unsigned *rd
, unsigned *rn
,
237 if ((insn
& 0x9f000000) == 0x91000000)
242 *rd
= (insn
>> 0) & 0x1f;
243 *rn
= (insn
>> 5) & 0x1f;
244 *imm
= (insn
>> 10) & 0xfff;
245 shift
= (insn
>> 22) & 0x3;
246 op_is_sub
= (insn
>> 30) & 0x1;
264 fprintf_unfiltered (gdb_stdlog
,
265 "decode: 0x%s 0x%x add x%u, x%u, #%d\n",
266 core_addr_to_string_nz (addr
), insn
, *rd
, *rn
,
273 /* Decode an opcode if it represents an ADRP instruction.
275 ADDR specifies the address of the opcode.
276 INSN specifies the opcode to test.
277 RD receives the 'rd' field from the decoded instruction.
279 Return 1 if the opcodes matches and is decoded, otherwise 0. */
282 decode_adrp (CORE_ADDR addr
, uint32_t insn
, unsigned *rd
)
284 if (decode_masked_match (insn
, 0x9f000000, 0x90000000))
286 *rd
= (insn
>> 0) & 0x1f;
289 fprintf_unfiltered (gdb_stdlog
,
290 "decode: 0x%s 0x%x adrp x%u, #?\n",
291 core_addr_to_string_nz (addr
), insn
, *rd
);
297 /* Decode an opcode if it represents an branch immediate or branch
298 and link immediate instruction.
300 ADDR specifies the address of the opcode.
301 INSN specifies the opcode to test.
302 IS_BL receives the 'op' bit from the decoded instruction.
303 OFFSET receives the immediate offset from the decoded instruction.
305 Return 1 if the opcodes matches and is decoded, otherwise 0. */
308 decode_b (CORE_ADDR addr
, uint32_t insn
, int *is_bl
, int32_t *offset
)
310 /* b 0001 01ii iiii iiii iiii iiii iiii iiii */
311 /* bl 1001 01ii iiii iiii iiii iiii iiii iiii */
312 if (decode_masked_match (insn
, 0x7c000000, 0x14000000))
314 *is_bl
= (insn
>> 31) & 0x1;
315 *offset
= extract_signed_bitfield (insn
, 26, 0) << 2;
318 fprintf_unfiltered (gdb_stdlog
,
319 "decode: 0x%s 0x%x %s 0x%s\n",
320 core_addr_to_string_nz (addr
), insn
,
322 core_addr_to_string_nz (addr
+ *offset
));
329 /* Decode an opcode if it represents a conditional branch instruction.
331 ADDR specifies the address of the opcode.
332 INSN specifies the opcode to test.
333 COND receives the branch condition field from the decoded
335 OFFSET receives the immediate offset from the decoded instruction.
337 Return 1 if the opcodes matches and is decoded, otherwise 0. */
340 decode_bcond (CORE_ADDR addr
, uint32_t insn
, unsigned *cond
, int32_t *offset
)
342 if (decode_masked_match (insn
, 0xfe000000, 0x54000000))
344 *cond
= (insn
>> 0) & 0xf;
345 *offset
= extract_signed_bitfield (insn
, 19, 5) << 2;
348 fprintf_unfiltered (gdb_stdlog
,
349 "decode: 0x%s 0x%x b<%u> 0x%s\n",
350 core_addr_to_string_nz (addr
), insn
, *cond
,
351 core_addr_to_string_nz (addr
+ *offset
));
357 /* Decode an opcode if it represents a branch via register instruction.
359 ADDR specifies the address of the opcode.
360 INSN specifies the opcode to test.
361 IS_BLR receives the 'op' bit from the decoded instruction.
362 RN receives the 'rn' field from the decoded instruction.
364 Return 1 if the opcodes matches and is decoded, otherwise 0. */
367 decode_br (CORE_ADDR addr
, uint32_t insn
, int *is_blr
, unsigned *rn
)
369 /* 8 4 0 6 2 8 4 0 */
370 /* blr 110101100011111100000000000rrrrr */
371 /* br 110101100001111100000000000rrrrr */
372 if (decode_masked_match (insn
, 0xffdffc1f, 0xd61f0000))
374 *is_blr
= (insn
>> 21) & 1;
375 *rn
= (insn
>> 5) & 0x1f;
378 fprintf_unfiltered (gdb_stdlog
,
379 "decode: 0x%s 0x%x %s 0x%x\n",
380 core_addr_to_string_nz (addr
), insn
,
381 *is_blr
? "blr" : "br", *rn
);
388 /* Decode an opcode if it represents a CBZ or CBNZ instruction.
390 ADDR specifies the address of the opcode.
391 INSN specifies the opcode to test.
392 IS64 receives the 'sf' field from the decoded instruction.
393 IS_CBNZ receives the 'op' field from the decoded instruction.
394 RN receives the 'rn' field from the decoded instruction.
395 OFFSET receives the 'imm19' field from the decoded instruction.
397 Return 1 if the opcodes matches and is decoded, otherwise 0. */
400 decode_cb (CORE_ADDR addr
, uint32_t insn
, int *is64
, int *is_cbnz
,
401 unsigned *rn
, int32_t *offset
)
403 if (decode_masked_match (insn
, 0x7e000000, 0x34000000))
405 /* cbz T011 010o iiii iiii iiii iiii iiir rrrr */
406 /* cbnz T011 010o iiii iiii iiii iiii iiir rrrr */
408 *rn
= (insn
>> 0) & 0x1f;
409 *is64
= (insn
>> 31) & 0x1;
410 *is_cbnz
= (insn
>> 24) & 0x1;
411 *offset
= extract_signed_bitfield (insn
, 19, 5) << 2;
414 fprintf_unfiltered (gdb_stdlog
,
415 "decode: 0x%s 0x%x %s 0x%s\n",
416 core_addr_to_string_nz (addr
), insn
,
417 *is_cbnz
? "cbnz" : "cbz",
418 core_addr_to_string_nz (addr
+ *offset
));
424 /* Decode an opcode if it represents a ERET instruction.
426 ADDR specifies the address of the opcode.
427 INSN specifies the opcode to test.
429 Return 1 if the opcodes matches and is decoded, otherwise 0. */
432 decode_eret (CORE_ADDR addr
, uint32_t insn
)
434 /* eret 1101 0110 1001 1111 0000 0011 1110 0000 */
435 if (insn
== 0xd69f03e0)
438 fprintf_unfiltered (gdb_stdlog
, "decode: 0x%s 0x%x eret\n",
439 core_addr_to_string_nz (addr
), insn
);
445 /* Decode an opcode if it represents a MOVZ instruction.
447 ADDR specifies the address of the opcode.
448 INSN specifies the opcode to test.
449 RD receives the 'rd' field from the decoded instruction.
451 Return 1 if the opcodes matches and is decoded, otherwise 0. */
454 decode_movz (CORE_ADDR addr
, uint32_t insn
, unsigned *rd
)
456 if (decode_masked_match (insn
, 0xff800000, 0x52800000))
458 *rd
= (insn
>> 0) & 0x1f;
461 fprintf_unfiltered (gdb_stdlog
,
462 "decode: 0x%s 0x%x movz x%u, #?\n",
463 core_addr_to_string_nz (addr
), insn
, *rd
);
469 /* Decode an opcode if it represents a ORR (shifted register)
472 ADDR specifies the address of the opcode.
473 INSN specifies the opcode to test.
474 RD receives the 'rd' field from the decoded instruction.
475 RN receives the 'rn' field from the decoded instruction.
476 RM receives the 'rm' field from the decoded instruction.
477 IMM receives the 'imm6' field from the decoded instruction.
479 Return 1 if the opcodes matches and is decoded, otherwise 0. */
482 decode_orr_shifted_register_x (CORE_ADDR addr
,
483 uint32_t insn
, unsigned *rd
, unsigned *rn
,
484 unsigned *rm
, int32_t *imm
)
486 if (decode_masked_match (insn
, 0xff200000, 0xaa000000))
488 *rd
= (insn
>> 0) & 0x1f;
489 *rn
= (insn
>> 5) & 0x1f;
490 *rm
= (insn
>> 16) & 0x1f;
491 *imm
= (insn
>> 10) & 0x3f;
494 fprintf_unfiltered (gdb_stdlog
,
495 "decode: 0x%s 0x%x orr x%u, x%u, x%u, #%u\n",
496 core_addr_to_string_nz (addr
), insn
, *rd
,
503 /* Decode an opcode if it represents a RET instruction.
505 ADDR specifies the address of the opcode.
506 INSN specifies the opcode to test.
507 RN receives the 'rn' field from the decoded instruction.
509 Return 1 if the opcodes matches and is decoded, otherwise 0. */
512 decode_ret (CORE_ADDR addr
, uint32_t insn
, unsigned *rn
)
514 if (decode_masked_match (insn
, 0xfffffc1f, 0xd65f0000))
516 *rn
= (insn
>> 5) & 0x1f;
518 fprintf_unfiltered (gdb_stdlog
,
519 "decode: 0x%s 0x%x ret x%u\n",
520 core_addr_to_string_nz (addr
), insn
, *rn
);
526 /* Decode an opcode if it represents the following instruction:
527 STP rt, rt2, [rn, #imm]
529 ADDR specifies the address of the opcode.
530 INSN specifies the opcode to test.
531 RT1 receives the 'rt' field from the decoded instruction.
532 RT2 receives the 'rt2' field from the decoded instruction.
533 RN receives the 'rn' field from the decoded instruction.
534 IMM receives the 'imm' field from the decoded instruction.
536 Return 1 if the opcodes matches and is decoded, otherwise 0. */
539 decode_stp_offset (CORE_ADDR addr
,
541 unsigned *rt1
, unsigned *rt2
, unsigned *rn
, int32_t *imm
)
543 if (decode_masked_match (insn
, 0xffc00000, 0xa9000000))
545 *rt1
= (insn
>> 0) & 0x1f;
546 *rn
= (insn
>> 5) & 0x1f;
547 *rt2
= (insn
>> 10) & 0x1f;
548 *imm
= extract_signed_bitfield (insn
, 7, 15);
552 fprintf_unfiltered (gdb_stdlog
,
553 "decode: 0x%s 0x%x stp x%u, x%u, [x%u + #%d]\n",
554 core_addr_to_string_nz (addr
), insn
,
555 *rt1
, *rt2
, *rn
, *imm
);
561 /* Decode an opcode if it represents the following instruction:
562 STP rt, rt2, [rn, #imm]!
564 ADDR specifies the address of the opcode.
565 INSN specifies the opcode to test.
566 RT1 receives the 'rt' field from the decoded instruction.
567 RT2 receives the 'rt2' field from the decoded instruction.
568 RN receives the 'rn' field from the decoded instruction.
569 IMM receives the 'imm' field from the decoded instruction.
571 Return 1 if the opcodes matches and is decoded, otherwise 0. */
574 decode_stp_offset_wb (CORE_ADDR addr
,
576 unsigned *rt1
, unsigned *rt2
, unsigned *rn
,
579 if (decode_masked_match (insn
, 0xffc00000, 0xa9800000))
581 *rt1
= (insn
>> 0) & 0x1f;
582 *rn
= (insn
>> 5) & 0x1f;
583 *rt2
= (insn
>> 10) & 0x1f;
584 *imm
= extract_signed_bitfield (insn
, 7, 15);
588 fprintf_unfiltered (gdb_stdlog
,
589 "decode: 0x%s 0x%x stp x%u, x%u, [x%u + #%d]!\n",
590 core_addr_to_string_nz (addr
), insn
,
591 *rt1
, *rt2
, *rn
, *imm
);
597 /* Decode an opcode if it represents the following instruction:
600 ADDR specifies the address of the opcode.
601 INSN specifies the opcode to test.
602 IS64 receives size field from the decoded instruction.
603 RT receives the 'rt' field from the decoded instruction.
604 RN receives the 'rn' field from the decoded instruction.
605 IMM receives the 'imm' field from the decoded instruction.
607 Return 1 if the opcodes matches and is decoded, otherwise 0. */
610 decode_stur (CORE_ADDR addr
, uint32_t insn
, int *is64
, unsigned *rt
,
611 unsigned *rn
, int32_t *imm
)
613 if (decode_masked_match (insn
, 0xbfe00c00, 0xb8000000))
615 *is64
= (insn
>> 30) & 1;
616 *rt
= (insn
>> 0) & 0x1f;
617 *rn
= (insn
>> 5) & 0x1f;
618 *imm
= extract_signed_bitfield (insn
, 9, 12);
621 fprintf_unfiltered (gdb_stdlog
,
622 "decode: 0x%s 0x%x stur %c%u, [x%u + #%d]\n",
623 core_addr_to_string_nz (addr
), insn
,
624 *is64
? 'x' : 'w', *rt
, *rn
, *imm
);
630 /* Decode an opcode if it represents a TB or TBNZ instruction.
632 ADDR specifies the address of the opcode.
633 INSN specifies the opcode to test.
634 IS_TBNZ receives the 'op' field from the decoded instruction.
635 BIT receives the bit position field from the decoded instruction.
636 RT receives 'rt' field from the decoded instruction.
637 IMM receives 'imm' field from the decoded instruction.
639 Return 1 if the opcodes matches and is decoded, otherwise 0. */
642 decode_tb (CORE_ADDR addr
, uint32_t insn
, int *is_tbnz
, unsigned *bit
,
643 unsigned *rt
, int32_t *imm
)
645 if (decode_masked_match (insn
, 0x7e000000, 0x36000000))
647 /* tbz b011 0110 bbbb biii iiii iiii iiir rrrr */
648 /* tbnz B011 0111 bbbb biii iiii iiii iiir rrrr */
650 *rt
= (insn
>> 0) & 0x1f;
651 *is_tbnz
= (insn
>> 24) & 0x1;
652 *bit
= ((insn
>> (31 - 4)) & 0x20) | ((insn
>> 19) & 0x1f);
653 *imm
= extract_signed_bitfield (insn
, 14, 5) << 2;
656 fprintf_unfiltered (gdb_stdlog
,
657 "decode: 0x%s 0x%x %s x%u, #%u, 0x%s\n",
658 core_addr_to_string_nz (addr
), insn
,
659 *is_tbnz
? "tbnz" : "tbz", *rt
, *bit
,
660 core_addr_to_string_nz (addr
+ *imm
));
666 /* Analyze a prologue, looking for a recognizable stack frame
667 and frame pointer. Scan until we encounter a store that could
668 clobber the stack frame unexpectedly, or an unknown instruction. */
671 aarch64_analyze_prologue (struct gdbarch
*gdbarch
,
672 CORE_ADDR start
, CORE_ADDR limit
,
673 struct aarch64_prologue_cache
*cache
)
675 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
677 pv_t regs
[AARCH64_X_REGISTER_COUNT
];
678 struct pv_area
*stack
;
679 struct cleanup
*back_to
;
681 for (i
= 0; i
< AARCH64_X_REGISTER_COUNT
; i
++)
682 regs
[i
] = pv_register (i
, 0);
683 stack
= make_pv_area (AARCH64_SP_REGNUM
, gdbarch_addr_bit (gdbarch
));
684 back_to
= make_cleanup_free_pv_area (stack
);
686 for (; start
< limit
; start
+= 4)
705 insn
= read_memory_unsigned_integer (start
, 4, byte_order_for_code
);
707 if (decode_add_sub_imm (start
, insn
, &rd
, &rn
, &imm
))
708 regs
[rd
] = pv_add_constant (regs
[rn
], imm
);
709 else if (decode_adrp (start
, insn
, &rd
))
710 regs
[rd
] = pv_unknown ();
711 else if (decode_b (start
, insn
, &is_link
, &offset
))
713 /* Stop analysis on branch. */
716 else if (decode_bcond (start
, insn
, &cond
, &offset
))
718 /* Stop analysis on branch. */
721 else if (decode_br (start
, insn
, &is_link
, &rn
))
723 /* Stop analysis on branch. */
726 else if (decode_cb (start
, insn
, &is64
, &is_cbnz
, &rn
, &offset
))
728 /* Stop analysis on branch. */
731 else if (decode_eret (start
, insn
))
733 /* Stop analysis on branch. */
736 else if (decode_movz (start
, insn
, &rd
))
737 regs
[rd
] = pv_unknown ();
739 if (decode_orr_shifted_register_x (start
, insn
, &rd
, &rn
, &rm
, &imm
))
741 if (imm
== 0 && rn
== 31)
748 "aarch64: prologue analysis gave up addr=0x%s "
749 "opcode=0x%x (orr x register)\n",
750 core_addr_to_string_nz (start
),
755 else if (decode_ret (start
, insn
, &rn
))
757 /* Stop analysis on branch. */
760 else if (decode_stur (start
, insn
, &is64
, &rt
, &rn
, &offset
))
762 pv_area_store (stack
, pv_add_constant (regs
[rn
], offset
),
763 is64
? 8 : 4, regs
[rt
]);
765 else if (decode_stp_offset (start
, insn
, &rt1
, &rt2
, &rn
, &imm
))
767 /* If recording this store would invalidate the store area
768 (perhaps because rn is not known) then we should abandon
769 further prologue analysis. */
770 if (pv_area_store_would_trash (stack
,
771 pv_add_constant (regs
[rn
], imm
)))
774 if (pv_area_store_would_trash (stack
,
775 pv_add_constant (regs
[rn
], imm
+ 8)))
778 pv_area_store (stack
, pv_add_constant (regs
[rn
], imm
), 8,
780 pv_area_store (stack
, pv_add_constant (regs
[rn
], imm
+ 8), 8,
783 else if (decode_stp_offset_wb (start
, insn
, &rt1
, &rt2
, &rn
, &imm
))
785 /* If recording this store would invalidate the store area
786 (perhaps because rn is not known) then we should abandon
787 further prologue analysis. */
788 if (pv_area_store_would_trash (stack
,
789 pv_add_constant (regs
[rn
], imm
)))
792 if (pv_area_store_would_trash (stack
,
793 pv_add_constant (regs
[rn
], imm
+ 8)))
796 pv_area_store (stack
, pv_add_constant (regs
[rn
], imm
), 8,
798 pv_area_store (stack
, pv_add_constant (regs
[rn
], imm
+ 8), 8,
800 regs
[rn
] = pv_add_constant (regs
[rn
], imm
);
802 else if (decode_tb (start
, insn
, &is_tbnz
, &bit
, &rn
, &offset
))
804 /* Stop analysis on branch. */
810 fprintf_unfiltered (gdb_stdlog
,
811 "aarch64: prologue analysis gave up addr=0x%s"
813 core_addr_to_string_nz (start
), insn
);
820 do_cleanups (back_to
);
824 if (pv_is_register (regs
[AARCH64_FP_REGNUM
], AARCH64_SP_REGNUM
))
826 /* Frame pointer is fp. Frame size is constant. */
827 cache
->framereg
= AARCH64_FP_REGNUM
;
828 cache
->framesize
= -regs
[AARCH64_FP_REGNUM
].k
;
830 else if (pv_is_register (regs
[AARCH64_SP_REGNUM
], AARCH64_SP_REGNUM
))
832 /* Try the stack pointer. */
833 cache
->framesize
= -regs
[AARCH64_SP_REGNUM
].k
;
834 cache
->framereg
= AARCH64_SP_REGNUM
;
838 /* We're just out of luck. We don't know where the frame is. */
839 cache
->framereg
= -1;
840 cache
->framesize
= 0;
843 for (i
= 0; i
< AARCH64_X_REGISTER_COUNT
; i
++)
847 if (pv_area_find_reg (stack
, gdbarch
, i
, &offset
))
848 cache
->saved_regs
[i
].addr
= offset
;
851 do_cleanups (back_to
);
855 /* Implement the "skip_prologue" gdbarch method. */
858 aarch64_skip_prologue (struct gdbarch
*gdbarch
, CORE_ADDR pc
)
862 CORE_ADDR func_addr
, limit_pc
;
863 struct symtab_and_line sal
;
865 /* See if we can determine the end of the prologue via the symbol
866 table. If so, then return either PC, or the PC after the
867 prologue, whichever is greater. */
868 if (find_pc_partial_function (pc
, NULL
, &func_addr
, NULL
))
870 CORE_ADDR post_prologue_pc
871 = skip_prologue_using_sal (gdbarch
, func_addr
);
873 if (post_prologue_pc
!= 0)
874 return max (pc
, post_prologue_pc
);
877 /* Can't determine prologue from the symbol table, need to examine
880 /* Find an upper limit on the function prologue using the debug
881 information. If the debug information could not be used to
882 provide that bound, then use an arbitrary large number as the
884 limit_pc
= skip_prologue_using_sal (gdbarch
, pc
);
886 limit_pc
= pc
+ 128; /* Magic. */
888 /* Try disassembling prologue. */
889 return aarch64_analyze_prologue (gdbarch
, pc
, limit_pc
, NULL
);
892 /* Scan the function prologue for THIS_FRAME and populate the prologue
896 aarch64_scan_prologue (struct frame_info
*this_frame
,
897 struct aarch64_prologue_cache
*cache
)
899 CORE_ADDR block_addr
= get_frame_address_in_block (this_frame
);
900 CORE_ADDR prologue_start
;
901 CORE_ADDR prologue_end
;
902 CORE_ADDR prev_pc
= get_frame_pc (this_frame
);
903 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
905 cache
->prev_pc
= prev_pc
;
907 /* Assume we do not find a frame. */
908 cache
->framereg
= -1;
909 cache
->framesize
= 0;
911 if (find_pc_partial_function (block_addr
, NULL
, &prologue_start
,
914 struct symtab_and_line sal
= find_pc_line (prologue_start
, 0);
918 /* No line info so use the current PC. */
919 prologue_end
= prev_pc
;
921 else if (sal
.end
< prologue_end
)
923 /* The next line begins after the function end. */
924 prologue_end
= sal
.end
;
927 prologue_end
= min (prologue_end
, prev_pc
);
928 aarch64_analyze_prologue (gdbarch
, prologue_start
, prologue_end
, cache
);
935 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
937 frame_loc
= get_frame_register_unsigned (this_frame
, AARCH64_FP_REGNUM
);
941 cache
->framereg
= AARCH64_FP_REGNUM
;
942 cache
->framesize
= 16;
943 cache
->saved_regs
[29].addr
= 0;
944 cache
->saved_regs
[30].addr
= 8;
948 /* Fill in *CACHE with information about the prologue of *THIS_FRAME. This
949 function may throw an exception if the inferior's registers or memory is
953 aarch64_make_prologue_cache_1 (struct frame_info
*this_frame
,
954 struct aarch64_prologue_cache
*cache
)
956 CORE_ADDR unwound_fp
;
959 aarch64_scan_prologue (this_frame
, cache
);
961 if (cache
->framereg
== -1)
964 unwound_fp
= get_frame_register_unsigned (this_frame
, cache
->framereg
);
968 cache
->prev_sp
= unwound_fp
+ cache
->framesize
;
970 /* Calculate actual addresses of saved registers using offsets
971 determined by aarch64_analyze_prologue. */
972 for (reg
= 0; reg
< gdbarch_num_regs (get_frame_arch (this_frame
)); reg
++)
973 if (trad_frame_addr_p (cache
->saved_regs
, reg
))
974 cache
->saved_regs
[reg
].addr
+= cache
->prev_sp
;
976 cache
->func
= get_frame_func (this_frame
);
978 cache
->available_p
= 1;
981 /* Allocate and fill in *THIS_CACHE with information about the prologue of
982 *THIS_FRAME. Do not do this is if *THIS_CACHE was already allocated.
983 Return a pointer to the current aarch64_prologue_cache in
986 static struct aarch64_prologue_cache
*
987 aarch64_make_prologue_cache (struct frame_info
*this_frame
, void **this_cache
)
989 struct aarch64_prologue_cache
*cache
;
991 if (*this_cache
!= NULL
)
994 cache
= FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache
);
995 cache
->saved_regs
= trad_frame_alloc_saved_regs (this_frame
);
1000 aarch64_make_prologue_cache_1 (this_frame
, cache
);
1002 CATCH (ex
, RETURN_MASK_ERROR
)
1004 if (ex
.error
!= NOT_AVAILABLE_ERROR
)
1005 throw_exception (ex
);
1012 /* Implement the "stop_reason" frame_unwind method. */
1014 static enum unwind_stop_reason
1015 aarch64_prologue_frame_unwind_stop_reason (struct frame_info
*this_frame
,
1018 struct aarch64_prologue_cache
*cache
1019 = aarch64_make_prologue_cache (this_frame
, this_cache
);
1021 if (!cache
->available_p
)
1022 return UNWIND_UNAVAILABLE
;
1024 /* Halt the backtrace at "_start". */
1025 if (cache
->prev_pc
<= gdbarch_tdep (get_frame_arch (this_frame
))->lowest_pc
)
1026 return UNWIND_OUTERMOST
;
1028 /* We've hit a wall, stop. */
1029 if (cache
->prev_sp
== 0)
1030 return UNWIND_OUTERMOST
;
1032 return UNWIND_NO_REASON
;
1035 /* Our frame ID for a normal frame is the current function's starting
1036 PC and the caller's SP when we were called. */
1039 aarch64_prologue_this_id (struct frame_info
*this_frame
,
1040 void **this_cache
, struct frame_id
*this_id
)
1042 struct aarch64_prologue_cache
*cache
1043 = aarch64_make_prologue_cache (this_frame
, this_cache
);
1045 if (!cache
->available_p
)
1046 *this_id
= frame_id_build_unavailable_stack (cache
->func
);
1048 *this_id
= frame_id_build (cache
->prev_sp
, cache
->func
);
1051 /* Implement the "prev_register" frame_unwind method. */
1053 static struct value
*
1054 aarch64_prologue_prev_register (struct frame_info
*this_frame
,
1055 void **this_cache
, int prev_regnum
)
1057 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
1058 struct aarch64_prologue_cache
*cache
1059 = aarch64_make_prologue_cache (this_frame
, this_cache
);
1061 /* If we are asked to unwind the PC, then we need to return the LR
1062 instead. The prologue may save PC, but it will point into this
1063 frame's prologue, not the next frame's resume location. */
1064 if (prev_regnum
== AARCH64_PC_REGNUM
)
1068 lr
= frame_unwind_register_unsigned (this_frame
, AARCH64_LR_REGNUM
);
1069 return frame_unwind_got_constant (this_frame
, prev_regnum
, lr
);
1072 /* SP is generally not saved to the stack, but this frame is
1073 identified by the next frame's stack pointer at the time of the
1074 call. The value was already reconstructed into PREV_SP. */
1080 | | | <- Previous SP
1083 +--| saved fp |<- FP
1087 if (prev_regnum
== AARCH64_SP_REGNUM
)
1088 return frame_unwind_got_constant (this_frame
, prev_regnum
,
1091 return trad_frame_get_prev_register (this_frame
, cache
->saved_regs
,
1095 /* AArch64 prologue unwinder. */
1096 struct frame_unwind aarch64_prologue_unwind
=
1099 aarch64_prologue_frame_unwind_stop_reason
,
1100 aarch64_prologue_this_id
,
1101 aarch64_prologue_prev_register
,
1103 default_frame_sniffer
1106 /* Allocate and fill in *THIS_CACHE with information about the prologue of
1107 *THIS_FRAME. Do not do this is if *THIS_CACHE was already allocated.
1108 Return a pointer to the current aarch64_prologue_cache in
1111 static struct aarch64_prologue_cache
*
1112 aarch64_make_stub_cache (struct frame_info
*this_frame
, void **this_cache
)
1114 struct aarch64_prologue_cache
*cache
;
1116 if (*this_cache
!= NULL
)
1119 cache
= FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache
);
1120 cache
->saved_regs
= trad_frame_alloc_saved_regs (this_frame
);
1121 *this_cache
= cache
;
1125 cache
->prev_sp
= get_frame_register_unsigned (this_frame
,
1127 cache
->prev_pc
= get_frame_pc (this_frame
);
1128 cache
->available_p
= 1;
1130 CATCH (ex
, RETURN_MASK_ERROR
)
1132 if (ex
.error
!= NOT_AVAILABLE_ERROR
)
1133 throw_exception (ex
);
1140 /* Implement the "stop_reason" frame_unwind method. */
1142 static enum unwind_stop_reason
1143 aarch64_stub_frame_unwind_stop_reason (struct frame_info
*this_frame
,
1146 struct aarch64_prologue_cache
*cache
1147 = aarch64_make_stub_cache (this_frame
, this_cache
);
1149 if (!cache
->available_p
)
1150 return UNWIND_UNAVAILABLE
;
1152 return UNWIND_NO_REASON
;
1155 /* Our frame ID for a stub frame is the current SP and LR. */
1158 aarch64_stub_this_id (struct frame_info
*this_frame
,
1159 void **this_cache
, struct frame_id
*this_id
)
1161 struct aarch64_prologue_cache
*cache
1162 = aarch64_make_stub_cache (this_frame
, this_cache
);
1164 if (cache
->available_p
)
1165 *this_id
= frame_id_build (cache
->prev_sp
, cache
->prev_pc
);
1167 *this_id
= frame_id_build_unavailable_stack (cache
->prev_pc
);
1170 /* Implement the "sniffer" frame_unwind method. */
1173 aarch64_stub_unwind_sniffer (const struct frame_unwind
*self
,
1174 struct frame_info
*this_frame
,
1175 void **this_prologue_cache
)
1177 CORE_ADDR addr_in_block
;
1180 addr_in_block
= get_frame_address_in_block (this_frame
);
1181 if (in_plt_section (addr_in_block
)
1182 /* We also use the stub winder if the target memory is unreadable
1183 to avoid having the prologue unwinder trying to read it. */
1184 || target_read_memory (get_frame_pc (this_frame
), dummy
, 4) != 0)
1190 /* AArch64 stub unwinder. */
1191 struct frame_unwind aarch64_stub_unwind
=
1194 aarch64_stub_frame_unwind_stop_reason
,
1195 aarch64_stub_this_id
,
1196 aarch64_prologue_prev_register
,
1198 aarch64_stub_unwind_sniffer
1201 /* Return the frame base address of *THIS_FRAME. */
1204 aarch64_normal_frame_base (struct frame_info
*this_frame
, void **this_cache
)
1206 struct aarch64_prologue_cache
*cache
1207 = aarch64_make_prologue_cache (this_frame
, this_cache
);
1209 return cache
->prev_sp
- cache
->framesize
;
1212 /* AArch64 default frame base information. */
1213 struct frame_base aarch64_normal_base
=
1215 &aarch64_prologue_unwind
,
1216 aarch64_normal_frame_base
,
1217 aarch64_normal_frame_base
,
1218 aarch64_normal_frame_base
1221 /* Assuming THIS_FRAME is a dummy, return the frame ID of that
1222 dummy frame. The frame ID's base needs to match the TOS value
1223 saved by save_dummy_frame_tos () and returned from
1224 aarch64_push_dummy_call, and the PC needs to match the dummy
1225 frame's breakpoint. */
1227 static struct frame_id
1228 aarch64_dummy_id (struct gdbarch
*gdbarch
, struct frame_info
*this_frame
)
1230 return frame_id_build (get_frame_register_unsigned (this_frame
,
1232 get_frame_pc (this_frame
));
1235 /* Implement the "unwind_pc" gdbarch method. */
1238 aarch64_unwind_pc (struct gdbarch
*gdbarch
, struct frame_info
*this_frame
)
1241 = frame_unwind_register_unsigned (this_frame
, AARCH64_PC_REGNUM
);
1246 /* Implement the "unwind_sp" gdbarch method. */
1249 aarch64_unwind_sp (struct gdbarch
*gdbarch
, struct frame_info
*this_frame
)
1251 return frame_unwind_register_unsigned (this_frame
, AARCH64_SP_REGNUM
);
1254 /* Return the value of the REGNUM register in the previous frame of
1257 static struct value
*
1258 aarch64_dwarf2_prev_register (struct frame_info
*this_frame
,
1259 void **this_cache
, int regnum
)
1261 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
1266 case AARCH64_PC_REGNUM
:
1267 lr
= frame_unwind_register_unsigned (this_frame
, AARCH64_LR_REGNUM
);
1268 return frame_unwind_got_constant (this_frame
, regnum
, lr
);
1271 internal_error (__FILE__
, __LINE__
,
1272 _("Unexpected register %d"), regnum
);
1276 /* Implement the "init_reg" dwarf2_frame_ops method. */
1279 aarch64_dwarf2_frame_init_reg (struct gdbarch
*gdbarch
, int regnum
,
1280 struct dwarf2_frame_state_reg
*reg
,
1281 struct frame_info
*this_frame
)
1285 case AARCH64_PC_REGNUM
:
1286 reg
->how
= DWARF2_FRAME_REG_FN
;
1287 reg
->loc
.fn
= aarch64_dwarf2_prev_register
;
1289 case AARCH64_SP_REGNUM
:
1290 reg
->how
= DWARF2_FRAME_REG_CFA
;
/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

typedef struct
{
  /* Value to pass on stack.  */
  const void *data;

  /* Size in bytes of value to pass on stack.  */
  int len;
} stack_item_t;

DEF_VEC_O (stack_item_t);
1309 /* Return the alignment (in bytes) of the given type. */
1312 aarch64_type_align (struct type
*t
)
1318 t
= check_typedef (t
);
1319 switch (TYPE_CODE (t
))
1322 /* Should never happen. */
1323 internal_error (__FILE__
, __LINE__
, _("unknown type alignment"));
1327 case TYPE_CODE_ENUM
:
1331 case TYPE_CODE_RANGE
:
1332 case TYPE_CODE_BITSTRING
:
1334 case TYPE_CODE_CHAR
:
1335 case TYPE_CODE_BOOL
:
1336 return TYPE_LENGTH (t
);
1338 case TYPE_CODE_ARRAY
:
1339 case TYPE_CODE_COMPLEX
:
1340 return aarch64_type_align (TYPE_TARGET_TYPE (t
));
1342 case TYPE_CODE_STRUCT
:
1343 case TYPE_CODE_UNION
:
1345 for (n
= 0; n
< TYPE_NFIELDS (t
); n
++)
1347 falign
= aarch64_type_align (TYPE_FIELD_TYPE (t
, n
));
1355 /* Return 1 if *TY is a homogeneous floating-point aggregate as
1356 defined in the AAPCS64 ABI document; otherwise return 0. */
1359 is_hfa (struct type
*ty
)
1361 switch (TYPE_CODE (ty
))
1363 case TYPE_CODE_ARRAY
:
1365 struct type
*target_ty
= TYPE_TARGET_TYPE (ty
);
1366 if (TYPE_CODE (target_ty
) == TYPE_CODE_FLT
&& TYPE_LENGTH (ty
) <= 4)
1371 case TYPE_CODE_UNION
:
1372 case TYPE_CODE_STRUCT
:
1374 if (TYPE_NFIELDS (ty
) > 0 && TYPE_NFIELDS (ty
) <= 4)
1376 struct type
*member0_type
;
1378 member0_type
= check_typedef (TYPE_FIELD_TYPE (ty
, 0));
1379 if (TYPE_CODE (member0_type
) == TYPE_CODE_FLT
)
1383 for (i
= 0; i
< TYPE_NFIELDS (ty
); i
++)
1385 struct type
*member1_type
;
1387 member1_type
= check_typedef (TYPE_FIELD_TYPE (ty
, i
));
1388 if (TYPE_CODE (member0_type
) != TYPE_CODE (member1_type
)
1389 || (TYPE_LENGTH (member0_type
)
1390 != TYPE_LENGTH (member1_type
)))
1406 /* AArch64 function call information structure. */
1407 struct aarch64_call_info
1409 /* the current argument number. */
1412 /* The next general purpose register number, equivalent to NGRN as
1413 described in the AArch64 Procedure Call Standard. */
1416 /* The next SIMD and floating point register number, equivalent to
1417 NSRN as described in the AArch64 Procedure Call Standard. */
1420 /* The next stacked argument address, equivalent to NSAA as
1421 described in the AArch64 Procedure Call Standard. */
1424 /* Stack item vector. */
1425 VEC(stack_item_t
) *si
;
1428 /* Pass a value in a sequence of consecutive X registers. The caller
1429 is responsbile for ensuring sufficient registers are available. */
1432 pass_in_x (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
1433 struct aarch64_call_info
*info
, struct type
*type
,
1434 const bfd_byte
*buf
)
1436 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1437 int len
= TYPE_LENGTH (type
);
1438 enum type_code typecode
= TYPE_CODE (type
);
1439 int regnum
= AARCH64_X0_REGNUM
+ info
->ngrn
;
1445 int partial_len
= len
< X_REGISTER_SIZE
? len
: X_REGISTER_SIZE
;
1446 CORE_ADDR regval
= extract_unsigned_integer (buf
, partial_len
,
1450 /* Adjust sub-word struct/union args when big-endian. */
1451 if (byte_order
== BFD_ENDIAN_BIG
1452 && partial_len
< X_REGISTER_SIZE
1453 && (typecode
== TYPE_CODE_STRUCT
|| typecode
== TYPE_CODE_UNION
))
1454 regval
<<= ((X_REGISTER_SIZE
- partial_len
) * TARGET_CHAR_BIT
);
1457 fprintf_unfiltered (gdb_stdlog
, "arg %d in %s = 0x%s\n",
1459 gdbarch_register_name (gdbarch
, regnum
),
1460 phex (regval
, X_REGISTER_SIZE
));
1461 regcache_cooked_write_unsigned (regcache
, regnum
, regval
);
1468 /* Attempt to marshall a value in a V register. Return 1 if
1469 successful, or 0 if insufficient registers are available. This
1470 function, unlike the equivalent pass_in_x() function does not
1471 handle arguments spread across multiple registers. */
1474 pass_in_v (struct gdbarch
*gdbarch
,
1475 struct regcache
*regcache
,
1476 struct aarch64_call_info
*info
,
1477 const bfd_byte
*buf
)
1481 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1482 int regnum
= AARCH64_V0_REGNUM
+ info
->nsrn
;
1487 regcache_cooked_write (regcache
, regnum
, buf
);
1489 fprintf_unfiltered (gdb_stdlog
, "arg %d in %s\n",
1491 gdbarch_register_name (gdbarch
, regnum
));
1498 /* Marshall an argument onto the stack. */
1501 pass_on_stack (struct aarch64_call_info
*info
, struct type
*type
,
1502 const bfd_byte
*buf
)
1504 int len
= TYPE_LENGTH (type
);
1510 align
= aarch64_type_align (type
);
1512 /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
1513 Natural alignment of the argument's type. */
1514 align
= align_up (align
, 8);
1516 /* The AArch64 PCS requires at most doubleword alignment. */
1521 fprintf_unfiltered (gdb_stdlog
, "arg %d len=%d @ sp + %d\n",
1522 info
->argnum
, len
, info
->nsaa
);
1526 VEC_safe_push (stack_item_t
, info
->si
, &item
);
1529 if (info
->nsaa
& (align
- 1))
1531 /* Push stack alignment padding. */
1532 int pad
= align
- (info
->nsaa
& (align
- 1));
1537 VEC_safe_push (stack_item_t
, info
->si
, &item
);
1542 /* Marshall an argument into a sequence of one or more consecutive X
1543 registers or, if insufficient X registers are available then onto
1547 pass_in_x_or_stack (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
1548 struct aarch64_call_info
*info
, struct type
*type
,
1549 const bfd_byte
*buf
)
1551 int len
= TYPE_LENGTH (type
);
1552 int nregs
= (len
+ X_REGISTER_SIZE
- 1) / X_REGISTER_SIZE
;
1554 /* PCS C.13 - Pass in registers if we have enough spare */
1555 if (info
->ngrn
+ nregs
<= 8)
1557 pass_in_x (gdbarch
, regcache
, info
, type
, buf
);
1558 info
->ngrn
+= nregs
;
1563 pass_on_stack (info
, type
, buf
);
1567 /* Pass a value in a V register, or on the stack if insufficient are
1571 pass_in_v_or_stack (struct gdbarch
*gdbarch
,
1572 struct regcache
*regcache
,
1573 struct aarch64_call_info
*info
,
1575 const bfd_byte
*buf
)
1577 if (!pass_in_v (gdbarch
, regcache
, info
, buf
))
1578 pass_on_stack (info
, type
, buf
);
1581 /* Implement the "push_dummy_call" gdbarch method. */
1584 aarch64_push_dummy_call (struct gdbarch
*gdbarch
, struct value
*function
,
1585 struct regcache
*regcache
, CORE_ADDR bp_addr
,
1587 struct value
**args
, CORE_ADDR sp
, int struct_return
,
1588 CORE_ADDR struct_addr
)
1594 struct aarch64_call_info info
;
1595 struct type
*func_type
;
1596 struct type
*return_type
;
1597 int lang_struct_return
;
1599 memset (&info
, 0, sizeof (info
));
1601 /* We need to know what the type of the called function is in order
1602 to determine the number of named/anonymous arguments for the
1603 actual argument placement, and the return type in order to handle
1604 return value correctly.
1606 The generic code above us views the decision of return in memory
1607 or return in registers as a two stage processes. The language
1608 handler is consulted first and may decide to return in memory (eg
1609 class with copy constructor returned by value), this will cause
1610 the generic code to allocate space AND insert an initial leading
1613 If the language code does not decide to pass in memory then the
1614 target code is consulted.
1616 If the language code decides to pass in memory we want to move
1617 the pointer inserted as the initial argument from the argument
1618 list and into X8, the conventional AArch64 struct return pointer
1621 This is slightly awkward, ideally the flag "lang_struct_return"
1622 would be passed to the targets implementation of push_dummy_call.
1623 Rather that change the target interface we call the language code
1624 directly ourselves. */
1626 func_type
= check_typedef (value_type (function
));
1628 /* Dereference function pointer types. */
1629 if (TYPE_CODE (func_type
) == TYPE_CODE_PTR
)
1630 func_type
= TYPE_TARGET_TYPE (func_type
);
1632 gdb_assert (TYPE_CODE (func_type
) == TYPE_CODE_FUNC
1633 || TYPE_CODE (func_type
) == TYPE_CODE_METHOD
);
1635 /* If language_pass_by_reference () returned true we will have been
1636 given an additional initial argument, a hidden pointer to the
1637 return slot in memory. */
1638 return_type
= TYPE_TARGET_TYPE (func_type
);
1639 lang_struct_return
= language_pass_by_reference (return_type
);
1641 /* Set the return address. For the AArch64, the return breakpoint
1642 is always at BP_ADDR. */
1643 regcache_cooked_write_unsigned (regcache
, AARCH64_LR_REGNUM
, bp_addr
);
1645 /* If we were given an initial argument for the return slot because
1646 lang_struct_return was true, lose it. */
1647 if (lang_struct_return
)
1653 /* The struct_return pointer occupies X8. */
1654 if (struct_return
|| lang_struct_return
)
1657 fprintf_unfiltered (gdb_stdlog
, "struct return in %s = 0x%s\n",
1658 gdbarch_register_name
1660 AARCH64_STRUCT_RETURN_REGNUM
),
1661 paddress (gdbarch
, struct_addr
));
1662 regcache_cooked_write_unsigned (regcache
, AARCH64_STRUCT_RETURN_REGNUM
,
1666 for (argnum
= 0; argnum
< nargs
; argnum
++)
1668 struct value
*arg
= args
[argnum
];
1669 struct type
*arg_type
;
1672 arg_type
= check_typedef (value_type (arg
));
1673 len
= TYPE_LENGTH (arg_type
);
1675 switch (TYPE_CODE (arg_type
))
1678 case TYPE_CODE_BOOL
:
1679 case TYPE_CODE_CHAR
:
1680 case TYPE_CODE_RANGE
:
1681 case TYPE_CODE_ENUM
:
1684 /* Promote to 32 bit integer. */
1685 if (TYPE_UNSIGNED (arg_type
))
1686 arg_type
= builtin_type (gdbarch
)->builtin_uint32
;
1688 arg_type
= builtin_type (gdbarch
)->builtin_int32
;
1689 arg
= value_cast (arg_type
, arg
);
1691 pass_in_x_or_stack (gdbarch
, regcache
, &info
, arg_type
,
1692 value_contents (arg
));
1695 case TYPE_CODE_COMPLEX
:
1698 const bfd_byte
*buf
= value_contents (arg
);
1699 struct type
*target_type
=
1700 check_typedef (TYPE_TARGET_TYPE (arg_type
));
1702 pass_in_v (gdbarch
, regcache
, &info
, buf
);
1703 pass_in_v (gdbarch
, regcache
, &info
,
1704 buf
+ TYPE_LENGTH (target_type
));
1709 pass_on_stack (&info
, arg_type
, value_contents (arg
));
1713 pass_in_v_or_stack (gdbarch
, regcache
, &info
, arg_type
,
1714 value_contents (arg
));
1717 case TYPE_CODE_STRUCT
:
1718 case TYPE_CODE_ARRAY
:
1719 case TYPE_CODE_UNION
:
1720 if (is_hfa (arg_type
))
1722 int elements
= TYPE_NFIELDS (arg_type
);
1724 /* Homogeneous Aggregates */
1725 if (info
.nsrn
+ elements
< 8)
1729 for (i
= 0; i
< elements
; i
++)
1731 /* We know that we have sufficient registers
1732 available therefore this will never fallback
1734 struct value
*field
=
1735 value_primitive_field (arg
, 0, i
, arg_type
);
1736 struct type
*field_type
=
1737 check_typedef (value_type (field
));
1739 pass_in_v_or_stack (gdbarch
, regcache
, &info
, field_type
,
1740 value_contents_writeable (field
));
1746 pass_on_stack (&info
, arg_type
, value_contents (arg
));
1751 /* PCS B.7 Aggregates larger than 16 bytes are passed by
1752 invisible reference. */
1754 /* Allocate aligned storage. */
1755 sp
= align_down (sp
- len
, 16);
1757 /* Write the real data into the stack. */
1758 write_memory (sp
, value_contents (arg
), len
);
1760 /* Construct the indirection. */
1761 arg_type
= lookup_pointer_type (arg_type
);
1762 arg
= value_from_pointer (arg_type
, sp
);
1763 pass_in_x_or_stack (gdbarch
, regcache
, &info
, arg_type
,
1764 value_contents (arg
));
1767 /* PCS C.15 / C.18 multiple values pass. */
1768 pass_in_x_or_stack (gdbarch
, regcache
, &info
, arg_type
,
1769 value_contents (arg
));
1773 pass_in_x_or_stack (gdbarch
, regcache
, &info
, arg_type
,
1774 value_contents (arg
));
1779 /* Make sure stack retains 16 byte alignment. */
1781 sp
-= 16 - (info
.nsaa
& 15);
1783 while (!VEC_empty (stack_item_t
, info
.si
))
1785 stack_item_t
*si
= VEC_last (stack_item_t
, info
.si
);
1788 write_memory (sp
, si
->data
, si
->len
);
1789 VEC_pop (stack_item_t
, info
.si
);
1792 VEC_free (stack_item_t
, info
.si
);
1794 /* Finally, update the SP register. */
1795 regcache_cooked_write_unsigned (regcache
, AARCH64_SP_REGNUM
, sp
);
1800 /* Implement the "frame_align" gdbarch method. */
1803 aarch64_frame_align (struct gdbarch
*gdbarch
, CORE_ADDR sp
)
1805 /* Align the stack to sixteen bytes. */
1806 return sp
& ~(CORE_ADDR
) 15;
1809 /* Return the type for an AdvSISD Q register. */
1811 static struct type
*
1812 aarch64_vnq_type (struct gdbarch
*gdbarch
)
1814 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1816 if (tdep
->vnq_type
== NULL
)
1821 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnq",
1824 elem
= builtin_type (gdbarch
)->builtin_uint128
;
1825 append_composite_type_field (t
, "u", elem
);
1827 elem
= builtin_type (gdbarch
)->builtin_int128
;
1828 append_composite_type_field (t
, "s", elem
);
1833 return tdep
->vnq_type
;
1836 /* Return the type for an AdvSISD D register. */
1838 static struct type
*
1839 aarch64_vnd_type (struct gdbarch
*gdbarch
)
1841 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1843 if (tdep
->vnd_type
== NULL
)
1848 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnd",
1851 elem
= builtin_type (gdbarch
)->builtin_double
;
1852 append_composite_type_field (t
, "f", elem
);
1854 elem
= builtin_type (gdbarch
)->builtin_uint64
;
1855 append_composite_type_field (t
, "u", elem
);
1857 elem
= builtin_type (gdbarch
)->builtin_int64
;
1858 append_composite_type_field (t
, "s", elem
);
1863 return tdep
->vnd_type
;
1866 /* Return the type for an AdvSISD S register. */
1868 static struct type
*
1869 aarch64_vns_type (struct gdbarch
*gdbarch
)
1871 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1873 if (tdep
->vns_type
== NULL
)
1878 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vns",
1881 elem
= builtin_type (gdbarch
)->builtin_float
;
1882 append_composite_type_field (t
, "f", elem
);
1884 elem
= builtin_type (gdbarch
)->builtin_uint32
;
1885 append_composite_type_field (t
, "u", elem
);
1887 elem
= builtin_type (gdbarch
)->builtin_int32
;
1888 append_composite_type_field (t
, "s", elem
);
1893 return tdep
->vns_type
;
1896 /* Return the type for an AdvSISD H register. */
1898 static struct type
*
1899 aarch64_vnh_type (struct gdbarch
*gdbarch
)
1901 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1903 if (tdep
->vnh_type
== NULL
)
1908 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnh",
1911 elem
= builtin_type (gdbarch
)->builtin_uint16
;
1912 append_composite_type_field (t
, "u", elem
);
1914 elem
= builtin_type (gdbarch
)->builtin_int16
;
1915 append_composite_type_field (t
, "s", elem
);
1920 return tdep
->vnh_type
;
1923 /* Return the type for an AdvSISD B register. */
1925 static struct type
*
1926 aarch64_vnb_type (struct gdbarch
*gdbarch
)
1928 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1930 if (tdep
->vnb_type
== NULL
)
1935 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnb",
1938 elem
= builtin_type (gdbarch
)->builtin_uint8
;
1939 append_composite_type_field (t
, "u", elem
);
1941 elem
= builtin_type (gdbarch
)->builtin_int8
;
1942 append_composite_type_field (t
, "s", elem
);
1947 return tdep
->vnb_type
;
1950 /* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
1953 aarch64_dwarf_reg_to_regnum (struct gdbarch
*gdbarch
, int reg
)
1955 if (reg
>= AARCH64_DWARF_X0
&& reg
<= AARCH64_DWARF_X0
+ 30)
1956 return AARCH64_X0_REGNUM
+ reg
- AARCH64_DWARF_X0
;
1958 if (reg
== AARCH64_DWARF_SP
)
1959 return AARCH64_SP_REGNUM
;
1961 if (reg
>= AARCH64_DWARF_V0
&& reg
<= AARCH64_DWARF_V0
+ 31)
1962 return AARCH64_V0_REGNUM
+ reg
- AARCH64_DWARF_V0
;
1968 /* Implement the "print_insn" gdbarch method. */
1971 aarch64_gdb_print_insn (bfd_vma memaddr
, disassemble_info
*info
)
1973 info
->symbols
= NULL
;
1974 return print_insn_aarch64 (memaddr
, info
);
1977 /* AArch64 BRK software debug mode instruction.
1978 Note that AArch64 code is always little-endian.
1979 1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000. */
1980 static const gdb_byte aarch64_default_breakpoint
[] = {0x00, 0x00, 0x20, 0xd4};
1982 /* Implement the "breakpoint_from_pc" gdbarch method. */
1984 static const gdb_byte
*
1985 aarch64_breakpoint_from_pc (struct gdbarch
*gdbarch
, CORE_ADDR
*pcptr
,
1988 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1990 *lenptr
= sizeof (aarch64_default_breakpoint
);
1991 return aarch64_default_breakpoint
;
1994 /* Extract from an array REGS containing the (raw) register state a
1995 function return value of type TYPE, and copy that, in virtual
1996 format, into VALBUF. */
1999 aarch64_extract_return_value (struct type
*type
, struct regcache
*regs
,
2002 struct gdbarch
*gdbarch
= get_regcache_arch (regs
);
2003 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
2005 if (TYPE_CODE (type
) == TYPE_CODE_FLT
)
2007 bfd_byte buf
[V_REGISTER_SIZE
];
2008 int len
= TYPE_LENGTH (type
);
2010 regcache_cooked_read (regs
, AARCH64_V0_REGNUM
, buf
);
2011 memcpy (valbuf
, buf
, len
);
2013 else if (TYPE_CODE (type
) == TYPE_CODE_INT
2014 || TYPE_CODE (type
) == TYPE_CODE_CHAR
2015 || TYPE_CODE (type
) == TYPE_CODE_BOOL
2016 || TYPE_CODE (type
) == TYPE_CODE_PTR
2017 || TYPE_CODE (type
) == TYPE_CODE_REF
2018 || TYPE_CODE (type
) == TYPE_CODE_ENUM
)
2020 /* If the the type is a plain integer, then the access is
2021 straight-forward. Otherwise we have to play around a bit
2023 int len
= TYPE_LENGTH (type
);
2024 int regno
= AARCH64_X0_REGNUM
;
2029 /* By using store_unsigned_integer we avoid having to do
2030 anything special for small big-endian values. */
2031 regcache_cooked_read_unsigned (regs
, regno
++, &tmp
);
2032 store_unsigned_integer (valbuf
,
2033 (len
> X_REGISTER_SIZE
2034 ? X_REGISTER_SIZE
: len
), byte_order
, tmp
);
2035 len
-= X_REGISTER_SIZE
;
2036 valbuf
+= X_REGISTER_SIZE
;
2039 else if (TYPE_CODE (type
) == TYPE_CODE_COMPLEX
)
2041 int regno
= AARCH64_V0_REGNUM
;
2042 bfd_byte buf
[V_REGISTER_SIZE
];
2043 struct type
*target_type
= check_typedef (TYPE_TARGET_TYPE (type
));
2044 int len
= TYPE_LENGTH (target_type
);
2046 regcache_cooked_read (regs
, regno
, buf
);
2047 memcpy (valbuf
, buf
, len
);
2049 regcache_cooked_read (regs
, regno
+ 1, buf
);
2050 memcpy (valbuf
, buf
, len
);
2053 else if (is_hfa (type
))
2055 int elements
= TYPE_NFIELDS (type
);
2056 struct type
*member_type
= check_typedef (TYPE_FIELD_TYPE (type
, 0));
2057 int len
= TYPE_LENGTH (member_type
);
2060 for (i
= 0; i
< elements
; i
++)
2062 int regno
= AARCH64_V0_REGNUM
+ i
;
2063 bfd_byte buf
[X_REGISTER_SIZE
];
2066 fprintf_unfiltered (gdb_stdlog
,
2067 "read HFA return value element %d from %s\n",
2069 gdbarch_register_name (gdbarch
, regno
));
2070 regcache_cooked_read (regs
, regno
, buf
);
2072 memcpy (valbuf
, buf
, len
);
2078 /* For a structure or union the behaviour is as if the value had
2079 been stored to word-aligned memory and then loaded into
2080 registers with 64-bit load instruction(s). */
2081 int len
= TYPE_LENGTH (type
);
2082 int regno
= AARCH64_X0_REGNUM
;
2083 bfd_byte buf
[X_REGISTER_SIZE
];
2087 regcache_cooked_read (regs
, regno
++, buf
);
2088 memcpy (valbuf
, buf
, len
> X_REGISTER_SIZE
? X_REGISTER_SIZE
: len
);
2089 len
-= X_REGISTER_SIZE
;
2090 valbuf
+= X_REGISTER_SIZE
;
/* Will a function return an aggregate type in memory or in a
   register?  Return 0 if an aggregate type can be returned in a
   register, 1 if it must be returned in memory.  */

static int
aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
{
  type = check_typedef (type);

  /* In the AArch64 ABI, "integer" like aggregate types are returned
     in registers.  For an aggregate type to be integer like, its size
     must be less than or equal to 4 * X_REGISTER_SIZE.  */

  if (is_hfa (type))
    {
      /* PCS B.5 If the argument is a Named HFA, then the argument is
	 used unmodified.  */
      return 0;
    }

  if (TYPE_LENGTH (type) > 16)
    {
      /* PCS B.6 Aggregates larger than 16 bytes are passed by
	 invisible reference.  */
      return 1;
    }

  /* 16 bytes or less: returned in registers.  (Unused locals from
     the previous implementation have been removed.)  */
  return 0;
}
2130 /* Write into appropriate registers a function return value of type
2131 TYPE, given in virtual format. */
2134 aarch64_store_return_value (struct type
*type
, struct regcache
*regs
,
2135 const gdb_byte
*valbuf
)
2137 struct gdbarch
*gdbarch
= get_regcache_arch (regs
);
2138 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
2140 if (TYPE_CODE (type
) == TYPE_CODE_FLT
)
2142 bfd_byte buf
[V_REGISTER_SIZE
];
2143 int len
= TYPE_LENGTH (type
);
2145 memcpy (buf
, valbuf
, len
> V_REGISTER_SIZE
? V_REGISTER_SIZE
: len
);
2146 regcache_cooked_write (regs
, AARCH64_V0_REGNUM
, buf
);
2148 else if (TYPE_CODE (type
) == TYPE_CODE_INT
2149 || TYPE_CODE (type
) == TYPE_CODE_CHAR
2150 || TYPE_CODE (type
) == TYPE_CODE_BOOL
2151 || TYPE_CODE (type
) == TYPE_CODE_PTR
2152 || TYPE_CODE (type
) == TYPE_CODE_REF
2153 || TYPE_CODE (type
) == TYPE_CODE_ENUM
)
2155 if (TYPE_LENGTH (type
) <= X_REGISTER_SIZE
)
2157 /* Values of one word or less are zero/sign-extended and
2159 bfd_byte tmpbuf
[X_REGISTER_SIZE
];
2160 LONGEST val
= unpack_long (type
, valbuf
);
2162 store_signed_integer (tmpbuf
, X_REGISTER_SIZE
, byte_order
, val
);
2163 regcache_cooked_write (regs
, AARCH64_X0_REGNUM
, tmpbuf
);
2167 /* Integral values greater than one word are stored in
2168 consecutive registers starting with r0. This will always
2169 be a multiple of the regiser size. */
2170 int len
= TYPE_LENGTH (type
);
2171 int regno
= AARCH64_X0_REGNUM
;
2175 regcache_cooked_write (regs
, regno
++, valbuf
);
2176 len
-= X_REGISTER_SIZE
;
2177 valbuf
+= X_REGISTER_SIZE
;
2181 else if (is_hfa (type
))
2183 int elements
= TYPE_NFIELDS (type
);
2184 struct type
*member_type
= check_typedef (TYPE_FIELD_TYPE (type
, 0));
2185 int len
= TYPE_LENGTH (member_type
);
2188 for (i
= 0; i
< elements
; i
++)
2190 int regno
= AARCH64_V0_REGNUM
+ i
;
2191 bfd_byte tmpbuf
[MAX_REGISTER_SIZE
];
2194 fprintf_unfiltered (gdb_stdlog
,
2195 "write HFA return value element %d to %s\n",
2197 gdbarch_register_name (gdbarch
, regno
));
2199 memcpy (tmpbuf
, valbuf
, len
);
2200 regcache_cooked_write (regs
, regno
, tmpbuf
);
2206 /* For a structure or union the behaviour is as if the value had
2207 been stored to word-aligned memory and then loaded into
2208 registers with 64-bit load instruction(s). */
2209 int len
= TYPE_LENGTH (type
);
2210 int regno
= AARCH64_X0_REGNUM
;
2211 bfd_byte tmpbuf
[X_REGISTER_SIZE
];
2215 memcpy (tmpbuf
, valbuf
,
2216 len
> X_REGISTER_SIZE
? X_REGISTER_SIZE
: len
);
2217 regcache_cooked_write (regs
, regno
++, tmpbuf
);
2218 len
-= X_REGISTER_SIZE
;
2219 valbuf
+= X_REGISTER_SIZE
;
2224 /* Implement the "return_value" gdbarch method. */
2226 static enum return_value_convention
2227 aarch64_return_value (struct gdbarch
*gdbarch
, struct value
*func_value
,
2228 struct type
*valtype
, struct regcache
*regcache
,
2229 gdb_byte
*readbuf
, const gdb_byte
*writebuf
)
2231 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2233 if (TYPE_CODE (valtype
) == TYPE_CODE_STRUCT
2234 || TYPE_CODE (valtype
) == TYPE_CODE_UNION
2235 || TYPE_CODE (valtype
) == TYPE_CODE_ARRAY
)
2237 if (aarch64_return_in_memory (gdbarch
, valtype
))
2240 fprintf_unfiltered (gdb_stdlog
, "return value in memory\n");
2241 return RETURN_VALUE_STRUCT_CONVENTION
;
2246 aarch64_store_return_value (valtype
, regcache
, writebuf
);
2249 aarch64_extract_return_value (valtype
, regcache
, readbuf
);
2252 fprintf_unfiltered (gdb_stdlog
, "return value in registers\n");
2254 return RETURN_VALUE_REGISTER_CONVENTION
;
2257 /* Implement the "get_longjmp_target" gdbarch method. */
2260 aarch64_get_longjmp_target (struct frame_info
*frame
, CORE_ADDR
*pc
)
2263 gdb_byte buf
[X_REGISTER_SIZE
];
2264 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
2265 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2266 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
2268 jb_addr
= get_frame_register_unsigned (frame
, AARCH64_X0_REGNUM
);
2270 if (target_read_memory (jb_addr
+ tdep
->jb_pc
* tdep
->jb_elt_size
, buf
,
2274 *pc
= extract_unsigned_integer (buf
, X_REGISTER_SIZE
, byte_order
);
2278 /* Implement the "gen_return_address" gdbarch method. */
2281 aarch64_gen_return_address (struct gdbarch
*gdbarch
,
2282 struct agent_expr
*ax
, struct axs_value
*value
,
2285 value
->type
= register_type (gdbarch
, AARCH64_LR_REGNUM
);
2286 value
->kind
= axs_lvalue_register
;
2287 value
->u
.reg
= AARCH64_LR_REGNUM
;
2291 /* Return the pseudo register name corresponding to register regnum. */
2294 aarch64_pseudo_register_name (struct gdbarch
*gdbarch
, int regnum
)
2296 static const char *const q_name
[] =
2298 "q0", "q1", "q2", "q3",
2299 "q4", "q5", "q6", "q7",
2300 "q8", "q9", "q10", "q11",
2301 "q12", "q13", "q14", "q15",
2302 "q16", "q17", "q18", "q19",
2303 "q20", "q21", "q22", "q23",
2304 "q24", "q25", "q26", "q27",
2305 "q28", "q29", "q30", "q31",
2308 static const char *const d_name
[] =
2310 "d0", "d1", "d2", "d3",
2311 "d4", "d5", "d6", "d7",
2312 "d8", "d9", "d10", "d11",
2313 "d12", "d13", "d14", "d15",
2314 "d16", "d17", "d18", "d19",
2315 "d20", "d21", "d22", "d23",
2316 "d24", "d25", "d26", "d27",
2317 "d28", "d29", "d30", "d31",
2320 static const char *const s_name
[] =
2322 "s0", "s1", "s2", "s3",
2323 "s4", "s5", "s6", "s7",
2324 "s8", "s9", "s10", "s11",
2325 "s12", "s13", "s14", "s15",
2326 "s16", "s17", "s18", "s19",
2327 "s20", "s21", "s22", "s23",
2328 "s24", "s25", "s26", "s27",
2329 "s28", "s29", "s30", "s31",
2332 static const char *const h_name
[] =
2334 "h0", "h1", "h2", "h3",
2335 "h4", "h5", "h6", "h7",
2336 "h8", "h9", "h10", "h11",
2337 "h12", "h13", "h14", "h15",
2338 "h16", "h17", "h18", "h19",
2339 "h20", "h21", "h22", "h23",
2340 "h24", "h25", "h26", "h27",
2341 "h28", "h29", "h30", "h31",
2344 static const char *const b_name
[] =
2346 "b0", "b1", "b2", "b3",
2347 "b4", "b5", "b6", "b7",
2348 "b8", "b9", "b10", "b11",
2349 "b12", "b13", "b14", "b15",
2350 "b16", "b17", "b18", "b19",
2351 "b20", "b21", "b22", "b23",
2352 "b24", "b25", "b26", "b27",
2353 "b28", "b29", "b30", "b31",
2356 regnum
-= gdbarch_num_regs (gdbarch
);
2358 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
2359 return q_name
[regnum
- AARCH64_Q0_REGNUM
];
2361 if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
2362 return d_name
[regnum
- AARCH64_D0_REGNUM
];
2364 if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
2365 return s_name
[regnum
- AARCH64_S0_REGNUM
];
2367 if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
2368 return h_name
[regnum
- AARCH64_H0_REGNUM
];
2370 if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
2371 return b_name
[regnum
- AARCH64_B0_REGNUM
];
2373 internal_error (__FILE__
, __LINE__
,
2374 _("aarch64_pseudo_register_name: bad register number %d"),
2378 /* Implement the "pseudo_register_type" tdesc_arch_data method. */
2380 static struct type
*
2381 aarch64_pseudo_register_type (struct gdbarch
*gdbarch
, int regnum
)
2383 regnum
-= gdbarch_num_regs (gdbarch
);
2385 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
2386 return aarch64_vnq_type (gdbarch
);
2388 if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
2389 return aarch64_vnd_type (gdbarch
);
2391 if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
2392 return aarch64_vns_type (gdbarch
);
2394 if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
2395 return aarch64_vnh_type (gdbarch
);
2397 if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
2398 return aarch64_vnb_type (gdbarch
);
2400 internal_error (__FILE__
, __LINE__
,
2401 _("aarch64_pseudo_register_type: bad register number %d"),
2405 /* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2408 aarch64_pseudo_register_reggroup_p (struct gdbarch
*gdbarch
, int regnum
,
2409 struct reggroup
*group
)
2411 regnum
-= gdbarch_num_regs (gdbarch
);
2413 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
2414 return group
== all_reggroup
|| group
== vector_reggroup
;
2415 else if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
2416 return (group
== all_reggroup
|| group
== vector_reggroup
2417 || group
== float_reggroup
);
2418 else if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
2419 return (group
== all_reggroup
|| group
== vector_reggroup
2420 || group
== float_reggroup
);
2421 else if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
2422 return group
== all_reggroup
|| group
== vector_reggroup
;
2423 else if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
2424 return group
== all_reggroup
|| group
== vector_reggroup
;
2426 return group
== all_reggroup
;
2429 /* Implement the "pseudo_register_read_value" gdbarch method. */
2431 static struct value
*
2432 aarch64_pseudo_read_value (struct gdbarch
*gdbarch
,
2433 struct regcache
*regcache
,
2436 gdb_byte reg_buf
[MAX_REGISTER_SIZE
];
2437 struct value
*result_value
;
2440 result_value
= allocate_value (register_type (gdbarch
, regnum
));
2441 VALUE_LVAL (result_value
) = lval_register
;
2442 VALUE_REGNUM (result_value
) = regnum
;
2443 buf
= value_contents_raw (result_value
);
2445 regnum
-= gdbarch_num_regs (gdbarch
);
2447 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
2449 enum register_status status
;
2452 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_Q0_REGNUM
;
2453 status
= regcache_raw_read (regcache
, v_regnum
, reg_buf
);
2454 if (status
!= REG_VALID
)
2455 mark_value_bytes_unavailable (result_value
, 0,
2456 TYPE_LENGTH (value_type (result_value
)));
2458 memcpy (buf
, reg_buf
, Q_REGISTER_SIZE
);
2459 return result_value
;
2462 if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
2464 enum register_status status
;
2467 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_D0_REGNUM
;
2468 status
= regcache_raw_read (regcache
, v_regnum
, reg_buf
);
2469 if (status
!= REG_VALID
)
2470 mark_value_bytes_unavailable (result_value
, 0,
2471 TYPE_LENGTH (value_type (result_value
)));
2473 memcpy (buf
, reg_buf
, D_REGISTER_SIZE
);
2474 return result_value
;
2477 if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
2479 enum register_status status
;
2482 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_S0_REGNUM
;
2483 status
= regcache_raw_read (regcache
, v_regnum
, reg_buf
);
2484 if (status
!= REG_VALID
)
2485 mark_value_bytes_unavailable (result_value
, 0,
2486 TYPE_LENGTH (value_type (result_value
)));
2488 memcpy (buf
, reg_buf
, S_REGISTER_SIZE
);
2489 return result_value
;
2492 if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
2494 enum register_status status
;
2497 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_H0_REGNUM
;
2498 status
= regcache_raw_read (regcache
, v_regnum
, reg_buf
);
2499 if (status
!= REG_VALID
)
2500 mark_value_bytes_unavailable (result_value
, 0,
2501 TYPE_LENGTH (value_type (result_value
)));
2503 memcpy (buf
, reg_buf
, H_REGISTER_SIZE
);
2504 return result_value
;
2507 if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
2509 enum register_status status
;
2512 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_B0_REGNUM
;
2513 status
= regcache_raw_read (regcache
, v_regnum
, reg_buf
);
2514 if (status
!= REG_VALID
)
2515 mark_value_bytes_unavailable (result_value
, 0,
2516 TYPE_LENGTH (value_type (result_value
)));
2518 memcpy (buf
, reg_buf
, B_REGISTER_SIZE
);
2519 return result_value
;
2522 gdb_assert_not_reached ("regnum out of bound");
2525 /* Implement the "pseudo_register_write" gdbarch method. */
2528 aarch64_pseudo_write (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
2529 int regnum
, const gdb_byte
*buf
)
2531 gdb_byte reg_buf
[MAX_REGISTER_SIZE
];
2533 /* Ensure the register buffer is zero, we want gdb writes of the
2534 various 'scalar' pseudo registers to behavior like architectural
2535 writes, register width bytes are written the remainder are set to
2537 memset (reg_buf
, 0, sizeof (reg_buf
));
2539 regnum
-= gdbarch_num_regs (gdbarch
);
2541 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
2543 /* pseudo Q registers */
2546 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_Q0_REGNUM
;
2547 memcpy (reg_buf
, buf
, Q_REGISTER_SIZE
);
2548 regcache_raw_write (regcache
, v_regnum
, reg_buf
);
2552 if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
2554 /* pseudo D registers */
2557 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_D0_REGNUM
;
2558 memcpy (reg_buf
, buf
, D_REGISTER_SIZE
);
2559 regcache_raw_write (regcache
, v_regnum
, reg_buf
);
2563 if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
2567 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_S0_REGNUM
;
2568 memcpy (reg_buf
, buf
, S_REGISTER_SIZE
);
2569 regcache_raw_write (regcache
, v_regnum
, reg_buf
);
2573 if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
2575 /* pseudo H registers */
2578 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_H0_REGNUM
;
2579 memcpy (reg_buf
, buf
, H_REGISTER_SIZE
);
2580 regcache_raw_write (regcache
, v_regnum
, reg_buf
);
2584 if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
2586 /* pseudo B registers */
2589 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_B0_REGNUM
;
2590 memcpy (reg_buf
, buf
, B_REGISTER_SIZE
);
2591 regcache_raw_write (regcache
, v_regnum
, reg_buf
);
2595 gdb_assert_not_reached ("regnum out of bound");
/* Callback function for user_reg_add.

   BATON points at the register number stashed in
   aarch64_register_aliases[]; return the value of that register in
   FRAME.  */

static struct value *
value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
{
  const int *reg_p = baton;

  return value_of_register (*reg_p, frame);
}
2609 /* Implement the "software_single_step" gdbarch method, needed to
2610 single step through atomic sequences on AArch64. */
2613 aarch64_software_single_step (struct frame_info
*frame
)
2615 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
2616 struct address_space
*aspace
= get_frame_address_space (frame
);
2617 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
2618 const int insn_size
= 4;
2619 const int atomic_sequence_length
= 16; /* Instruction sequence length. */
2620 CORE_ADDR pc
= get_frame_pc (frame
);
2621 CORE_ADDR breaks
[2] = { -1, -1 };
2623 CORE_ADDR closing_insn
= 0;
2624 uint32_t insn
= read_memory_unsigned_integer (loc
, insn_size
,
2625 byte_order_for_code
);
2628 int bc_insn_count
= 0; /* Conditional branch instruction count. */
2629 int last_breakpoint
= 0; /* Defaults to 0 (no breakpoints placed). */
2631 /* Look for a Load Exclusive instruction which begins the sequence. */
2632 if (!decode_masked_match (insn
, 0x3fc00000, 0x08400000))
2635 for (insn_count
= 0; insn_count
< atomic_sequence_length
; ++insn_count
)
2641 insn
= read_memory_unsigned_integer (loc
, insn_size
,
2642 byte_order_for_code
);
2644 /* Check if the instruction is a conditional branch. */
2645 if (decode_bcond (loc
, insn
, &cond
, &offset
))
2647 if (bc_insn_count
>= 1)
2650 /* It is, so we'll try to set a breakpoint at the destination. */
2651 breaks
[1] = loc
+ offset
;
2657 /* Look for the Store Exclusive which closes the atomic sequence. */
2658 if (decode_masked_match (insn
, 0x3fc00000, 0x08000000))
2665 /* We didn't find a closing Store Exclusive instruction, fall back. */
2669 /* Insert breakpoint after the end of the atomic sequence. */
2670 breaks
[0] = loc
+ insn_size
;
2672 /* Check for duplicated breakpoints, and also check that the second
2673 breakpoint is not within the atomic sequence. */
2675 && (breaks
[1] == breaks
[0]
2676 || (breaks
[1] >= pc
&& breaks
[1] <= closing_insn
)))
2677 last_breakpoint
= 0;
2679 /* Insert the breakpoint at the end of the sequence, and one at the
2680 destination of the conditional branch, if it exists. */
2681 for (index
= 0; index
<= last_breakpoint
; index
++)
2682 insert_single_step_breakpoint (gdbarch
, aspace
, breaks
[index
]);
2687 /* Initialize the current architecture based on INFO. If possible,
2688 re-use an architecture from ARCHES, which is a list of
2689 architectures already created during this debugging session.
2691 Called e.g. at program startup, when reading a core file, and when
2692 reading a binary file. */
2694 static struct gdbarch
*
2695 aarch64_gdbarch_init (struct gdbarch_info info
, struct gdbarch_list
*arches
)
2697 struct gdbarch_tdep
*tdep
;
2698 struct gdbarch
*gdbarch
;
2699 struct gdbarch_list
*best_arch
;
2700 struct tdesc_arch_data
*tdesc_data
= NULL
;
2701 const struct target_desc
*tdesc
= info
.target_desc
;
2703 int have_fpa_registers
= 1;
2705 const struct tdesc_feature
*feature
;
2707 int num_pseudo_regs
= 0;
2709 /* Ensure we always have a target descriptor. */
2710 if (!tdesc_has_registers (tdesc
))
2711 tdesc
= tdesc_aarch64
;
2715 feature
= tdesc_find_feature (tdesc
, "org.gnu.gdb.aarch64.core");
2717 if (feature
== NULL
)
2720 tdesc_data
= tdesc_data_alloc ();
2722 /* Validate the descriptor provides the mandatory core R registers
2723 and allocate their numbers. */
2724 for (i
= 0; i
< ARRAY_SIZE (aarch64_r_register_names
); i
++)
2726 tdesc_numbered_register (feature
, tdesc_data
, AARCH64_X0_REGNUM
+ i
,
2727 aarch64_r_register_names
[i
]);
2729 num_regs
= AARCH64_X0_REGNUM
+ i
;
2731 /* Look for the V registers. */
2732 feature
= tdesc_find_feature (tdesc
, "org.gnu.gdb.aarch64.fpu");
2735 /* Validate the descriptor provides the mandatory V registers
2736 and allocate their numbers. */
2737 for (i
= 0; i
< ARRAY_SIZE (aarch64_v_register_names
); i
++)
2739 tdesc_numbered_register (feature
, tdesc_data
, AARCH64_V0_REGNUM
+ i
,
2740 aarch64_v_register_names
[i
]);
2742 num_regs
= AARCH64_V0_REGNUM
+ i
;
2744 num_pseudo_regs
+= 32; /* add the Qn scalar register pseudos */
2745 num_pseudo_regs
+= 32; /* add the Dn scalar register pseudos */
2746 num_pseudo_regs
+= 32; /* add the Sn scalar register pseudos */
2747 num_pseudo_regs
+= 32; /* add the Hn scalar register pseudos */
2748 num_pseudo_regs
+= 32; /* add the Bn scalar register pseudos */
2753 tdesc_data_cleanup (tdesc_data
);
2757 /* AArch64 code is always little-endian. */
2758 info
.byte_order_for_code
= BFD_ENDIAN_LITTLE
;
2760 /* If there is already a candidate, use it. */
2761 for (best_arch
= gdbarch_list_lookup_by_info (arches
, &info
);
2763 best_arch
= gdbarch_list_lookup_by_info (best_arch
->next
, &info
))
2765 /* Found a match. */
2769 if (best_arch
!= NULL
)
2771 if (tdesc_data
!= NULL
)
2772 tdesc_data_cleanup (tdesc_data
);
2773 return best_arch
->gdbarch
;
2776 tdep
= xcalloc (1, sizeof (struct gdbarch_tdep
));
2777 gdbarch
= gdbarch_alloc (&info
, tdep
);
2779 /* This should be low enough for everything. */
2780 tdep
->lowest_pc
= 0x20;
2781 tdep
->jb_pc
= -1; /* Longjump support not enabled by default. */
2782 tdep
->jb_elt_size
= 8;
2784 set_gdbarch_push_dummy_call (gdbarch
, aarch64_push_dummy_call
);
2785 set_gdbarch_frame_align (gdbarch
, aarch64_frame_align
);
2787 /* Frame handling. */
2788 set_gdbarch_dummy_id (gdbarch
, aarch64_dummy_id
);
2789 set_gdbarch_unwind_pc (gdbarch
, aarch64_unwind_pc
);
2790 set_gdbarch_unwind_sp (gdbarch
, aarch64_unwind_sp
);
2792 /* Advance PC across function entry code. */
2793 set_gdbarch_skip_prologue (gdbarch
, aarch64_skip_prologue
);
2795 /* The stack grows downward. */
2796 set_gdbarch_inner_than (gdbarch
, core_addr_lessthan
);
2798 /* Breakpoint manipulation. */
2799 set_gdbarch_breakpoint_from_pc (gdbarch
, aarch64_breakpoint_from_pc
);
2800 set_gdbarch_have_nonsteppable_watchpoint (gdbarch
, 1);
2801 set_gdbarch_software_single_step (gdbarch
, aarch64_software_single_step
);
2803 /* Information about registers, etc. */
2804 set_gdbarch_sp_regnum (gdbarch
, AARCH64_SP_REGNUM
);
2805 set_gdbarch_pc_regnum (gdbarch
, AARCH64_PC_REGNUM
);
2806 set_gdbarch_num_regs (gdbarch
, num_regs
);
2808 set_gdbarch_num_pseudo_regs (gdbarch
, num_pseudo_regs
);
2809 set_gdbarch_pseudo_register_read_value (gdbarch
, aarch64_pseudo_read_value
);
2810 set_gdbarch_pseudo_register_write (gdbarch
, aarch64_pseudo_write
);
2811 set_tdesc_pseudo_register_name (gdbarch
, aarch64_pseudo_register_name
);
2812 set_tdesc_pseudo_register_type (gdbarch
, aarch64_pseudo_register_type
);
2813 set_tdesc_pseudo_register_reggroup_p (gdbarch
,
2814 aarch64_pseudo_register_reggroup_p
);
2817 set_gdbarch_short_bit (gdbarch
, 16);
2818 set_gdbarch_int_bit (gdbarch
, 32);
2819 set_gdbarch_float_bit (gdbarch
, 32);
2820 set_gdbarch_double_bit (gdbarch
, 64);
2821 set_gdbarch_long_double_bit (gdbarch
, 128);
2822 set_gdbarch_long_bit (gdbarch
, 64);
2823 set_gdbarch_long_long_bit (gdbarch
, 64);
2824 set_gdbarch_ptr_bit (gdbarch
, 64);
2825 set_gdbarch_char_signed (gdbarch
, 0);
2826 set_gdbarch_float_format (gdbarch
, floatformats_ieee_single
);
2827 set_gdbarch_double_format (gdbarch
, floatformats_ieee_double
);
2828 set_gdbarch_long_double_format (gdbarch
, floatformats_ia64_quad
);
2830 /* Internal <-> external register number maps. */
2831 set_gdbarch_dwarf2_reg_to_regnum (gdbarch
, aarch64_dwarf_reg_to_regnum
);
2833 /* Returning results. */
2834 set_gdbarch_return_value (gdbarch
, aarch64_return_value
);
2837 set_gdbarch_print_insn (gdbarch
, aarch64_gdb_print_insn
);
2839 /* Virtual tables. */
2840 set_gdbarch_vbit_in_delta (gdbarch
, 1);
2842 /* Hook in the ABI-specific overrides, if they have been registered. */
2843 info
.target_desc
= tdesc
;
2844 info
.tdep_info
= (void *) tdesc_data
;
2845 gdbarch_init_osabi (info
, gdbarch
);
2847 dwarf2_frame_set_init_reg (gdbarch
, aarch64_dwarf2_frame_init_reg
);
2849 /* Add some default predicates. */
2850 frame_unwind_append_unwinder (gdbarch
, &aarch64_stub_unwind
);
2851 dwarf2_append_unwinders (gdbarch
);
2852 frame_unwind_append_unwinder (gdbarch
, &aarch64_prologue_unwind
);
2854 frame_base_set_default (gdbarch
, &aarch64_normal_base
);
2856 /* Now we have tuned the configuration, set a few final things,
2857 based on what the OS ABI has told us. */
2859 if (tdep
->jb_pc
>= 0)
2860 set_gdbarch_get_longjmp_target (gdbarch
, aarch64_get_longjmp_target
);
2862 set_gdbarch_gen_return_address (gdbarch
, aarch64_gen_return_address
);
2864 tdesc_use_registers (gdbarch
, tdesc
, tdesc_data
);
2866 /* Add standard register aliases. */
2867 for (i
= 0; i
< ARRAY_SIZE (aarch64_register_aliases
); i
++)
2868 user_reg_add (gdbarch
, aarch64_register_aliases
[i
].name
,
2869 value_of_aarch64_user_reg
,
2870 &aarch64_register_aliases
[i
].regnum
);
2876 aarch64_dump_tdep (struct gdbarch
*gdbarch
, struct ui_file
*file
)
2878 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2883 fprintf_unfiltered (file
, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
2884 paddress (gdbarch
, tdep
->lowest_pc
));
2887 /* Suppress warning from -Wmissing-prototypes. */
2888 extern initialize_file_ftype _initialize_aarch64_tdep
;
2891 _initialize_aarch64_tdep (void)
2893 gdbarch_register (bfd_arch_aarch64
, aarch64_gdbarch_init
,
2896 initialize_tdesc_aarch64 ();
2898 /* Debug this file's internals. */
2899 add_setshow_boolean_cmd ("aarch64", class_maintenance
, &aarch64_debug
, _("\
2900 Set AArch64 debugging."), _("\
2901 Show AArch64 debugging."), _("\
2902 When on, AArch64 specific debugging is enabled."),
2905 &setdebuglist
, &showdebuglist
);
/* AArch64 process record-replay related structures, defines etc.  */

/* Mask of the low (X + 1) bits, e.g. submask (3) == 0xf.  */
#define submask(x) ((1L << ((x) + 1)) - 1)
/* Extract single bit ST of OBJ.  */
#define bit(obj,st) (((obj) >> (st)) & 1)
/* Extract bit-field [ST, FN] (inclusive) of OBJ.  */
#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))

/* Allocate REGS (owned by the caller, freed by the record machinery)
   and copy LENGTH register numbers into it from RECORD_BUF.  A zero
   LENGTH leaves REGS untouched.  */
#define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
        do \
          { \
            unsigned int reg_len = LENGTH; \
            if (reg_len) \
              { \
                REGS = XNEWVEC (uint32_t, reg_len); \
                memcpy (&REGS[0], &RECORD_BUF[0], sizeof (uint32_t) * LENGTH); \
              } \
          } \
        while (0)

/* Allocate MEMS (owned by the caller, freed by the record machinery)
   and copy LENGTH len/addr pairs into it from RECORD_BUF.  A zero
   LENGTH leaves MEMS untouched.  */
#define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
        do \
          { \
            unsigned int mem_len = LENGTH; \
            if (mem_len) \
              { \
                MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
                memcpy (&MEMS->len, &RECORD_BUF[0], \
                        sizeof (struct aarch64_mem_r) * LENGTH); \
              } \
          } \
        while (0)
2939 /* AArch64 record/replay structures and enumerations. */
2941 struct aarch64_mem_r
2943 uint64_t len
; /* Record length. */
2944 uint64_t addr
; /* Memory address. */
2947 enum aarch64_record_result
2949 AARCH64_RECORD_SUCCESS
,
2950 AARCH64_RECORD_FAILURE
,
2951 AARCH64_RECORD_UNSUPPORTED
,
2952 AARCH64_RECORD_UNKNOWN
2955 typedef struct insn_decode_record_t
2957 struct gdbarch
*gdbarch
;
2958 struct regcache
*regcache
;
2959 CORE_ADDR this_addr
; /* Address of insn to be recorded. */
2960 uint32_t aarch64_insn
; /* Insn to be recorded. */
2961 uint32_t mem_rec_count
; /* Count of memory records. */
2962 uint32_t reg_rec_count
; /* Count of register records. */
2963 uint32_t *aarch64_regs
; /* Registers to be recorded. */
2964 struct aarch64_mem_r
*aarch64_mems
; /* Memory locations to be recorded. */
2965 } insn_decode_record
;
2967 /* Record handler for data processing - register instructions. */
2970 aarch64_record_data_proc_reg (insn_decode_record
*aarch64_insn_r
)
2972 uint8_t reg_rd
, insn_bits24_27
, insn_bits21_23
;
2973 uint32_t record_buf
[4];
2975 reg_rd
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
2976 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
2977 insn_bits21_23
= bits (aarch64_insn_r
->aarch64_insn
, 21, 23);
2979 if (!bit (aarch64_insn_r
->aarch64_insn
, 28))
2983 /* Logical (shifted register). */
2984 if (insn_bits24_27
== 0x0a)
2985 setflags
= (bits (aarch64_insn_r
->aarch64_insn
, 29, 30) == 0x03);
2987 else if (insn_bits24_27
== 0x0b)
2988 setflags
= bit (aarch64_insn_r
->aarch64_insn
, 29);
2990 return AARCH64_RECORD_UNKNOWN
;
2992 record_buf
[0] = reg_rd
;
2993 aarch64_insn_r
->reg_rec_count
= 1;
2995 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_CPSR_REGNUM
;
2999 if (insn_bits24_27
== 0x0b)
3001 /* Data-processing (3 source). */
3002 record_buf
[0] = reg_rd
;
3003 aarch64_insn_r
->reg_rec_count
= 1;
3005 else if (insn_bits24_27
== 0x0a)
3007 if (insn_bits21_23
== 0x00)
3009 /* Add/subtract (with carry). */
3010 record_buf
[0] = reg_rd
;
3011 aarch64_insn_r
->reg_rec_count
= 1;
3012 if (bit (aarch64_insn_r
->aarch64_insn
, 29))
3014 record_buf
[1] = AARCH64_CPSR_REGNUM
;
3015 aarch64_insn_r
->reg_rec_count
= 2;
3018 else if (insn_bits21_23
== 0x02)
3020 /* Conditional compare (register) and conditional compare
3021 (immediate) instructions. */
3022 record_buf
[0] = AARCH64_CPSR_REGNUM
;
3023 aarch64_insn_r
->reg_rec_count
= 1;
3025 else if (insn_bits21_23
== 0x04 || insn_bits21_23
== 0x06)
3027 /* CConditional select. */
3028 /* Data-processing (2 source). */
3029 /* Data-processing (1 source). */
3030 record_buf
[0] = reg_rd
;
3031 aarch64_insn_r
->reg_rec_count
= 1;
3034 return AARCH64_RECORD_UNKNOWN
;
3038 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3040 return AARCH64_RECORD_SUCCESS
;
3043 /* Record handler for data processing - immediate instructions. */
3046 aarch64_record_data_proc_imm (insn_decode_record
*aarch64_insn_r
)
3048 uint8_t reg_rd
, insn_bit28
, insn_bit23
, insn_bits24_27
, setflags
;
3049 uint32_t record_buf
[4];
3051 reg_rd
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3052 insn_bit28
= bit (aarch64_insn_r
->aarch64_insn
, 28);
3053 insn_bit23
= bit (aarch64_insn_r
->aarch64_insn
, 23);
3054 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
3056 if (insn_bits24_27
== 0x00 /* PC rel addressing. */
3057 || insn_bits24_27
== 0x03 /* Bitfield and Extract. */
3058 || (insn_bits24_27
== 0x02 && insn_bit23
)) /* Move wide (immediate). */
3060 record_buf
[0] = reg_rd
;
3061 aarch64_insn_r
->reg_rec_count
= 1;
3063 else if (insn_bits24_27
== 0x01)
3065 /* Add/Subtract (immediate). */
3066 setflags
= bit (aarch64_insn_r
->aarch64_insn
, 29);
3067 record_buf
[0] = reg_rd
;
3068 aarch64_insn_r
->reg_rec_count
= 1;
3070 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_CPSR_REGNUM
;
3072 else if (insn_bits24_27
== 0x02 && !insn_bit23
)
3074 /* Logical (immediate). */
3075 setflags
= bits (aarch64_insn_r
->aarch64_insn
, 29, 30) == 0x03;
3076 record_buf
[0] = reg_rd
;
3077 aarch64_insn_r
->reg_rec_count
= 1;
3079 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_CPSR_REGNUM
;
3082 return AARCH64_RECORD_UNKNOWN
;
3084 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3086 return AARCH64_RECORD_SUCCESS
;
3089 /* Record handler for branch, exception generation and system instructions. */
3092 aarch64_record_branch_except_sys (insn_decode_record
*aarch64_insn_r
)
3094 struct gdbarch_tdep
*tdep
= gdbarch_tdep (aarch64_insn_r
->gdbarch
);
3095 uint8_t insn_bits24_27
, insn_bits28_31
, insn_bits22_23
;
3096 uint32_t record_buf
[4];
3098 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
3099 insn_bits28_31
= bits (aarch64_insn_r
->aarch64_insn
, 28, 31);
3100 insn_bits22_23
= bits (aarch64_insn_r
->aarch64_insn
, 22, 23);
3102 if (insn_bits28_31
== 0x0d)
3104 /* Exception generation instructions. */
3105 if (insn_bits24_27
== 0x04)
3107 if (!bits (aarch64_insn_r
->aarch64_insn
, 2, 4)
3108 && !bits (aarch64_insn_r
->aarch64_insn
, 21, 23)
3109 && bits (aarch64_insn_r
->aarch64_insn
, 0, 1) == 0x01)
3111 ULONGEST svc_number
;
3113 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, 8,
3115 return tdep
->aarch64_syscall_record (aarch64_insn_r
->regcache
,
3119 return AARCH64_RECORD_UNSUPPORTED
;
3121 /* System instructions. */
3122 else if (insn_bits24_27
== 0x05 && insn_bits22_23
== 0x00)
3124 uint32_t reg_rt
, reg_crn
;
3126 reg_rt
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3127 reg_crn
= bits (aarch64_insn_r
->aarch64_insn
, 12, 15);
3129 /* Record rt in case of sysl and mrs instructions. */
3130 if (bit (aarch64_insn_r
->aarch64_insn
, 21))
3132 record_buf
[0] = reg_rt
;
3133 aarch64_insn_r
->reg_rec_count
= 1;
3135 /* Record cpsr for hint and msr(immediate) instructions. */
3136 else if (reg_crn
== 0x02 || reg_crn
== 0x04)
3138 record_buf
[0] = AARCH64_CPSR_REGNUM
;
3139 aarch64_insn_r
->reg_rec_count
= 1;
3142 /* Unconditional branch (register). */
3143 else if((insn_bits24_27
& 0x0e) == 0x06)
3145 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_PC_REGNUM
;
3146 if (bits (aarch64_insn_r
->aarch64_insn
, 21, 22) == 0x01)
3147 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_LR_REGNUM
;
3150 return AARCH64_RECORD_UNKNOWN
;
3152 /* Unconditional branch (immediate). */
3153 else if ((insn_bits28_31
& 0x07) == 0x01 && (insn_bits24_27
& 0x0c) == 0x04)
3155 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_PC_REGNUM
;
3156 if (bit (aarch64_insn_r
->aarch64_insn
, 31))
3157 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_LR_REGNUM
;
3160 /* Compare & branch (immediate), Test & branch (immediate) and
3161 Conditional branch (immediate). */
3162 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_PC_REGNUM
;
3164 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3166 return AARCH64_RECORD_SUCCESS
;
3169 /* Record handler for advanced SIMD load and store instructions. */
3172 aarch64_record_asimd_load_store (insn_decode_record
*aarch64_insn_r
)
3175 uint64_t addr_offset
= 0;
3176 uint32_t record_buf
[24];
3177 uint64_t record_buf_mem
[24];
3178 uint32_t reg_rn
, reg_rt
;
3179 uint32_t reg_index
= 0, mem_index
= 0;
3180 uint8_t opcode_bits
, size_bits
;
3182 reg_rt
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3183 reg_rn
= bits (aarch64_insn_r
->aarch64_insn
, 5, 9);
3184 size_bits
= bits (aarch64_insn_r
->aarch64_insn
, 10, 11);
3185 opcode_bits
= bits (aarch64_insn_r
->aarch64_insn
, 12, 15);
3186 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
, &address
);
3190 fprintf_unfiltered (gdb_stdlog
,
3191 "Process record: Advanced SIMD load/store\n");
3194 /* Load/store single structure. */
3195 if (bit (aarch64_insn_r
->aarch64_insn
, 24))
3197 uint8_t sindex
, scale
, selem
, esize
, replicate
= 0;
3198 scale
= opcode_bits
>> 2;
3199 selem
= ((opcode_bits
& 0x02) |
3200 bit (aarch64_insn_r
->aarch64_insn
, 21)) + 1;
3204 if (size_bits
& 0x01)
3205 return AARCH64_RECORD_UNKNOWN
;
3208 if ((size_bits
>> 1) & 0x01)
3209 return AARCH64_RECORD_UNKNOWN
;
3210 if (size_bits
& 0x01)
3212 if (!((opcode_bits
>> 1) & 0x01))
3215 return AARCH64_RECORD_UNKNOWN
;
3219 if (bit (aarch64_insn_r
->aarch64_insn
, 22) && !(opcode_bits
& 0x01))
3226 return AARCH64_RECORD_UNKNOWN
;
3232 for (sindex
= 0; sindex
< selem
; sindex
++)
3234 record_buf
[reg_index
++] = reg_rt
+ AARCH64_V0_REGNUM
;
3235 reg_rt
= (reg_rt
+ 1) % 32;
3239 for (sindex
= 0; sindex
< selem
; sindex
++)
3240 if (bit (aarch64_insn_r
->aarch64_insn
, 22))
3241 record_buf
[reg_index
++] = reg_rt
+ AARCH64_V0_REGNUM
;
3244 record_buf_mem
[mem_index
++] = esize
/ 8;
3245 record_buf_mem
[mem_index
++] = address
+ addr_offset
;
3247 addr_offset
= addr_offset
+ (esize
/ 8);
3248 reg_rt
= (reg_rt
+ 1) % 32;
3251 /* Load/store multiple structure. */
3254 uint8_t selem
, esize
, rpt
, elements
;
3255 uint8_t eindex
, rindex
;
3257 esize
= 8 << size_bits
;
3258 if (bit (aarch64_insn_r
->aarch64_insn
, 30))
3259 elements
= 128 / esize
;
3261 elements
= 64 / esize
;
3263 switch (opcode_bits
)
3265 /*LD/ST4 (4 Registers). */
3270 /*LD/ST1 (4 Registers). */
3275 /*LD/ST3 (3 Registers). */
3280 /*LD/ST1 (3 Registers). */
3285 /*LD/ST1 (1 Register). */
3290 /*LD/ST2 (2 Registers). */
3295 /*LD/ST1 (2 Registers). */
3301 return AARCH64_RECORD_UNSUPPORTED
;
3304 for (rindex
= 0; rindex
< rpt
; rindex
++)
3305 for (eindex
= 0; eindex
< elements
; eindex
++)
3307 uint8_t reg_tt
, sindex
;
3308 reg_tt
= (reg_rt
+ rindex
) % 32;
3309 for (sindex
= 0; sindex
< selem
; sindex
++)
3311 if (bit (aarch64_insn_r
->aarch64_insn
, 22))
3312 record_buf
[reg_index
++] = reg_tt
+ AARCH64_V0_REGNUM
;
3315 record_buf_mem
[mem_index
++] = esize
/ 8;
3316 record_buf_mem
[mem_index
++] = address
+ addr_offset
;
3318 addr_offset
= addr_offset
+ (esize
/ 8);
3319 reg_tt
= (reg_tt
+ 1) % 32;
3324 if (bit (aarch64_insn_r
->aarch64_insn
, 23))
3325 record_buf
[reg_index
++] = reg_rn
;
3327 aarch64_insn_r
->reg_rec_count
= reg_index
;
3328 aarch64_insn_r
->mem_rec_count
= mem_index
/ 2;
3329 MEM_ALLOC (aarch64_insn_r
->aarch64_mems
, aarch64_insn_r
->mem_rec_count
,
3331 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3333 return AARCH64_RECORD_SUCCESS
;
3336 /* Record handler for load and store instructions. */
3339 aarch64_record_load_store (insn_decode_record
*aarch64_insn_r
)
3341 uint8_t insn_bits24_27
, insn_bits28_29
, insn_bits10_11
;
3342 uint8_t insn_bit23
, insn_bit21
;
3343 uint8_t opc
, size_bits
, ld_flag
, vector_flag
;
3344 uint32_t reg_rn
, reg_rt
, reg_rt2
;
3345 uint64_t datasize
, offset
;
3346 uint32_t record_buf
[8];
3347 uint64_t record_buf_mem
[8];
3350 insn_bits10_11
= bits (aarch64_insn_r
->aarch64_insn
, 10, 11);
3351 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
3352 insn_bits28_29
= bits (aarch64_insn_r
->aarch64_insn
, 28, 29);
3353 insn_bit21
= bit (aarch64_insn_r
->aarch64_insn
, 21);
3354 insn_bit23
= bit (aarch64_insn_r
->aarch64_insn
, 23);
3355 ld_flag
= bit (aarch64_insn_r
->aarch64_insn
, 22);
3356 vector_flag
= bit (aarch64_insn_r
->aarch64_insn
, 26);
3357 reg_rt
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3358 reg_rn
= bits (aarch64_insn_r
->aarch64_insn
, 5, 9);
3359 reg_rt2
= bits (aarch64_insn_r
->aarch64_insn
, 10, 14);
3360 size_bits
= bits (aarch64_insn_r
->aarch64_insn
, 30, 31);
3362 /* Load/store exclusive. */
3363 if (insn_bits24_27
== 0x08 && insn_bits28_29
== 0x00)
3367 fprintf_unfiltered (gdb_stdlog
,
3368 "Process record: load/store exclusive\n");
3373 record_buf
[0] = reg_rt
;
3374 aarch64_insn_r
->reg_rec_count
= 1;
3377 record_buf
[1] = reg_rt2
;
3378 aarch64_insn_r
->reg_rec_count
= 2;
3384 datasize
= (8 << size_bits
) * 2;
3386 datasize
= (8 << size_bits
);
3387 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
3389 record_buf_mem
[0] = datasize
/ 8;
3390 record_buf_mem
[1] = address
;
3391 aarch64_insn_r
->mem_rec_count
= 1;
3394 /* Save register rs. */
3395 record_buf
[0] = bits (aarch64_insn_r
->aarch64_insn
, 16, 20);
3396 aarch64_insn_r
->reg_rec_count
= 1;
3400 /* Load register (literal) instructions decoding. */
3401 else if ((insn_bits24_27
& 0x0b) == 0x08 && insn_bits28_29
== 0x01)
3405 fprintf_unfiltered (gdb_stdlog
,
3406 "Process record: load register (literal)\n");
3409 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
3411 record_buf
[0] = reg_rt
;
3412 aarch64_insn_r
->reg_rec_count
= 1;
3414 /* All types of load/store pair instructions decoding. */
3415 else if ((insn_bits24_27
& 0x0a) == 0x08 && insn_bits28_29
== 0x02)
3419 fprintf_unfiltered (gdb_stdlog
,
3420 "Process record: load/store pair\n");
3427 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
3428 record_buf
[1] = reg_rt2
+ AARCH64_V0_REGNUM
;
3432 record_buf
[0] = reg_rt
;
3433 record_buf
[1] = reg_rt2
;
3435 aarch64_insn_r
->reg_rec_count
= 2;
3440 imm7_off
= bits (aarch64_insn_r
->aarch64_insn
, 15, 21);
3442 size_bits
= size_bits
>> 1;
3443 datasize
= 8 << (2 + size_bits
);
3444 offset
= (imm7_off
& 0x40) ? (~imm7_off
& 0x007f) + 1 : imm7_off
;
3445 offset
= offset
<< (2 + size_bits
);
3446 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
3448 if (!((insn_bits24_27
& 0x0b) == 0x08 && insn_bit23
))
3450 if (imm7_off
& 0x40)
3451 address
= address
- offset
;
3453 address
= address
+ offset
;
3456 record_buf_mem
[0] = datasize
/ 8;
3457 record_buf_mem
[1] = address
;
3458 record_buf_mem
[2] = datasize
/ 8;
3459 record_buf_mem
[3] = address
+ (datasize
/ 8);
3460 aarch64_insn_r
->mem_rec_count
= 2;
3462 if (bit (aarch64_insn_r
->aarch64_insn
, 23))
3463 record_buf
[aarch64_insn_r
->reg_rec_count
++] = reg_rn
;
3465 /* Load/store register (unsigned immediate) instructions. */
3466 else if ((insn_bits24_27
& 0x0b) == 0x09 && insn_bits28_29
== 0x03)
3468 opc
= bits (aarch64_insn_r
->aarch64_insn
, 22, 23);
3475 if (size_bits
!= 0x03)
3478 return AARCH64_RECORD_UNKNOWN
;
3482 fprintf_unfiltered (gdb_stdlog
,
3483 "Process record: load/store (unsigned immediate):"
3484 " size %x V %d opc %x\n", size_bits
, vector_flag
,
3490 offset
= bits (aarch64_insn_r
->aarch64_insn
, 10, 21);
3491 datasize
= 8 << size_bits
;
3492 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
3494 offset
= offset
<< size_bits
;
3495 address
= address
+ offset
;
3497 record_buf_mem
[0] = datasize
>> 3;
3498 record_buf_mem
[1] = address
;
3499 aarch64_insn_r
->mem_rec_count
= 1;
3504 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
3506 record_buf
[0] = reg_rt
;
3507 aarch64_insn_r
->reg_rec_count
= 1;
3510 /* Load/store register (register offset) instructions. */
3511 else if ((insn_bits24_27
& 0x0b) == 0x08 && insn_bits28_29
== 0x03
3512 && insn_bits10_11
== 0x02 && insn_bit21
)
3516 fprintf_unfiltered (gdb_stdlog
,
3517 "Process record: load/store (register offset)\n");
3519 opc
= bits (aarch64_insn_r
->aarch64_insn
, 22, 23);
3526 if (size_bits
!= 0x03)
3529 return AARCH64_RECORD_UNKNOWN
;
3533 uint64_t reg_rm_val
;
3534 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
,
3535 bits (aarch64_insn_r
->aarch64_insn
, 16, 20), ®_rm_val
);
3536 if (bit (aarch64_insn_r
->aarch64_insn
, 12))
3537 offset
= reg_rm_val
<< size_bits
;
3539 offset
= reg_rm_val
;
3540 datasize
= 8 << size_bits
;
3541 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
3543 address
= address
+ offset
;
3544 record_buf_mem
[0] = datasize
>> 3;
3545 record_buf_mem
[1] = address
;
3546 aarch64_insn_r
->mem_rec_count
= 1;
3551 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
3553 record_buf
[0] = reg_rt
;
3554 aarch64_insn_r
->reg_rec_count
= 1;
3557 /* Load/store register (immediate and unprivileged) instructions. */
3558 else if ((insn_bits24_27
& 0x0b) == 0x08 && insn_bits28_29
== 0x03
3563 fprintf_unfiltered (gdb_stdlog
,
3564 "Process record: load/store (immediate and unprivileged)\n");
3566 opc
= bits (aarch64_insn_r
->aarch64_insn
, 22, 23);
3573 if (size_bits
!= 0x03)
3576 return AARCH64_RECORD_UNKNOWN
;
3581 imm9_off
= bits (aarch64_insn_r
->aarch64_insn
, 12, 20);
3582 offset
= (imm9_off
& 0x0100) ? (((~imm9_off
) & 0x01ff) + 1) : imm9_off
;
3583 datasize
= 8 << size_bits
;
3584 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
3586 if (insn_bits10_11
!= 0x01)
3588 if (imm9_off
& 0x0100)
3589 address
= address
- offset
;
3591 address
= address
+ offset
;
3593 record_buf_mem
[0] = datasize
>> 3;
3594 record_buf_mem
[1] = address
;
3595 aarch64_insn_r
->mem_rec_count
= 1;
3600 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
3602 record_buf
[0] = reg_rt
;
3603 aarch64_insn_r
->reg_rec_count
= 1;
3605 if (insn_bits10_11
== 0x01 || insn_bits10_11
== 0x03)
3606 record_buf
[aarch64_insn_r
->reg_rec_count
++] = reg_rn
;
3608 /* Advanced SIMD load/store instructions. */
3610 return aarch64_record_asimd_load_store (aarch64_insn_r
);
3612 MEM_ALLOC (aarch64_insn_r
->aarch64_mems
, aarch64_insn_r
->mem_rec_count
,
3614 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3616 return AARCH64_RECORD_SUCCESS
;
3619 /* Record handler for data processing SIMD and floating point instructions. */
3622 aarch64_record_data_proc_simd_fp (insn_decode_record
*aarch64_insn_r
)
3624 uint8_t insn_bit21
, opcode
, rmode
, reg_rd
;
3625 uint8_t insn_bits24_27
, insn_bits28_31
, insn_bits10_11
, insn_bits12_15
;
3626 uint8_t insn_bits11_14
;
3627 uint32_t record_buf
[2];
3629 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
3630 insn_bits28_31
= bits (aarch64_insn_r
->aarch64_insn
, 28, 31);
3631 insn_bits10_11
= bits (aarch64_insn_r
->aarch64_insn
, 10, 11);
3632 insn_bits12_15
= bits (aarch64_insn_r
->aarch64_insn
, 12, 15);
3633 insn_bits11_14
= bits (aarch64_insn_r
->aarch64_insn
, 11, 14);
3634 opcode
= bits (aarch64_insn_r
->aarch64_insn
, 16, 18);
3635 rmode
= bits (aarch64_insn_r
->aarch64_insn
, 19, 20);
3636 reg_rd
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3637 insn_bit21
= bit (aarch64_insn_r
->aarch64_insn
, 21);
3641 fprintf_unfiltered (gdb_stdlog
,
3642 "Process record: data processing SIMD/FP: ");
3645 if ((insn_bits28_31
& 0x05) == 0x01 && insn_bits24_27
== 0x0e)
3647 /* Floating point - fixed point conversion instructions. */
3651 fprintf_unfiltered (gdb_stdlog
, "FP - fixed point conversion");
3653 if ((opcode
>> 1) == 0x0 && rmode
== 0x03)
3654 record_buf
[0] = reg_rd
;
3656 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3658 /* Floating point - conditional compare instructions. */
3659 else if (insn_bits10_11
== 0x01)
3662 fprintf_unfiltered (gdb_stdlog
, "FP - conditional compare");
3664 record_buf
[0] = AARCH64_CPSR_REGNUM
;
3666 /* Floating point - data processing (2-source) and
3667 conditional select instructions. */
3668 else if (insn_bits10_11
== 0x02 || insn_bits10_11
== 0x03)
3671 fprintf_unfiltered (gdb_stdlog
, "FP - DP (2-source)");
3673 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3675 else if (insn_bits10_11
== 0x00)
3677 /* Floating point - immediate instructions. */
3678 if ((insn_bits12_15
& 0x01) == 0x01
3679 || (insn_bits12_15
& 0x07) == 0x04)
3682 fprintf_unfiltered (gdb_stdlog
, "FP - immediate");
3683 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3685 /* Floating point - compare instructions. */
3686 else if ((insn_bits12_15
& 0x03) == 0x02)
3689 fprintf_unfiltered (gdb_stdlog
, "FP - immediate");
3690 record_buf
[0] = AARCH64_CPSR_REGNUM
;
3692 /* Floating point - integer conversions instructions. */
3693 else if (insn_bits12_15
== 0x00)
3695 /* Convert float to integer instruction. */
3696 if (!(opcode
>> 1) || ((opcode
>> 1) == 0x02 && !rmode
))
3699 fprintf_unfiltered (gdb_stdlog
, "float to int conversion");
3701 record_buf
[0] = reg_rd
+ AARCH64_X0_REGNUM
;
3703 /* Convert integer to float instruction. */
3704 else if ((opcode
>> 1) == 0x01 && !rmode
)
3707 fprintf_unfiltered (gdb_stdlog
, "int to float conversion");
3709 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3711 /* Move float to integer instruction. */
3712 else if ((opcode
>> 1) == 0x03)
3715 fprintf_unfiltered (gdb_stdlog
, "move float to int");
3717 if (!(opcode
& 0x01))
3718 record_buf
[0] = reg_rd
+ AARCH64_X0_REGNUM
;
3720 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3723 return AARCH64_RECORD_UNKNOWN
;
3726 return AARCH64_RECORD_UNKNOWN
;
3729 return AARCH64_RECORD_UNKNOWN
;
3731 else if ((insn_bits28_31
& 0x09) == 0x00 && insn_bits24_27
== 0x0e)
3734 fprintf_unfiltered (gdb_stdlog
, "SIMD copy");
3736 /* Advanced SIMD copy instructions. */
3737 if (!bits (aarch64_insn_r
->aarch64_insn
, 21, 23)
3738 && !bit (aarch64_insn_r
->aarch64_insn
, 15)
3739 && bit (aarch64_insn_r
->aarch64_insn
, 10))
3741 if (insn_bits11_14
== 0x05 || insn_bits11_14
== 0x07)
3742 record_buf
[0] = reg_rd
+ AARCH64_X0_REGNUM
;
3744 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3747 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3749 /* All remaining floating point or advanced SIMD instructions. */
3753 fprintf_unfiltered (gdb_stdlog
, "all remain");
3755 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3759 fprintf_unfiltered (gdb_stdlog
, "\n");
3761 aarch64_insn_r
->reg_rec_count
++;
3762 gdb_assert (aarch64_insn_r
->reg_rec_count
== 1);
3763 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3765 return AARCH64_RECORD_SUCCESS
;
3768 /* Decodes insns type and invokes its record handler. */
3771 aarch64_record_decode_insn_handler (insn_decode_record
*aarch64_insn_r
)
3773 uint32_t ins_bit25
, ins_bit26
, ins_bit27
, ins_bit28
;
3775 ins_bit25
= bit (aarch64_insn_r
->aarch64_insn
, 25);
3776 ins_bit26
= bit (aarch64_insn_r
->aarch64_insn
, 26);
3777 ins_bit27
= bit (aarch64_insn_r
->aarch64_insn
, 27);
3778 ins_bit28
= bit (aarch64_insn_r
->aarch64_insn
, 28);
3780 /* Data processing - immediate instructions. */
3781 if (!ins_bit26
&& !ins_bit27
&& ins_bit28
)
3782 return aarch64_record_data_proc_imm (aarch64_insn_r
);
3784 /* Branch, exception generation and system instructions. */
3785 if (ins_bit26
&& !ins_bit27
&& ins_bit28
)
3786 return aarch64_record_branch_except_sys (aarch64_insn_r
);
3788 /* Load and store instructions. */
3789 if (!ins_bit25
&& ins_bit27
)
3790 return aarch64_record_load_store (aarch64_insn_r
);
3792 /* Data processing - register instructions. */
3793 if (ins_bit25
&& !ins_bit26
&& ins_bit27
)
3794 return aarch64_record_data_proc_reg (aarch64_insn_r
);
3796 /* Data processing - SIMD and floating point instructions. */
3797 if (ins_bit25
&& ins_bit26
&& ins_bit27
)
3798 return aarch64_record_data_proc_simd_fp (aarch64_insn_r
);
3800 return AARCH64_RECORD_UNSUPPORTED
;
3803 /* Cleans up local record registers and memory allocations. */
3806 deallocate_reg_mem (insn_decode_record
*record
)
3808 xfree (record
->aarch64_regs
);
3809 xfree (record
->aarch64_mems
);
3812 /* Parse the current instruction and record the values of the registers and
3813 memory that will be changed in current instruction to record_arch_list
3814 return -1 if something is wrong. */
3817 aarch64_process_record (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
3818 CORE_ADDR insn_addr
)
3820 uint32_t rec_no
= 0;
3821 uint8_t insn_size
= 4;
3823 ULONGEST t_bit
= 0, insn_id
= 0;
3824 gdb_byte buf
[insn_size
];
3825 insn_decode_record aarch64_record
;
3827 memset (&buf
[0], 0, insn_size
);
3828 memset (&aarch64_record
, 0, sizeof (insn_decode_record
));
3829 target_read_memory (insn_addr
, &buf
[0], insn_size
);
3830 aarch64_record
.aarch64_insn
3831 = (uint32_t) extract_unsigned_integer (&buf
[0],
3833 gdbarch_byte_order (gdbarch
));
3834 aarch64_record
.regcache
= regcache
;
3835 aarch64_record
.this_addr
= insn_addr
;
3836 aarch64_record
.gdbarch
= gdbarch
;
3838 ret
= aarch64_record_decode_insn_handler (&aarch64_record
);
3839 if (ret
== AARCH64_RECORD_UNSUPPORTED
)
3841 printf_unfiltered (_("Process record does not support instruction "
3842 "0x%0x at address %s.\n"),
3843 aarch64_record
.aarch64_insn
,
3844 paddress (gdbarch
, insn_addr
));
3850 /* Record registers. */
3851 record_full_arch_list_add_reg (aarch64_record
.regcache
,
3853 /* Always record register CPSR. */
3854 record_full_arch_list_add_reg (aarch64_record
.regcache
,
3855 AARCH64_CPSR_REGNUM
);
3856 if (aarch64_record
.aarch64_regs
)
3857 for (rec_no
= 0; rec_no
< aarch64_record
.reg_rec_count
; rec_no
++)
3858 if (record_full_arch_list_add_reg (aarch64_record
.regcache
,
3859 aarch64_record
.aarch64_regs
[rec_no
]))
3862 /* Record memories. */
3863 if (aarch64_record
.aarch64_mems
)
3864 for (rec_no
= 0; rec_no
< aarch64_record
.mem_rec_count
; rec_no
++)
3865 if (record_full_arch_list_add_mem
3866 ((CORE_ADDR
)aarch64_record
.aarch64_mems
[rec_no
].addr
,
3867 aarch64_record
.aarch64_mems
[rec_no
].len
))
3870 if (record_full_arch_list_add_end ())
3874 deallocate_reg_mem (&aarch64_record
);