1 /* Common target dependent code for GDB on AArch64 systems.
3 Copyright (C) 2009-2015 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
6 This file is part of GDB.
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
29 #include "reggroups.h"
32 #include "arch-utils.h"
34 #include "frame-unwind.h"
35 #include "frame-base.h"
36 #include "trad-frame.h"
38 #include "dwarf2-frame.h"
40 #include "prologue-value.h"
41 #include "target-descriptions.h"
42 #include "user-regs.h"
46 #include "aarch64-tdep.h"
49 #include "elf/aarch64.h"
54 #include "record-full.h"
56 #include "features/aarch64.c"
58 /* Pseudo register base numbers. */
59 #define AARCH64_Q0_REGNUM 0
60 #define AARCH64_D0_REGNUM (AARCH64_Q0_REGNUM + 32)
61 #define AARCH64_S0_REGNUM (AARCH64_D0_REGNUM + 32)
62 #define AARCH64_H0_REGNUM (AARCH64_S0_REGNUM + 32)
63 #define AARCH64_B0_REGNUM (AARCH64_H0_REGNUM + 32)
65 /* The standard register names, and all the valid aliases for them. */
68 const char *const name
;
70 } aarch64_register_aliases
[] =
72 /* 64-bit register names. */
73 {"fp", AARCH64_FP_REGNUM
},
74 {"lr", AARCH64_LR_REGNUM
},
75 {"sp", AARCH64_SP_REGNUM
},
77 /* 32-bit register names. */
78 {"w0", AARCH64_X0_REGNUM
+ 0},
79 {"w1", AARCH64_X0_REGNUM
+ 1},
80 {"w2", AARCH64_X0_REGNUM
+ 2},
81 {"w3", AARCH64_X0_REGNUM
+ 3},
82 {"w4", AARCH64_X0_REGNUM
+ 4},
83 {"w5", AARCH64_X0_REGNUM
+ 5},
84 {"w6", AARCH64_X0_REGNUM
+ 6},
85 {"w7", AARCH64_X0_REGNUM
+ 7},
86 {"w8", AARCH64_X0_REGNUM
+ 8},
87 {"w9", AARCH64_X0_REGNUM
+ 9},
88 {"w10", AARCH64_X0_REGNUM
+ 10},
89 {"w11", AARCH64_X0_REGNUM
+ 11},
90 {"w12", AARCH64_X0_REGNUM
+ 12},
91 {"w13", AARCH64_X0_REGNUM
+ 13},
92 {"w14", AARCH64_X0_REGNUM
+ 14},
93 {"w15", AARCH64_X0_REGNUM
+ 15},
94 {"w16", AARCH64_X0_REGNUM
+ 16},
95 {"w17", AARCH64_X0_REGNUM
+ 17},
96 {"w18", AARCH64_X0_REGNUM
+ 18},
97 {"w19", AARCH64_X0_REGNUM
+ 19},
98 {"w20", AARCH64_X0_REGNUM
+ 20},
99 {"w21", AARCH64_X0_REGNUM
+ 21},
100 {"w22", AARCH64_X0_REGNUM
+ 22},
101 {"w23", AARCH64_X0_REGNUM
+ 23},
102 {"w24", AARCH64_X0_REGNUM
+ 24},
103 {"w25", AARCH64_X0_REGNUM
+ 25},
104 {"w26", AARCH64_X0_REGNUM
+ 26},
105 {"w27", AARCH64_X0_REGNUM
+ 27},
106 {"w28", AARCH64_X0_REGNUM
+ 28},
107 {"w29", AARCH64_X0_REGNUM
+ 29},
108 {"w30", AARCH64_X0_REGNUM
+ 30},
111 {"ip0", AARCH64_X0_REGNUM
+ 16},
112 {"ip1", AARCH64_X0_REGNUM
+ 17}
/* The required core 'R' registers.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM!  */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  "pc", "cpsr"
};
/* The FP/SIMD 'V' registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM!  */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  "fpsr",
  "fpcr"
};
148 /* AArch64 prologue cache structure. */
149 struct aarch64_prologue_cache
151 /* The program counter at the start of the function. It is used to
152 identify this frame as a prologue frame. */
155 /* The program counter at the time this frame was created; i.e. where
156 this function was called from. It is used to identify this frame as a
160 /* The stack pointer at the time this frame was created; i.e. the
161 caller's stack pointer when this function was called. It is used
162 to identify this frame. */
165 /* Is the target available to read from? */
168 /* The frame base for this frame is just prev_sp - frame size.
169 FRAMESIZE is the distance from the frame pointer to the
170 initial stack pointer. */
173 /* The register used to hold the frame pointer for this frame. */
176 /* Saved register offsets. */
177 struct trad_frame_saved_reg
*saved_regs
;
/* Toggle this file's internal debugging dump.  */
static int aarch64_debug;
/* Implement the "show debug aarch64" command: report whether the
   file-local debugging dump is enabled.  */
static void
show_aarch64_debug (struct ui_file *file, int from_tty,
		    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
}
/* Extract a signed value from a bit field within an instruction
   encoding.

   INSN is the instruction opcode.

   WIDTH specifies the width of the bit field to extract (in bits).

   OFFSET specifies the least significant bit of the field where bits
   are numbered zero counting from least to most significant.  */

static int32_t
extract_signed_bitfield (uint32_t insn, unsigned width, unsigned offset)
{
  /* Mask out the WIDTH-bit field starting at OFFSET, then sign-extend
     it.  The historical implementation left-shifted a signed int into
     the sign bit, which is undefined behavior in C; instead, if the
     field's sign bit is set, subtract 2^WIDTH.  The 64-bit intermediate
     also makes WIDTH == 32 well defined.  */
  uint64_t fmask = (((uint64_t) 1) << width) - 1;
  uint64_t field = (insn >> offset) & fmask;

  if (field & (((uint64_t) 1) << (width - 1)))
    return (int32_t) ((int64_t) field - (int64_t) (fmask + 1));
  return (int32_t) field;
}
/* Determine if specified bits within an instruction opcode matches a
   particular pattern.

   INSN is the instruction opcode.

   MASK specifies the bits within the opcode that are to be tested
   against for a match with PATTERN.

   Return non-zero on a match, zero otherwise.  */

static int
decode_masked_match (uint32_t insn, uint32_t mask, uint32_t pattern)
{
  return (insn & mask) == pattern;
}
223 /* Decode an opcode if it represents an immediate ADD or SUB instruction.
225 ADDR specifies the address of the opcode.
226 INSN specifies the opcode to test.
227 RD receives the 'rd' field from the decoded instruction.
228 RN receives the 'rn' field from the decoded instruction.
230 Return 1 if the opcodes matches and is decoded, otherwise 0. */
232 decode_add_sub_imm (CORE_ADDR addr
, uint32_t insn
, unsigned *rd
, unsigned *rn
,
235 if ((insn
& 0x9f000000) == 0x91000000)
240 *rd
= (insn
>> 0) & 0x1f;
241 *rn
= (insn
>> 5) & 0x1f;
242 *imm
= (insn
>> 10) & 0xfff;
243 shift
= (insn
>> 22) & 0x3;
244 op_is_sub
= (insn
>> 30) & 0x1;
262 fprintf_unfiltered (gdb_stdlog
,
263 "decode: 0x%s 0x%x add x%u, x%u, #%d\n",
264 core_addr_to_string_nz (addr
), insn
, *rd
, *rn
,
271 /* Decode an opcode if it represents an ADRP instruction.
273 ADDR specifies the address of the opcode.
274 INSN specifies the opcode to test.
275 RD receives the 'rd' field from the decoded instruction.
277 Return 1 if the opcodes matches and is decoded, otherwise 0. */
280 decode_adrp (CORE_ADDR addr
, uint32_t insn
, unsigned *rd
)
282 if (decode_masked_match (insn
, 0x9f000000, 0x90000000))
284 *rd
= (insn
>> 0) & 0x1f;
287 fprintf_unfiltered (gdb_stdlog
,
288 "decode: 0x%s 0x%x adrp x%u, #?\n",
289 core_addr_to_string_nz (addr
), insn
, *rd
);
295 /* Decode an opcode if it represents an branch immediate or branch
296 and link immediate instruction.
298 ADDR specifies the address of the opcode.
299 INSN specifies the opcode to test.
300 LINK receives the 'link' bit from the decoded instruction.
301 OFFSET receives the immediate offset from the decoded instruction.
303 Return 1 if the opcodes matches and is decoded, otherwise 0. */
306 decode_b (CORE_ADDR addr
, uint32_t insn
, unsigned *link
, int32_t *offset
)
308 /* b 0001 01ii iiii iiii iiii iiii iiii iiii */
309 /* bl 1001 01ii iiii iiii iiii iiii iiii iiii */
310 if (decode_masked_match (insn
, 0x7c000000, 0x14000000))
313 *offset
= extract_signed_bitfield (insn
, 26, 0) << 2;
316 fprintf_unfiltered (gdb_stdlog
,
317 "decode: 0x%s 0x%x %s 0x%s\n",
318 core_addr_to_string_nz (addr
), insn
,
320 core_addr_to_string_nz (addr
+ *offset
));
327 /* Decode an opcode if it represents a conditional branch instruction.
329 ADDR specifies the address of the opcode.
330 INSN specifies the opcode to test.
331 COND receives the branch condition field from the decoded
333 OFFSET receives the immediate offset from the decoded instruction.
335 Return 1 if the opcodes matches and is decoded, otherwise 0. */
338 decode_bcond (CORE_ADDR addr
, uint32_t insn
, unsigned *cond
, int32_t *offset
)
340 if (decode_masked_match (insn
, 0xfe000000, 0x54000000))
342 *cond
= (insn
>> 0) & 0xf;
343 *offset
= extract_signed_bitfield (insn
, 19, 5) << 2;
346 fprintf_unfiltered (gdb_stdlog
,
347 "decode: 0x%s 0x%x b<%u> 0x%s\n",
348 core_addr_to_string_nz (addr
), insn
, *cond
,
349 core_addr_to_string_nz (addr
+ *offset
));
355 /* Decode an opcode if it represents a branch via register instruction.
357 ADDR specifies the address of the opcode.
358 INSN specifies the opcode to test.
359 LINK receives the 'link' bit from the decoded instruction.
360 RN receives the 'rn' field from the decoded instruction.
362 Return 1 if the opcodes matches and is decoded, otherwise 0. */
365 decode_br (CORE_ADDR addr
, uint32_t insn
, unsigned *link
, unsigned *rn
)
367 /* 8 4 0 6 2 8 4 0 */
368 /* blr 110101100011111100000000000rrrrr */
369 /* br 110101100001111100000000000rrrrr */
370 if (decode_masked_match (insn
, 0xffdffc1f, 0xd61f0000))
372 *link
= (insn
>> 21) & 1;
373 *rn
= (insn
>> 5) & 0x1f;
376 fprintf_unfiltered (gdb_stdlog
,
377 "decode: 0x%s 0x%x %s 0x%x\n",
378 core_addr_to_string_nz (addr
), insn
,
379 *link
? "blr" : "br", *rn
);
386 /* Decode an opcode if it represents a CBZ or CBNZ instruction.
388 ADDR specifies the address of the opcode.
389 INSN specifies the opcode to test.
390 IS64 receives the 'sf' field from the decoded instruction.
391 OP receives the 'op' field from the decoded instruction.
392 RN receives the 'rn' field from the decoded instruction.
393 OFFSET receives the 'imm19' field from the decoded instruction.
395 Return 1 if the opcodes matches and is decoded, otherwise 0. */
398 decode_cb (CORE_ADDR addr
,
399 uint32_t insn
, int *is64
, unsigned *op
, unsigned *rn
,
402 if (decode_masked_match (insn
, 0x7e000000, 0x34000000))
404 /* cbz T011 010o iiii iiii iiii iiii iiir rrrr */
405 /* cbnz T011 010o iiii iiii iiii iiii iiir rrrr */
407 *rn
= (insn
>> 0) & 0x1f;
408 *is64
= (insn
>> 31) & 0x1;
409 *op
= (insn
>> 24) & 0x1;
410 *offset
= extract_signed_bitfield (insn
, 19, 5) << 2;
413 fprintf_unfiltered (gdb_stdlog
,
414 "decode: 0x%s 0x%x %s 0x%s\n",
415 core_addr_to_string_nz (addr
), insn
,
416 *op
? "cbnz" : "cbz",
417 core_addr_to_string_nz (addr
+ *offset
));
423 /* Decode an opcode if it represents a ERET instruction.
425 ADDR specifies the address of the opcode.
426 INSN specifies the opcode to test.
428 Return 1 if the opcodes matches and is decoded, otherwise 0. */
431 decode_eret (CORE_ADDR addr
, uint32_t insn
)
433 /* eret 1101 0110 1001 1111 0000 0011 1110 0000 */
434 if (insn
== 0xd69f03e0)
437 fprintf_unfiltered (gdb_stdlog
, "decode: 0x%s 0x%x eret\n",
438 core_addr_to_string_nz (addr
), insn
);
444 /* Decode an opcode if it represents a MOVZ instruction.
446 ADDR specifies the address of the opcode.
447 INSN specifies the opcode to test.
448 RD receives the 'rd' field from the decoded instruction.
450 Return 1 if the opcodes matches and is decoded, otherwise 0. */
453 decode_movz (CORE_ADDR addr
, uint32_t insn
, unsigned *rd
)
455 if (decode_masked_match (insn
, 0xff800000, 0x52800000))
457 *rd
= (insn
>> 0) & 0x1f;
460 fprintf_unfiltered (gdb_stdlog
,
461 "decode: 0x%s 0x%x movz x%u, #?\n",
462 core_addr_to_string_nz (addr
), insn
, *rd
);
468 /* Decode an opcode if it represents a ORR (shifted register)
471 ADDR specifies the address of the opcode.
472 INSN specifies the opcode to test.
473 RD receives the 'rd' field from the decoded instruction.
474 RN receives the 'rn' field from the decoded instruction.
475 RM receives the 'rm' field from the decoded instruction.
476 IMM receives the 'imm6' field from the decoded instruction.
478 Return 1 if the opcodes matches and is decoded, otherwise 0. */
481 decode_orr_shifted_register_x (CORE_ADDR addr
,
482 uint32_t insn
, unsigned *rd
, unsigned *rn
,
483 unsigned *rm
, int32_t *imm
)
485 if (decode_masked_match (insn
, 0xff200000, 0xaa000000))
487 *rd
= (insn
>> 0) & 0x1f;
488 *rn
= (insn
>> 5) & 0x1f;
489 *rm
= (insn
>> 16) & 0x1f;
490 *imm
= (insn
>> 10) & 0x3f;
493 fprintf_unfiltered (gdb_stdlog
,
494 "decode: 0x%s 0x%x orr x%u, x%u, x%u, #%u\n",
495 core_addr_to_string_nz (addr
), insn
, *rd
,
502 /* Decode an opcode if it represents a RET instruction.
504 ADDR specifies the address of the opcode.
505 INSN specifies the opcode to test.
506 RN receives the 'rn' field from the decoded instruction.
508 Return 1 if the opcodes matches and is decoded, otherwise 0. */
511 decode_ret (CORE_ADDR addr
, uint32_t insn
, unsigned *rn
)
513 if (decode_masked_match (insn
, 0xfffffc1f, 0xd65f0000))
515 *rn
= (insn
>> 5) & 0x1f;
517 fprintf_unfiltered (gdb_stdlog
,
518 "decode: 0x%s 0x%x ret x%u\n",
519 core_addr_to_string_nz (addr
), insn
, *rn
);
525 /* Decode an opcode if it represents the following instruction:
526 STP rt, rt2, [rn, #imm]
528 ADDR specifies the address of the opcode.
529 INSN specifies the opcode to test.
530 RT1 receives the 'rt' field from the decoded instruction.
531 RT2 receives the 'rt2' field from the decoded instruction.
532 RN receives the 'rn' field from the decoded instruction.
533 IMM receives the 'imm' field from the decoded instruction.
535 Return 1 if the opcodes matches and is decoded, otherwise 0. */
538 decode_stp_offset (CORE_ADDR addr
,
540 unsigned *rt1
, unsigned *rt2
, unsigned *rn
, int32_t *imm
)
542 if (decode_masked_match (insn
, 0xffc00000, 0xa9000000))
544 *rt1
= (insn
>> 0) & 0x1f;
545 *rn
= (insn
>> 5) & 0x1f;
546 *rt2
= (insn
>> 10) & 0x1f;
547 *imm
= extract_signed_bitfield (insn
, 7, 15);
551 fprintf_unfiltered (gdb_stdlog
,
552 "decode: 0x%s 0x%x stp x%u, x%u, [x%u + #%d]\n",
553 core_addr_to_string_nz (addr
), insn
,
554 *rt1
, *rt2
, *rn
, *imm
);
560 /* Decode an opcode if it represents the following instruction:
561 STP rt, rt2, [rn, #imm]!
563 ADDR specifies the address of the opcode.
564 INSN specifies the opcode to test.
565 RT1 receives the 'rt' field from the decoded instruction.
566 RT2 receives the 'rt2' field from the decoded instruction.
567 RN receives the 'rn' field from the decoded instruction.
568 IMM receives the 'imm' field from the decoded instruction.
570 Return 1 if the opcodes matches and is decoded, otherwise 0. */
573 decode_stp_offset_wb (CORE_ADDR addr
,
575 unsigned *rt1
, unsigned *rt2
, unsigned *rn
,
578 if (decode_masked_match (insn
, 0xffc00000, 0xa9800000))
580 *rt1
= (insn
>> 0) & 0x1f;
581 *rn
= (insn
>> 5) & 0x1f;
582 *rt2
= (insn
>> 10) & 0x1f;
583 *imm
= extract_signed_bitfield (insn
, 7, 15);
587 fprintf_unfiltered (gdb_stdlog
,
588 "decode: 0x%s 0x%x stp x%u, x%u, [x%u + #%d]!\n",
589 core_addr_to_string_nz (addr
), insn
,
590 *rt1
, *rt2
, *rn
, *imm
);
596 /* Decode an opcode if it represents the following instruction:
599 ADDR specifies the address of the opcode.
600 INSN specifies the opcode to test.
601 IS64 receives size field from the decoded instruction.
602 RT receives the 'rt' field from the decoded instruction.
603 RN receives the 'rn' field from the decoded instruction.
604 IMM receives the 'imm' field from the decoded instruction.
606 Return 1 if the opcodes matches and is decoded, otherwise 0. */
609 decode_stur (CORE_ADDR addr
, uint32_t insn
, int *is64
, unsigned *rt
,
610 unsigned *rn
, int32_t *imm
)
612 if (decode_masked_match (insn
, 0xbfe00c00, 0xb8000000))
614 *is64
= (insn
>> 30) & 1;
615 *rt
= (insn
>> 0) & 0x1f;
616 *rn
= (insn
>> 5) & 0x1f;
617 *imm
= extract_signed_bitfield (insn
, 9, 12);
620 fprintf_unfiltered (gdb_stdlog
,
621 "decode: 0x%s 0x%x stur %c%u, [x%u + #%d]\n",
622 core_addr_to_string_nz (addr
), insn
,
623 *is64
? 'x' : 'w', *rt
, *rn
, *imm
);
629 /* Decode an opcode if it represents a TB or TBNZ instruction.
631 ADDR specifies the address of the opcode.
632 INSN specifies the opcode to test.
633 OP receives the 'op' field from the decoded instruction.
634 BIT receives the bit position field from the decoded instruction.
635 RT receives 'rt' field from the decoded instruction.
636 IMM receives 'imm' field from the decoded instruction.
638 Return 1 if the opcodes matches and is decoded, otherwise 0. */
641 decode_tb (CORE_ADDR addr
,
642 uint32_t insn
, unsigned *op
, unsigned *bit
, unsigned *rt
,
645 if (decode_masked_match (insn
, 0x7e000000, 0x36000000))
647 /* tbz b011 0110 bbbb biii iiii iiii iiir rrrr */
648 /* tbnz B011 0111 bbbb biii iiii iiii iiir rrrr */
650 *rt
= (insn
>> 0) & 0x1f;
651 *op
= insn
& (1 << 24);
652 *bit
= ((insn
>> (31 - 4)) & 0x20) | ((insn
>> 19) & 0x1f);
653 *imm
= extract_signed_bitfield (insn
, 14, 5) << 2;
656 fprintf_unfiltered (gdb_stdlog
,
657 "decode: 0x%s 0x%x %s x%u, #%u, 0x%s\n",
658 core_addr_to_string_nz (addr
), insn
,
659 *op
? "tbnz" : "tbz", *rt
, *bit
,
660 core_addr_to_string_nz (addr
+ *imm
));
666 /* Analyze a prologue, looking for a recognizable stack frame
667 and frame pointer. Scan until we encounter a store that could
668 clobber the stack frame unexpectedly, or an unknown instruction. */
671 aarch64_analyze_prologue (struct gdbarch
*gdbarch
,
672 CORE_ADDR start
, CORE_ADDR limit
,
673 struct aarch64_prologue_cache
*cache
)
675 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
677 pv_t regs
[AARCH64_X_REGISTER_COUNT
];
678 struct pv_area
*stack
;
679 struct cleanup
*back_to
;
681 for (i
= 0; i
< AARCH64_X_REGISTER_COUNT
; i
++)
682 regs
[i
] = pv_register (i
, 0);
683 stack
= make_pv_area (AARCH64_SP_REGNUM
, gdbarch_addr_bit (gdbarch
));
684 back_to
= make_cleanup_free_pv_area (stack
);
686 for (; start
< limit
; start
+= 4)
704 insn
= read_memory_unsigned_integer (start
, 4, byte_order_for_code
);
706 if (decode_add_sub_imm (start
, insn
, &rd
, &rn
, &imm
))
707 regs
[rd
] = pv_add_constant (regs
[rn
], imm
);
708 else if (decode_adrp (start
, insn
, &rd
))
709 regs
[rd
] = pv_unknown ();
710 else if (decode_b (start
, insn
, &is_link
, &offset
))
712 /* Stop analysis on branch. */
715 else if (decode_bcond (start
, insn
, &cond
, &offset
))
717 /* Stop analysis on branch. */
720 else if (decode_br (start
, insn
, &is_link
, &rn
))
722 /* Stop analysis on branch. */
725 else if (decode_cb (start
, insn
, &is64
, &op
, &rn
, &offset
))
727 /* Stop analysis on branch. */
730 else if (decode_eret (start
, insn
))
732 /* Stop analysis on branch. */
735 else if (decode_movz (start
, insn
, &rd
))
736 regs
[rd
] = pv_unknown ();
738 if (decode_orr_shifted_register_x (start
, insn
, &rd
, &rn
, &rm
, &imm
))
740 if (imm
== 0 && rn
== 31)
747 "aarch64: prologue analysis gave up addr=0x%s "
748 "opcode=0x%x (orr x register)\n",
749 core_addr_to_string_nz (start
),
754 else if (decode_ret (start
, insn
, &rn
))
756 /* Stop analysis on branch. */
759 else if (decode_stur (start
, insn
, &is64
, &rt
, &rn
, &offset
))
761 pv_area_store (stack
, pv_add_constant (regs
[rn
], offset
),
762 is64
? 8 : 4, regs
[rt
]);
764 else if (decode_stp_offset (start
, insn
, &rt1
, &rt2
, &rn
, &imm
))
766 /* If recording this store would invalidate the store area
767 (perhaps because rn is not known) then we should abandon
768 further prologue analysis. */
769 if (pv_area_store_would_trash (stack
,
770 pv_add_constant (regs
[rn
], imm
)))
773 if (pv_area_store_would_trash (stack
,
774 pv_add_constant (regs
[rn
], imm
+ 8)))
777 pv_area_store (stack
, pv_add_constant (regs
[rn
], imm
), 8,
779 pv_area_store (stack
, pv_add_constant (regs
[rn
], imm
+ 8), 8,
782 else if (decode_stp_offset_wb (start
, insn
, &rt1
, &rt2
, &rn
, &imm
))
784 /* If recording this store would invalidate the store area
785 (perhaps because rn is not known) then we should abandon
786 further prologue analysis. */
787 if (pv_area_store_would_trash (stack
,
788 pv_add_constant (regs
[rn
], imm
)))
791 if (pv_area_store_would_trash (stack
,
792 pv_add_constant (regs
[rn
], imm
+ 8)))
795 pv_area_store (stack
, pv_add_constant (regs
[rn
], imm
), 8,
797 pv_area_store (stack
, pv_add_constant (regs
[rn
], imm
+ 8), 8,
799 regs
[rn
] = pv_add_constant (regs
[rn
], imm
);
801 else if (decode_tb (start
, insn
, &op
, &bit
, &rn
, &offset
))
803 /* Stop analysis on branch. */
809 fprintf_unfiltered (gdb_stdlog
,
810 "aarch64: prologue analysis gave up addr=0x%s"
812 core_addr_to_string_nz (start
), insn
);
819 do_cleanups (back_to
);
823 if (pv_is_register (regs
[AARCH64_FP_REGNUM
], AARCH64_SP_REGNUM
))
825 /* Frame pointer is fp. Frame size is constant. */
826 cache
->framereg
= AARCH64_FP_REGNUM
;
827 cache
->framesize
= -regs
[AARCH64_FP_REGNUM
].k
;
829 else if (pv_is_register (regs
[AARCH64_SP_REGNUM
], AARCH64_SP_REGNUM
))
831 /* Try the stack pointer. */
832 cache
->framesize
= -regs
[AARCH64_SP_REGNUM
].k
;
833 cache
->framereg
= AARCH64_SP_REGNUM
;
837 /* We're just out of luck. We don't know where the frame is. */
838 cache
->framereg
= -1;
839 cache
->framesize
= 0;
842 for (i
= 0; i
< AARCH64_X_REGISTER_COUNT
; i
++)
846 if (pv_area_find_reg (stack
, gdbarch
, i
, &offset
))
847 cache
->saved_regs
[i
].addr
= offset
;
850 do_cleanups (back_to
);
854 /* Implement the "skip_prologue" gdbarch method. */
857 aarch64_skip_prologue (struct gdbarch
*gdbarch
, CORE_ADDR pc
)
861 CORE_ADDR func_addr
, limit_pc
;
862 struct symtab_and_line sal
;
864 /* See if we can determine the end of the prologue via the symbol
865 table. If so, then return either PC, or the PC after the
866 prologue, whichever is greater. */
867 if (find_pc_partial_function (pc
, NULL
, &func_addr
, NULL
))
869 CORE_ADDR post_prologue_pc
870 = skip_prologue_using_sal (gdbarch
, func_addr
);
872 if (post_prologue_pc
!= 0)
873 return max (pc
, post_prologue_pc
);
876 /* Can't determine prologue from the symbol table, need to examine
879 /* Find an upper limit on the function prologue using the debug
880 information. If the debug information could not be used to
881 provide that bound, then use an arbitrary large number as the
883 limit_pc
= skip_prologue_using_sal (gdbarch
, pc
);
885 limit_pc
= pc
+ 128; /* Magic. */
887 /* Try disassembling prologue. */
888 return aarch64_analyze_prologue (gdbarch
, pc
, limit_pc
, NULL
);
891 /* Scan the function prologue for THIS_FRAME and populate the prologue
895 aarch64_scan_prologue (struct frame_info
*this_frame
,
896 struct aarch64_prologue_cache
*cache
)
898 CORE_ADDR block_addr
= get_frame_address_in_block (this_frame
);
899 CORE_ADDR prologue_start
;
900 CORE_ADDR prologue_end
;
901 CORE_ADDR prev_pc
= get_frame_pc (this_frame
);
902 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
904 cache
->prev_pc
= prev_pc
;
906 /* Assume we do not find a frame. */
907 cache
->framereg
= -1;
908 cache
->framesize
= 0;
910 if (find_pc_partial_function (block_addr
, NULL
, &prologue_start
,
913 struct symtab_and_line sal
= find_pc_line (prologue_start
, 0);
917 /* No line info so use the current PC. */
918 prologue_end
= prev_pc
;
920 else if (sal
.end
< prologue_end
)
922 /* The next line begins after the function end. */
923 prologue_end
= sal
.end
;
926 prologue_end
= min (prologue_end
, prev_pc
);
927 aarch64_analyze_prologue (gdbarch
, prologue_start
, prologue_end
, cache
);
934 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
936 frame_loc
= get_frame_register_unsigned (this_frame
, AARCH64_FP_REGNUM
);
940 cache
->framereg
= AARCH64_FP_REGNUM
;
941 cache
->framesize
= 16;
942 cache
->saved_regs
[29].addr
= 0;
943 cache
->saved_regs
[30].addr
= 8;
947 /* Fill in *CACHE with information about the prologue of *THIS_FRAME. This
948 function may throw an exception if the inferior's registers or memory is
952 aarch64_make_prologue_cache_1 (struct frame_info
*this_frame
,
953 struct aarch64_prologue_cache
*cache
)
955 CORE_ADDR unwound_fp
;
958 aarch64_scan_prologue (this_frame
, cache
);
960 if (cache
->framereg
== -1)
963 unwound_fp
= get_frame_register_unsigned (this_frame
, cache
->framereg
);
967 cache
->prev_sp
= unwound_fp
+ cache
->framesize
;
969 /* Calculate actual addresses of saved registers using offsets
970 determined by aarch64_analyze_prologue. */
971 for (reg
= 0; reg
< gdbarch_num_regs (get_frame_arch (this_frame
)); reg
++)
972 if (trad_frame_addr_p (cache
->saved_regs
, reg
))
973 cache
->saved_regs
[reg
].addr
+= cache
->prev_sp
;
975 cache
->func
= get_frame_func (this_frame
);
977 cache
->available_p
= 1;
980 /* Allocate and fill in *THIS_CACHE with information about the prologue of
981 *THIS_FRAME. Do not do this is if *THIS_CACHE was already allocated.
982 Return a pointer to the current aarch64_prologue_cache in
985 static struct aarch64_prologue_cache
*
986 aarch64_make_prologue_cache (struct frame_info
*this_frame
, void **this_cache
)
988 struct aarch64_prologue_cache
*cache
;
990 if (*this_cache
!= NULL
)
993 cache
= FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache
);
994 cache
->saved_regs
= trad_frame_alloc_saved_regs (this_frame
);
999 aarch64_make_prologue_cache_1 (this_frame
, cache
);
1001 CATCH (ex
, RETURN_MASK_ERROR
)
1003 if (ex
.error
!= NOT_AVAILABLE_ERROR
)
1004 throw_exception (ex
);
1011 /* Implement the "stop_reason" frame_unwind method. */
1013 static enum unwind_stop_reason
1014 aarch64_prologue_frame_unwind_stop_reason (struct frame_info
*this_frame
,
1017 struct aarch64_prologue_cache
*cache
1018 = aarch64_make_prologue_cache (this_frame
, this_cache
);
1020 if (!cache
->available_p
)
1021 return UNWIND_UNAVAILABLE
;
1023 /* Halt the backtrace at "_start". */
1024 if (cache
->prev_pc
<= gdbarch_tdep (get_frame_arch (this_frame
))->lowest_pc
)
1025 return UNWIND_OUTERMOST
;
1027 /* We've hit a wall, stop. */
1028 if (cache
->prev_sp
== 0)
1029 return UNWIND_OUTERMOST
;
1031 return UNWIND_NO_REASON
;
1034 /* Our frame ID for a normal frame is the current function's starting
1035 PC and the caller's SP when we were called. */
1038 aarch64_prologue_this_id (struct frame_info
*this_frame
,
1039 void **this_cache
, struct frame_id
*this_id
)
1041 struct aarch64_prologue_cache
*cache
1042 = aarch64_make_prologue_cache (this_frame
, this_cache
);
1044 if (!cache
->available_p
)
1045 *this_id
= frame_id_build_unavailable_stack (cache
->func
);
1047 *this_id
= frame_id_build (cache
->prev_sp
, cache
->func
);
1050 /* Implement the "prev_register" frame_unwind method. */
1052 static struct value
*
1053 aarch64_prologue_prev_register (struct frame_info
*this_frame
,
1054 void **this_cache
, int prev_regnum
)
1056 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
1057 struct aarch64_prologue_cache
*cache
1058 = aarch64_make_prologue_cache (this_frame
, this_cache
);
1060 /* If we are asked to unwind the PC, then we need to return the LR
1061 instead. The prologue may save PC, but it will point into this
1062 frame's prologue, not the next frame's resume location. */
1063 if (prev_regnum
== AARCH64_PC_REGNUM
)
1067 lr
= frame_unwind_register_unsigned (this_frame
, AARCH64_LR_REGNUM
);
1068 return frame_unwind_got_constant (this_frame
, prev_regnum
, lr
);
1071 /* SP is generally not saved to the stack, but this frame is
1072 identified by the next frame's stack pointer at the time of the
1073 call. The value was already reconstructed into PREV_SP. */
1079 | | | <- Previous SP
1082 +--| saved fp |<- FP
1086 if (prev_regnum
== AARCH64_SP_REGNUM
)
1087 return frame_unwind_got_constant (this_frame
, prev_regnum
,
1090 return trad_frame_get_prev_register (this_frame
, cache
->saved_regs
,
1094 /* AArch64 prologue unwinder. */
1095 struct frame_unwind aarch64_prologue_unwind
=
1098 aarch64_prologue_frame_unwind_stop_reason
,
1099 aarch64_prologue_this_id
,
1100 aarch64_prologue_prev_register
,
1102 default_frame_sniffer
1105 /* Allocate and fill in *THIS_CACHE with information about the prologue of
1106 *THIS_FRAME. Do not do this is if *THIS_CACHE was already allocated.
1107 Return a pointer to the current aarch64_prologue_cache in
1110 static struct aarch64_prologue_cache
*
1111 aarch64_make_stub_cache (struct frame_info
*this_frame
, void **this_cache
)
1113 struct aarch64_prologue_cache
*cache
;
1115 if (*this_cache
!= NULL
)
1118 cache
= FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache
);
1119 cache
->saved_regs
= trad_frame_alloc_saved_regs (this_frame
);
1120 *this_cache
= cache
;
1123 = get_frame_register_unsigned (this_frame
, AARCH64_SP_REGNUM
);
1124 cache
->prev_pc
= get_frame_pc (this_frame
);
1129 /* Our frame ID for a stub frame is the current SP and LR. */
1132 aarch64_stub_this_id (struct frame_info
*this_frame
,
1133 void **this_cache
, struct frame_id
*this_id
)
1135 struct aarch64_prologue_cache
*cache
1136 = aarch64_make_stub_cache (this_frame
, this_cache
);
1138 *this_id
= frame_id_build (cache
->prev_sp
, cache
->prev_pc
);
1141 /* Implement the "sniffer" frame_unwind method. */
1144 aarch64_stub_unwind_sniffer (const struct frame_unwind
*self
,
1145 struct frame_info
*this_frame
,
1146 void **this_prologue_cache
)
1148 CORE_ADDR addr_in_block
;
1151 addr_in_block
= get_frame_address_in_block (this_frame
);
1152 if (in_plt_section (addr_in_block
)
1153 /* We also use the stub winder if the target memory is unreadable
1154 to avoid having the prologue unwinder trying to read it. */
1155 || target_read_memory (get_frame_pc (this_frame
), dummy
, 4) != 0)
1161 /* AArch64 stub unwinder. */
1162 struct frame_unwind aarch64_stub_unwind
=
1165 default_frame_unwind_stop_reason
,
1166 aarch64_stub_this_id
,
1167 aarch64_prologue_prev_register
,
1169 aarch64_stub_unwind_sniffer
1172 /* Return the frame base address of *THIS_FRAME. */
1175 aarch64_normal_frame_base (struct frame_info
*this_frame
, void **this_cache
)
1177 struct aarch64_prologue_cache
*cache
1178 = aarch64_make_prologue_cache (this_frame
, this_cache
);
1180 return cache
->prev_sp
- cache
->framesize
;
1183 /* AArch64 default frame base information. */
1184 struct frame_base aarch64_normal_base
=
1186 &aarch64_prologue_unwind
,
1187 aarch64_normal_frame_base
,
1188 aarch64_normal_frame_base
,
1189 aarch64_normal_frame_base
1192 /* Assuming THIS_FRAME is a dummy, return the frame ID of that
1193 dummy frame. The frame ID's base needs to match the TOS value
1194 saved by save_dummy_frame_tos () and returned from
1195 aarch64_push_dummy_call, and the PC needs to match the dummy
1196 frame's breakpoint. */
1198 static struct frame_id
1199 aarch64_dummy_id (struct gdbarch
*gdbarch
, struct frame_info
*this_frame
)
1201 return frame_id_build (get_frame_register_unsigned (this_frame
,
1203 get_frame_pc (this_frame
));
1206 /* Implement the "unwind_pc" gdbarch method. */
1209 aarch64_unwind_pc (struct gdbarch
*gdbarch
, struct frame_info
*this_frame
)
1212 = frame_unwind_register_unsigned (this_frame
, AARCH64_PC_REGNUM
);
1217 /* Implement the "unwind_sp" gdbarch method. */
1220 aarch64_unwind_sp (struct gdbarch
*gdbarch
, struct frame_info
*this_frame
)
1222 return frame_unwind_register_unsigned (this_frame
, AARCH64_SP_REGNUM
);
1225 /* Return the value of the REGNUM register in the previous frame of
1228 static struct value
*
1229 aarch64_dwarf2_prev_register (struct frame_info
*this_frame
,
1230 void **this_cache
, int regnum
)
1232 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
1237 case AARCH64_PC_REGNUM
:
1238 lr
= frame_unwind_register_unsigned (this_frame
, AARCH64_LR_REGNUM
);
1239 return frame_unwind_got_constant (this_frame
, regnum
, lr
);
1242 internal_error (__FILE__
, __LINE__
,
1243 _("Unexpected register %d"), regnum
);
1247 /* Implement the "init_reg" dwarf2_frame_ops method. */
1250 aarch64_dwarf2_frame_init_reg (struct gdbarch
*gdbarch
, int regnum
,
1251 struct dwarf2_frame_state_reg
*reg
,
1252 struct frame_info
*this_frame
)
1256 case AARCH64_PC_REGNUM
:
1257 reg
->how
= DWARF2_FRAME_REG_FN
;
1258 reg
->loc
.fn
= aarch64_dwarf2_prev_register
;
1260 case AARCH64_SP_REGNUM
:
1261 reg
->how
= DWARF2_FRAME_REG_CFA
;
/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

typedef struct
{
  /* Value to pass on stack.  */
  const void *data;

  /* Size in bytes of value to pass on stack.  */
  int len;
} stack_item_t;

DEF_VEC_O (stack_item_t);
1280 /* Return the alignment (in bytes) of the given type. */
1283 aarch64_type_align (struct type
*t
)
1289 t
= check_typedef (t
);
1290 switch (TYPE_CODE (t
))
1293 /* Should never happen. */
1294 internal_error (__FILE__
, __LINE__
, _("unknown type alignment"));
1298 case TYPE_CODE_ENUM
:
1302 case TYPE_CODE_RANGE
:
1303 case TYPE_CODE_BITSTRING
:
1305 case TYPE_CODE_CHAR
:
1306 case TYPE_CODE_BOOL
:
1307 return TYPE_LENGTH (t
);
1309 case TYPE_CODE_ARRAY
:
1310 case TYPE_CODE_COMPLEX
:
1311 return aarch64_type_align (TYPE_TARGET_TYPE (t
));
1313 case TYPE_CODE_STRUCT
:
1314 case TYPE_CODE_UNION
:
1316 for (n
= 0; n
< TYPE_NFIELDS (t
); n
++)
1318 falign
= aarch64_type_align (TYPE_FIELD_TYPE (t
, n
));
1326 /* Return 1 if *TY is a homogeneous floating-point aggregate as
1327 defined in the AAPCS64 ABI document; otherwise return 0. */
1330 is_hfa (struct type
*ty
)
1332 switch (TYPE_CODE (ty
))
1334 case TYPE_CODE_ARRAY
:
1336 struct type
*target_ty
= TYPE_TARGET_TYPE (ty
);
1337 if (TYPE_CODE (target_ty
) == TYPE_CODE_FLT
&& TYPE_LENGTH (ty
) <= 4)
1342 case TYPE_CODE_UNION
:
1343 case TYPE_CODE_STRUCT
:
1345 if (TYPE_NFIELDS (ty
) > 0 && TYPE_NFIELDS (ty
) <= 4)
1347 struct type
*member0_type
;
1349 member0_type
= check_typedef (TYPE_FIELD_TYPE (ty
, 0));
1350 if (TYPE_CODE (member0_type
) == TYPE_CODE_FLT
)
1354 for (i
= 0; i
< TYPE_NFIELDS (ty
); i
++)
1356 struct type
*member1_type
;
1358 member1_type
= check_typedef (TYPE_FIELD_TYPE (ty
, i
));
1359 if (TYPE_CODE (member0_type
) != TYPE_CODE (member1_type
)
1360 || (TYPE_LENGTH (member0_type
)
1361 != TYPE_LENGTH (member1_type
)))
1377 /* AArch64 function call information structure. */
1378 struct aarch64_call_info
1380 /* the current argument number. */
1383 /* The next general purpose register number, equivalent to NGRN as
1384 described in the AArch64 Procedure Call Standard. */
1387 /* The next SIMD and floating point register number, equivalent to
1388 NSRN as described in the AArch64 Procedure Call Standard. */
1391 /* The next stacked argument address, equivalent to NSAA as
1392 described in the AArch64 Procedure Call Standard. */
1395 /* Stack item vector. */
1396 VEC(stack_item_t
) *si
;
1399 /* Pass a value in a sequence of consecutive X registers. The caller
1400 is responsbile for ensuring sufficient registers are available. */
1403 pass_in_x (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
1404 struct aarch64_call_info
*info
, struct type
*type
,
1405 const bfd_byte
*buf
)
1407 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1408 int len
= TYPE_LENGTH (type
);
1409 enum type_code typecode
= TYPE_CODE (type
);
1410 int regnum
= AARCH64_X0_REGNUM
+ info
->ngrn
;
1416 int partial_len
= len
< X_REGISTER_SIZE
? len
: X_REGISTER_SIZE
;
1417 CORE_ADDR regval
= extract_unsigned_integer (buf
, partial_len
,
1421 /* Adjust sub-word struct/union args when big-endian. */
1422 if (byte_order
== BFD_ENDIAN_BIG
1423 && partial_len
< X_REGISTER_SIZE
1424 && (typecode
== TYPE_CODE_STRUCT
|| typecode
== TYPE_CODE_UNION
))
1425 regval
<<= ((X_REGISTER_SIZE
- partial_len
) * TARGET_CHAR_BIT
);
1428 fprintf_unfiltered (gdb_stdlog
, "arg %d in %s = 0x%s\n",
1430 gdbarch_register_name (gdbarch
, regnum
),
1431 phex (regval
, X_REGISTER_SIZE
));
1432 regcache_cooked_write_unsigned (regcache
, regnum
, regval
);
1439 /* Attempt to marshall a value in a V register. Return 1 if
1440 successful, or 0 if insufficient registers are available. This
1441 function, unlike the equivalent pass_in_x() function does not
1442 handle arguments spread across multiple registers. */
1445 pass_in_v (struct gdbarch
*gdbarch
,
1446 struct regcache
*regcache
,
1447 struct aarch64_call_info
*info
,
1448 const bfd_byte
*buf
)
1452 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1453 int regnum
= AARCH64_V0_REGNUM
+ info
->nsrn
;
1458 regcache_cooked_write (regcache
, regnum
, buf
);
1460 fprintf_unfiltered (gdb_stdlog
, "arg %d in %s\n",
1462 gdbarch_register_name (gdbarch
, regnum
));
1469 /* Marshall an argument onto the stack. */
1472 pass_on_stack (struct aarch64_call_info
*info
, struct type
*type
,
1473 const bfd_byte
*buf
)
1475 int len
= TYPE_LENGTH (type
);
1481 align
= aarch64_type_align (type
);
1483 /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
1484 Natural alignment of the argument's type. */
1485 align
= align_up (align
, 8);
1487 /* The AArch64 PCS requires at most doubleword alignment. */
1492 fprintf_unfiltered (gdb_stdlog
, "arg %d len=%d @ sp + %d\n",
1493 info
->argnum
, len
, info
->nsaa
);
1497 VEC_safe_push (stack_item_t
, info
->si
, &item
);
1500 if (info
->nsaa
& (align
- 1))
1502 /* Push stack alignment padding. */
1503 int pad
= align
- (info
->nsaa
& (align
- 1));
1508 VEC_safe_push (stack_item_t
, info
->si
, &item
);
1513 /* Marshall an argument into a sequence of one or more consecutive X
1514 registers or, if insufficient X registers are available then onto
1518 pass_in_x_or_stack (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
1519 struct aarch64_call_info
*info
, struct type
*type
,
1520 const bfd_byte
*buf
)
1522 int len
= TYPE_LENGTH (type
);
1523 int nregs
= (len
+ X_REGISTER_SIZE
- 1) / X_REGISTER_SIZE
;
1525 /* PCS C.13 - Pass in registers if we have enough spare */
1526 if (info
->ngrn
+ nregs
<= 8)
1528 pass_in_x (gdbarch
, regcache
, info
, type
, buf
);
1529 info
->ngrn
+= nregs
;
1534 pass_on_stack (info
, type
, buf
);
1538 /* Pass a value in a V register, or on the stack if insufficient are
1542 pass_in_v_or_stack (struct gdbarch
*gdbarch
,
1543 struct regcache
*regcache
,
1544 struct aarch64_call_info
*info
,
1546 const bfd_byte
*buf
)
1548 if (!pass_in_v (gdbarch
, regcache
, info
, buf
))
1549 pass_on_stack (info
, type
, buf
);
1552 /* Implement the "push_dummy_call" gdbarch method. */
1555 aarch64_push_dummy_call (struct gdbarch
*gdbarch
, struct value
*function
,
1556 struct regcache
*regcache
, CORE_ADDR bp_addr
,
1558 struct value
**args
, CORE_ADDR sp
, int struct_return
,
1559 CORE_ADDR struct_addr
)
1565 struct aarch64_call_info info
;
1566 struct type
*func_type
;
1567 struct type
*return_type
;
1568 int lang_struct_return
;
1570 memset (&info
, 0, sizeof (info
));
1572 /* We need to know what the type of the called function is in order
1573 to determine the number of named/anonymous arguments for the
1574 actual argument placement, and the return type in order to handle
1575 return value correctly.
1577 The generic code above us views the decision of return in memory
1578 or return in registers as a two stage processes. The language
1579 handler is consulted first and may decide to return in memory (eg
1580 class with copy constructor returned by value), this will cause
1581 the generic code to allocate space AND insert an initial leading
1584 If the language code does not decide to pass in memory then the
1585 target code is consulted.
1587 If the language code decides to pass in memory we want to move
1588 the pointer inserted as the initial argument from the argument
1589 list and into X8, the conventional AArch64 struct return pointer
1592 This is slightly awkward, ideally the flag "lang_struct_return"
1593 would be passed to the targets implementation of push_dummy_call.
1594 Rather that change the target interface we call the language code
1595 directly ourselves. */
1597 func_type
= check_typedef (value_type (function
));
1599 /* Dereference function pointer types. */
1600 if (TYPE_CODE (func_type
) == TYPE_CODE_PTR
)
1601 func_type
= TYPE_TARGET_TYPE (func_type
);
1603 gdb_assert (TYPE_CODE (func_type
) == TYPE_CODE_FUNC
1604 || TYPE_CODE (func_type
) == TYPE_CODE_METHOD
);
1606 /* If language_pass_by_reference () returned true we will have been
1607 given an additional initial argument, a hidden pointer to the
1608 return slot in memory. */
1609 return_type
= TYPE_TARGET_TYPE (func_type
);
1610 lang_struct_return
= language_pass_by_reference (return_type
);
1612 /* Set the return address. For the AArch64, the return breakpoint
1613 is always at BP_ADDR. */
1614 regcache_cooked_write_unsigned (regcache
, AARCH64_LR_REGNUM
, bp_addr
);
1616 /* If we were given an initial argument for the return slot because
1617 lang_struct_return was true, lose it. */
1618 if (lang_struct_return
)
1624 /* The struct_return pointer occupies X8. */
1625 if (struct_return
|| lang_struct_return
)
1628 fprintf_unfiltered (gdb_stdlog
, "struct return in %s = 0x%s\n",
1629 gdbarch_register_name
1631 AARCH64_STRUCT_RETURN_REGNUM
),
1632 paddress (gdbarch
, struct_addr
));
1633 regcache_cooked_write_unsigned (regcache
, AARCH64_STRUCT_RETURN_REGNUM
,
1637 for (argnum
= 0; argnum
< nargs
; argnum
++)
1639 struct value
*arg
= args
[argnum
];
1640 struct type
*arg_type
;
1643 arg_type
= check_typedef (value_type (arg
));
1644 len
= TYPE_LENGTH (arg_type
);
1646 switch (TYPE_CODE (arg_type
))
1649 case TYPE_CODE_BOOL
:
1650 case TYPE_CODE_CHAR
:
1651 case TYPE_CODE_RANGE
:
1652 case TYPE_CODE_ENUM
:
1655 /* Promote to 32 bit integer. */
1656 if (TYPE_UNSIGNED (arg_type
))
1657 arg_type
= builtin_type (gdbarch
)->builtin_uint32
;
1659 arg_type
= builtin_type (gdbarch
)->builtin_int32
;
1660 arg
= value_cast (arg_type
, arg
);
1662 pass_in_x_or_stack (gdbarch
, regcache
, &info
, arg_type
,
1663 value_contents (arg
));
1666 case TYPE_CODE_COMPLEX
:
1669 const bfd_byte
*buf
= value_contents (arg
);
1670 struct type
*target_type
=
1671 check_typedef (TYPE_TARGET_TYPE (arg_type
));
1673 pass_in_v (gdbarch
, regcache
, &info
, buf
);
1674 pass_in_v (gdbarch
, regcache
, &info
,
1675 buf
+ TYPE_LENGTH (target_type
));
1680 pass_on_stack (&info
, arg_type
, value_contents (arg
));
1684 pass_in_v_or_stack (gdbarch
, regcache
, &info
, arg_type
,
1685 value_contents (arg
));
1688 case TYPE_CODE_STRUCT
:
1689 case TYPE_CODE_ARRAY
:
1690 case TYPE_CODE_UNION
:
1691 if (is_hfa (arg_type
))
1693 int elements
= TYPE_NFIELDS (arg_type
);
1695 /* Homogeneous Aggregates */
1696 if (info
.nsrn
+ elements
< 8)
1700 for (i
= 0; i
< elements
; i
++)
1702 /* We know that we have sufficient registers
1703 available therefore this will never fallback
1705 struct value
*field
=
1706 value_primitive_field (arg
, 0, i
, arg_type
);
1707 struct type
*field_type
=
1708 check_typedef (value_type (field
));
1710 pass_in_v_or_stack (gdbarch
, regcache
, &info
, field_type
,
1711 value_contents_writeable (field
));
1717 pass_on_stack (&info
, arg_type
, value_contents (arg
));
1722 /* PCS B.7 Aggregates larger than 16 bytes are passed by
1723 invisible reference. */
1725 /* Allocate aligned storage. */
1726 sp
= align_down (sp
- len
, 16);
1728 /* Write the real data into the stack. */
1729 write_memory (sp
, value_contents (arg
), len
);
1731 /* Construct the indirection. */
1732 arg_type
= lookup_pointer_type (arg_type
);
1733 arg
= value_from_pointer (arg_type
, sp
);
1734 pass_in_x_or_stack (gdbarch
, regcache
, &info
, arg_type
,
1735 value_contents (arg
));
1738 /* PCS C.15 / C.18 multiple values pass. */
1739 pass_in_x_or_stack (gdbarch
, regcache
, &info
, arg_type
,
1740 value_contents (arg
));
1744 pass_in_x_or_stack (gdbarch
, regcache
, &info
, arg_type
,
1745 value_contents (arg
));
1750 /* Make sure stack retains 16 byte alignment. */
1752 sp
-= 16 - (info
.nsaa
& 15);
1754 while (!VEC_empty (stack_item_t
, info
.si
))
1756 stack_item_t
*si
= VEC_last (stack_item_t
, info
.si
);
1759 write_memory (sp
, si
->data
, si
->len
);
1760 VEC_pop (stack_item_t
, info
.si
);
1763 VEC_free (stack_item_t
, info
.si
);
1765 /* Finally, update the SP register. */
1766 regcache_cooked_write_unsigned (regcache
, AARCH64_SP_REGNUM
, sp
);
1771 /* Implement the "frame_align" gdbarch method. */
1774 aarch64_frame_align (struct gdbarch
*gdbarch
, CORE_ADDR sp
)
1776 /* Align the stack to sixteen bytes. */
1777 return sp
& ~(CORE_ADDR
) 15;
1780 /* Return the type for an AdvSISD Q register. */
1782 static struct type
*
1783 aarch64_vnq_type (struct gdbarch
*gdbarch
)
1785 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1787 if (tdep
->vnq_type
== NULL
)
1792 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnq",
1795 elem
= builtin_type (gdbarch
)->builtin_uint128
;
1796 append_composite_type_field (t
, "u", elem
);
1798 elem
= builtin_type (gdbarch
)->builtin_int128
;
1799 append_composite_type_field (t
, "s", elem
);
1804 return tdep
->vnq_type
;
1807 /* Return the type for an AdvSISD D register. */
1809 static struct type
*
1810 aarch64_vnd_type (struct gdbarch
*gdbarch
)
1812 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1814 if (tdep
->vnd_type
== NULL
)
1819 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnd",
1822 elem
= builtin_type (gdbarch
)->builtin_double
;
1823 append_composite_type_field (t
, "f", elem
);
1825 elem
= builtin_type (gdbarch
)->builtin_uint64
;
1826 append_composite_type_field (t
, "u", elem
);
1828 elem
= builtin_type (gdbarch
)->builtin_int64
;
1829 append_composite_type_field (t
, "s", elem
);
1834 return tdep
->vnd_type
;
1837 /* Return the type for an AdvSISD S register. */
1839 static struct type
*
1840 aarch64_vns_type (struct gdbarch
*gdbarch
)
1842 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1844 if (tdep
->vns_type
== NULL
)
1849 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vns",
1852 elem
= builtin_type (gdbarch
)->builtin_float
;
1853 append_composite_type_field (t
, "f", elem
);
1855 elem
= builtin_type (gdbarch
)->builtin_uint32
;
1856 append_composite_type_field (t
, "u", elem
);
1858 elem
= builtin_type (gdbarch
)->builtin_int32
;
1859 append_composite_type_field (t
, "s", elem
);
1864 return tdep
->vns_type
;
1867 /* Return the type for an AdvSISD H register. */
1869 static struct type
*
1870 aarch64_vnh_type (struct gdbarch
*gdbarch
)
1872 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1874 if (tdep
->vnh_type
== NULL
)
1879 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnh",
1882 elem
= builtin_type (gdbarch
)->builtin_uint16
;
1883 append_composite_type_field (t
, "u", elem
);
1885 elem
= builtin_type (gdbarch
)->builtin_int16
;
1886 append_composite_type_field (t
, "s", elem
);
1891 return tdep
->vnh_type
;
1894 /* Return the type for an AdvSISD B register. */
1896 static struct type
*
1897 aarch64_vnb_type (struct gdbarch
*gdbarch
)
1899 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1901 if (tdep
->vnb_type
== NULL
)
1906 t
= arch_composite_type (gdbarch
, "__gdb_builtin_type_vnb",
1909 elem
= builtin_type (gdbarch
)->builtin_uint8
;
1910 append_composite_type_field (t
, "u", elem
);
1912 elem
= builtin_type (gdbarch
)->builtin_int8
;
1913 append_composite_type_field (t
, "s", elem
);
1918 return tdep
->vnb_type
;
1921 /* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
1924 aarch64_dwarf_reg_to_regnum (struct gdbarch
*gdbarch
, int reg
)
1926 if (reg
>= AARCH64_DWARF_X0
&& reg
<= AARCH64_DWARF_X0
+ 30)
1927 return AARCH64_X0_REGNUM
+ reg
- AARCH64_DWARF_X0
;
1929 if (reg
== AARCH64_DWARF_SP
)
1930 return AARCH64_SP_REGNUM
;
1932 if (reg
>= AARCH64_DWARF_V0
&& reg
<= AARCH64_DWARF_V0
+ 31)
1933 return AARCH64_V0_REGNUM
+ reg
- AARCH64_DWARF_V0
;
1939 /* Implement the "print_insn" gdbarch method. */
1942 aarch64_gdb_print_insn (bfd_vma memaddr
, disassemble_info
*info
)
1944 info
->symbols
= NULL
;
1945 return print_insn_aarch64 (memaddr
, info
);
1948 /* AArch64 BRK software debug mode instruction.
1949 Note that AArch64 code is always little-endian.
1950 1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000. */
1951 static const gdb_byte aarch64_default_breakpoint
[] = {0x00, 0x00, 0x20, 0xd4};
1953 /* Implement the "breakpoint_from_pc" gdbarch method. */
1955 static const gdb_byte
*
1956 aarch64_breakpoint_from_pc (struct gdbarch
*gdbarch
, CORE_ADDR
*pcptr
,
1959 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1961 *lenptr
= sizeof (aarch64_default_breakpoint
);
1962 return aarch64_default_breakpoint
;
1965 /* Extract from an array REGS containing the (raw) register state a
1966 function return value of type TYPE, and copy that, in virtual
1967 format, into VALBUF. */
1970 aarch64_extract_return_value (struct type
*type
, struct regcache
*regs
,
1973 struct gdbarch
*gdbarch
= get_regcache_arch (regs
);
1974 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1976 if (TYPE_CODE (type
) == TYPE_CODE_FLT
)
1978 bfd_byte buf
[V_REGISTER_SIZE
];
1979 int len
= TYPE_LENGTH (type
);
1981 regcache_cooked_read (regs
, AARCH64_V0_REGNUM
, buf
);
1982 memcpy (valbuf
, buf
, len
);
1984 else if (TYPE_CODE (type
) == TYPE_CODE_INT
1985 || TYPE_CODE (type
) == TYPE_CODE_CHAR
1986 || TYPE_CODE (type
) == TYPE_CODE_BOOL
1987 || TYPE_CODE (type
) == TYPE_CODE_PTR
1988 || TYPE_CODE (type
) == TYPE_CODE_REF
1989 || TYPE_CODE (type
) == TYPE_CODE_ENUM
)
1991 /* If the the type is a plain integer, then the access is
1992 straight-forward. Otherwise we have to play around a bit
1994 int len
= TYPE_LENGTH (type
);
1995 int regno
= AARCH64_X0_REGNUM
;
2000 /* By using store_unsigned_integer we avoid having to do
2001 anything special for small big-endian values. */
2002 regcache_cooked_read_unsigned (regs
, regno
++, &tmp
);
2003 store_unsigned_integer (valbuf
,
2004 (len
> X_REGISTER_SIZE
2005 ? X_REGISTER_SIZE
: len
), byte_order
, tmp
);
2006 len
-= X_REGISTER_SIZE
;
2007 valbuf
+= X_REGISTER_SIZE
;
2010 else if (TYPE_CODE (type
) == TYPE_CODE_COMPLEX
)
2012 int regno
= AARCH64_V0_REGNUM
;
2013 bfd_byte buf
[V_REGISTER_SIZE
];
2014 struct type
*target_type
= check_typedef (TYPE_TARGET_TYPE (type
));
2015 int len
= TYPE_LENGTH (target_type
);
2017 regcache_cooked_read (regs
, regno
, buf
);
2018 memcpy (valbuf
, buf
, len
);
2020 regcache_cooked_read (regs
, regno
+ 1, buf
);
2021 memcpy (valbuf
, buf
, len
);
2024 else if (is_hfa (type
))
2026 int elements
= TYPE_NFIELDS (type
);
2027 struct type
*member_type
= check_typedef (TYPE_FIELD_TYPE (type
, 0));
2028 int len
= TYPE_LENGTH (member_type
);
2031 for (i
= 0; i
< elements
; i
++)
2033 int regno
= AARCH64_V0_REGNUM
+ i
;
2034 bfd_byte buf
[X_REGISTER_SIZE
];
2037 fprintf_unfiltered (gdb_stdlog
,
2038 "read HFA return value element %d from %s\n",
2040 gdbarch_register_name (gdbarch
, regno
));
2041 regcache_cooked_read (regs
, regno
, buf
);
2043 memcpy (valbuf
, buf
, len
);
2049 /* For a structure or union the behaviour is as if the value had
2050 been stored to word-aligned memory and then loaded into
2051 registers with 64-bit load instruction(s). */
2052 int len
= TYPE_LENGTH (type
);
2053 int regno
= AARCH64_X0_REGNUM
;
2054 bfd_byte buf
[X_REGISTER_SIZE
];
2058 regcache_cooked_read (regs
, regno
++, buf
);
2059 memcpy (valbuf
, buf
, len
> X_REGISTER_SIZE
? X_REGISTER_SIZE
: len
);
2060 len
-= X_REGISTER_SIZE
;
2061 valbuf
+= X_REGISTER_SIZE
;
/* Will a function return an aggregate type in memory or in a
   register?  Return 0 if an aggregate type can be returned in a
   register, 1 if it must be returned in memory.  */

static int
aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
{
  CHECK_TYPEDEF (type);

  /* In the AArch64 ABI, "integer" like aggregate types are returned
     in registers.  For an aggregate type to be integer like, its size
     must be less than or equal to 4 * X_REGISTER_SIZE.  */

  if (is_hfa (type))
    {
      /* PCS B.5 If the argument is a Named HFA, then the argument is
         used unmodified.  */
      return 0;
    }

  if (TYPE_LENGTH (type) > 16)
    {
      /* PCS B.6 Aggregates larger than 16 bytes are passed by
         invisible reference.  */

      return 1;
    }

  return 0;
}
2101 /* Write into appropriate registers a function return value of type
2102 TYPE, given in virtual format. */
2105 aarch64_store_return_value (struct type
*type
, struct regcache
*regs
,
2106 const gdb_byte
*valbuf
)
2108 struct gdbarch
*gdbarch
= get_regcache_arch (regs
);
2109 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
2111 if (TYPE_CODE (type
) == TYPE_CODE_FLT
)
2113 bfd_byte buf
[V_REGISTER_SIZE
];
2114 int len
= TYPE_LENGTH (type
);
2116 memcpy (buf
, valbuf
, len
> V_REGISTER_SIZE
? V_REGISTER_SIZE
: len
);
2117 regcache_cooked_write (regs
, AARCH64_V0_REGNUM
, buf
);
2119 else if (TYPE_CODE (type
) == TYPE_CODE_INT
2120 || TYPE_CODE (type
) == TYPE_CODE_CHAR
2121 || TYPE_CODE (type
) == TYPE_CODE_BOOL
2122 || TYPE_CODE (type
) == TYPE_CODE_PTR
2123 || TYPE_CODE (type
) == TYPE_CODE_REF
2124 || TYPE_CODE (type
) == TYPE_CODE_ENUM
)
2126 if (TYPE_LENGTH (type
) <= X_REGISTER_SIZE
)
2128 /* Values of one word or less are zero/sign-extended and
2130 bfd_byte tmpbuf
[X_REGISTER_SIZE
];
2131 LONGEST val
= unpack_long (type
, valbuf
);
2133 store_signed_integer (tmpbuf
, X_REGISTER_SIZE
, byte_order
, val
);
2134 regcache_cooked_write (regs
, AARCH64_X0_REGNUM
, tmpbuf
);
2138 /* Integral values greater than one word are stored in
2139 consecutive registers starting with r0. This will always
2140 be a multiple of the regiser size. */
2141 int len
= TYPE_LENGTH (type
);
2142 int regno
= AARCH64_X0_REGNUM
;
2146 regcache_cooked_write (regs
, regno
++, valbuf
);
2147 len
-= X_REGISTER_SIZE
;
2148 valbuf
+= X_REGISTER_SIZE
;
2152 else if (is_hfa (type
))
2154 int elements
= TYPE_NFIELDS (type
);
2155 struct type
*member_type
= check_typedef (TYPE_FIELD_TYPE (type
, 0));
2156 int len
= TYPE_LENGTH (member_type
);
2159 for (i
= 0; i
< elements
; i
++)
2161 int regno
= AARCH64_V0_REGNUM
+ i
;
2162 bfd_byte tmpbuf
[MAX_REGISTER_SIZE
];
2165 fprintf_unfiltered (gdb_stdlog
,
2166 "write HFA return value element %d to %s\n",
2168 gdbarch_register_name (gdbarch
, regno
));
2170 memcpy (tmpbuf
, valbuf
, len
);
2171 regcache_cooked_write (regs
, regno
, tmpbuf
);
2177 /* For a structure or union the behaviour is as if the value had
2178 been stored to word-aligned memory and then loaded into
2179 registers with 64-bit load instruction(s). */
2180 int len
= TYPE_LENGTH (type
);
2181 int regno
= AARCH64_X0_REGNUM
;
2182 bfd_byte tmpbuf
[X_REGISTER_SIZE
];
2186 memcpy (tmpbuf
, valbuf
,
2187 len
> X_REGISTER_SIZE
? X_REGISTER_SIZE
: len
);
2188 regcache_cooked_write (regs
, regno
++, tmpbuf
);
2189 len
-= X_REGISTER_SIZE
;
2190 valbuf
+= X_REGISTER_SIZE
;
2195 /* Implement the "return_value" gdbarch method. */
2197 static enum return_value_convention
2198 aarch64_return_value (struct gdbarch
*gdbarch
, struct value
*func_value
,
2199 struct type
*valtype
, struct regcache
*regcache
,
2200 gdb_byte
*readbuf
, const gdb_byte
*writebuf
)
2202 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2204 if (TYPE_CODE (valtype
) == TYPE_CODE_STRUCT
2205 || TYPE_CODE (valtype
) == TYPE_CODE_UNION
2206 || TYPE_CODE (valtype
) == TYPE_CODE_ARRAY
)
2208 if (aarch64_return_in_memory (gdbarch
, valtype
))
2211 fprintf_unfiltered (gdb_stdlog
, "return value in memory\n");
2212 return RETURN_VALUE_STRUCT_CONVENTION
;
2217 aarch64_store_return_value (valtype
, regcache
, writebuf
);
2220 aarch64_extract_return_value (valtype
, regcache
, readbuf
);
2223 fprintf_unfiltered (gdb_stdlog
, "return value in registers\n");
2225 return RETURN_VALUE_REGISTER_CONVENTION
;
2228 /* Implement the "get_longjmp_target" gdbarch method. */
2231 aarch64_get_longjmp_target (struct frame_info
*frame
, CORE_ADDR
*pc
)
2234 gdb_byte buf
[X_REGISTER_SIZE
];
2235 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
2236 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2237 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
2239 jb_addr
= get_frame_register_unsigned (frame
, AARCH64_X0_REGNUM
);
2241 if (target_read_memory (jb_addr
+ tdep
->jb_pc
* tdep
->jb_elt_size
, buf
,
2245 *pc
= extract_unsigned_integer (buf
, X_REGISTER_SIZE
, byte_order
);
2250 /* Return the pseudo register name corresponding to register regnum. */
2253 aarch64_pseudo_register_name (struct gdbarch
*gdbarch
, int regnum
)
2255 static const char *const q_name
[] =
2257 "q0", "q1", "q2", "q3",
2258 "q4", "q5", "q6", "q7",
2259 "q8", "q9", "q10", "q11",
2260 "q12", "q13", "q14", "q15",
2261 "q16", "q17", "q18", "q19",
2262 "q20", "q21", "q22", "q23",
2263 "q24", "q25", "q26", "q27",
2264 "q28", "q29", "q30", "q31",
2267 static const char *const d_name
[] =
2269 "d0", "d1", "d2", "d3",
2270 "d4", "d5", "d6", "d7",
2271 "d8", "d9", "d10", "d11",
2272 "d12", "d13", "d14", "d15",
2273 "d16", "d17", "d18", "d19",
2274 "d20", "d21", "d22", "d23",
2275 "d24", "d25", "d26", "d27",
2276 "d28", "d29", "d30", "d31",
2279 static const char *const s_name
[] =
2281 "s0", "s1", "s2", "s3",
2282 "s4", "s5", "s6", "s7",
2283 "s8", "s9", "s10", "s11",
2284 "s12", "s13", "s14", "s15",
2285 "s16", "s17", "s18", "s19",
2286 "s20", "s21", "s22", "s23",
2287 "s24", "s25", "s26", "s27",
2288 "s28", "s29", "s30", "s31",
2291 static const char *const h_name
[] =
2293 "h0", "h1", "h2", "h3",
2294 "h4", "h5", "h6", "h7",
2295 "h8", "h9", "h10", "h11",
2296 "h12", "h13", "h14", "h15",
2297 "h16", "h17", "h18", "h19",
2298 "h20", "h21", "h22", "h23",
2299 "h24", "h25", "h26", "h27",
2300 "h28", "h29", "h30", "h31",
2303 static const char *const b_name
[] =
2305 "b0", "b1", "b2", "b3",
2306 "b4", "b5", "b6", "b7",
2307 "b8", "b9", "b10", "b11",
2308 "b12", "b13", "b14", "b15",
2309 "b16", "b17", "b18", "b19",
2310 "b20", "b21", "b22", "b23",
2311 "b24", "b25", "b26", "b27",
2312 "b28", "b29", "b30", "b31",
2315 regnum
-= gdbarch_num_regs (gdbarch
);
2317 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
2318 return q_name
[regnum
- AARCH64_Q0_REGNUM
];
2320 if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
2321 return d_name
[regnum
- AARCH64_D0_REGNUM
];
2323 if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
2324 return s_name
[regnum
- AARCH64_S0_REGNUM
];
2326 if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
2327 return h_name
[regnum
- AARCH64_H0_REGNUM
];
2329 if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
2330 return b_name
[regnum
- AARCH64_B0_REGNUM
];
2332 internal_error (__FILE__
, __LINE__
,
2333 _("aarch64_pseudo_register_name: bad register number %d"),
2337 /* Implement the "pseudo_register_type" tdesc_arch_data method. */
2339 static struct type
*
2340 aarch64_pseudo_register_type (struct gdbarch
*gdbarch
, int regnum
)
2342 regnum
-= gdbarch_num_regs (gdbarch
);
2344 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
2345 return aarch64_vnq_type (gdbarch
);
2347 if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
2348 return aarch64_vnd_type (gdbarch
);
2350 if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
2351 return aarch64_vns_type (gdbarch
);
2353 if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
2354 return aarch64_vnh_type (gdbarch
);
2356 if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
2357 return aarch64_vnb_type (gdbarch
);
2359 internal_error (__FILE__
, __LINE__
,
2360 _("aarch64_pseudo_register_type: bad register number %d"),
2364 /* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2367 aarch64_pseudo_register_reggroup_p (struct gdbarch
*gdbarch
, int regnum
,
2368 struct reggroup
*group
)
2370 regnum
-= gdbarch_num_regs (gdbarch
);
2372 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
2373 return group
== all_reggroup
|| group
== vector_reggroup
;
2374 else if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
2375 return (group
== all_reggroup
|| group
== vector_reggroup
2376 || group
== float_reggroup
);
2377 else if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
2378 return (group
== all_reggroup
|| group
== vector_reggroup
2379 || group
== float_reggroup
);
2380 else if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
2381 return group
== all_reggroup
|| group
== vector_reggroup
;
2382 else if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
2383 return group
== all_reggroup
|| group
== vector_reggroup
;
2385 return group
== all_reggroup
;
2388 /* Implement the "pseudo_register_read_value" gdbarch method. */
2390 static struct value
*
2391 aarch64_pseudo_read_value (struct gdbarch
*gdbarch
,
2392 struct regcache
*regcache
,
2395 gdb_byte reg_buf
[MAX_REGISTER_SIZE
];
2396 struct value
*result_value
;
2399 result_value
= allocate_value (register_type (gdbarch
, regnum
));
2400 VALUE_LVAL (result_value
) = lval_register
;
2401 VALUE_REGNUM (result_value
) = regnum
;
2402 buf
= value_contents_raw (result_value
);
2404 regnum
-= gdbarch_num_regs (gdbarch
);
2406 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
2408 enum register_status status
;
2411 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_Q0_REGNUM
;
2412 status
= regcache_raw_read (regcache
, v_regnum
, reg_buf
);
2413 if (status
!= REG_VALID
)
2414 mark_value_bytes_unavailable (result_value
, 0,
2415 TYPE_LENGTH (value_type (result_value
)));
2417 memcpy (buf
, reg_buf
, Q_REGISTER_SIZE
);
2418 return result_value
;
2421 if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
2423 enum register_status status
;
2426 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_D0_REGNUM
;
2427 status
= regcache_raw_read (regcache
, v_regnum
, reg_buf
);
2428 if (status
!= REG_VALID
)
2429 mark_value_bytes_unavailable (result_value
, 0,
2430 TYPE_LENGTH (value_type (result_value
)));
2432 memcpy (buf
, reg_buf
, D_REGISTER_SIZE
);
2433 return result_value
;
2436 if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
2438 enum register_status status
;
2441 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_S0_REGNUM
;
2442 status
= regcache_raw_read (regcache
, v_regnum
, reg_buf
);
2443 memcpy (buf
, reg_buf
, S_REGISTER_SIZE
);
2444 return result_value
;
2447 if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
2449 enum register_status status
;
2452 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_H0_REGNUM
;
2453 status
= regcache_raw_read (regcache
, v_regnum
, reg_buf
);
2454 if (status
!= REG_VALID
)
2455 mark_value_bytes_unavailable (result_value
, 0,
2456 TYPE_LENGTH (value_type (result_value
)));
2458 memcpy (buf
, reg_buf
, H_REGISTER_SIZE
);
2459 return result_value
;
2462 if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
2464 enum register_status status
;
2467 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_B0_REGNUM
;
2468 status
= regcache_raw_read (regcache
, v_regnum
, reg_buf
);
2469 if (status
!= REG_VALID
)
2470 mark_value_bytes_unavailable (result_value
, 0,
2471 TYPE_LENGTH (value_type (result_value
)));
2473 memcpy (buf
, reg_buf
, B_REGISTER_SIZE
);
2474 return result_value
;
2477 gdb_assert_not_reached ("regnum out of bound");
2480 /* Implement the "pseudo_register_write" gdbarch method. */
2483 aarch64_pseudo_write (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
2484 int regnum
, const gdb_byte
*buf
)
2486 gdb_byte reg_buf
[MAX_REGISTER_SIZE
];
2488 /* Ensure the register buffer is zero, we want gdb writes of the
2489 various 'scalar' pseudo registers to behavior like architectural
2490 writes, register width bytes are written the remainder are set to
2492 memset (reg_buf
, 0, sizeof (reg_buf
));
2494 regnum
-= gdbarch_num_regs (gdbarch
);
2496 if (regnum
>= AARCH64_Q0_REGNUM
&& regnum
< AARCH64_Q0_REGNUM
+ 32)
2498 /* pseudo Q registers */
2501 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_Q0_REGNUM
;
2502 memcpy (reg_buf
, buf
, Q_REGISTER_SIZE
);
2503 regcache_raw_write (regcache
, v_regnum
, reg_buf
);
2507 if (regnum
>= AARCH64_D0_REGNUM
&& regnum
< AARCH64_D0_REGNUM
+ 32)
2509 /* pseudo D registers */
2512 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_D0_REGNUM
;
2513 memcpy (reg_buf
, buf
, D_REGISTER_SIZE
);
2514 regcache_raw_write (regcache
, v_regnum
, reg_buf
);
2518 if (regnum
>= AARCH64_S0_REGNUM
&& regnum
< AARCH64_S0_REGNUM
+ 32)
2522 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_S0_REGNUM
;
2523 memcpy (reg_buf
, buf
, S_REGISTER_SIZE
);
2524 regcache_raw_write (regcache
, v_regnum
, reg_buf
);
2528 if (regnum
>= AARCH64_H0_REGNUM
&& regnum
< AARCH64_H0_REGNUM
+ 32)
2530 /* pseudo H registers */
2533 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_H0_REGNUM
;
2534 memcpy (reg_buf
, buf
, H_REGISTER_SIZE
);
2535 regcache_raw_write (regcache
, v_regnum
, reg_buf
);
2539 if (regnum
>= AARCH64_B0_REGNUM
&& regnum
< AARCH64_B0_REGNUM
+ 32)
2541 /* pseudo B registers */
2544 v_regnum
= AARCH64_V0_REGNUM
+ regnum
- AARCH64_B0_REGNUM
;
2545 memcpy (reg_buf
, buf
, B_REGISTER_SIZE
);
2546 regcache_raw_write (regcache
, v_regnum
, reg_buf
);
2550 gdb_assert_not_reached ("regnum out of bound");
/* Callback function for user_reg_add.

   BATON points at the int register number the alias was registered
   with; return the value of that register in FRAME.  */

static struct value *
value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
{
  const int *reg_p = baton;

  return value_of_register (*reg_p, frame);
}
2564 /* Implement the "software_single_step" gdbarch method, needed to
2565 single step through atomic sequences on AArch64. */
2568 aarch64_software_single_step (struct frame_info
*frame
)
2570 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
2571 struct address_space
*aspace
= get_frame_address_space (frame
);
2572 enum bfd_endian byte_order_for_code
= gdbarch_byte_order_for_code (gdbarch
);
2573 const int insn_size
= 4;
2574 const int atomic_sequence_length
= 16; /* Instruction sequence length. */
2575 CORE_ADDR pc
= get_frame_pc (frame
);
2576 CORE_ADDR breaks
[2] = { -1, -1 };
2578 CORE_ADDR closing_insn
= 0;
2579 uint32_t insn
= read_memory_unsigned_integer (loc
, insn_size
,
2580 byte_order_for_code
);
2583 int bc_insn_count
= 0; /* Conditional branch instruction count. */
2584 int last_breakpoint
= 0; /* Defaults to 0 (no breakpoints placed). */
2586 /* Look for a Load Exclusive instruction which begins the sequence. */
2587 if (!decode_masked_match (insn
, 0x3fc00000, 0x08400000))
2590 for (insn_count
= 0; insn_count
< atomic_sequence_length
; ++insn_count
)
2596 insn
= read_memory_unsigned_integer (loc
, insn_size
,
2597 byte_order_for_code
);
2599 /* Check if the instruction is a conditional branch. */
2600 if (decode_bcond (loc
, insn
, &cond
, &offset
))
2602 if (bc_insn_count
>= 1)
2605 /* It is, so we'll try to set a breakpoint at the destination. */
2606 breaks
[1] = loc
+ offset
;
2612 /* Look for the Store Exclusive which closes the atomic sequence. */
2613 if (decode_masked_match (insn
, 0x3fc00000, 0x08000000))
2620 /* We didn't find a closing Store Exclusive instruction, fall back. */
2624 /* Insert breakpoint after the end of the atomic sequence. */
2625 breaks
[0] = loc
+ insn_size
;
2627 /* Check for duplicated breakpoints, and also check that the second
2628 breakpoint is not within the atomic sequence. */
2630 && (breaks
[1] == breaks
[0]
2631 || (breaks
[1] >= pc
&& breaks
[1] <= closing_insn
)))
2632 last_breakpoint
= 0;
2634 /* Insert the breakpoint at the end of the sequence, and one at the
2635 destination of the conditional branch, if it exists. */
2636 for (index
= 0; index
<= last_breakpoint
; index
++)
2637 insert_single_step_breakpoint (gdbarch
, aspace
, breaks
[index
]);
2642 /* Initialize the current architecture based on INFO. If possible,
2643 re-use an architecture from ARCHES, which is a list of
2644 architectures already created during this debugging session.
2646 Called e.g. at program startup, when reading a core file, and when
2647 reading a binary file. */
2649 static struct gdbarch
*
2650 aarch64_gdbarch_init (struct gdbarch_info info
, struct gdbarch_list
*arches
)
2652 struct gdbarch_tdep
*tdep
;
2653 struct gdbarch
*gdbarch
;
2654 struct gdbarch_list
*best_arch
;
2655 struct tdesc_arch_data
*tdesc_data
= NULL
;
2656 const struct target_desc
*tdesc
= info
.target_desc
;
2658 int have_fpa_registers
= 1;
2660 const struct tdesc_feature
*feature
;
2662 int num_pseudo_regs
= 0;
2664 /* Ensure we always have a target descriptor. */
2665 if (!tdesc_has_registers (tdesc
))
2666 tdesc
= tdesc_aarch64
;
2670 feature
= tdesc_find_feature (tdesc
, "org.gnu.gdb.aarch64.core");
2672 if (feature
== NULL
)
2675 tdesc_data
= tdesc_data_alloc ();
2677 /* Validate the descriptor provides the mandatory core R registers
2678 and allocate their numbers. */
2679 for (i
= 0; i
< ARRAY_SIZE (aarch64_r_register_names
); i
++)
2681 tdesc_numbered_register (feature
, tdesc_data
, AARCH64_X0_REGNUM
+ i
,
2682 aarch64_r_register_names
[i
]);
2684 num_regs
= AARCH64_X0_REGNUM
+ i
;
2686 /* Look for the V registers. */
2687 feature
= tdesc_find_feature (tdesc
, "org.gnu.gdb.aarch64.fpu");
2690 /* Validate the descriptor provides the mandatory V registers
2691 and allocate their numbers. */
2692 for (i
= 0; i
< ARRAY_SIZE (aarch64_v_register_names
); i
++)
2694 tdesc_numbered_register (feature
, tdesc_data
, AARCH64_V0_REGNUM
+ i
,
2695 aarch64_v_register_names
[i
]);
2697 num_regs
= AARCH64_V0_REGNUM
+ i
;
2699 num_pseudo_regs
+= 32; /* add the Qn scalar register pseudos */
2700 num_pseudo_regs
+= 32; /* add the Dn scalar register pseudos */
2701 num_pseudo_regs
+= 32; /* add the Sn scalar register pseudos */
2702 num_pseudo_regs
+= 32; /* add the Hn scalar register pseudos */
2703 num_pseudo_regs
+= 32; /* add the Bn scalar register pseudos */
2708 tdesc_data_cleanup (tdesc_data
);
2712 /* AArch64 code is always little-endian. */
2713 info
.byte_order_for_code
= BFD_ENDIAN_LITTLE
;
2715 /* If there is already a candidate, use it. */
2716 for (best_arch
= gdbarch_list_lookup_by_info (arches
, &info
);
2718 best_arch
= gdbarch_list_lookup_by_info (best_arch
->next
, &info
))
2720 /* Found a match. */
2724 if (best_arch
!= NULL
)
2726 if (tdesc_data
!= NULL
)
2727 tdesc_data_cleanup (tdesc_data
);
2728 return best_arch
->gdbarch
;
2731 tdep
= xcalloc (1, sizeof (struct gdbarch_tdep
));
2732 gdbarch
= gdbarch_alloc (&info
, tdep
);
2734 /* This should be low enough for everything. */
2735 tdep
->lowest_pc
= 0x20;
2736 tdep
->jb_pc
= -1; /* Longjump support not enabled by default. */
2737 tdep
->jb_elt_size
= 8;
2739 set_gdbarch_push_dummy_call (gdbarch
, aarch64_push_dummy_call
);
2740 set_gdbarch_frame_align (gdbarch
, aarch64_frame_align
);
2742 /* Frame handling. */
2743 set_gdbarch_dummy_id (gdbarch
, aarch64_dummy_id
);
2744 set_gdbarch_unwind_pc (gdbarch
, aarch64_unwind_pc
);
2745 set_gdbarch_unwind_sp (gdbarch
, aarch64_unwind_sp
);
2747 /* Advance PC across function entry code. */
2748 set_gdbarch_skip_prologue (gdbarch
, aarch64_skip_prologue
);
2750 /* The stack grows downward. */
2751 set_gdbarch_inner_than (gdbarch
, core_addr_lessthan
);
2753 /* Breakpoint manipulation. */
2754 set_gdbarch_breakpoint_from_pc (gdbarch
, aarch64_breakpoint_from_pc
);
2755 set_gdbarch_have_nonsteppable_watchpoint (gdbarch
, 1);
2756 set_gdbarch_software_single_step (gdbarch
, aarch64_software_single_step
);
2758 /* Information about registers, etc. */
2759 set_gdbarch_sp_regnum (gdbarch
, AARCH64_SP_REGNUM
);
2760 set_gdbarch_pc_regnum (gdbarch
, AARCH64_PC_REGNUM
);
2761 set_gdbarch_num_regs (gdbarch
, num_regs
);
2763 set_gdbarch_num_pseudo_regs (gdbarch
, num_pseudo_regs
);
2764 set_gdbarch_pseudo_register_read_value (gdbarch
, aarch64_pseudo_read_value
);
2765 set_gdbarch_pseudo_register_write (gdbarch
, aarch64_pseudo_write
);
2766 set_tdesc_pseudo_register_name (gdbarch
, aarch64_pseudo_register_name
);
2767 set_tdesc_pseudo_register_type (gdbarch
, aarch64_pseudo_register_type
);
2768 set_tdesc_pseudo_register_reggroup_p (gdbarch
,
2769 aarch64_pseudo_register_reggroup_p
);
2772 set_gdbarch_short_bit (gdbarch
, 16);
2773 set_gdbarch_int_bit (gdbarch
, 32);
2774 set_gdbarch_float_bit (gdbarch
, 32);
2775 set_gdbarch_double_bit (gdbarch
, 64);
2776 set_gdbarch_long_double_bit (gdbarch
, 128);
2777 set_gdbarch_long_bit (gdbarch
, 64);
2778 set_gdbarch_long_long_bit (gdbarch
, 64);
2779 set_gdbarch_ptr_bit (gdbarch
, 64);
2780 set_gdbarch_char_signed (gdbarch
, 0);
2781 set_gdbarch_float_format (gdbarch
, floatformats_ieee_single
);
2782 set_gdbarch_double_format (gdbarch
, floatformats_ieee_double
);
2783 set_gdbarch_long_double_format (gdbarch
, floatformats_ia64_quad
);
2785 /* Internal <-> external register number maps. */
2786 set_gdbarch_dwarf2_reg_to_regnum (gdbarch
, aarch64_dwarf_reg_to_regnum
);
2788 /* Returning results. */
2789 set_gdbarch_return_value (gdbarch
, aarch64_return_value
);
2792 set_gdbarch_print_insn (gdbarch
, aarch64_gdb_print_insn
);
2794 /* Virtual tables. */
2795 set_gdbarch_vbit_in_delta (gdbarch
, 1);
2797 /* Hook in the ABI-specific overrides, if they have been registered. */
2798 info
.target_desc
= tdesc
;
2799 info
.tdep_info
= (void *) tdesc_data
;
2800 gdbarch_init_osabi (info
, gdbarch
);
2802 dwarf2_frame_set_init_reg (gdbarch
, aarch64_dwarf2_frame_init_reg
);
2804 /* Add some default predicates. */
2805 frame_unwind_append_unwinder (gdbarch
, &aarch64_stub_unwind
);
2806 dwarf2_append_unwinders (gdbarch
);
2807 frame_unwind_append_unwinder (gdbarch
, &aarch64_prologue_unwind
);
2809 frame_base_set_default (gdbarch
, &aarch64_normal_base
);
2811 /* Now we have tuned the configuration, set a few final things,
2812 based on what the OS ABI has told us. */
2814 if (tdep
->jb_pc
>= 0)
2815 set_gdbarch_get_longjmp_target (gdbarch
, aarch64_get_longjmp_target
);
2817 tdesc_use_registers (gdbarch
, tdesc
, tdesc_data
);
2819 /* Add standard register aliases. */
2820 for (i
= 0; i
< ARRAY_SIZE (aarch64_register_aliases
); i
++)
2821 user_reg_add (gdbarch
, aarch64_register_aliases
[i
].name
,
2822 value_of_aarch64_user_reg
,
2823 &aarch64_register_aliases
[i
].regnum
);
2829 aarch64_dump_tdep (struct gdbarch
*gdbarch
, struct ui_file
*file
)
2831 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
2836 fprintf_unfiltered (file
, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
2837 paddress (gdbarch
, tdep
->lowest_pc
));
2840 /* Suppress warning from -Wmissing-prototypes. */
2841 extern initialize_file_ftype _initialize_aarch64_tdep
;
2844 _initialize_aarch64_tdep (void)
2846 gdbarch_register (bfd_arch_aarch64
, aarch64_gdbarch_init
,
2849 initialize_tdesc_aarch64 ();
2851 /* Debug this file's internals. */
2852 add_setshow_boolean_cmd ("aarch64", class_maintenance
, &aarch64_debug
, _("\
2853 Set AArch64 debugging."), _("\
2854 Show AArch64 debugging."), _("\
2855 When on, AArch64 specific debugging is enabled."),
2858 &setdebuglist
, &showdebuglist
);
/* AArch64 process record-replay related structures, defines etc.  */

/* Mask of the low (X+1) bits.  NOTE(review): for X == 63 on an ILP32
   host `1L << 64` would be undefined; callers here only pass bit
   indices of a 32-bit insn, so X <= 31.  */
#define submask(x) ((1L << ((x) + 1)) - 1)
/* Bit ST of OBJ.  */
#define bit(obj,st) (((obj) >> (st)) & 1)
/* Bit-field [ST, FN] (inclusive) of OBJ.  */
#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))

/* Copy LENGTH register numbers from RECORD_BUF into a freshly
   allocated array assigned to REGS (no-op when LENGTH is 0).  */
#define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
        do \
          { \
            unsigned int reg_len = LENGTH; \
            if (reg_len) \
              { \
                REGS = XNEWVEC (uint32_t, reg_len); \
                memcpy(&REGS[0], &RECORD_BUF[0], sizeof(uint32_t)*LENGTH); \
              } \
          } \
        while (0)

/* Copy LENGTH memory records (len/addr pairs) from RECORD_BUF into a
   freshly allocated array assigned to MEMS (no-op when LENGTH is 0).  */
#define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
        do \
          { \
            unsigned int mem_len = LENGTH; \
            if (mem_len) \
              { \
                MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
                memcpy(&MEMS->len, &RECORD_BUF[0], \
                       sizeof(struct aarch64_mem_r) * LENGTH); \
              } \
          } \
        while (0)
/* AArch64 record/replay structures and enumerations.  */

/* One recorded memory access: a length/address pair consumed by
   record_full_arch_list_add_mem via MEM_ALLOC.  */
struct aarch64_mem_r
{
  uint64_t len;    /* Record length.  */
  uint64_t addr;   /* Memory address.  */
};
/* Result codes returned by the per-group record handlers.  */
enum aarch64_record_result
{
  AARCH64_RECORD_SUCCESS,
  AARCH64_RECORD_FAILURE,
  AARCH64_RECORD_UNSUPPORTED,
  AARCH64_RECORD_UNKNOWN
};
2908 typedef struct insn_decode_record_t
2910 struct gdbarch
*gdbarch
;
2911 struct regcache
*regcache
;
2912 CORE_ADDR this_addr
; /* Address of insn to be recorded. */
2913 uint32_t aarch64_insn
; /* Insn to be recorded. */
2914 uint32_t mem_rec_count
; /* Count of memory records. */
2915 uint32_t reg_rec_count
; /* Count of register records. */
2916 uint32_t *aarch64_regs
; /* Registers to be recorded. */
2917 struct aarch64_mem_r
*aarch64_mems
; /* Memory locations to be recorded. */
2918 } insn_decode_record
;
2920 /* Record handler for data processing - register instructions. */
2923 aarch64_record_data_proc_reg (insn_decode_record
*aarch64_insn_r
)
2925 uint8_t reg_rd
, insn_bits24_27
, insn_bits21_23
;
2926 uint32_t record_buf
[4];
2928 reg_rd
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
2929 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
2930 insn_bits21_23
= bits (aarch64_insn_r
->aarch64_insn
, 21, 23);
2932 if (!bit (aarch64_insn_r
->aarch64_insn
, 28))
2936 /* Logical (shifted register). */
2937 if (insn_bits24_27
== 0x0a)
2938 setflags
= (bits (aarch64_insn_r
->aarch64_insn
, 29, 30) == 0x03);
2940 else if (insn_bits24_27
== 0x0b)
2941 setflags
= bit (aarch64_insn_r
->aarch64_insn
, 29);
2943 return AARCH64_RECORD_UNKNOWN
;
2945 record_buf
[0] = reg_rd
;
2946 aarch64_insn_r
->reg_rec_count
= 1;
2948 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_CPSR_REGNUM
;
2952 if (insn_bits24_27
== 0x0b)
2954 /* Data-processing (3 source). */
2955 record_buf
[0] = reg_rd
;
2956 aarch64_insn_r
->reg_rec_count
= 1;
2958 else if (insn_bits24_27
== 0x0a)
2960 if (insn_bits21_23
== 0x00)
2962 /* Add/subtract (with carry). */
2963 record_buf
[0] = reg_rd
;
2964 aarch64_insn_r
->reg_rec_count
= 1;
2965 if (bit (aarch64_insn_r
->aarch64_insn
, 29))
2967 record_buf
[1] = AARCH64_CPSR_REGNUM
;
2968 aarch64_insn_r
->reg_rec_count
= 2;
2971 else if (insn_bits21_23
== 0x02)
2973 /* Conditional compare (register) and conditional compare
2974 (immediate) instructions. */
2975 record_buf
[0] = AARCH64_CPSR_REGNUM
;
2976 aarch64_insn_r
->reg_rec_count
= 1;
2978 else if (insn_bits21_23
== 0x04 || insn_bits21_23
== 0x06)
2980 /* CConditional select. */
2981 /* Data-processing (2 source). */
2982 /* Data-processing (1 source). */
2983 record_buf
[0] = reg_rd
;
2984 aarch64_insn_r
->reg_rec_count
= 1;
2987 return AARCH64_RECORD_UNKNOWN
;
2991 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
2993 return AARCH64_RECORD_SUCCESS
;
2996 /* Record handler for data processing - immediate instructions. */
2999 aarch64_record_data_proc_imm (insn_decode_record
*aarch64_insn_r
)
3001 uint8_t reg_rd
, insn_bit28
, insn_bit23
, insn_bits24_27
, setflags
;
3002 uint32_t record_buf
[4];
3004 reg_rd
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3005 insn_bit28
= bit (aarch64_insn_r
->aarch64_insn
, 28);
3006 insn_bit23
= bit (aarch64_insn_r
->aarch64_insn
, 23);
3007 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
3009 if (insn_bits24_27
== 0x00 /* PC rel addressing. */
3010 || insn_bits24_27
== 0x03 /* Bitfield and Extract. */
3011 || (insn_bits24_27
== 0x02 && insn_bit23
)) /* Move wide (immediate). */
3013 record_buf
[0] = reg_rd
;
3014 aarch64_insn_r
->reg_rec_count
= 1;
3016 else if (insn_bits24_27
== 0x01)
3018 /* Add/Subtract (immediate). */
3019 setflags
= bit (aarch64_insn_r
->aarch64_insn
, 29);
3020 record_buf
[0] = reg_rd
;
3021 aarch64_insn_r
->reg_rec_count
= 1;
3023 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_CPSR_REGNUM
;
3025 else if (insn_bits24_27
== 0x02 && !insn_bit23
)
3027 /* Logical (immediate). */
3028 setflags
= bits (aarch64_insn_r
->aarch64_insn
, 29, 30) == 0x03;
3029 record_buf
[0] = reg_rd
;
3030 aarch64_insn_r
->reg_rec_count
= 1;
3032 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_CPSR_REGNUM
;
3035 return AARCH64_RECORD_UNKNOWN
;
3037 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3039 return AARCH64_RECORD_SUCCESS
;
3042 /* Record handler for branch, exception generation and system instructions. */
3045 aarch64_record_branch_except_sys (insn_decode_record
*aarch64_insn_r
)
3047 struct gdbarch_tdep
*tdep
= gdbarch_tdep (aarch64_insn_r
->gdbarch
);
3048 uint8_t insn_bits24_27
, insn_bits28_31
, insn_bits22_23
;
3049 uint32_t record_buf
[4];
3051 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
3052 insn_bits28_31
= bits (aarch64_insn_r
->aarch64_insn
, 28, 31);
3053 insn_bits22_23
= bits (aarch64_insn_r
->aarch64_insn
, 22, 23);
3055 if (insn_bits28_31
== 0x0d)
3057 /* Exception generation instructions. */
3058 if (insn_bits24_27
== 0x04)
3060 if (!bits (aarch64_insn_r
->aarch64_insn
, 2, 4)
3061 && !bits (aarch64_insn_r
->aarch64_insn
, 21, 23)
3062 && bits (aarch64_insn_r
->aarch64_insn
, 0, 1) == 0x01)
3064 ULONGEST svc_number
;
3066 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, 8,
3068 return tdep
->aarch64_syscall_record (aarch64_insn_r
->regcache
,
3072 return AARCH64_RECORD_UNSUPPORTED
;
3074 /* System instructions. */
3075 else if (insn_bits24_27
== 0x05 && insn_bits22_23
== 0x00)
3077 uint32_t reg_rt
, reg_crn
;
3079 reg_rt
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3080 reg_crn
= bits (aarch64_insn_r
->aarch64_insn
, 12, 15);
3082 /* Record rt in case of sysl and mrs instructions. */
3083 if (bit (aarch64_insn_r
->aarch64_insn
, 21))
3085 record_buf
[0] = reg_rt
;
3086 aarch64_insn_r
->reg_rec_count
= 1;
3088 /* Record cpsr for hint and msr(immediate) instructions. */
3089 else if (reg_crn
== 0x02 || reg_crn
== 0x04)
3091 record_buf
[0] = AARCH64_CPSR_REGNUM
;
3092 aarch64_insn_r
->reg_rec_count
= 1;
3095 /* Unconditional branch (register). */
3096 else if((insn_bits24_27
& 0x0e) == 0x06)
3098 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_PC_REGNUM
;
3099 if (bits (aarch64_insn_r
->aarch64_insn
, 21, 22) == 0x01)
3100 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_LR_REGNUM
;
3103 return AARCH64_RECORD_UNKNOWN
;
3105 /* Unconditional branch (immediate). */
3106 else if ((insn_bits28_31
& 0x07) == 0x01 && (insn_bits24_27
& 0x0c) == 0x04)
3108 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_PC_REGNUM
;
3109 if (bit (aarch64_insn_r
->aarch64_insn
, 31))
3110 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_LR_REGNUM
;
3113 /* Compare & branch (immediate), Test & branch (immediate) and
3114 Conditional branch (immediate). */
3115 record_buf
[aarch64_insn_r
->reg_rec_count
++] = AARCH64_PC_REGNUM
;
3117 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3119 return AARCH64_RECORD_SUCCESS
;
3122 /* Record handler for advanced SIMD load and store instructions. */
3125 aarch64_record_asimd_load_store (insn_decode_record
*aarch64_insn_r
)
3128 uint64_t addr_offset
= 0;
3129 uint32_t record_buf
[24];
3130 uint64_t record_buf_mem
[24];
3131 uint32_t reg_rn
, reg_rt
;
3132 uint32_t reg_index
= 0, mem_index
= 0;
3133 uint8_t opcode_bits
, size_bits
;
3135 reg_rt
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3136 reg_rn
= bits (aarch64_insn_r
->aarch64_insn
, 5, 9);
3137 size_bits
= bits (aarch64_insn_r
->aarch64_insn
, 10, 11);
3138 opcode_bits
= bits (aarch64_insn_r
->aarch64_insn
, 12, 15);
3139 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
, &address
);
3143 fprintf_unfiltered (gdb_stdlog
,
3144 "Process record: Advanced SIMD load/store\n");
3147 /* Load/store single structure. */
3148 if (bit (aarch64_insn_r
->aarch64_insn
, 24))
3150 uint8_t sindex
, scale
, selem
, esize
, replicate
= 0;
3151 scale
= opcode_bits
>> 2;
3152 selem
= ((opcode_bits
& 0x02) |
3153 bit (aarch64_insn_r
->aarch64_insn
, 21)) + 1;
3157 if (size_bits
& 0x01)
3158 return AARCH64_RECORD_UNKNOWN
;
3161 if ((size_bits
>> 1) & 0x01)
3162 return AARCH64_RECORD_UNKNOWN
;
3163 if (size_bits
& 0x01)
3165 if (!((opcode_bits
>> 1) & 0x01))
3168 return AARCH64_RECORD_UNKNOWN
;
3172 if (bit (aarch64_insn_r
->aarch64_insn
, 22) && !(opcode_bits
& 0x01))
3179 return AARCH64_RECORD_UNKNOWN
;
3185 for (sindex
= 0; sindex
< selem
; sindex
++)
3187 record_buf
[reg_index
++] = reg_rt
+ AARCH64_V0_REGNUM
;
3188 reg_rt
= (reg_rt
+ 1) % 32;
3192 for (sindex
= 0; sindex
< selem
; sindex
++)
3193 if (bit (aarch64_insn_r
->aarch64_insn
, 22))
3194 record_buf
[reg_index
++] = reg_rt
+ AARCH64_V0_REGNUM
;
3197 record_buf_mem
[mem_index
++] = esize
/ 8;
3198 record_buf_mem
[mem_index
++] = address
+ addr_offset
;
3200 addr_offset
= addr_offset
+ (esize
/ 8);
3201 reg_rt
= (reg_rt
+ 1) % 32;
3204 /* Load/store multiple structure. */
3207 uint8_t selem
, esize
, rpt
, elements
;
3208 uint8_t eindex
, rindex
;
3210 esize
= 8 << size_bits
;
3211 if (bit (aarch64_insn_r
->aarch64_insn
, 30))
3212 elements
= 128 / esize
;
3214 elements
= 64 / esize
;
3216 switch (opcode_bits
)
3218 /*LD/ST4 (4 Registers). */
3223 /*LD/ST1 (4 Registers). */
3228 /*LD/ST3 (3 Registers). */
3233 /*LD/ST1 (3 Registers). */
3238 /*LD/ST1 (1 Register). */
3243 /*LD/ST2 (2 Registers). */
3248 /*LD/ST1 (2 Registers). */
3254 return AARCH64_RECORD_UNSUPPORTED
;
3257 for (rindex
= 0; rindex
< rpt
; rindex
++)
3258 for (eindex
= 0; eindex
< elements
; eindex
++)
3260 uint8_t reg_tt
, sindex
;
3261 reg_tt
= (reg_rt
+ rindex
) % 32;
3262 for (sindex
= 0; sindex
< selem
; sindex
++)
3264 if (bit (aarch64_insn_r
->aarch64_insn
, 22))
3265 record_buf
[reg_index
++] = reg_tt
+ AARCH64_V0_REGNUM
;
3268 record_buf_mem
[mem_index
++] = esize
/ 8;
3269 record_buf_mem
[mem_index
++] = address
+ addr_offset
;
3271 addr_offset
= addr_offset
+ (esize
/ 8);
3272 reg_tt
= (reg_tt
+ 1) % 32;
3277 if (bit (aarch64_insn_r
->aarch64_insn
, 23))
3278 record_buf
[reg_index
++] = reg_rn
;
3280 aarch64_insn_r
->reg_rec_count
= reg_index
;
3281 aarch64_insn_r
->mem_rec_count
= mem_index
/ 2;
3282 MEM_ALLOC (aarch64_insn_r
->aarch64_mems
, aarch64_insn_r
->mem_rec_count
,
3284 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3286 return AARCH64_RECORD_SUCCESS
;
3289 /* Record handler for load and store instructions. */
3292 aarch64_record_load_store (insn_decode_record
*aarch64_insn_r
)
3294 uint8_t insn_bits24_27
, insn_bits28_29
, insn_bits10_11
;
3295 uint8_t insn_bit23
, insn_bit21
;
3296 uint8_t opc
, size_bits
, ld_flag
, vector_flag
;
3297 uint32_t reg_rn
, reg_rt
, reg_rt2
;
3298 uint64_t datasize
, offset
;
3299 uint32_t record_buf
[8];
3300 uint64_t record_buf_mem
[8];
3303 insn_bits10_11
= bits (aarch64_insn_r
->aarch64_insn
, 10, 11);
3304 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
3305 insn_bits28_29
= bits (aarch64_insn_r
->aarch64_insn
, 28, 29);
3306 insn_bit21
= bit (aarch64_insn_r
->aarch64_insn
, 21);
3307 insn_bit23
= bit (aarch64_insn_r
->aarch64_insn
, 23);
3308 ld_flag
= bit (aarch64_insn_r
->aarch64_insn
, 22);
3309 vector_flag
= bit (aarch64_insn_r
->aarch64_insn
, 26);
3310 reg_rt
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3311 reg_rn
= bits (aarch64_insn_r
->aarch64_insn
, 5, 9);
3312 reg_rt2
= bits (aarch64_insn_r
->aarch64_insn
, 10, 14);
3313 size_bits
= bits (aarch64_insn_r
->aarch64_insn
, 30, 31);
3315 /* Load/store exclusive. */
3316 if (insn_bits24_27
== 0x08 && insn_bits28_29
== 0x00)
3320 fprintf_unfiltered (gdb_stdlog
,
3321 "Process record: load/store exclusive\n");
3326 record_buf
[0] = reg_rt
;
3327 aarch64_insn_r
->reg_rec_count
= 1;
3330 record_buf
[1] = reg_rt2
;
3331 aarch64_insn_r
->reg_rec_count
= 2;
3337 datasize
= (8 << size_bits
) * 2;
3339 datasize
= (8 << size_bits
);
3340 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
3342 record_buf_mem
[0] = datasize
/ 8;
3343 record_buf_mem
[1] = address
;
3344 aarch64_insn_r
->mem_rec_count
= 1;
3347 /* Save register rs. */
3348 record_buf
[0] = bits (aarch64_insn_r
->aarch64_insn
, 16, 20);
3349 aarch64_insn_r
->reg_rec_count
= 1;
3353 /* Load register (literal) instructions decoding. */
3354 else if ((insn_bits24_27
& 0x0b) == 0x08 && insn_bits28_29
== 0x01)
3358 fprintf_unfiltered (gdb_stdlog
,
3359 "Process record: load register (literal)\n");
3362 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
3364 record_buf
[0] = reg_rt
;
3365 aarch64_insn_r
->reg_rec_count
= 1;
3367 /* All types of load/store pair instructions decoding. */
3368 else if ((insn_bits24_27
& 0x0a) == 0x08 && insn_bits28_29
== 0x02)
3372 fprintf_unfiltered (gdb_stdlog
,
3373 "Process record: load/store pair\n");
3380 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
3381 record_buf
[1] = reg_rt2
+ AARCH64_V0_REGNUM
;
3385 record_buf
[0] = reg_rt
;
3386 record_buf
[1] = reg_rt2
;
3388 aarch64_insn_r
->reg_rec_count
= 2;
3393 imm7_off
= bits (aarch64_insn_r
->aarch64_insn
, 15, 21);
3395 size_bits
= size_bits
>> 1;
3396 datasize
= 8 << (2 + size_bits
);
3397 offset
= (imm7_off
& 0x40) ? (~imm7_off
& 0x007f) + 1 : imm7_off
;
3398 offset
= offset
<< (2 + size_bits
);
3399 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
3401 if (!((insn_bits24_27
& 0x0b) == 0x08 && insn_bit23
))
3403 if (imm7_off
& 0x40)
3404 address
= address
- offset
;
3406 address
= address
+ offset
;
3409 record_buf_mem
[0] = datasize
/ 8;
3410 record_buf_mem
[1] = address
;
3411 record_buf_mem
[2] = datasize
/ 8;
3412 record_buf_mem
[3] = address
+ (datasize
/ 8);
3413 aarch64_insn_r
->mem_rec_count
= 2;
3415 if (bit (aarch64_insn_r
->aarch64_insn
, 23))
3416 record_buf
[aarch64_insn_r
->reg_rec_count
++] = reg_rn
;
3418 /* Load/store register (unsigned immediate) instructions. */
3419 else if ((insn_bits24_27
& 0x0b) == 0x09 && insn_bits28_29
== 0x03)
3421 opc
= bits (aarch64_insn_r
->aarch64_insn
, 22, 23);
3428 if (size_bits
!= 0x03)
3431 return AARCH64_RECORD_UNKNOWN
;
3435 fprintf_unfiltered (gdb_stdlog
,
3436 "Process record: load/store (unsigned immediate):"
3437 " size %x V %d opc %x\n", size_bits
, vector_flag
,
3443 offset
= bits (aarch64_insn_r
->aarch64_insn
, 10, 21);
3444 datasize
= 8 << size_bits
;
3445 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
3447 offset
= offset
<< size_bits
;
3448 address
= address
+ offset
;
3450 record_buf_mem
[0] = datasize
>> 3;
3451 record_buf_mem
[1] = address
;
3452 aarch64_insn_r
->mem_rec_count
= 1;
3457 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
3459 record_buf
[0] = reg_rt
;
3460 aarch64_insn_r
->reg_rec_count
= 1;
3463 /* Load/store register (register offset) instructions. */
3464 else if ((insn_bits24_27
& 0x0b) == 0x08 && insn_bits28_29
== 0x03
3465 && insn_bits10_11
== 0x02 && insn_bit21
)
3469 fprintf_unfiltered (gdb_stdlog
,
3470 "Process record: load/store (register offset)\n");
3472 opc
= bits (aarch64_insn_r
->aarch64_insn
, 22, 23);
3479 if (size_bits
!= 0x03)
3482 return AARCH64_RECORD_UNKNOWN
;
3486 uint64_t reg_rm_val
;
3487 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
,
3488 bits (aarch64_insn_r
->aarch64_insn
, 16, 20), ®_rm_val
);
3489 if (bit (aarch64_insn_r
->aarch64_insn
, 12))
3490 offset
= reg_rm_val
<< size_bits
;
3492 offset
= reg_rm_val
;
3493 datasize
= 8 << size_bits
;
3494 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
3496 address
= address
+ offset
;
3497 record_buf_mem
[0] = datasize
>> 3;
3498 record_buf_mem
[1] = address
;
3499 aarch64_insn_r
->mem_rec_count
= 1;
3504 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
3506 record_buf
[0] = reg_rt
;
3507 aarch64_insn_r
->reg_rec_count
= 1;
3510 /* Load/store register (immediate and unprivileged) instructions. */
3511 else if ((insn_bits24_27
& 0x0b) == 0x08 && insn_bits28_29
== 0x03
3516 fprintf_unfiltered (gdb_stdlog
,
3517 "Process record: load/store (immediate and unprivileged)\n");
3519 opc
= bits (aarch64_insn_r
->aarch64_insn
, 22, 23);
3526 if (size_bits
!= 0x03)
3529 return AARCH64_RECORD_UNKNOWN
;
3534 imm9_off
= bits (aarch64_insn_r
->aarch64_insn
, 12, 20);
3535 offset
= (imm9_off
& 0x0100) ? (((~imm9_off
) & 0x01ff) + 1) : imm9_off
;
3536 datasize
= 8 << size_bits
;
3537 regcache_raw_read_unsigned (aarch64_insn_r
->regcache
, reg_rn
,
3539 if (insn_bits10_11
!= 0x01)
3541 if (imm9_off
& 0x0100)
3542 address
= address
- offset
;
3544 address
= address
+ offset
;
3546 record_buf_mem
[0] = datasize
>> 3;
3547 record_buf_mem
[1] = address
;
3548 aarch64_insn_r
->mem_rec_count
= 1;
3553 record_buf
[0] = reg_rt
+ AARCH64_V0_REGNUM
;
3555 record_buf
[0] = reg_rt
;
3556 aarch64_insn_r
->reg_rec_count
= 1;
3558 if (insn_bits10_11
== 0x01 || insn_bits10_11
== 0x03)
3559 record_buf
[aarch64_insn_r
->reg_rec_count
++] = reg_rn
;
3561 /* Advanced SIMD load/store instructions. */
3563 return aarch64_record_asimd_load_store (aarch64_insn_r
);
3565 MEM_ALLOC (aarch64_insn_r
->aarch64_mems
, aarch64_insn_r
->mem_rec_count
,
3567 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3569 return AARCH64_RECORD_SUCCESS
;
3572 /* Record handler for data processing SIMD and floating point instructions. */
3575 aarch64_record_data_proc_simd_fp (insn_decode_record
*aarch64_insn_r
)
3577 uint8_t insn_bit21
, opcode
, rmode
, reg_rd
;
3578 uint8_t insn_bits24_27
, insn_bits28_31
, insn_bits10_11
, insn_bits12_15
;
3579 uint8_t insn_bits11_14
;
3580 uint32_t record_buf
[2];
3582 insn_bits24_27
= bits (aarch64_insn_r
->aarch64_insn
, 24, 27);
3583 insn_bits28_31
= bits (aarch64_insn_r
->aarch64_insn
, 28, 31);
3584 insn_bits10_11
= bits (aarch64_insn_r
->aarch64_insn
, 10, 11);
3585 insn_bits12_15
= bits (aarch64_insn_r
->aarch64_insn
, 12, 15);
3586 insn_bits11_14
= bits (aarch64_insn_r
->aarch64_insn
, 11, 14);
3587 opcode
= bits (aarch64_insn_r
->aarch64_insn
, 16, 18);
3588 rmode
= bits (aarch64_insn_r
->aarch64_insn
, 19, 20);
3589 reg_rd
= bits (aarch64_insn_r
->aarch64_insn
, 0, 4);
3590 insn_bit21
= bit (aarch64_insn_r
->aarch64_insn
, 21);
3594 fprintf_unfiltered (gdb_stdlog
,
3595 "Process record: data processing SIMD/FP: ");
3598 if ((insn_bits28_31
& 0x05) == 0x01 && insn_bits24_27
== 0x0e)
3600 /* Floating point - fixed point conversion instructions. */
3604 fprintf_unfiltered (gdb_stdlog
, "FP - fixed point conversion");
3606 if ((opcode
>> 1) == 0x0 && rmode
== 0x03)
3607 record_buf
[0] = reg_rd
;
3609 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3611 /* Floating point - conditional compare instructions. */
3612 else if (insn_bits10_11
== 0x01)
3615 fprintf_unfiltered (gdb_stdlog
, "FP - conditional compare");
3617 record_buf
[0] = AARCH64_CPSR_REGNUM
;
3619 /* Floating point - data processing (2-source) and
3620 conditional select instructions. */
3621 else if (insn_bits10_11
== 0x02 || insn_bits10_11
== 0x03)
3624 fprintf_unfiltered (gdb_stdlog
, "FP - DP (2-source)");
3626 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3628 else if (insn_bits10_11
== 0x00)
3630 /* Floating point - immediate instructions. */
3631 if ((insn_bits12_15
& 0x01) == 0x01
3632 || (insn_bits12_15
& 0x07) == 0x04)
3635 fprintf_unfiltered (gdb_stdlog
, "FP - immediate");
3636 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3638 /* Floating point - compare instructions. */
3639 else if ((insn_bits12_15
& 0x03) == 0x02)
3642 fprintf_unfiltered (gdb_stdlog
, "FP - immediate");
3643 record_buf
[0] = AARCH64_CPSR_REGNUM
;
3645 /* Floating point - integer conversions instructions. */
3646 else if (insn_bits12_15
== 0x00)
3648 /* Convert float to integer instruction. */
3649 if (!(opcode
>> 1) || ((opcode
>> 1) == 0x02 && !rmode
))
3652 fprintf_unfiltered (gdb_stdlog
, "float to int conversion");
3654 record_buf
[0] = reg_rd
+ AARCH64_X0_REGNUM
;
3656 /* Convert integer to float instruction. */
3657 else if ((opcode
>> 1) == 0x01 && !rmode
)
3660 fprintf_unfiltered (gdb_stdlog
, "int to float conversion");
3662 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3664 /* Move float to integer instruction. */
3665 else if ((opcode
>> 1) == 0x03)
3668 fprintf_unfiltered (gdb_stdlog
, "move float to int");
3670 if (!(opcode
& 0x01))
3671 record_buf
[0] = reg_rd
+ AARCH64_X0_REGNUM
;
3673 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3676 return AARCH64_RECORD_UNKNOWN
;
3679 return AARCH64_RECORD_UNKNOWN
;
3682 return AARCH64_RECORD_UNKNOWN
;
3684 else if ((insn_bits28_31
& 0x09) == 0x00 && insn_bits24_27
== 0x0e)
3687 fprintf_unfiltered (gdb_stdlog
, "SIMD copy");
3689 /* Advanced SIMD copy instructions. */
3690 if (!bits (aarch64_insn_r
->aarch64_insn
, 21, 23)
3691 && !bit (aarch64_insn_r
->aarch64_insn
, 15)
3692 && bit (aarch64_insn_r
->aarch64_insn
, 10))
3694 if (insn_bits11_14
== 0x05 || insn_bits11_14
== 0x07)
3695 record_buf
[0] = reg_rd
+ AARCH64_X0_REGNUM
;
3697 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3700 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3702 /* All remaining floating point or advanced SIMD instructions. */
3706 fprintf_unfiltered (gdb_stdlog
, "all remain");
3708 record_buf
[0] = reg_rd
+ AARCH64_V0_REGNUM
;
3712 fprintf_unfiltered (gdb_stdlog
, "\n");
3714 aarch64_insn_r
->reg_rec_count
++;
3715 gdb_assert (aarch64_insn_r
->reg_rec_count
== 1);
3716 REG_ALLOC (aarch64_insn_r
->aarch64_regs
, aarch64_insn_r
->reg_rec_count
,
3718 return AARCH64_RECORD_SUCCESS
;
3721 /* Decodes insns type and invokes its record handler. */
3724 aarch64_record_decode_insn_handler (insn_decode_record
*aarch64_insn_r
)
3726 uint32_t ins_bit25
, ins_bit26
, ins_bit27
, ins_bit28
;
3728 ins_bit25
= bit (aarch64_insn_r
->aarch64_insn
, 25);
3729 ins_bit26
= bit (aarch64_insn_r
->aarch64_insn
, 26);
3730 ins_bit27
= bit (aarch64_insn_r
->aarch64_insn
, 27);
3731 ins_bit28
= bit (aarch64_insn_r
->aarch64_insn
, 28);
3733 /* Data processing - immediate instructions. */
3734 if (!ins_bit26
&& !ins_bit27
&& ins_bit28
)
3735 return aarch64_record_data_proc_imm (aarch64_insn_r
);
3737 /* Branch, exception generation and system instructions. */
3738 if (ins_bit26
&& !ins_bit27
&& ins_bit28
)
3739 return aarch64_record_branch_except_sys (aarch64_insn_r
);
3741 /* Load and store instructions. */
3742 if (!ins_bit25
&& ins_bit27
)
3743 return aarch64_record_load_store (aarch64_insn_r
);
3745 /* Data processing - register instructions. */
3746 if (ins_bit25
&& !ins_bit26
&& ins_bit27
)
3747 return aarch64_record_data_proc_reg (aarch64_insn_r
);
3749 /* Data processing - SIMD and floating point instructions. */
3750 if (ins_bit25
&& ins_bit26
&& ins_bit27
)
3751 return aarch64_record_data_proc_simd_fp (aarch64_insn_r
);
3753 return AARCH64_RECORD_UNSUPPORTED
;
/* Cleans up local record registers and memory allocations.

   Releases RECORD's aarch64_regs and aarch64_mems arrays.  The
   insn_decode_record itself is not freed (it lives on the caller's
   stack — see aarch64_process_record).  */

static void
deallocate_reg_mem (insn_decode_record *record)
{
  xfree (record->aarch64_regs);   /* Recorded register list.  */
  xfree (record->aarch64_mems);   /* Recorded memory list.  */
}
3765 /* Parse the current instruction and record the values of the registers and
3766 memory that will be changed in current instruction to record_arch_list
3767 return -1 if something is wrong. */
3770 aarch64_process_record (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
3771 CORE_ADDR insn_addr
)
3773 uint32_t rec_no
= 0;
3774 uint8_t insn_size
= 4;
3776 ULONGEST t_bit
= 0, insn_id
= 0;
3777 gdb_byte buf
[insn_size
];
3778 insn_decode_record aarch64_record
;
3780 memset (&buf
[0], 0, insn_size
);
3781 memset (&aarch64_record
, 0, sizeof (insn_decode_record
));
3782 target_read_memory (insn_addr
, &buf
[0], insn_size
);
3783 aarch64_record
.aarch64_insn
3784 = (uint32_t) extract_unsigned_integer (&buf
[0],
3786 gdbarch_byte_order (gdbarch
));
3787 aarch64_record
.regcache
= regcache
;
3788 aarch64_record
.this_addr
= insn_addr
;
3789 aarch64_record
.gdbarch
= gdbarch
;
3791 ret
= aarch64_record_decode_insn_handler (&aarch64_record
);
3792 if (ret
== AARCH64_RECORD_UNSUPPORTED
)
3794 printf_unfiltered (_("Process record does not support instruction "
3795 "0x%0x at address %s.\n"),
3796 aarch64_record
.aarch64_insn
,
3797 paddress (gdbarch
, insn_addr
));
3803 /* Record registers. */
3804 record_full_arch_list_add_reg (aarch64_record
.regcache
,
3806 /* Always record register CPSR. */
3807 record_full_arch_list_add_reg (aarch64_record
.regcache
,
3808 AARCH64_CPSR_REGNUM
);
3809 if (aarch64_record
.aarch64_regs
)
3810 for (rec_no
= 0; rec_no
< aarch64_record
.reg_rec_count
; rec_no
++)
3811 if (record_full_arch_list_add_reg (aarch64_record
.regcache
,
3812 aarch64_record
.aarch64_regs
[rec_no
]))
3815 /* Record memories. */
3816 if (aarch64_record
.aarch64_mems
)
3817 for (rec_no
= 0; rec_no
< aarch64_record
.mem_rec_count
; rec_no
++)
3818 if (record_full_arch_list_add_mem
3819 ((CORE_ADDR
)aarch64_record
.aarch64_mems
[rec_no
].addr
,
3820 aarch64_record
.aarch64_mems
[rec_no
].len
))
3823 if (record_full_arch_list_add_end ())
3827 deallocate_reg_mem (&aarch64_record
);