Commit | Line | Data |
---|---|---|
07b287a0 MS |
1 | /* Common target dependent code for GDB on AArch64 systems. |
2 | ||
42a4f53d | 3 | Copyright (C) 2009-2019 Free Software Foundation, Inc. |
07b287a0 MS |
4 | Contributed by ARM Ltd. |
5 | ||
6 | This file is part of GDB. | |
7 | ||
8 | This program is free software; you can redistribute it and/or modify | |
9 | it under the terms of the GNU General Public License as published by | |
10 | the Free Software Foundation; either version 3 of the License, or | |
11 | (at your option) any later version. | |
12 | ||
13 | This program is distributed in the hope that it will be useful, | |
14 | but WITHOUT ANY WARRANTY; without even the implied warranty of | |
15 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
16 | GNU General Public License for more details. | |
17 | ||
18 | You should have received a copy of the GNU General Public License | |
19 | along with this program. If not, see <http://www.gnu.org/licenses/>. */ | |
20 | ||
21 | #include "defs.h" | |
22 | ||
23 | #include "frame.h" | |
24 | #include "inferior.h" | |
25 | #include "gdbcmd.h" | |
26 | #include "gdbcore.h" | |
07b287a0 MS |
27 | #include "dis-asm.h" |
28 | #include "regcache.h" | |
29 | #include "reggroups.h" | |
07b287a0 MS |
30 | #include "value.h" |
31 | #include "arch-utils.h" | |
32 | #include "osabi.h" | |
33 | #include "frame-unwind.h" | |
34 | #include "frame-base.h" | |
35 | #include "trad-frame.h" | |
36 | #include "objfiles.h" | |
11e1b75f | 37 | #include "dwarf2.h" |
07b287a0 MS |
38 | #include "dwarf2-frame.h" |
39 | #include "gdbtypes.h" | |
40 | #include "prologue-value.h" | |
41 | #include "target-descriptions.h" | |
42 | #include "user-regs.h" | |
43 | #include "language.h" | |
44 | #include "infcall.h" | |
ea873d8e PL |
45 | #include "ax.h" |
46 | #include "ax-gdb.h" | |
0747795c | 47 | #include "common/selftest.h" |
07b287a0 MS |
48 | |
49 | #include "aarch64-tdep.h" | |
e8bf1ce4 | 50 | #include "aarch64-ravenscar-thread.h" |
07b287a0 MS |
51 | |
52 | #include "elf-bfd.h" | |
53 | #include "elf/aarch64.h" | |
54 | ||
0747795c | 55 | #include "common/vec.h" |
07b287a0 | 56 | |
99afc88b OJ |
57 | #include "record.h" |
58 | #include "record-full.h" | |
787749ea PL |
59 | #include "arch/aarch64-insn.h" |
60 | ||
f77ee802 | 61 | #include "opcode/aarch64.h" |
325fac50 | 62 | #include <algorithm> |
f77ee802 YQ |
63 | |
64 | #define submask(x) ((1L << ((x) + 1)) - 1) | |
65 | #define bit(obj,st) (((obj) >> (st)) & 1) | |
66 | #define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st))) | |
67 | ||
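These three helpers are plain mask-and-shift field extractors over an instruction word. As a minimal standalone sketch (using the "mov x29, sp" encoding 0x910003fd that also appears in the self-tests further down; the field positions are the standard Rd/Rn positions of the ADD-immediate form), they can be exercised like this:

    #include <stdio.h>
    #include <stdint.h>

    #define submask(x) ((1L << ((x) + 1)) - 1)
    #define bit(obj,st) (((obj) >> (st)) & 1)
    #define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))

    int main (void)
    {
      uint32_t insn = 0x910003fd;   /* mov x29, sp, i.e. add x29, sp, #0.  */

      /* bits (insn, 0, 4) keeps bit positions 0..4 inclusive, here the Rd field.  */
      printf ("Rd = %lu\n", (unsigned long) bits (insn, 0, 4));    /* 29 */
      printf ("Rn = %lu\n", (unsigned long) bits (insn, 5, 9));    /* 31, i.e. sp */
      printf ("bit 31 = %lu\n", (unsigned long) bit (insn, 31));   /* 1 */
      return 0;
    }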
ea92689a AH |
68 | /* A Homogeneous Floating-Point or Short-Vector Aggregate may have at most |
69 | four members. */ | |
70 | #define HA_MAX_NUM_FLDS 4 | |
71 | ||
95228a0d | 72 | /* All possible aarch64 target descriptors. */ |
6dc0ebde | 73 | struct target_desc *tdesc_aarch64_list[AARCH64_MAX_SVE_VQ + 1][2/*pauth*/]; |
95228a0d | 74 | |
07b287a0 MS |
75 | /* The standard register names, and all the valid aliases for them. */ |
76 | static const struct | |
77 | { | |
78 | const char *const name; | |
79 | int regnum; | |
80 | } aarch64_register_aliases[] = | |
81 | { | |
82 | /* 64-bit register names. */ | |
83 | {"fp", AARCH64_FP_REGNUM}, | |
84 | {"lr", AARCH64_LR_REGNUM}, | |
85 | {"sp", AARCH64_SP_REGNUM}, | |
86 | ||
87 | /* 32-bit register names. */ | |
88 | {"w0", AARCH64_X0_REGNUM + 0}, | |
89 | {"w1", AARCH64_X0_REGNUM + 1}, | |
90 | {"w2", AARCH64_X0_REGNUM + 2}, | |
91 | {"w3", AARCH64_X0_REGNUM + 3}, | |
92 | {"w4", AARCH64_X0_REGNUM + 4}, | |
93 | {"w5", AARCH64_X0_REGNUM + 5}, | |
94 | {"w6", AARCH64_X0_REGNUM + 6}, | |
95 | {"w7", AARCH64_X0_REGNUM + 7}, | |
96 | {"w8", AARCH64_X0_REGNUM + 8}, | |
97 | {"w9", AARCH64_X0_REGNUM + 9}, | |
98 | {"w10", AARCH64_X0_REGNUM + 10}, | |
99 | {"w11", AARCH64_X0_REGNUM + 11}, | |
100 | {"w12", AARCH64_X0_REGNUM + 12}, | |
101 | {"w13", AARCH64_X0_REGNUM + 13}, | |
102 | {"w14", AARCH64_X0_REGNUM + 14}, | |
103 | {"w15", AARCH64_X0_REGNUM + 15}, | |
104 | {"w16", AARCH64_X0_REGNUM + 16}, | |
105 | {"w17", AARCH64_X0_REGNUM + 17}, | |
106 | {"w18", AARCH64_X0_REGNUM + 18}, | |
107 | {"w19", AARCH64_X0_REGNUM + 19}, | |
108 | {"w20", AARCH64_X0_REGNUM + 20}, | |
109 | {"w21", AARCH64_X0_REGNUM + 21}, | |
110 | {"w22", AARCH64_X0_REGNUM + 22}, | |
111 | {"w23", AARCH64_X0_REGNUM + 23}, | |
112 | {"w24", AARCH64_X0_REGNUM + 24}, | |
113 | {"w25", AARCH64_X0_REGNUM + 25}, | |
114 | {"w26", AARCH64_X0_REGNUM + 26}, | |
115 | {"w27", AARCH64_X0_REGNUM + 27}, | |
116 | {"w28", AARCH64_X0_REGNUM + 28}, | |
117 | {"w29", AARCH64_X0_REGNUM + 29}, | |
118 | {"w30", AARCH64_X0_REGNUM + 30}, | |
119 | ||
120 | /* specials */ | |
121 | {"ip0", AARCH64_X0_REGNUM + 16}, | |
122 | {"ip1", AARCH64_X0_REGNUM + 17} | |
123 | }; | |
124 | ||
125 | /* The required core 'R' registers. */ | |
126 | static const char *const aarch64_r_register_names[] = | |
127 | { | |
128 | /* These registers must appear in consecutive RAW register number | |
129 | order and they must begin with AARCH64_X0_REGNUM! */ | |
130 | "x0", "x1", "x2", "x3", | |
131 | "x4", "x5", "x6", "x7", | |
132 | "x8", "x9", "x10", "x11", | |
133 | "x12", "x13", "x14", "x15", | |
134 | "x16", "x17", "x18", "x19", | |
135 | "x20", "x21", "x22", "x23", | |
136 | "x24", "x25", "x26", "x27", | |
137 | "x28", "x29", "x30", "sp", | |
138 | "pc", "cpsr" | |
139 | }; | |
140 | ||
141 | /* The FP/SIMD 'V' registers. */ | |
142 | static const char *const aarch64_v_register_names[] = | |
143 | { | |
144 | /* These registers must appear in consecutive RAW register number | |
145 | order and they must begin with AARCH64_V0_REGNUM! */ | |
146 | "v0", "v1", "v2", "v3", | |
147 | "v4", "v5", "v6", "v7", | |
148 | "v8", "v9", "v10", "v11", | |
149 | "v12", "v13", "v14", "v15", | |
150 | "v16", "v17", "v18", "v19", | |
151 | "v20", "v21", "v22", "v23", | |
152 | "v24", "v25", "v26", "v27", | |
153 | "v28", "v29", "v30", "v31", | |
154 | "fpsr", | |
155 | "fpcr" | |
156 | }; | |
157 | ||
739e8682 AH |
158 | /* The SVE 'Z' and 'P' registers. */ |
159 | static const char *const aarch64_sve_register_names[] = | |
160 | { | |
161 | /* These registers must appear in consecutive RAW register number | |
162 | order and they must begin with AARCH64_SVE_Z0_REGNUM! */ | |
163 | "z0", "z1", "z2", "z3", | |
164 | "z4", "z5", "z6", "z7", | |
165 | "z8", "z9", "z10", "z11", | |
166 | "z12", "z13", "z14", "z15", | |
167 | "z16", "z17", "z18", "z19", | |
168 | "z20", "z21", "z22", "z23", | |
169 | "z24", "z25", "z26", "z27", | |
170 | "z28", "z29", "z30", "z31", | |
171 | "fpsr", "fpcr", | |
172 | "p0", "p1", "p2", "p3", | |
173 | "p4", "p5", "p6", "p7", | |
174 | "p8", "p9", "p10", "p11", | |
175 | "p12", "p13", "p14", "p15", | |
176 | "ffr", "vg" | |
177 | }; | |
178 | ||
76bed0fd AH |
179 | static const char *const aarch64_pauth_register_names[] = |
180 | { | |
181 | /* Authentication mask for data pointer. */ | |
182 | "pauth_dmask", | |
183 | /* Authentication mask for code pointer. */ | |
184 | "pauth_cmask" | |
185 | }; | |
186 | ||
07b287a0 MS |
187 | /* AArch64 prologue cache structure. */ |
188 | struct aarch64_prologue_cache | |
189 | { | |
db634143 PL |
190 | /* The program counter at the start of the function. It is used to |
191 | identify this frame as a prologue frame. */ | |
192 | CORE_ADDR func; | |
193 | ||
194 | /* The program counter at the time this frame was created; i.e. where | |
195 | this function was called from. It is used to identify this frame as a | |
196 | stub frame. */ | |
197 | CORE_ADDR prev_pc; | |
198 | ||
07b287a0 MS |
199 | /* The stack pointer at the time this frame was created; i.e. the |
200 | caller's stack pointer when this function was called. It is used | |
201 | to identify this frame. */ | |
202 | CORE_ADDR prev_sp; | |
203 | ||
7dfa3edc PL |
204 | /* Is the target available to read from? */ |
205 | int available_p; | |
206 | ||
07b287a0 MS |
207 | /* The frame base for this frame is just prev_sp - frame size. |
208 | FRAMESIZE is the distance from the frame pointer to the | |
209 | initial stack pointer. */ | |
210 | int framesize; | |
211 | ||
212 | /* The register used to hold the frame pointer for this frame. */ | |
213 | int framereg; | |
214 | ||
215 | /* Saved register offsets. */ | |
216 | struct trad_frame_saved_reg *saved_regs; | |
217 | }; | |
218 | ||
07b287a0 MS |
219 | static void |
220 | show_aarch64_debug (struct ui_file *file, int from_tty, | |
221 | struct cmd_list_element *c, const char *value) | |
222 | { | |
223 | fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value); | |
224 | } | |
225 | ||
ffdbe864 YQ |
226 | namespace { |
227 | ||
4d9a9006 YQ |
228 | /* Abstract instruction reader. */ |
229 | ||
230 | class abstract_instruction_reader | |
231 | { | |
232 | public: | |
233 | /* Read in one instruction. */ | |
234 | virtual ULONGEST read (CORE_ADDR memaddr, int len, | |
235 | enum bfd_endian byte_order) = 0; | |
236 | }; | |
237 | ||
238 | /* Instruction reader from real target. */ | |
239 | ||
240 | class instruction_reader : public abstract_instruction_reader | |
241 | { | |
242 | public: | |
243 | ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order) | |
632e107b | 244 | override |
4d9a9006 | 245 | { |
fc2f703e | 246 | return read_code_unsigned_integer (memaddr, len, byte_order); |
4d9a9006 YQ |
247 | } |
248 | }; | |
249 | ||
ffdbe864 YQ |
250 | } // namespace |
251 | ||
11e1b75f AH |
252 | /* If address signing is enabled, mask off the signature bits from ADDR, using |
253 | the register values in THIS_FRAME. */ | |
254 | ||
255 | static CORE_ADDR | |
256 | aarch64_frame_unmask_address (struct gdbarch_tdep *tdep, | |
257 | struct frame_info *this_frame, | |
258 | CORE_ADDR addr) | |
259 | { | |
260 | if (tdep->has_pauth () | |
261 | && frame_unwind_register_unsigned (this_frame, | |
262 | tdep->pauth_ra_state_regnum)) | |
263 | { | |
264 | int cmask_num = AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base); | |
265 | CORE_ADDR cmask = frame_unwind_register_unsigned (this_frame, cmask_num); | |
266 | addr = addr & ~cmask; | |
267 | } | |
268 | ||
269 | return addr; | |
270 | } | |
271 | ||
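The unmasking itself is just an AND with the complement of the code mask read from pauth_cmask. A standalone sketch of that arithmetic, with both values invented purely for illustration:

    #include <stdio.h>
    #include <inttypes.h>

    int main (void)
    {
      /* Illustrative values only: a link register whose upper bits carry a
         pointer-authentication code, and a code mask such as the one held in
         pauth_cmask.  Clearing the masked bits recovers the raw return
         address, which is what aarch64_frame_unmask_address computes.  */
      uint64_t signed_lr = 0x002d0000004005b4ULL;
      uint64_t cmask = 0x007fff8000000000ULL;

      uint64_t addr = signed_lr & ~cmask;
      printf ("unmasked = 0x%016" PRIx64 "\n", addr);   /* 0x00000000004005b4 */
      return 0;
    }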
07b287a0 MS |
272 | /* Analyze a prologue, looking for a recognizable stack frame |
273 | and frame pointer. Scan until we encounter a store that could | |
274 | clobber the stack frame unexpectedly, or an unknown instruction. */ | |
275 | ||
276 | static CORE_ADDR | |
277 | aarch64_analyze_prologue (struct gdbarch *gdbarch, | |
278 | CORE_ADDR start, CORE_ADDR limit, | |
4d9a9006 YQ |
279 | struct aarch64_prologue_cache *cache, |
280 | abstract_instruction_reader& reader) | |
07b287a0 MS |
281 | { |
282 | enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch); | |
283 | int i; | |
187f5d00 YQ |
284 | /* Track X registers and D registers in prologue. */ |
285 | pv_t regs[AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT]; | |
07b287a0 | 286 | |
187f5d00 | 287 | for (i = 0; i < AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT; i++) |
07b287a0 | 288 | regs[i] = pv_register (i, 0); |
f7b7ed97 | 289 | pv_area stack (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch)); |
07b287a0 MS |
290 | |
291 | for (; start < limit; start += 4) | |
292 | { | |
293 | uint32_t insn; | |
d9ebcbce | 294 | aarch64_inst inst; |
07b287a0 | 295 | |
4d9a9006 | 296 | insn = reader.read (start, 4, byte_order_for_code); |
07b287a0 | 297 | |
561a72d4 | 298 | if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0) |
d9ebcbce YQ |
299 | break; |
300 | ||
301 | if (inst.opcode->iclass == addsub_imm | |
302 | && (inst.opcode->op == OP_ADD | |
303 | || strcmp ("sub", inst.opcode->name) == 0)) | |
07b287a0 | 304 | { |
d9ebcbce YQ |
305 | unsigned rd = inst.operands[0].reg.regno; |
306 | unsigned rn = inst.operands[1].reg.regno; | |
307 | ||
308 | gdb_assert (aarch64_num_of_operands (inst.opcode) == 3); | |
309 | gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP); | |
310 | gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP); | |
311 | gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM); | |
312 | ||
313 | if (inst.opcode->op == OP_ADD) | |
314 | { | |
315 | regs[rd] = pv_add_constant (regs[rn], | |
316 | inst.operands[2].imm.value); | |
317 | } | |
318 | else | |
319 | { | |
320 | regs[rd] = pv_add_constant (regs[rn], | |
321 | -inst.operands[2].imm.value); | |
322 | } | |
323 | } | |
324 | else if (inst.opcode->iclass == pcreladdr | |
325 | && inst.operands[1].type == AARCH64_OPND_ADDR_ADRP) | |
326 | { | |
327 | gdb_assert (aarch64_num_of_operands (inst.opcode) == 2); | |
328 | gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd); | |
329 | ||
330 | regs[inst.operands[0].reg.regno] = pv_unknown (); | |
07b287a0 | 331 | } |
d9ebcbce | 332 | else if (inst.opcode->iclass == branch_imm) |
07b287a0 MS |
333 | { |
334 | /* Stop analysis on branch. */ | |
335 | break; | |
336 | } | |
d9ebcbce | 337 | else if (inst.opcode->iclass == condbranch) |
07b287a0 MS |
338 | { |
339 | /* Stop analysis on branch. */ | |
340 | break; | |
341 | } | |
d9ebcbce | 342 | else if (inst.opcode->iclass == branch_reg) |
07b287a0 MS |
343 | { |
344 | /* Stop analysis on branch. */ | |
345 | break; | |
346 | } | |
d9ebcbce | 347 | else if (inst.opcode->iclass == compbranch) |
07b287a0 MS |
348 | { |
349 | /* Stop analysis on branch. */ | |
350 | break; | |
351 | } | |
d9ebcbce YQ |
352 | else if (inst.opcode->op == OP_MOVZ) |
353 | { | |
354 | gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd); | |
355 | regs[inst.operands[0].reg.regno] = pv_unknown (); | |
356 | } | |
357 | else if (inst.opcode->iclass == log_shift | |
358 | && strcmp (inst.opcode->name, "orr") == 0) | |
07b287a0 | 359 | { |
d9ebcbce YQ |
360 | unsigned rd = inst.operands[0].reg.regno; |
361 | unsigned rn = inst.operands[1].reg.regno; | |
362 | unsigned rm = inst.operands[2].reg.regno; | |
363 | ||
364 | gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd); | |
365 | gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn); | |
366 | gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT); | |
367 | ||
368 | if (inst.operands[2].shifter.amount == 0 | |
369 | && rn == AARCH64_SP_REGNUM) | |
07b287a0 MS |
370 | regs[rd] = regs[rm]; |
371 | else | |
372 | { | |
373 | if (aarch64_debug) | |
b277c936 PL |
374 | { |
375 | debug_printf ("aarch64: prologue analysis gave up " | |
0a0da556 | 376 | "addr=%s opcode=0x%x (orr x register)\n", |
b277c936 PL |
377 | core_addr_to_string_nz (start), insn); |
378 | } | |
07b287a0 MS |
379 | break; |
380 | } | |
381 | } | |
d9ebcbce | 382 | else if (inst.opcode->op == OP_STUR) |
07b287a0 | 383 | { |
d9ebcbce YQ |
384 | unsigned rt = inst.operands[0].reg.regno; |
385 | unsigned rn = inst.operands[1].addr.base_regno; | |
386 | int is64 | |
387 | = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8); | |
388 | ||
389 | gdb_assert (aarch64_num_of_operands (inst.opcode) == 2); | |
390 | gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt); | |
391 | gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9); | |
392 | gdb_assert (!inst.operands[1].addr.offset.is_reg); | |
393 | ||
f7b7ed97 TT |
394 | stack.store (pv_add_constant (regs[rn], |
395 | inst.operands[1].addr.offset.imm), | |
396 | is64 ? 8 : 4, regs[rt]); | |
07b287a0 | 397 | } |
d9ebcbce | 398 | else if ((inst.opcode->iclass == ldstpair_off |
03bcd739 YQ |
399 | || (inst.opcode->iclass == ldstpair_indexed |
400 | && inst.operands[2].addr.preind)) | |
d9ebcbce | 401 | && strcmp ("stp", inst.opcode->name) == 0) |
07b287a0 | 402 | { |
03bcd739 | 403 | /* STP with addressing mode Pre-indexed and Base register. */ |
187f5d00 YQ |
404 | unsigned rt1; |
405 | unsigned rt2; | |
d9ebcbce YQ |
406 | unsigned rn = inst.operands[2].addr.base_regno; |
407 | int32_t imm = inst.operands[2].addr.offset.imm; | |
408 | ||
187f5d00 YQ |
409 | gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt |
410 | || inst.operands[0].type == AARCH64_OPND_Ft); | |
411 | gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2 | |
412 | || inst.operands[1].type == AARCH64_OPND_Ft2); | |
d9ebcbce YQ |
413 | gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7); |
414 | gdb_assert (!inst.operands[2].addr.offset.is_reg); | |
415 | ||
07b287a0 MS |
416 | /* If recording this store would invalidate the store area |
417 | (perhaps because rn is not known) then we should abandon | |
418 | further prologue analysis. */ | |
f7b7ed97 | 419 | if (stack.store_would_trash (pv_add_constant (regs[rn], imm))) |
07b287a0 MS |
420 | break; |
421 | ||
f7b7ed97 | 422 | if (stack.store_would_trash (pv_add_constant (regs[rn], imm + 8))) |
07b287a0 MS |
423 | break; |
424 | ||
187f5d00 YQ |
425 | rt1 = inst.operands[0].reg.regno; |
426 | rt2 = inst.operands[1].reg.regno; | |
427 | if (inst.operands[0].type == AARCH64_OPND_Ft) | |
428 | { | |
429 | /* Only the bottom 64 bits of each V register (D register) need | |
430 | to be preserved. */ | |
431 | gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D); | |
432 | rt1 += AARCH64_X_REGISTER_COUNT; | |
433 | rt2 += AARCH64_X_REGISTER_COUNT; | |
434 | } | |
435 | ||
f7b7ed97 TT |
436 | stack.store (pv_add_constant (regs[rn], imm), 8, |
437 | regs[rt1]); | |
438 | stack.store (pv_add_constant (regs[rn], imm + 8), 8, | |
439 | regs[rt2]); | |
14ac654f | 440 | |
d9ebcbce | 441 | if (inst.operands[2].addr.writeback) |
93d96012 | 442 | regs[rn] = pv_add_constant (regs[rn], imm); |
07b287a0 | 443 | |
07b287a0 | 444 | } |
432ec081 YQ |
445 | else if ((inst.opcode->iclass == ldst_imm9 /* Signed immediate. */ |
446 | || (inst.opcode->iclass == ldst_pos /* Unsigned immediate. */ | |
447 | && (inst.opcode->op == OP_STR_POS | |
448 | || inst.opcode->op == OP_STRF_POS))) | |
449 | && inst.operands[1].addr.base_regno == AARCH64_SP_REGNUM | |
450 | && strcmp ("str", inst.opcode->name) == 0) | |
451 | { | |
452 | /* STR (immediate) */ | |
453 | unsigned int rt = inst.operands[0].reg.regno; | |
454 | int32_t imm = inst.operands[1].addr.offset.imm; | |
455 | unsigned int rn = inst.operands[1].addr.base_regno; | |
456 | bool is64 | |
457 | = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8); | |
458 | gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt | |
459 | || inst.operands[0].type == AARCH64_OPND_Ft); | |
460 | ||
461 | if (inst.operands[0].type == AARCH64_OPND_Ft) | |
462 | { | |
463 | /* Only the bottom 64 bits of each V register (D register) need | |
464 | to be preserved. */ | |
465 | gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D); | |
466 | rt += AARCH64_X_REGISTER_COUNT; | |
467 | } | |
468 | ||
f7b7ed97 TT |
469 | stack.store (pv_add_constant (regs[rn], imm), |
470 | is64 ? 8 : 4, regs[rt]); | |
432ec081 YQ |
471 | if (inst.operands[1].addr.writeback) |
472 | regs[rn] = pv_add_constant (regs[rn], imm); | |
473 | } | |
d9ebcbce | 474 | else if (inst.opcode->iclass == testbranch) |
07b287a0 MS |
475 | { |
476 | /* Stop analysis on branch. */ | |
477 | break; | |
478 | } | |
17e116a7 AH |
479 | else if (inst.opcode->iclass == ic_system) |
480 | { | |
481 | struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch); | |
482 | int ra_state_val = 0; | |
483 | ||
484 | if (insn == 0xd503233f /* paciasp. */ | |
485 | || insn == 0xd503237f /* pacibsp. */) | |
486 | { | |
487 | /* Return addresses are mangled. */ | |
488 | ra_state_val = 1; | |
489 | } | |
490 | else if (insn == 0xd50323bf /* autiasp. */ | |
491 | || insn == 0xd50323ff /* autibsp. */) | |
492 | { | |
493 | /* Return addresses are not mangled. */ | |
494 | ra_state_val = 0; | |
495 | } | |
496 | else | |
497 | { | |
498 | if (aarch64_debug) | |
499 | debug_printf ("aarch64: prologue analysis gave up addr=%s" | |
500 | " opcode=0x%x (iclass)\n", | |
501 | core_addr_to_string_nz (start), insn); | |
502 | break; | |
503 | } | |
504 | ||
505 | if (tdep->has_pauth () && cache != nullptr) | |
506 | trad_frame_set_value (cache->saved_regs, | |
507 | tdep->pauth_ra_state_regnum, | |
508 | ra_state_val); | |
509 | } | |
07b287a0 MS |
510 | else |
511 | { | |
512 | if (aarch64_debug) | |
b277c936 | 513 | { |
0a0da556 | 514 | debug_printf ("aarch64: prologue analysis gave up addr=%s" |
b277c936 PL |
515 | " opcode=0x%x\n", |
516 | core_addr_to_string_nz (start), insn); | |
517 | } | |
07b287a0 MS |
518 | break; |
519 | } | |
520 | } | |
521 | ||
522 | if (cache == NULL) | |
f7b7ed97 | 523 | return start; |
07b287a0 MS |
524 | |
525 | if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM)) | |
526 | { | |
527 | /* Frame pointer is fp. Frame size is constant. */ | |
528 | cache->framereg = AARCH64_FP_REGNUM; | |
529 | cache->framesize = -regs[AARCH64_FP_REGNUM].k; | |
530 | } | |
531 | else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM)) | |
532 | { | |
533 | /* Try the stack pointer. */ | |
534 | cache->framesize = -regs[AARCH64_SP_REGNUM].k; | |
535 | cache->framereg = AARCH64_SP_REGNUM; | |
536 | } | |
537 | else | |
538 | { | |
539 | /* We're just out of luck. We don't know where the frame is. */ | |
540 | cache->framereg = -1; | |
541 | cache->framesize = 0; | |
542 | } | |
543 | ||
544 | for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++) | |
545 | { | |
546 | CORE_ADDR offset; | |
547 | ||
f7b7ed97 | 548 | if (stack.find_reg (gdbarch, i, &offset)) |
07b287a0 MS |
549 | cache->saved_regs[i].addr = offset; |
550 | } | |
551 | ||
187f5d00 YQ |
552 | for (i = 0; i < AARCH64_D_REGISTER_COUNT; i++) |
553 | { | |
554 | int regnum = gdbarch_num_regs (gdbarch); | |
555 | CORE_ADDR offset; | |
556 | ||
f7b7ed97 TT |
557 | if (stack.find_reg (gdbarch, i + AARCH64_X_REGISTER_COUNT, |
558 | &offset)) | |
187f5d00 YQ |
559 | cache->saved_regs[i + regnum + AARCH64_D0_REGNUM].addr = offset; |
560 | } | |
561 | ||
07b287a0 MS |
562 | return start; |
563 | } | |
564 | ||
4d9a9006 YQ |
565 | static CORE_ADDR |
566 | aarch64_analyze_prologue (struct gdbarch *gdbarch, | |
567 | CORE_ADDR start, CORE_ADDR limit, | |
568 | struct aarch64_prologue_cache *cache) | |
569 | { | |
570 | instruction_reader reader; | |
571 | ||
572 | return aarch64_analyze_prologue (gdbarch, start, limit, cache, | |
573 | reader); | |
574 | } | |
575 | ||
576 | #if GDB_SELF_TEST | |
577 | ||
578 | namespace selftests { | |
579 | ||
580 | /* Instruction reader from manually cooked instruction sequences. */ | |
581 | ||
582 | class instruction_reader_test : public abstract_instruction_reader | |
583 | { | |
584 | public: | |
585 | template<size_t SIZE> | |
586 | explicit instruction_reader_test (const uint32_t (&insns)[SIZE]) | |
587 | : m_insns (insns), m_insns_size (SIZE) | |
588 | {} | |
589 | ||
590 | ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order) | |
632e107b | 591 | override |
4d9a9006 YQ |
592 | { |
593 | SELF_CHECK (len == 4); | |
594 | SELF_CHECK (memaddr % 4 == 0); | |
595 | SELF_CHECK (memaddr / 4 < m_insns_size); | |
596 | ||
597 | return m_insns[memaddr / 4]; | |
598 | } | |
599 | ||
600 | private: | |
601 | const uint32_t *m_insns; | |
602 | size_t m_insns_size; | |
603 | }; | |
604 | ||
605 | static void | |
606 | aarch64_analyze_prologue_test (void) | |
607 | { | |
608 | struct gdbarch_info info; | |
609 | ||
610 | gdbarch_info_init (&info); | |
611 | info.bfd_arch_info = bfd_scan_arch ("aarch64"); | |
612 | ||
613 | struct gdbarch *gdbarch = gdbarch_find_by_info (info); | |
614 | SELF_CHECK (gdbarch != NULL); | |
615 | ||
17e116a7 AH |
616 | struct aarch64_prologue_cache cache; |
617 | cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch); | |
618 | ||
619 | struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch); | |
620 | ||
4d9a9006 YQ |
621 | /* Test the simple prologue in which frame pointer is used. */ |
622 | { | |
4d9a9006 YQ |
623 | static const uint32_t insns[] = { |
624 | 0xa9af7bfd, /* stp x29, x30, [sp,#-272]! */ | |
625 | 0x910003fd, /* mov x29, sp */ | |
626 | 0x97ffffe6, /* bl 0x400580 */ | |
627 | }; | |
628 | instruction_reader_test reader (insns); | |
629 | ||
630 | CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader); | |
631 | SELF_CHECK (end == 4 * 2); | |
632 | ||
633 | SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM); | |
634 | SELF_CHECK (cache.framesize == 272); | |
635 | ||
636 | for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++) | |
637 | { | |
638 | if (i == AARCH64_FP_REGNUM) | |
639 | SELF_CHECK (cache.saved_regs[i].addr == -272); | |
640 | else if (i == AARCH64_LR_REGNUM) | |
641 | SELF_CHECK (cache.saved_regs[i].addr == -264); | |
642 | else | |
643 | SELF_CHECK (cache.saved_regs[i].addr == -1); | |
644 | } | |
645 | ||
646 | for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++) | |
647 | { | |
648 | int regnum = gdbarch_num_regs (gdbarch); | |
649 | ||
650 | SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr | |
651 | == -1); | |
652 | } | |
653 | } | |
432ec081 YQ |
654 | |
655 | /* Test a prologue in which STR is used and frame pointer is not | |
656 | used. */ | |
657 | { | |
432ec081 YQ |
658 | static const uint32_t insns[] = { |
659 | 0xf81d0ff3, /* str x19, [sp, #-48]! */ | |
660 | 0xb9002fe0, /* str w0, [sp, #44] */ | |
661 | 0xf90013e1, /* str x1, [sp, #32]*/ | |
662 | 0xfd000fe0, /* str d0, [sp, #24] */ | |
663 | 0xaa0203f3, /* mov x19, x2 */ | |
664 | 0xf94013e0, /* ldr x0, [sp, #32] */ | |
665 | }; | |
666 | instruction_reader_test reader (insns); | |
667 | ||
668 | CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader); | |
669 | ||
670 | SELF_CHECK (end == 4 * 5); | |
671 | ||
672 | SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM); | |
673 | SELF_CHECK (cache.framesize == 48); | |
674 | ||
675 | for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++) | |
676 | { | |
677 | if (i == 1) | |
678 | SELF_CHECK (cache.saved_regs[i].addr == -16); | |
679 | else if (i == 19) | |
680 | SELF_CHECK (cache.saved_regs[i].addr == -48); | |
681 | else | |
682 | SELF_CHECK (cache.saved_regs[i].addr == -1); | |
683 | } | |
684 | ||
685 | for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++) | |
686 | { | |
687 | int regnum = gdbarch_num_regs (gdbarch); | |
688 | ||
689 | if (i == 0) | |
690 | SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr | |
691 | == -24); | |
692 | else | |
693 | SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr | |
694 | == -1); | |
695 | } | |
696 | } | |
17e116a7 AH |
697 | |
698 | /* Test a prologue in which there is a return address signing instruction. */ | |
699 | if (tdep->has_pauth ()) | |
700 | { | |
701 | static const uint32_t insns[] = { | |
702 | 0xd503233f, /* paciasp */ | |
703 | 0xa9bd7bfd, /* stp x29, x30, [sp, #-48]! */ | |
704 | 0x910003fd, /* mov x29, sp */ | |
705 | 0xf801c3f3, /* str x19, [sp, #28] */ | |
706 | 0xb9401fa0, /* ldr x19, [x29, #28] */ | |
707 | }; | |
708 | instruction_reader_test reader (insns); | |
709 | ||
710 | CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, | |
711 | reader); | |
712 | ||
713 | SELF_CHECK (end == 4 * 4); | |
714 | SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM); | |
715 | SELF_CHECK (cache.framesize == 48); | |
716 | ||
717 | for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++) | |
718 | { | |
719 | if (i == 19) | |
720 | SELF_CHECK (cache.saved_regs[i].addr == -20); | |
721 | else if (i == AARCH64_FP_REGNUM) | |
722 | SELF_CHECK (cache.saved_regs[i].addr == -48); | |
723 | else if (i == AARCH64_LR_REGNUM) | |
724 | SELF_CHECK (cache.saved_regs[i].addr == -40); | |
725 | else | |
726 | SELF_CHECK (cache.saved_regs[i].addr == -1); | |
727 | } | |
728 | ||
729 | if (tdep->has_pauth ()) | |
730 | { | |
731 | SELF_CHECK (trad_frame_value_p (cache.saved_regs, | |
732 | tdep->pauth_ra_state_regnum)); | |
733 | SELF_CHECK (cache.saved_regs[tdep->pauth_ra_state_regnum].addr == 1); | |
734 | } | |
735 | } | |
4d9a9006 YQ |
736 | } |
737 | } // namespace selftests | |
738 | #endif /* GDB_SELF_TEST */ | |
739 | ||
07b287a0 MS |
740 | /* Implement the "skip_prologue" gdbarch method. */ |
741 | ||
742 | static CORE_ADDR | |
743 | aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc) | |
744 | { | |
07b287a0 | 745 | CORE_ADDR func_addr, limit_pc; |
07b287a0 MS |
746 | |
747 | /* See if we can determine the end of the prologue via the symbol | |
748 | table. If so, then return either PC, or the PC after the | |
749 | prologue, whichever is greater. */ | |
750 | if (find_pc_partial_function (pc, NULL, &func_addr, NULL)) | |
751 | { | |
752 | CORE_ADDR post_prologue_pc | |
753 | = skip_prologue_using_sal (gdbarch, func_addr); | |
754 | ||
755 | if (post_prologue_pc != 0) | |
325fac50 | 756 | return std::max (pc, post_prologue_pc); |
07b287a0 MS |
757 | } |
758 | ||
759 | /* Can't determine prologue from the symbol table, need to examine | |
760 | instructions. */ | |
761 | ||
762 | /* Find an upper limit on the function prologue using the debug | |
763 | information. If the debug information could not be used to | |
764 | provide that bound, then use an arbitrary large number as the | |
765 | upper bound. */ | |
766 | limit_pc = skip_prologue_using_sal (gdbarch, pc); | |
767 | if (limit_pc == 0) | |
768 | limit_pc = pc + 128; /* Magic. */ | |
769 | ||
770 | /* Try disassembling prologue. */ | |
771 | return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL); | |
772 | } | |
773 | ||
774 | /* Scan the function prologue for THIS_FRAME and populate the prologue | |
775 | cache CACHE. */ | |
776 | ||
777 | static void | |
778 | aarch64_scan_prologue (struct frame_info *this_frame, | |
779 | struct aarch64_prologue_cache *cache) | |
780 | { | |
781 | CORE_ADDR block_addr = get_frame_address_in_block (this_frame); | |
782 | CORE_ADDR prologue_start; | |
783 | CORE_ADDR prologue_end; | |
784 | CORE_ADDR prev_pc = get_frame_pc (this_frame); | |
785 | struct gdbarch *gdbarch = get_frame_arch (this_frame); | |
786 | ||
db634143 PL |
787 | cache->prev_pc = prev_pc; |
788 | ||
07b287a0 MS |
789 | /* Assume we do not find a frame. */ |
790 | cache->framereg = -1; | |
791 | cache->framesize = 0; | |
792 | ||
793 | if (find_pc_partial_function (block_addr, NULL, &prologue_start, | |
794 | &prologue_end)) | |
795 | { | |
796 | struct symtab_and_line sal = find_pc_line (prologue_start, 0); | |
797 | ||
798 | if (sal.line == 0) | |
799 | { | |
800 | /* No line info so use the current PC. */ | |
801 | prologue_end = prev_pc; | |
802 | } | |
803 | else if (sal.end < prologue_end) | |
804 | { | |
805 | /* The next line begins after the function end. */ | |
806 | prologue_end = sal.end; | |
807 | } | |
808 | ||
325fac50 | 809 | prologue_end = std::min (prologue_end, prev_pc); |
07b287a0 MS |
810 | aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache); |
811 | } | |
812 | else | |
813 | { | |
814 | CORE_ADDR frame_loc; | |
07b287a0 MS |
815 | |
816 | frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM); | |
817 | if (frame_loc == 0) | |
818 | return; | |
819 | ||
820 | cache->framereg = AARCH64_FP_REGNUM; | |
821 | cache->framesize = 16; | |
822 | cache->saved_regs[29].addr = 0; | |
823 | cache->saved_regs[30].addr = 8; | |
824 | } | |
825 | } | |
826 | ||
7dfa3edc PL |
827 | /* Fill in *CACHE with information about the prologue of *THIS_FRAME. This |
828 | function may throw an exception if the inferior's registers or memory is | |
829 | not available. */ | |
07b287a0 | 830 | |
7dfa3edc PL |
831 | static void |
832 | aarch64_make_prologue_cache_1 (struct frame_info *this_frame, | |
833 | struct aarch64_prologue_cache *cache) | |
07b287a0 | 834 | { |
07b287a0 MS |
835 | CORE_ADDR unwound_fp; |
836 | int reg; | |
837 | ||
07b287a0 MS |
838 | aarch64_scan_prologue (this_frame, cache); |
839 | ||
840 | if (cache->framereg == -1) | |
7dfa3edc | 841 | return; |
07b287a0 MS |
842 | |
843 | unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg); | |
844 | if (unwound_fp == 0) | |
7dfa3edc | 845 | return; |
07b287a0 MS |
846 | |
847 | cache->prev_sp = unwound_fp + cache->framesize; | |
848 | ||
849 | /* Calculate actual addresses of saved registers using offsets | |
850 | determined by aarch64_analyze_prologue. */ | |
851 | for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++) | |
852 | if (trad_frame_addr_p (cache->saved_regs, reg)) | |
853 | cache->saved_regs[reg].addr += cache->prev_sp; | |
854 | ||
db634143 PL |
855 | cache->func = get_frame_func (this_frame); |
856 | ||
7dfa3edc PL |
857 | cache->available_p = 1; |
858 | } | |
859 | ||
860 | /* Allocate and fill in *THIS_CACHE with information about the prologue of | |
861 | *THIS_FRAME. Do not do this if *THIS_CACHE was already allocated. | |
862 | Return a pointer to the current aarch64_prologue_cache in | |
863 | *THIS_CACHE. */ | |
864 | ||
865 | static struct aarch64_prologue_cache * | |
866 | aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache) | |
867 | { | |
868 | struct aarch64_prologue_cache *cache; | |
869 | ||
870 | if (*this_cache != NULL) | |
9a3c8263 | 871 | return (struct aarch64_prologue_cache *) *this_cache; |
7dfa3edc PL |
872 | |
873 | cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache); | |
874 | cache->saved_regs = trad_frame_alloc_saved_regs (this_frame); | |
875 | *this_cache = cache; | |
876 | ||
877 | TRY | |
878 | { | |
879 | aarch64_make_prologue_cache_1 (this_frame, cache); | |
880 | } | |
881 | CATCH (ex, RETURN_MASK_ERROR) | |
882 | { | |
883 | if (ex.error != NOT_AVAILABLE_ERROR) | |
884 | throw_exception (ex); | |
885 | } | |
886 | END_CATCH | |
887 | ||
07b287a0 MS |
888 | return cache; |
889 | } | |
890 | ||
7dfa3edc PL |
891 | /* Implement the "stop_reason" frame_unwind method. */ |
892 | ||
893 | static enum unwind_stop_reason | |
894 | aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame, | |
895 | void **this_cache) | |
896 | { | |
897 | struct aarch64_prologue_cache *cache | |
898 | = aarch64_make_prologue_cache (this_frame, this_cache); | |
899 | ||
900 | if (!cache->available_p) | |
901 | return UNWIND_UNAVAILABLE; | |
902 | ||
903 | /* Halt the backtrace at "_start". */ | |
904 | if (cache->prev_pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc) | |
905 | return UNWIND_OUTERMOST; | |
906 | ||
907 | /* We've hit a wall, stop. */ | |
908 | if (cache->prev_sp == 0) | |
909 | return UNWIND_OUTERMOST; | |
910 | ||
911 | return UNWIND_NO_REASON; | |
912 | } | |
913 | ||
07b287a0 MS |
914 | /* Our frame ID for a normal frame is the current function's starting |
915 | PC and the caller's SP when we were called. */ | |
916 | ||
917 | static void | |
918 | aarch64_prologue_this_id (struct frame_info *this_frame, | |
919 | void **this_cache, struct frame_id *this_id) | |
920 | { | |
7c8edfae PL |
921 | struct aarch64_prologue_cache *cache |
922 | = aarch64_make_prologue_cache (this_frame, this_cache); | |
07b287a0 | 923 | |
7dfa3edc PL |
924 | if (!cache->available_p) |
925 | *this_id = frame_id_build_unavailable_stack (cache->func); | |
926 | else | |
927 | *this_id = frame_id_build (cache->prev_sp, cache->func); | |
07b287a0 MS |
928 | } |
929 | ||
930 | /* Implement the "prev_register" frame_unwind method. */ | |
931 | ||
932 | static struct value * | |
933 | aarch64_prologue_prev_register (struct frame_info *this_frame, | |
934 | void **this_cache, int prev_regnum) | |
935 | { | |
7c8edfae PL |
936 | struct aarch64_prologue_cache *cache |
937 | = aarch64_make_prologue_cache (this_frame, this_cache); | |
07b287a0 MS |
938 | |
939 | /* If we are asked to unwind the PC, then we need to return the LR | |
940 | instead. The prologue may save PC, but it will point into this | |
941 | frame's prologue, not the next frame's resume location. */ | |
942 | if (prev_regnum == AARCH64_PC_REGNUM) | |
943 | { | |
944 | CORE_ADDR lr; | |
17e116a7 AH |
945 | struct gdbarch *gdbarch = get_frame_arch (this_frame); |
946 | struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch); | |
07b287a0 MS |
947 | |
948 | lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM); | |
17e116a7 AH |
949 | |
950 | if (tdep->has_pauth () | |
951 | && trad_frame_value_p (cache->saved_regs, | |
952 | tdep->pauth_ra_state_regnum)) | |
953 | lr = aarch64_frame_unmask_address (tdep, this_frame, lr); | |
954 | ||
07b287a0 MS |
955 | return frame_unwind_got_constant (this_frame, prev_regnum, lr); |
956 | } | |
957 | ||
958 | /* SP is generally not saved to the stack, but this frame is | |
959 | identified by the next frame's stack pointer at the time of the | |
960 | call. The value was already reconstructed into PREV_SP. */ | |
961 | /* | |
962 | +----------+ ^ | |
963 | | saved lr | | | |
964 | +->| saved fp |--+ | |
965 | | | | | |
966 | | | | <- Previous SP | |
967 | | +----------+ | |
968 | | | saved lr | | |
969 | +--| saved fp |<- FP | |
970 | | | | |
971 | | |<- SP | |
972 | +----------+ */ | |
973 | if (prev_regnum == AARCH64_SP_REGNUM) | |
974 | return frame_unwind_got_constant (this_frame, prev_regnum, | |
975 | cache->prev_sp); | |
976 | ||
977 | return trad_frame_get_prev_register (this_frame, cache->saved_regs, | |
978 | prev_regnum); | |
979 | } | |
980 | ||
981 | /* AArch64 prologue unwinder. */ | |
982 | struct frame_unwind aarch64_prologue_unwind = | |
983 | { | |
984 | NORMAL_FRAME, | |
7dfa3edc | 985 | aarch64_prologue_frame_unwind_stop_reason, |
07b287a0 MS |
986 | aarch64_prologue_this_id, |
987 | aarch64_prologue_prev_register, | |
988 | NULL, | |
989 | default_frame_sniffer | |
990 | }; | |
991 | ||
8b61f75d PL |
992 | /* Allocate and fill in *THIS_CACHE with information about the prologue of |
993 | *THIS_FRAME. Do not do this if *THIS_CACHE was already allocated. | |
994 | Return a pointer to the current aarch64_prologue_cache in | |
995 | *THIS_CACHE. */ | |
07b287a0 MS |
996 | |
997 | static struct aarch64_prologue_cache * | |
8b61f75d | 998 | aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache) |
07b287a0 | 999 | { |
07b287a0 | 1000 | struct aarch64_prologue_cache *cache; |
8b61f75d PL |
1001 | |
1002 | if (*this_cache != NULL) | |
9a3c8263 | 1003 | return (struct aarch64_prologue_cache *) *this_cache; |
07b287a0 MS |
1004 | |
1005 | cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache); | |
1006 | cache->saved_regs = trad_frame_alloc_saved_regs (this_frame); | |
8b61f75d | 1007 | *this_cache = cache; |
07b287a0 | 1008 | |
02a2a705 PL |
1009 | TRY |
1010 | { | |
1011 | cache->prev_sp = get_frame_register_unsigned (this_frame, | |
1012 | AARCH64_SP_REGNUM); | |
1013 | cache->prev_pc = get_frame_pc (this_frame); | |
1014 | cache->available_p = 1; | |
1015 | } | |
1016 | CATCH (ex, RETURN_MASK_ERROR) | |
1017 | { | |
1018 | if (ex.error != NOT_AVAILABLE_ERROR) | |
1019 | throw_exception (ex); | |
1020 | } | |
1021 | END_CATCH | |
07b287a0 MS |
1022 | |
1023 | return cache; | |
1024 | } | |
1025 | ||
02a2a705 PL |
1026 | /* Implement the "stop_reason" frame_unwind method. */ |
1027 | ||
1028 | static enum unwind_stop_reason | |
1029 | aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame, | |
1030 | void **this_cache) | |
1031 | { | |
1032 | struct aarch64_prologue_cache *cache | |
1033 | = aarch64_make_stub_cache (this_frame, this_cache); | |
1034 | ||
1035 | if (!cache->available_p) | |
1036 | return UNWIND_UNAVAILABLE; | |
1037 | ||
1038 | return UNWIND_NO_REASON; | |
1039 | } | |
1040 | ||
07b287a0 MS |
1041 | /* Our frame ID for a stub frame is the current SP and LR. */ |
1042 | ||
1043 | static void | |
1044 | aarch64_stub_this_id (struct frame_info *this_frame, | |
1045 | void **this_cache, struct frame_id *this_id) | |
1046 | { | |
8b61f75d PL |
1047 | struct aarch64_prologue_cache *cache |
1048 | = aarch64_make_stub_cache (this_frame, this_cache); | |
07b287a0 | 1049 | |
02a2a705 PL |
1050 | if (cache->available_p) |
1051 | *this_id = frame_id_build (cache->prev_sp, cache->prev_pc); | |
1052 | else | |
1053 | *this_id = frame_id_build_unavailable_stack (cache->prev_pc); | |
07b287a0 MS |
1054 | } |
1055 | ||
1056 | /* Implement the "sniffer" frame_unwind method. */ | |
1057 | ||
1058 | static int | |
1059 | aarch64_stub_unwind_sniffer (const struct frame_unwind *self, | |
1060 | struct frame_info *this_frame, | |
1061 | void **this_prologue_cache) | |
1062 | { | |
1063 | CORE_ADDR addr_in_block; | |
1064 | gdb_byte dummy[4]; | |
1065 | ||
1066 | addr_in_block = get_frame_address_in_block (this_frame); | |
3e5d3a5a | 1067 | if (in_plt_section (addr_in_block) |
07b287a0 MS |
1068 | /* We also use the stub unwinder if the target memory is unreadable |
1069 | to avoid having the prologue unwinder try to read it. */ | |
1070 | || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0) | |
1071 | return 1; | |
1072 | ||
1073 | return 0; | |
1074 | } | |
1075 | ||
1076 | /* AArch64 stub unwinder. */ | |
1077 | struct frame_unwind aarch64_stub_unwind = | |
1078 | { | |
1079 | NORMAL_FRAME, | |
02a2a705 | 1080 | aarch64_stub_frame_unwind_stop_reason, |
07b287a0 MS |
1081 | aarch64_stub_this_id, |
1082 | aarch64_prologue_prev_register, | |
1083 | NULL, | |
1084 | aarch64_stub_unwind_sniffer | |
1085 | }; | |
1086 | ||
1087 | /* Return the frame base address of *THIS_FRAME. */ | |
1088 | ||
1089 | static CORE_ADDR | |
1090 | aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache) | |
1091 | { | |
7c8edfae PL |
1092 | struct aarch64_prologue_cache *cache |
1093 | = aarch64_make_prologue_cache (this_frame, this_cache); | |
07b287a0 MS |
1094 | |
1095 | return cache->prev_sp - cache->framesize; | |
1096 | } | |
1097 | ||
1098 | /* AArch64 default frame base information. */ | |
1099 | struct frame_base aarch64_normal_base = | |
1100 | { | |
1101 | &aarch64_prologue_unwind, | |
1102 | aarch64_normal_frame_base, | |
1103 | aarch64_normal_frame_base, | |
1104 | aarch64_normal_frame_base | |
1105 | }; | |
1106 | ||
07b287a0 MS |
1107 | /* Return the value of the REGNUM register in the previous frame of |
1108 | *THIS_FRAME. */ | |
1109 | ||
1110 | static struct value * | |
1111 | aarch64_dwarf2_prev_register (struct frame_info *this_frame, | |
1112 | void **this_cache, int regnum) | |
1113 | { | |
11e1b75f | 1114 | struct gdbarch_tdep *tdep = gdbarch_tdep (get_frame_arch (this_frame)); |
07b287a0 MS |
1115 | CORE_ADDR lr; |
1116 | ||
1117 | switch (regnum) | |
1118 | { | |
1119 | case AARCH64_PC_REGNUM: | |
1120 | lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM); | |
11e1b75f | 1121 | lr = aarch64_frame_unmask_address (tdep, this_frame, lr); |
07b287a0 MS |
1122 | return frame_unwind_got_constant (this_frame, regnum, lr); |
1123 | ||
1124 | default: | |
1125 | internal_error (__FILE__, __LINE__, | |
1126 | _("Unexpected register %d"), regnum); | |
1127 | } | |
1128 | } | |
1129 | ||
11e1b75f AH |
1130 | static const unsigned char op_lit0 = DW_OP_lit0; |
1131 | static const unsigned char op_lit1 = DW_OP_lit1; | |
1132 | ||
07b287a0 MS |
1133 | /* Implement the "init_reg" dwarf2_frame_ops method. */ |
1134 | ||
1135 | static void | |
1136 | aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum, | |
1137 | struct dwarf2_frame_state_reg *reg, | |
1138 | struct frame_info *this_frame) | |
1139 | { | |
11e1b75f AH |
1140 | struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch); |
1141 | ||
07b287a0 MS |
1142 | switch (regnum) |
1143 | { | |
1144 | case AARCH64_PC_REGNUM: | |
1145 | reg->how = DWARF2_FRAME_REG_FN; | |
1146 | reg->loc.fn = aarch64_dwarf2_prev_register; | |
11e1b75f AH |
1147 | return; |
1148 | ||
07b287a0 MS |
1149 | case AARCH64_SP_REGNUM: |
1150 | reg->how = DWARF2_FRAME_REG_CFA; | |
11e1b75f AH |
1151 | return; |
1152 | } | |
1153 | ||
1154 | /* Init pauth registers. */ | |
1155 | if (tdep->has_pauth ()) | |
1156 | { | |
1157 | if (regnum == tdep->pauth_ra_state_regnum) | |
1158 | { | |
1159 | /* Initialize RA_STATE to zero. */ | |
1160 | reg->how = DWARF2_FRAME_REG_SAVED_VAL_EXP; | |
1161 | reg->loc.exp.start = &op_lit0; | |
1162 | reg->loc.exp.len = 1; | |
1163 | return; | |
1164 | } | |
1165 | else if (regnum == AARCH64_PAUTH_DMASK_REGNUM (tdep->pauth_reg_base) | |
1166 | || regnum == AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base)) | |
1167 | { | |
1168 | reg->how = DWARF2_FRAME_REG_SAME_VALUE; | |
1169 | return; | |
1170 | } | |
07b287a0 MS |
1171 | } |
1172 | } | |
1173 | ||
11e1b75f AH |
1174 | /* Implement the execute_dwarf_cfa_vendor_op method. */ |
1175 | ||
1176 | static bool | |
1177 | aarch64_execute_dwarf_cfa_vendor_op (struct gdbarch *gdbarch, gdb_byte op, | |
1178 | struct dwarf2_frame_state *fs) | |
1179 | { | |
1180 | struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch); | |
1181 | struct dwarf2_frame_state_reg *ra_state; | |
1182 | ||
1183 | if (tdep->has_pauth () && op == DW_CFA_AARCH64_negate_ra_state) | |
1184 | { | |
1185 | /* Allocate RA_STATE column if it's not allocated yet. */ | |
1186 | fs->regs.alloc_regs (AARCH64_DWARF_PAUTH_RA_STATE + 1); | |
1187 | ||
1188 | /* Toggle the status of RA_STATE between 0 and 1. */ | |
1189 | ra_state = &(fs->regs.reg[AARCH64_DWARF_PAUTH_RA_STATE]); | |
1190 | ra_state->how = DWARF2_FRAME_REG_SAVED_VAL_EXP; | |
1191 | ||
1192 | if (ra_state->loc.exp.start == nullptr | |
1193 | || ra_state->loc.exp.start == &op_lit0) | |
1194 | ra_state->loc.exp.start = &op_lit1; | |
1195 | else | |
1196 | ra_state->loc.exp.start = &op_lit0; | |
1197 | ||
1198 | ra_state->loc.exp.len = 1; | |
1199 | ||
1200 | return true; | |
1201 | } | |
1202 | ||
1203 | return false; | |
1204 | } | |
1205 | ||
07b287a0 MS |
1206 | /* When arguments must be pushed onto the stack, they go on in reverse |
1207 | order. The code below implements a FILO (stack) to do this. */ | |
1208 | ||
1209 | typedef struct | |
1210 | { | |
c3c87445 YQ |
1211 | /* Value to pass on stack. It can be NULL if this item is for stack |
1212 | padding. */ | |
7c543f7b | 1213 | const gdb_byte *data; |
07b287a0 MS |
1214 | |
1215 | /* Size in bytes of value to pass on stack. */ | |
1216 | int len; | |
1217 | } stack_item_t; | |
1218 | ||
1219 | DEF_VEC_O (stack_item_t); | |
1220 | ||
1221 | /* Return the alignment (in bytes) of the given type. */ | |
1222 | ||
1223 | static int | |
1224 | aarch64_type_align (struct type *t) | |
1225 | { | |
1226 | int n; | |
1227 | int align; | |
1228 | int falign; | |
1229 | ||
1230 | t = check_typedef (t); | |
1231 | switch (TYPE_CODE (t)) | |
1232 | { | |
1233 | default: | |
1234 | /* Should never happen. */ | |
1235 | internal_error (__FILE__, __LINE__, _("unknown type alignment")); | |
1236 | return 4; | |
1237 | ||
1238 | case TYPE_CODE_PTR: | |
1239 | case TYPE_CODE_ENUM: | |
1240 | case TYPE_CODE_INT: | |
1241 | case TYPE_CODE_FLT: | |
1242 | case TYPE_CODE_SET: | |
1243 | case TYPE_CODE_RANGE: | |
1244 | case TYPE_CODE_BITSTRING: | |
1245 | case TYPE_CODE_REF: | |
aa006118 | 1246 | case TYPE_CODE_RVALUE_REF: |
07b287a0 MS |
1247 | case TYPE_CODE_CHAR: |
1248 | case TYPE_CODE_BOOL: | |
1249 | return TYPE_LENGTH (t); | |
1250 | ||
1251 | case TYPE_CODE_ARRAY: | |
238f2452 YQ |
1252 | if (TYPE_VECTOR (t)) |
1253 | { | |
1254 | /* Use the natural alignment for vector types (the same as for | |
1255 | scalar types), but the maximum alignment is 128 bits. */ | |
1256 | if (TYPE_LENGTH (t) > 16) | |
1257 | return 16; | |
1258 | else | |
1259 | return TYPE_LENGTH (t); | |
1260 | } | |
1261 | else | |
1262 | return aarch64_type_align (TYPE_TARGET_TYPE (t)); | |
07b287a0 MS |
1263 | case TYPE_CODE_COMPLEX: |
1264 | return aarch64_type_align (TYPE_TARGET_TYPE (t)); | |
1265 | ||
1266 | case TYPE_CODE_STRUCT: | |
1267 | case TYPE_CODE_UNION: | |
1268 | align = 1; | |
1269 | for (n = 0; n < TYPE_NFIELDS (t); n++) | |
1270 | { | |
1271 | falign = aarch64_type_align (TYPE_FIELD_TYPE (t, n)); | |
1272 | if (falign > align) | |
1273 | align = falign; | |
1274 | } | |
1275 | return align; | |
1276 | } | |
1277 | } | |
1278 | ||
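As a rough, standalone illustration of the same rules (host C alignment is used here only as a stand-in; the function above works on GDB's type descriptions, not on host types): scalars align to their own size, an array aligns like its element, and a struct aligns to its most-aligned member.

    #include <stdio.h>

    struct mixed { char c; double d; };   /* max (1, 8) -> 8-byte alignment.  */
    typedef int quad[4];                  /* element alignment -> 4.  */

    int main (void)
    {
      printf ("struct mixed: %zu, int[4]: %zu\n",
              _Alignof (struct mixed), _Alignof (quad));
      return 0;
    }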
ea92689a AH |
1279 | /* Worker function for aapcs_is_vfp_call_or_return_candidate. |
1280 | ||
1281 | Return the number of registers required, or -1 on failure. | |
1282 | ||
1283 | When encountering a base element, if FUNDAMENTAL_TYPE is not set then set it | |
1284 | to the element, else fail if the type of this element does not match the | |
1285 | existing value. */ | |
1286 | ||
1287 | static int | |
1288 | aapcs_is_vfp_call_or_return_candidate_1 (struct type *type, | |
1289 | struct type **fundamental_type) | |
1290 | { | |
1291 | if (type == nullptr) | |
1292 | return -1; | |
1293 | ||
1294 | switch (TYPE_CODE (type)) | |
1295 | { | |
1296 | case TYPE_CODE_FLT: | |
1297 | if (TYPE_LENGTH (type) > 16) | |
1298 | return -1; | |
1299 | ||
1300 | if (*fundamental_type == nullptr) | |
1301 | *fundamental_type = type; | |
1302 | else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type) | |
1303 | || TYPE_CODE (type) != TYPE_CODE (*fundamental_type)) | |
1304 | return -1; | |
1305 | ||
1306 | return 1; | |
1307 | ||
1308 | case TYPE_CODE_COMPLEX: | |
1309 | { | |
1310 | struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type)); | |
1311 | if (TYPE_LENGTH (target_type) > 16) | |
1312 | return -1; | |
1313 | ||
1314 | if (*fundamental_type == nullptr) | |
1315 | *fundamental_type = target_type; | |
1316 | else if (TYPE_LENGTH (target_type) != TYPE_LENGTH (*fundamental_type) | |
1317 | || TYPE_CODE (target_type) != TYPE_CODE (*fundamental_type)) | |
1318 | return -1; | |
1319 | ||
1320 | return 2; | |
1321 | } | |
1322 | ||
1323 | case TYPE_CODE_ARRAY: | |
1324 | { | |
1325 | if (TYPE_VECTOR (type)) | |
1326 | { | |
1327 | if (TYPE_LENGTH (type) != 8 && TYPE_LENGTH (type) != 16) | |
1328 | return -1; | |
1329 | ||
1330 | if (*fundamental_type == nullptr) | |
1331 | *fundamental_type = type; | |
1332 | else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type) | |
1333 | || TYPE_CODE (type) != TYPE_CODE (*fundamental_type)) | |
1334 | return -1; | |
1335 | ||
1336 | return 1; | |
1337 | } | |
1338 | else | |
1339 | { | |
1340 | struct type *target_type = TYPE_TARGET_TYPE (type); | |
1341 | int count = aapcs_is_vfp_call_or_return_candidate_1 | |
1342 | (target_type, fundamental_type); | |
1343 | ||
1344 | if (count == -1) | |
1345 | return count; | |
1346 | ||
d4718d5c | 1347 | count *= (TYPE_LENGTH (type) / TYPE_LENGTH (target_type)); |
ea92689a AH |
1348 | return count; |
1349 | } | |
1350 | } | |
1351 | ||
1352 | case TYPE_CODE_STRUCT: | |
1353 | case TYPE_CODE_UNION: | |
1354 | { | |
1355 | int count = 0; | |
1356 | ||
1357 | for (int i = 0; i < TYPE_NFIELDS (type); i++) | |
1358 | { | |
353229bf AH |
1359 | /* Ignore any static fields. */ |
1360 | if (field_is_static (&TYPE_FIELD (type, i))) | |
1361 | continue; | |
1362 | ||
ea92689a AH |
1363 | struct type *member = check_typedef (TYPE_FIELD_TYPE (type, i)); |
1364 | ||
1365 | int sub_count = aapcs_is_vfp_call_or_return_candidate_1 | |
1366 | (member, fundamental_type); | |
1367 | if (sub_count == -1) | |
1368 | return -1; | |
1369 | count += sub_count; | |
1370 | } | |
73021deb AH |
1371 | |
1372 | /* Ensure there is no padding between the fields (allowing for empty | |
1373 | zero length structs) */ | |
1374 | int ftype_length = (*fundamental_type == nullptr) | |
1375 | ? 0 : TYPE_LENGTH (*fundamental_type); | |
1376 | if (count * ftype_length != TYPE_LENGTH (type)) | |
1377 | return -1; | |
1378 | ||
ea92689a AH |
1379 | return count; |
1380 | } | |
1381 | ||
1382 | default: | |
1383 | break; | |
1384 | } | |
1385 | ||
1386 | return -1; | |
1387 | } | |
1388 | ||
1389 | /* Return true if an argument, whose type is described by TYPE, can be passed or | |
1390 | returned in simd/fp registers, providing enough parameter passing registers | |
1391 | are available. This is as described in the AAPCS64. | |
1392 | ||
1393 | Upon successful return, *COUNT returns the number of needed registers, | |
1394 | *FUNDAMENTAL_TYPE contains the type of those registers. | |
1395 | ||
1396 | Candidate as per the AAPCS64 5.4.2.C is either a: | |
1397 | - float. | |
1398 | - short-vector. | |
1399 | - HFA (Homogeneous Floating-point Aggregate, 4.3.5.1). A Composite type where | |
1400 | all the members are floats and which has at most 4 members. | |
1401 | - HVA (Homogeneous Short-vector Aggregate, 4.3.5.2). A Composite type where | |
1402 | all the members are short vectors and which has at most 4 members. | |
1403 | - Complex (7.1.1) | |
1404 | ||
1405 | Note that HFAs and HVAs can include nested structures and arrays. */ | |
1406 | ||
0e745c60 | 1407 | static bool |
ea92689a AH |
1408 | aapcs_is_vfp_call_or_return_candidate (struct type *type, int *count, |
1409 | struct type **fundamental_type) | |
1410 | { | |
1411 | if (type == nullptr) | |
1412 | return false; | |
1413 | ||
1414 | *fundamental_type = nullptr; | |
1415 | ||
1416 | int ag_count = aapcs_is_vfp_call_or_return_candidate_1 (type, | |
1417 | fundamental_type); | |
1418 | ||
1419 | if (ag_count > 0 && ag_count <= HA_MAX_NUM_FLDS) | |
1420 | { | |
1421 | *count = ag_count; | |
1422 | return true; | |
1423 | } | |
1424 | else | |
1425 | return false; | |
1426 | } | |
1427 | ||
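To make the classification above concrete, here are a few illustrative aggregate shapes (not taken from this file); the annotations restate what aapcs_is_vfp_call_or_return_candidate would report for them under the rules just described:

    /* Illustrative declarations only.  */
    struct hfa3 { float a, b, c; };       /* Candidate: count 3, fundamental type float.  */
    struct cplx { _Complex double z; };   /* Candidate: one complex counts as 2 doubles.  */
    struct mix  { float a; double b; };   /* Not a candidate: member types differ.  */
    struct big  { double d[5]; };         /* Not a candidate: 5 exceeds HA_MAX_NUM_FLDS.  */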
07b287a0 MS |
1428 | /* AArch64 function call information structure. */ |
1429 | struct aarch64_call_info | |
1430 | { | |
1431 | /* the current argument number. */ | |
1432 | unsigned argnum; | |
1433 | ||
1434 | /* The next general purpose register number, equivalent to NGRN as | |
1435 | described in the AArch64 Procedure Call Standard. */ | |
1436 | unsigned ngrn; | |
1437 | ||
1438 | /* The next SIMD and floating point register number, equivalent to | |
1439 | NSRN as described in the AArch64 Procedure Call Standard. */ | |
1440 | unsigned nsrn; | |
1441 | ||
1442 | /* The next stacked argument address, equivalent to NSAA as | |
1443 | described in the AArch64 Procedure Call Standard. */ | |
1444 | unsigned nsaa; | |
1445 | ||
1446 | /* Stack item vector. */ | |
1447 | VEC(stack_item_t) *si; | |
1448 | }; | |
1449 | ||
1450 | /* Pass a value in a sequence of consecutive X registers. The caller | |
1451 | is responsible for ensuring sufficient registers are available. */ | |
1452 | ||
1453 | static void | |
1454 | pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache, | |
1455 | struct aarch64_call_info *info, struct type *type, | |
8e80f9d1 | 1456 | struct value *arg) |
07b287a0 MS |
1457 | { |
1458 | enum bfd_endian byte_order = gdbarch_byte_order (gdbarch); | |
1459 | int len = TYPE_LENGTH (type); | |
1460 | enum type_code typecode = TYPE_CODE (type); | |
1461 | int regnum = AARCH64_X0_REGNUM + info->ngrn; | |
8e80f9d1 | 1462 | const bfd_byte *buf = value_contents (arg); |
07b287a0 MS |
1463 | |
1464 | info->argnum++; | |
1465 | ||
1466 | while (len > 0) | |
1467 | { | |
1468 | int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE; | |
1469 | CORE_ADDR regval = extract_unsigned_integer (buf, partial_len, | |
1470 | byte_order); | |
1471 | ||
1472 | ||
1473 | /* Adjust sub-word struct/union args when big-endian. */ | |
1474 | if (byte_order == BFD_ENDIAN_BIG | |
1475 | && partial_len < X_REGISTER_SIZE | |
1476 | && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION)) | |
1477 | regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT); | |
1478 | ||
1479 | if (aarch64_debug) | |
b277c936 PL |
1480 | { |
1481 | debug_printf ("arg %d in %s = 0x%s\n", info->argnum, | |
1482 | gdbarch_register_name (gdbarch, regnum), | |
1483 | phex (regval, X_REGISTER_SIZE)); | |
1484 | } | |
07b287a0 MS |
1485 | regcache_cooked_write_unsigned (regcache, regnum, regval); |
1486 | len -= partial_len; | |
1487 | buf += partial_len; | |
1488 | regnum++; | |
1489 | } | |
1490 | } | |
1491 | ||
1492 | /* Attempt to marshall a value in a V register. Return 1 if | |
1493 | successful, or 0 if insufficient registers are available. This | |
1494 | function, unlike the equivalent pass_in_x() function, does not | |
1495 | handle arguments spread across multiple registers. */ | |
1496 | ||
1497 | static int | |
1498 | pass_in_v (struct gdbarch *gdbarch, | |
1499 | struct regcache *regcache, | |
1500 | struct aarch64_call_info *info, | |
0735fddd | 1501 | int len, const bfd_byte *buf) |
07b287a0 MS |
1502 | { |
1503 | if (info->nsrn < 8) | |
1504 | { | |
07b287a0 | 1505 | int regnum = AARCH64_V0_REGNUM + info->nsrn; |
3ff2c72e AH |
1506 | /* Enough space for a full vector register. */ |
1507 | gdb_byte reg[register_size (gdbarch, regnum)]; | |
1508 | gdb_assert (len <= sizeof (reg)); | |
07b287a0 MS |
1509 | |
1510 | info->argnum++; | |
1511 | info->nsrn++; | |
1512 | ||
0735fddd YQ |
1513 | memset (reg, 0, sizeof (reg)); |
1514 | /* PCS C.1, the argument is allocated to the least significant | |
1515 | bits of V register. */ | |
1516 | memcpy (reg, buf, len); | |
b66f5587 | 1517 | regcache->cooked_write (regnum, reg); |
0735fddd | 1518 | |
07b287a0 | 1519 | if (aarch64_debug) |
b277c936 PL |
1520 | { |
1521 | debug_printf ("arg %d in %s\n", info->argnum, | |
1522 | gdbarch_register_name (gdbarch, regnum)); | |
1523 | } | |
07b287a0 MS |
1524 | return 1; |
1525 | } | |
1526 | info->nsrn = 8; | |
1527 | return 0; | |
1528 | } | |
1529 | ||
1530 | /* Marshall an argument onto the stack. */ | |
1531 | ||
1532 | static void | |
1533 | pass_on_stack (struct aarch64_call_info *info, struct type *type, | |
8e80f9d1 | 1534 | struct value *arg) |
07b287a0 | 1535 | { |
8e80f9d1 | 1536 | const bfd_byte *buf = value_contents (arg); |
07b287a0 MS |
1537 | int len = TYPE_LENGTH (type); |
1538 | int align; | |
1539 | stack_item_t item; | |
1540 | ||
1541 | info->argnum++; | |
1542 | ||
1543 | align = aarch64_type_align (type); | |
1544 | ||
1545 | /* PCS C.17: the stack should be aligned to the larger of 8 bytes or the | |
1546 | natural alignment of the argument's type. */ | |
1547 | align = align_up (align, 8); | |
1548 | ||
1549 | /* The AArch64 PCS requires at most doubleword alignment. */ | |
1550 | if (align > 16) | |
1551 | align = 16; | |
1552 | ||
1553 | if (aarch64_debug) | |
b277c936 PL |
1554 | { |
1555 | debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len, | |
1556 | info->nsaa); | |
1557 | } | |
07b287a0 MS |
1558 | |
1559 | item.len = len; | |
1560 | item.data = buf; | |
1561 | VEC_safe_push (stack_item_t, info->si, &item); | |
1562 | ||
1563 | info->nsaa += len; | |
1564 | if (info->nsaa & (align - 1)) | |
1565 | { | |
1566 | /* Push stack alignment padding. */ | |
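/* For example, a 12-byte argument pushed when NSAA is 0 leaves NSAA at 12;
   with the minimum 8-byte alignment, four bytes of padding are pushed so
   the next argument starts at offset 16. */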
1567 | int pad = align - (info->nsaa & (align - 1)); | |
1568 | ||
1569 | item.len = pad; | |
c3c87445 | 1570 | item.data = NULL; |
07b287a0 MS |
1571 | |
1572 | VEC_safe_push (stack_item_t, info->si, &item); | |
1573 | info->nsaa += pad; | |
1574 | } | |
1575 | } | |
1576 | ||
1577 | /* Marshall an argument into a sequence of one or more consecutive X | |
1578 | registers or, if insufficient X registers are available then onto | |
1579 | the stack. */ | |
1580 | ||
1581 | static void | |
1582 | pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache, | |
1583 | struct aarch64_call_info *info, struct type *type, | |
8e80f9d1 | 1584 | struct value *arg) |
07b287a0 MS |
1585 | { |
1586 | int len = TYPE_LENGTH (type); | |
1587 | int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE; | |
1588 | ||
1589 | /* PCS C.13 - Pass in registers if we have enough spare. */ | |
1590 | if (info->ngrn + nregs <= 8) | |
1591 | { | |
8e80f9d1 | 1592 | pass_in_x (gdbarch, regcache, info, type, arg); |
07b287a0 MS |
1593 | info->ngrn += nregs; |
1594 | } | |
1595 | else | |
1596 | { | |
1597 | info->ngrn = 8; | |
8e80f9d1 | 1598 | pass_on_stack (info, type, arg); |
07b287a0 MS |
1599 | } |
1600 | } | |
1601 | ||
0e745c60 AH |
1602 | /* Pass a value of type ARG_TYPE in one or more V registers.  Assumes the
1603 | value satisfies aapcs_is_vfp_call_or_return_candidate and that enough
1604 | spare V registers are available.  A return value of false is an error
1605 | state, as the value will have been partially passed to the stack. */
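/* For example, a struct of three floats is a vfp candidate (an HFA) with
   three elements; the recursion over its fields below places each float in
   the low bits of a successive V register, V0-V2 when NSRN starts at 0. */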
1606 | static bool | |
1607 | pass_in_v_vfp_candidate (struct gdbarch *gdbarch, struct regcache *regcache, | |
1608 | struct aarch64_call_info *info, struct type *arg_type, | |
1609 | struct value *arg) | |
07b287a0 | 1610 | { |
0e745c60 AH |
1611 | switch (TYPE_CODE (arg_type)) |
1612 | { | |
1613 | case TYPE_CODE_FLT: | |
1614 | return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type), | |
1615 | value_contents (arg)); | |
1616 | break; | |
1617 | ||
1618 | case TYPE_CODE_COMPLEX: | |
1619 | { | |
1620 | const bfd_byte *buf = value_contents (arg); | |
1621 | struct type *target_type = check_typedef (TYPE_TARGET_TYPE (arg_type)); | |
1622 | ||
1623 | if (!pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type), | |
1624 | buf)) | |
1625 | return false; | |
1626 | ||
1627 | return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type), | |
1628 | buf + TYPE_LENGTH (target_type)); | |
1629 | } | |
1630 | ||
1631 | case TYPE_CODE_ARRAY: | |
1632 | if (TYPE_VECTOR (arg_type)) | |
1633 | return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type), | |
1634 | value_contents (arg)); | |
1635 | /* fall through. */ | |
1636 | ||
1637 | case TYPE_CODE_STRUCT: | |
1638 | case TYPE_CODE_UNION: | |
1639 | for (int i = 0; i < TYPE_NFIELDS (arg_type); i++) | |
1640 | { | |
353229bf AH |
1641 | /* Don't include static fields. */ |
1642 | if (field_is_static (&TYPE_FIELD (arg_type, i))) | |
1643 | continue; | |
1644 | ||
0e745c60 AH |
1645 | struct value *field = value_primitive_field (arg, 0, i, arg_type); |
1646 | struct type *field_type = check_typedef (value_type (field)); | |
1647 | ||
1648 | if (!pass_in_v_vfp_candidate (gdbarch, regcache, info, field_type, | |
1649 | field)) | |
1650 | return false; | |
1651 | } | |
1652 | return true; | |
1653 | ||
1654 | default: | |
1655 | return false; | |
1656 | } | |
07b287a0 MS |
1657 | } |
1658 | ||
1659 | /* Implement the "push_dummy_call" gdbarch method. */ | |
1660 | ||
1661 | static CORE_ADDR | |
1662 | aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function, | |
1663 | struct regcache *regcache, CORE_ADDR bp_addr, | |
1664 | int nargs, | |
cf84fa6b AH |
1665 | struct value **args, CORE_ADDR sp, |
1666 | function_call_return_method return_method, | |
07b287a0 MS |
1667 | CORE_ADDR struct_addr) |
1668 | { | |
07b287a0 | 1669 | int argnum; |
07b287a0 | 1670 | struct aarch64_call_info info; |
07b287a0 MS |
1671 | |
1672 | memset (&info, 0, sizeof (info)); | |
1673 | ||
1674 | /* We need to know what the type of the called function is in order | |
1675 | to determine the number of named/anonymous arguments for the | |
1676 | actual argument placement, and the return type in order to handle | |
1677 | return value correctly. | |
1678 | ||
1679 | The generic code above us views the decision of return in memory | |
1680 | or return in registers as a two stage process.  The language | |
1681 | handler is consulted first and may decide to return in memory (e.g. | |
1682 | a class with a copy constructor returned by value); this will cause | |
1683 | the generic code to allocate space AND insert an initial leading | |
1684 | argument. | |
1685 | ||
1686 | If the language code does not decide to pass in memory then the | |
1687 | target code is consulted. | |
1688 | ||
1689 | If the language code decides to pass in memory we want to move | |
1690 | the pointer inserted as the initial argument from the argument | |
1691 | list and into X8, the conventional AArch64 struct return pointer | |
38a72da0 | 1692 | register. */ |
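/* Note that X8 (AARCH64_STRUCT_RETURN_REGNUM) is the AAPCS64 indirect
   result location register, which is why the hidden return-slot pointer is
   moved there instead of occupying an argument register. */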
07b287a0 MS |
1693 | |
1694 | /* Set the return address. For the AArch64, the return breakpoint | |
1695 | is always at BP_ADDR. */ | |
1696 | regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr); | |
1697 | ||
38a72da0 AH |
1698 | /* If we were given an initial argument for the return slot, lose it. */ |
1699 | if (return_method == return_method_hidden_param) | |
07b287a0 MS |
1700 | { |
1701 | args++; | |
1702 | nargs--; | |
1703 | } | |
1704 | ||
1705 | /* The struct_return pointer occupies X8. */ | |
38a72da0 | 1706 | if (return_method != return_method_normal) |
07b287a0 MS |
1707 | { |
1708 | if (aarch64_debug) | |
b277c936 PL |
1709 | { |
1710 | debug_printf ("struct return in %s = 0x%s\n", | |
1711 | gdbarch_register_name (gdbarch, | |
1712 | AARCH64_STRUCT_RETURN_REGNUM), | |
1713 | paddress (gdbarch, struct_addr)); | |
1714 | } | |
07b287a0 MS |
1715 | regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM, |
1716 | struct_addr); | |
1717 | } | |
1718 | ||
1719 | for (argnum = 0; argnum < nargs; argnum++) | |
1720 | { | |
1721 | struct value *arg = args[argnum]; | |
0e745c60 AH |
1722 | struct type *arg_type, *fundamental_type; |
1723 | int len, elements; | |
07b287a0 MS |
1724 | |
1725 | arg_type = check_typedef (value_type (arg)); | |
1726 | len = TYPE_LENGTH (arg_type); | |
1727 | ||
0e745c60 AH |
1728 | /* If arg can be passed in V registers as per the AAPCS64, then do so
1729 | if there are enough spare registers. */ | |
1730 | if (aapcs_is_vfp_call_or_return_candidate (arg_type, &elements, | |
1731 | &fundamental_type)) | |
1732 | { | |
1733 | if (info.nsrn + elements <= 8) | |
1734 | { | |
1735 | /* We know that we have sufficient registers available, therefore | |
1736 | this will never need to fall back to the stack. */ | |
1737 | if (!pass_in_v_vfp_candidate (gdbarch, regcache, &info, arg_type, | |
1738 | arg)) | |
1739 | gdb_assert_not_reached ("Failed to push args"); | |
1740 | } | |
1741 | else | |
1742 | { | |
1743 | info.nsrn = 8; | |
1744 | pass_on_stack (&info, arg_type, arg); | |
1745 | } | |
1746 | continue; | |
1747 | } | |
1748 | ||
07b287a0 MS |
1749 | switch (TYPE_CODE (arg_type)) |
1750 | { | |
1751 | case TYPE_CODE_INT: | |
1752 | case TYPE_CODE_BOOL: | |
1753 | case TYPE_CODE_CHAR: | |
1754 | case TYPE_CODE_RANGE: | |
1755 | case TYPE_CODE_ENUM: | |
1756 | if (len < 4) | |
1757 | { | |
1758 | /* Promote to 32 bit integer. */ | |
1759 | if (TYPE_UNSIGNED (arg_type)) | |
1760 | arg_type = builtin_type (gdbarch)->builtin_uint32; | |
1761 | else | |
1762 | arg_type = builtin_type (gdbarch)->builtin_int32; | |
1763 | arg = value_cast (arg_type, arg); | |
1764 | } | |
8e80f9d1 | 1765 | pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg); |
07b287a0 MS |
1766 | break; |
1767 | ||
07b287a0 MS |
1768 | case TYPE_CODE_STRUCT: |
1769 | case TYPE_CODE_ARRAY: | |
1770 | case TYPE_CODE_UNION: | |
0e745c60 | 1771 | if (len > 16) |
07b287a0 MS |
1772 | { |
1773 | /* PCS B.7 Aggregates larger than 16 bytes are passed by | |
1774 | invisible reference. */ | |
1775 | ||
1776 | /* Allocate aligned storage. */ | |
1777 | sp = align_down (sp - len, 16); | |
1778 | ||
1779 | /* Write the real data into the stack. */ | |
1780 | write_memory (sp, value_contents (arg), len); | |
1781 | ||
1782 | /* Construct the indirection. */ | |
1783 | arg_type = lookup_pointer_type (arg_type); | |
1784 | arg = value_from_pointer (arg_type, sp); | |
8e80f9d1 | 1785 | pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg); |
07b287a0 MS |
1786 | } |
1787 | else | |
1788 | /* PCS C.15 / C.18 multiple values pass. */ | |
8e80f9d1 | 1789 | pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg); |
07b287a0 MS |
1790 | break; |
1791 | ||
1792 | default: | |
8e80f9d1 | 1793 | pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg); |
07b287a0 MS |
1794 | break; |
1795 | } | |
1796 | } | |
1797 | ||
1798 | /* Make sure stack retains 16 byte alignment. */ | |
1799 | if (info.nsaa & 15) | |
1800 | sp -= 16 - (info.nsaa & 15); | |
1801 | ||
1802 | while (!VEC_empty (stack_item_t, info.si)) | |
1803 | { | |
1804 | stack_item_t *si = VEC_last (stack_item_t, info.si); | |
1805 | ||
1806 | sp -= si->len; | |
c3c87445 YQ |
1807 | if (si->data != NULL) |
1808 | write_memory (sp, si->data, si->len); | |
07b287a0 MS |
1809 | VEC_pop (stack_item_t, info.si); |
1810 | } | |
1811 | ||
1812 | VEC_free (stack_item_t, info.si); | |
1813 | ||
1814 | /* Finally, update the SP register. */ | |
1815 | regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp); | |
1816 | ||
1817 | return sp; | |
1818 | } | |
1819 | ||
1820 | /* Implement the "frame_align" gdbarch method. */ | |
1821 | ||
1822 | static CORE_ADDR | |
1823 | aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp) | |
1824 | { | |
1825 | /* Align the stack to sixteen bytes. */ | |
1826 | return sp & ~(CORE_ADDR) 15; | |
1827 | } | |
1828 | ||
1829 | /* Return the type for an AdvSIMD Q register. */ | |
1830 | ||
1831 | static struct type * | |
1832 | aarch64_vnq_type (struct gdbarch *gdbarch) | |
1833 | { | |
1834 | struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch); | |
1835 | ||
1836 | if (tdep->vnq_type == NULL) | |
1837 | { | |
1838 | struct type *t; | |
1839 | struct type *elem; | |
1840 | ||
1841 | t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq", | |
1842 | TYPE_CODE_UNION); | |
1843 | ||
1844 | elem = builtin_type (gdbarch)->builtin_uint128; | |
1845 | append_composite_type_field (t, "u", elem); | |
1846 | ||
1847 | elem = builtin_type (gdbarch)->builtin_int128; | |
1848 | append_composite_type_field (t, "s", elem); | |
1849 | ||
1850 | tdep->vnq_type = t; | |
1851 | } | |
1852 | ||
1853 | return tdep->vnq_type; | |
1854 | } | |
1855 | ||
1856 | /* Return the type for an AdvSIMD D register. */ | |
1857 | ||
1858 | static struct type * | |
1859 | aarch64_vnd_type (struct gdbarch *gdbarch) | |
1860 | { | |
1861 | struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch); | |
1862 | ||
1863 | if (tdep->vnd_type == NULL) | |
1864 | { | |
1865 | struct type *t; | |
1866 | struct type *elem; | |
1867 | ||
1868 | t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd", | |
1869 | TYPE_CODE_UNION); | |
1870 | ||
1871 | elem = builtin_type (gdbarch)->builtin_double; | |
1872 | append_composite_type_field (t, "f", elem); | |
1873 | ||
1874 | elem = builtin_type (gdbarch)->builtin_uint64; | |
1875 | append_composite_type_field (t, "u", elem); | |
1876 | ||
1877 | elem = builtin_type (gdbarch)->builtin_int64; | |
1878 | append_composite_type_field (t, "s", elem); | |
1879 | ||
1880 | tdep->vnd_type = t; | |
1881 | } | |
1882 | ||
1883 | return tdep->vnd_type; | |
1884 | } | |
1885 | ||
1886 | /* Return the type for an AdvSIMD S register. */ | |
1887 | ||
1888 | static struct type * | |
1889 | aarch64_vns_type (struct gdbarch *gdbarch) | |
1890 | { | |
1891 | struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch); | |
1892 | ||
1893 | if (tdep->vns_type == NULL) | |
1894 | { | |
1895 | struct type *t; | |
1896 | struct type *elem; | |
1897 | ||
1898 | t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns", | |
1899 | TYPE_CODE_UNION); | |
1900 | ||
1901 | elem = builtin_type (gdbarch)->builtin_float; | |
1902 | append_composite_type_field (t, "f", elem); | |
1903 | ||
1904 | elem = builtin_type (gdbarch)->builtin_uint32; | |
1905 | append_composite_type_field (t, "u", elem); | |
1906 | ||
1907 | elem = builtin_type (gdbarch)->builtin_int32; | |
1908 | append_composite_type_field (t, "s", elem); | |
1909 | ||
1910 | tdep->vns_type = t; | |
1911 | } | |
1912 | ||
1913 | return tdep->vns_type; | |
1914 | } | |
1915 | ||
1916 | /* Return the type for an AdvSIMD H register. */ | |
1917 | ||
1918 | static struct type * | |
1919 | aarch64_vnh_type (struct gdbarch *gdbarch) | |
1920 | { | |
1921 | struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch); | |
1922 | ||
1923 | if (tdep->vnh_type == NULL) | |
1924 | { | |
1925 | struct type *t; | |
1926 | struct type *elem; | |
1927 | ||
1928 | t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh", | |
1929 | TYPE_CODE_UNION); | |
1930 | ||
1931 | elem = builtin_type (gdbarch)->builtin_uint16; | |
1932 | append_composite_type_field (t, "u", elem); | |
1933 | ||
1934 | elem = builtin_type (gdbarch)->builtin_int16; | |
1935 | append_composite_type_field (t, "s", elem); | |
1936 | ||
1937 | tdep->vnh_type = t; | |
1938 | } | |
1939 | ||
1940 | return tdep->vnh_type; | |
1941 | } | |
1942 | ||
1943 | /* Return the type for an AdvSIMD B register. */ | |
1944 | ||
1945 | static struct type * | |
1946 | aarch64_vnb_type (struct gdbarch *gdbarch) | |
1947 | { | |
1948 | struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch); | |
1949 | ||
1950 | if (tdep->vnb_type == NULL) | |
1951 | { | |
1952 | struct type *t; | |
1953 | struct type *elem; | |
1954 | ||
1955 | t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb", | |
1956 | TYPE_CODE_UNION); | |
1957 | ||
1958 | elem = builtin_type (gdbarch)->builtin_uint8; | |
1959 | append_composite_type_field (t, "u", elem); | |
1960 | ||
1961 | elem = builtin_type (gdbarch)->builtin_int8; | |
1962 | append_composite_type_field (t, "s", elem); | |
1963 | ||
1964 | tdep->vnb_type = t; | |
1965 | } | |
1966 | ||
1967 | return tdep->vnb_type; | |
1968 | } | |
1969 | ||
63bad7b6 AH |
1970 | /* Return the type for an AdvSIMD V register. */ |
1971 | ||
1972 | static struct type * | |
1973 | aarch64_vnv_type (struct gdbarch *gdbarch) | |
1974 | { | |
1975 | struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch); | |
1976 | ||
1977 | if (tdep->vnv_type == NULL) | |
1978 | { | |
bffa1015 AH |
1979 | /* The other AArch64 pseudo registers (Q,D,H,S,B) refer to a single value |
1980 | slice from the non-pseudo vector registers.  However, NEON V registers |
1981 | are always vector registers, and need constructing as such. */ |
1982 | const struct builtin_type *bt = builtin_type (gdbarch); | |
1983 | ||
63bad7b6 AH |
1984 | struct type *t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnv", |
1985 | TYPE_CODE_UNION); | |
1986 | ||
bffa1015 AH |
1987 | struct type *sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd", |
1988 | TYPE_CODE_UNION); | |
1989 | append_composite_type_field (sub, "f", | |
1990 | init_vector_type (bt->builtin_double, 2)); | |
1991 | append_composite_type_field (sub, "u", | |
1992 | init_vector_type (bt->builtin_uint64, 2)); | |
1993 | append_composite_type_field (sub, "s", | |
1994 | init_vector_type (bt->builtin_int64, 2)); | |
1995 | append_composite_type_field (t, "d", sub); | |
1996 | ||
1997 | sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vns", | |
1998 | TYPE_CODE_UNION); | |
1999 | append_composite_type_field (sub, "f", | |
2000 | init_vector_type (bt->builtin_float, 4)); | |
2001 | append_composite_type_field (sub, "u", | |
2002 | init_vector_type (bt->builtin_uint32, 4)); | |
2003 | append_composite_type_field (sub, "s", | |
2004 | init_vector_type (bt->builtin_int32, 4)); | |
2005 | append_composite_type_field (t, "s", sub); | |
2006 | ||
2007 | sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh", | |
2008 | TYPE_CODE_UNION); | |
2009 | append_composite_type_field (sub, "u", | |
2010 | init_vector_type (bt->builtin_uint16, 8)); | |
2011 | append_composite_type_field (sub, "s", | |
2012 | init_vector_type (bt->builtin_int16, 8)); | |
2013 | append_composite_type_field (t, "h", sub); | |
2014 | ||
2015 | sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb", | |
2016 | TYPE_CODE_UNION); | |
2017 | append_composite_type_field (sub, "u", | |
2018 | init_vector_type (bt->builtin_uint8, 16)); | |
2019 | append_composite_type_field (sub, "s", | |
2020 | init_vector_type (bt->builtin_int8, 16)); | |
2021 | append_composite_type_field (t, "b", sub); | |
2022 | ||
2023 | sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq", | |
2024 | TYPE_CODE_UNION); | |
2025 | append_composite_type_field (sub, "u", | |
2026 | init_vector_type (bt->builtin_uint128, 1)); | |
2027 | append_composite_type_field (sub, "s", | |
2028 | init_vector_type (bt->builtin_int128, 1)); | |
2029 | append_composite_type_field (t, "q", sub); | |
63bad7b6 AH |
2030 | |
2031 | tdep->vnv_type = t; | |
2032 | } | |
2033 | ||
2034 | return tdep->vnv_type; | |
2035 | } | |
2036 | ||
07b287a0 MS |
2037 | /* Implement the "dwarf2_reg_to_regnum" gdbarch method. */ |
2038 | ||
2039 | static int | |
2040 | aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg) | |
2041 | { | |
34dcc7cf AH |
2042 | struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch); |
2043 | ||
07b287a0 MS |
2044 | if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30) |
2045 | return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0; | |
2046 | ||
2047 | if (reg == AARCH64_DWARF_SP) | |
2048 | return AARCH64_SP_REGNUM; | |
2049 | ||
2050 | if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31) | |
2051 | return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0; | |
2052 | ||
65d4cada AH |
2053 | if (reg == AARCH64_DWARF_SVE_VG) |
2054 | return AARCH64_SVE_VG_REGNUM; | |
2055 | ||
2056 | if (reg == AARCH64_DWARF_SVE_FFR) | |
2057 | return AARCH64_SVE_FFR_REGNUM; | |
2058 | ||
2059 | if (reg >= AARCH64_DWARF_SVE_P0 && reg <= AARCH64_DWARF_SVE_P0 + 15) | |
2060 | return AARCH64_SVE_P0_REGNUM + reg - AARCH64_DWARF_SVE_P0; | |
2061 | ||
2062 | if (reg >= AARCH64_DWARF_SVE_Z0 && reg <= AARCH64_DWARF_SVE_Z0 + 15) | |
2063 | return AARCH64_SVE_Z0_REGNUM + reg - AARCH64_DWARF_SVE_Z0; | |
2064 | ||
34dcc7cf AH |
2065 | if (tdep->has_pauth ()) |
2066 | { | |
2067 | if (reg >= AARCH64_DWARF_PAUTH_DMASK && reg <= AARCH64_DWARF_PAUTH_CMASK) | |
2068 | return tdep->pauth_reg_base + reg - AARCH64_DWARF_PAUTH_DMASK; | |
2069 | ||
2070 | if (reg == AARCH64_DWARF_PAUTH_RA_STATE) | |
2071 | return tdep->pauth_ra_state_regnum; | |
2072 | } | |
2073 | ||
07b287a0 MS |
2074 | return -1; |
2075 | } | |
07b287a0 MS |
2076 | |
2077 | /* Implement the "print_insn" gdbarch method. */ | |
2078 | ||
2079 | static int | |
2080 | aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info) | |
2081 | { | |
2082 | info->symbols = NULL; | |
6394c606 | 2083 | return default_print_insn (memaddr, info); |
07b287a0 MS |
2084 | } |
2085 | ||
2086 | /* AArch64 BRK software debug mode instruction. | |
2087 | Note that AArch64 code is always little-endian. | |
2088 | 1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000. */ | |
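/* The byte array below is the little-endian encoding of BRK #0
   (0xd4200000), i.e. bytes 0x00 0x00 0x20 0xd4 in memory order. */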
04180708 | 2089 | constexpr gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4}; |
07b287a0 | 2090 | |
04180708 | 2091 | typedef BP_MANIPULATION (aarch64_default_breakpoint) aarch64_breakpoint; |
07b287a0 MS |
2092 | |
2093 | /* Extract from an array REGS containing the (raw) register state a | |
2094 | function return value of type TYPE, and copy that, in virtual | |
2095 | format, into VALBUF. */ | |
2096 | ||
2097 | static void | |
2098 | aarch64_extract_return_value (struct type *type, struct regcache *regs, | |
2099 | gdb_byte *valbuf) | |
2100 | { | |
ac7936df | 2101 | struct gdbarch *gdbarch = regs->arch (); |
07b287a0 | 2102 | enum bfd_endian byte_order = gdbarch_byte_order (gdbarch); |
4f4aedeb AH |
2103 | int elements; |
2104 | struct type *fundamental_type; | |
07b287a0 | 2105 | |
4f4aedeb AH |
2106 | if (aapcs_is_vfp_call_or_return_candidate (type, &elements, |
2107 | &fundamental_type)) | |
07b287a0 | 2108 | { |
4f4aedeb AH |
2109 | int len = TYPE_LENGTH (fundamental_type); |
2110 | ||
2111 | for (int i = 0; i < elements; i++) | |
2112 | { | |
2113 | int regno = AARCH64_V0_REGNUM + i; | |
3ff2c72e AH |
2114 | /* Enough space for a full vector register. */ |
2115 | gdb_byte buf[register_size (gdbarch, regno)]; | |
2116 | gdb_assert (len <= sizeof (buf)); | |
4f4aedeb AH |
2117 | |
2118 | if (aarch64_debug) | |
2119 | { | |
2120 | debug_printf ("read HFA or HVA return value element %d from %s\n", | |
2121 | i + 1, | |
2122 | gdbarch_register_name (gdbarch, regno)); | |
2123 | } | |
2124 | regs->cooked_read (regno, buf); | |
07b287a0 | 2125 | |
4f4aedeb AH |
2126 | memcpy (valbuf, buf, len); |
2127 | valbuf += len; | |
2128 | } | |
07b287a0 MS |
2129 | } |
2130 | else if (TYPE_CODE (type) == TYPE_CODE_INT | |
2131 | || TYPE_CODE (type) == TYPE_CODE_CHAR | |
2132 | || TYPE_CODE (type) == TYPE_CODE_BOOL | |
2133 | || TYPE_CODE (type) == TYPE_CODE_PTR | |
aa006118 | 2134 | || TYPE_IS_REFERENCE (type) |
07b287a0 MS |
2135 | || TYPE_CODE (type) == TYPE_CODE_ENUM) |
2136 | { | |
6471e7d2 | 2137 | /* If the type is a plain integer, then the access is |
07b287a0 MS |
2138 | straightforward.  Otherwise we have to play around a bit | |
2139 | more. */ | |
2140 | int len = TYPE_LENGTH (type); | |
2141 | int regno = AARCH64_X0_REGNUM; | |
2142 | ULONGEST tmp; | |
2143 | ||
2144 | while (len > 0) | |
2145 | { | |
2146 | /* By using store_unsigned_integer we avoid having to do | |
2147 | anything special for small big-endian values. */ | |
2148 | regcache_cooked_read_unsigned (regs, regno++, &tmp); | |
2149 | store_unsigned_integer (valbuf, | |
2150 | (len > X_REGISTER_SIZE | |
2151 | ? X_REGISTER_SIZE : len), byte_order, tmp); | |
2152 | len -= X_REGISTER_SIZE; | |
2153 | valbuf += X_REGISTER_SIZE; | |
2154 | } | |
2155 | } | |
07b287a0 MS |
2156 | else |
2157 | { | |
2158 | /* For a structure or union the behaviour is as if the value had | |
2159 | been stored to word-aligned memory and then loaded into | |
2160 | registers with 64-bit load instruction(s). */ | |
2161 | int len = TYPE_LENGTH (type); | |
2162 | int regno = AARCH64_X0_REGNUM; | |
2163 | bfd_byte buf[X_REGISTER_SIZE]; | |
2164 | ||
2165 | while (len > 0) | |
2166 | { | |
dca08e1f | 2167 | regs->cooked_read (regno++, buf); |
07b287a0 MS |
2168 | memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len); |
2169 | len -= X_REGISTER_SIZE; | |
2170 | valbuf += X_REGISTER_SIZE; | |
2171 | } | |
2172 | } | |
2173 | } | |
2174 | ||
2175 | ||
2176 | /* Will a function return an aggregate type in memory or in a | |
2177 | register? Return 0 if an aggregate type can be returned in a | |
2178 | register, 1 if it must be returned in memory. */ | |
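/* For example, a homogeneous aggregate of four doubles (32 bytes) is still
   returned in V registers, while a 24-byte non-homogeneous struct exceeds
   16 bytes and is returned through memory. */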
2179 | ||
2180 | static int | |
2181 | aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type) | |
2182 | { | |
f168693b | 2183 | type = check_typedef (type); |
4f4aedeb AH |
2184 | int elements; |
2185 | struct type *fundamental_type; | |
07b287a0 | 2186 | |
4f4aedeb AH |
2187 | if (aapcs_is_vfp_call_or_return_candidate (type, &elements, |
2188 | &fundamental_type)) | |
07b287a0 | 2189 | { |
cd635f74 YQ |
2190 | /* v0-v7 are used to return values and one register is allocated |
2191 | for one member.  However, an HFA or HVA has at most four members. */ | |
07b287a0 MS |
2192 | return 0; |
2193 | } | |
2194 | ||
2195 | if (TYPE_LENGTH (type) > 16) | |
2196 | { | |
2197 | /* PCS B.6 Aggregates larger than 16 bytes are passed by | |
2198 | invisible reference. */ | |
2199 | ||
2200 | return 1; | |
2201 | } | |
2202 | ||
2203 | return 0; | |
2204 | } | |
2205 | ||
2206 | /* Write into appropriate registers a function return value of type | |
2207 | TYPE, given in virtual format. */ | |
2208 | ||
2209 | static void | |
2210 | aarch64_store_return_value (struct type *type, struct regcache *regs, | |
2211 | const gdb_byte *valbuf) | |
2212 | { | |
ac7936df | 2213 | struct gdbarch *gdbarch = regs->arch (); |
07b287a0 | 2214 | enum bfd_endian byte_order = gdbarch_byte_order (gdbarch); |
4f4aedeb AH |
2215 | int elements; |
2216 | struct type *fundamental_type; | |
07b287a0 | 2217 | |
4f4aedeb AH |
2218 | if (aapcs_is_vfp_call_or_return_candidate (type, &elements, |
2219 | &fundamental_type)) | |
07b287a0 | 2220 | { |
4f4aedeb AH |
2221 | int len = TYPE_LENGTH (fundamental_type); |
2222 | ||
2223 | for (int i = 0; i < elements; i++) | |
2224 | { | |
2225 | int regno = AARCH64_V0_REGNUM + i; | |
3ff2c72e AH |
2226 | /* Enough space for a full vector register. */ |
2227 | gdb_byte tmpbuf[register_size (gdbarch, regno)]; | |
2228 | gdb_assert (len <= sizeof (tmpbuf)); | |
4f4aedeb AH |
2229 | |
2230 | if (aarch64_debug) | |
2231 | { | |
2232 | debug_printf ("write HFA or HVA return value element %d to %s\n", | |
2233 | i + 1, | |
2234 | gdbarch_register_name (gdbarch, regno)); | |
2235 | } | |
07b287a0 | 2236 | |
4f4aedeb AH |
2237 | memcpy (tmpbuf, valbuf, |
2238 | len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len); | |
2239 | regs->cooked_write (regno, tmpbuf); | |
2240 | valbuf += len; | |
2241 | } | |
07b287a0 MS |
2242 | } |
2243 | else if (TYPE_CODE (type) == TYPE_CODE_INT | |
2244 | || TYPE_CODE (type) == TYPE_CODE_CHAR | |
2245 | || TYPE_CODE (type) == TYPE_CODE_BOOL | |
2246 | || TYPE_CODE (type) == TYPE_CODE_PTR | |
aa006118 | 2247 | || TYPE_IS_REFERENCE (type) |
07b287a0 MS |
2248 | || TYPE_CODE (type) == TYPE_CODE_ENUM) |
2249 | { | |
2250 | if (TYPE_LENGTH (type) <= X_REGISTER_SIZE) | |
2251 | { | |
2252 | /* Values of one word or less are zero/sign-extended and | |
2253 | returned in X0. */ | |
2254 | bfd_byte tmpbuf[X_REGISTER_SIZE]; | |
2255 | LONGEST val = unpack_long (type, valbuf); | |
2256 | ||
2257 | store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val); | |
b66f5587 | 2258 | regs->cooked_write (AARCH64_X0_REGNUM, tmpbuf); |
07b287a0 MS |
2259 | } |
2260 | else | |
2261 | { | |
2262 | /* Integral values greater than one word are stored in | |
2263 | consecutive registers starting with X0.  This will always | |
2264 | be a multiple of the register size. */ | |
2265 | int len = TYPE_LENGTH (type); | |
2266 | int regno = AARCH64_X0_REGNUM; | |
2267 | ||
2268 | while (len > 0) | |
2269 | { | |
b66f5587 | 2270 | regs->cooked_write (regno++, valbuf); |
07b287a0 MS |
2271 | len -= X_REGISTER_SIZE; |
2272 | valbuf += X_REGISTER_SIZE; | |
2273 | } | |
2274 | } | |
2275 | } | |
07b287a0 MS |
2276 | else |
2277 | { | |
2278 | /* For a structure or union the behaviour is as if the value had | |
2279 | been stored to word-aligned memory and then loaded into | |
2280 | registers with 64-bit load instruction(s). */ | |
2281 | int len = TYPE_LENGTH (type); | |
2282 | int regno = AARCH64_X0_REGNUM; | |
2283 | bfd_byte tmpbuf[X_REGISTER_SIZE]; | |
2284 | ||
2285 | while (len > 0) | |
2286 | { | |
2287 | memcpy (tmpbuf, valbuf, | |
2288 | len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len); | |
b66f5587 | 2289 | regs->cooked_write (regno++, tmpbuf); |
07b287a0 MS |
2290 | len -= X_REGISTER_SIZE; |
2291 | valbuf += X_REGISTER_SIZE; | |
2292 | } | |
2293 | } | |
2294 | } | |
2295 | ||
2296 | /* Implement the "return_value" gdbarch method. */ | |
2297 | ||
2298 | static enum return_value_convention | |
2299 | aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value, | |
2300 | struct type *valtype, struct regcache *regcache, | |
2301 | gdb_byte *readbuf, const gdb_byte *writebuf) | |
2302 | { | |
07b287a0 MS |
2303 | |
2304 | if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT | |
2305 | || TYPE_CODE (valtype) == TYPE_CODE_UNION | |
2306 | || TYPE_CODE (valtype) == TYPE_CODE_ARRAY) | |
2307 | { | |
2308 | if (aarch64_return_in_memory (gdbarch, valtype)) | |
2309 | { | |
2310 | if (aarch64_debug) | |
b277c936 | 2311 | debug_printf ("return value in memory\n"); |
07b287a0 MS |
2312 | return RETURN_VALUE_STRUCT_CONVENTION; |
2313 | } | |
2314 | } | |
2315 | ||
2316 | if (writebuf) | |
2317 | aarch64_store_return_value (valtype, regcache, writebuf); | |
2318 | ||
2319 | if (readbuf) | |
2320 | aarch64_extract_return_value (valtype, regcache, readbuf); | |
2321 | ||
2322 | if (aarch64_debug) | |
b277c936 | 2323 | debug_printf ("return value in registers\n"); |
07b287a0 MS |
2324 | |
2325 | return RETURN_VALUE_REGISTER_CONVENTION; | |
2326 | } | |
2327 | ||
2328 | /* Implement the "get_longjmp_target" gdbarch method. */ | |
2329 | ||
2330 | static int | |
2331 | aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc) | |
2332 | { | |
2333 | CORE_ADDR jb_addr; | |
2334 | gdb_byte buf[X_REGISTER_SIZE]; | |
2335 | struct gdbarch *gdbarch = get_frame_arch (frame); | |
2336 | struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch); | |
2337 | enum bfd_endian byte_order = gdbarch_byte_order (gdbarch); | |
2338 | ||
2339 | jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM); | |
2340 | ||
2341 | if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf, | |
2342 | X_REGISTER_SIZE)) | |
2343 | return 0; | |
2344 | ||
2345 | *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order); | |
2346 | return 1; | |
2347 | } | |
ea873d8e PL |
2348 | |
2349 | /* Implement the "gen_return_address" gdbarch method. */ | |
2350 | ||
2351 | static void | |
2352 | aarch64_gen_return_address (struct gdbarch *gdbarch, | |
2353 | struct agent_expr *ax, struct axs_value *value, | |
2354 | CORE_ADDR scope) | |
2355 | { | |
2356 | value->type = register_type (gdbarch, AARCH64_LR_REGNUM); | |
2357 | value->kind = axs_lvalue_register; | |
2358 | value->u.reg = AARCH64_LR_REGNUM; | |
2359 | } | |
07b287a0 MS |
2360 | \f |
2361 | ||
2362 | /* Return the pseudo register name corresponding to register regnum. */ | |
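/* The Q, D, S, H and B views each occupy a bank of 32 consecutive pseudo
   register numbers counted from gdbarch_num_regs; targets with SVE add a
   further bank of 32 V views. */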
2363 | ||
2364 | static const char * | |
2365 | aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum) | |
2366 | { | |
63bad7b6 AH |
2367 | struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch); |
2368 | ||
07b287a0 MS |
2369 | static const char *const q_name[] = |
2370 | { | |
2371 | "q0", "q1", "q2", "q3", | |
2372 | "q4", "q5", "q6", "q7", | |
2373 | "q8", "q9", "q10", "q11", | |
2374 | "q12", "q13", "q14", "q15", | |
2375 | "q16", "q17", "q18", "q19", | |
2376 | "q20", "q21", "q22", "q23", | |
2377 | "q24", "q25", "q26", "q27", | |
2378 | "q28", "q29", "q30", "q31", | |
2379 | }; | |
2380 | ||
2381 | static const char *const d_name[] = | |
2382 | { | |
2383 | "d0", "d1", "d2", "d3", | |
2384 | "d4", "d5", "d6", "d7", | |
2385 | "d8", "d9", "d10", "d11", | |
2386 | "d12", "d13", "d14", "d15", | |
2387 | "d16", "d17", "d18", "d19", | |
2388 | "d20", "d21", "d22", "d23", | |
2389 | "d24", "d25", "d26", "d27", | |
2390 | "d28", "d29", "d30", "d31", | |
2391 | }; | |
2392 | ||
2393 | static const char *const s_name[] = | |
2394 | { | |
2395 | "s0", "s1", "s2", "s3", | |
2396 | "s4", "s5", "s6", "s7", | |
2397 | "s8", "s9", "s10", "s11", | |
2398 | "s12", "s13", "s14", "s15", | |
2399 | "s16", "s17", "s18", "s19", | |
2400 | "s20", "s21", "s22", "s23", | |
2401 | "s24", "s25", "s26", "s27", | |
2402 | "s28", "s29", "s30", "s31", | |
2403 | }; | |
2404 | ||
2405 | static const char *const h_name[] = | |
2406 | { | |
2407 | "h0", "h1", "h2", "h3", | |
2408 | "h4", "h5", "h6", "h7", | |
2409 | "h8", "h9", "h10", "h11", | |
2410 | "h12", "h13", "h14", "h15", | |
2411 | "h16", "h17", "h18", "h19", | |
2412 | "h20", "h21", "h22", "h23", | |
2413 | "h24", "h25", "h26", "h27", | |
2414 | "h28", "h29", "h30", "h31", | |
2415 | }; | |
2416 | ||
2417 | static const char *const b_name[] = | |
2418 | { | |
2419 | "b0", "b1", "b2", "b3", | |
2420 | "b4", "b5", "b6", "b7", | |
2421 | "b8", "b9", "b10", "b11", | |
2422 | "b12", "b13", "b14", "b15", | |
2423 | "b16", "b17", "b18", "b19", | |
2424 | "b20", "b21", "b22", "b23", | |
2425 | "b24", "b25", "b26", "b27", | |
2426 | "b28", "b29", "b30", "b31", | |
2427 | }; | |
2428 | ||
34dcc7cf | 2429 | int p_regnum = regnum - gdbarch_num_regs (gdbarch); |
07b287a0 | 2430 | |
34dcc7cf AH |
2431 | if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32) |
2432 | return q_name[p_regnum - AARCH64_Q0_REGNUM]; | |
07b287a0 | 2433 | |
34dcc7cf AH |
2434 | if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32) |
2435 | return d_name[p_regnum - AARCH64_D0_REGNUM]; | |
07b287a0 | 2436 | |
34dcc7cf AH |
2437 | if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32) |
2438 | return s_name[p_regnum - AARCH64_S0_REGNUM]; | |
07b287a0 | 2439 | |
34dcc7cf AH |
2440 | if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32) |
2441 | return h_name[p_regnum - AARCH64_H0_REGNUM]; | |
07b287a0 | 2442 | |
34dcc7cf AH |
2443 | if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32) |
2444 | return b_name[p_regnum - AARCH64_B0_REGNUM]; | |
07b287a0 | 2445 | |
63bad7b6 AH |
2446 | if (tdep->has_sve ()) |
2447 | { | |
2448 | static const char *const sve_v_name[] = | |
2449 | { | |
2450 | "v0", "v1", "v2", "v3", | |
2451 | "v4", "v5", "v6", "v7", | |
2452 | "v8", "v9", "v10", "v11", | |
2453 | "v12", "v13", "v14", "v15", | |
2454 | "v16", "v17", "v18", "v19", | |
2455 | "v20", "v21", "v22", "v23", | |
2456 | "v24", "v25", "v26", "v27", | |
2457 | "v28", "v29", "v30", "v31", | |
2458 | }; | |
2459 | ||
34dcc7cf AH |
2460 | if (p_regnum >= AARCH64_SVE_V0_REGNUM |
2461 | && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM) | |
2462 | return sve_v_name[p_regnum - AARCH64_SVE_V0_REGNUM]; | |
63bad7b6 AH |
2463 | } |
2464 | ||
34dcc7cf AH |
2465 | /* RA_STATE is used for unwinding only. Do not assign it a name - this |
2466 | prevents it from being read by methods such as | |
2467 | mi_cmd_trace_frame_collected. */ | |
2468 | if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum) | |
2469 | return ""; | |
2470 | ||
07b287a0 MS |
2471 | internal_error (__FILE__, __LINE__, |
2472 | _("aarch64_pseudo_register_name: bad register number %d"), | |
34dcc7cf | 2473 | p_regnum); |
07b287a0 MS |
2474 | } |
2475 | ||
2476 | /* Implement the "pseudo_register_type" gdbarch method. */ | |
2477 | ||
2478 | static struct type * | |
2479 | aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum) | |
2480 | { | |
63bad7b6 AH |
2481 | struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch); |
2482 | ||
34dcc7cf | 2483 | int p_regnum = regnum - gdbarch_num_regs (gdbarch); |
07b287a0 | 2484 | |
34dcc7cf | 2485 | if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32) |
07b287a0 MS |
2486 | return aarch64_vnq_type (gdbarch); |
2487 | ||
34dcc7cf | 2488 | if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32) |
07b287a0 MS |
2489 | return aarch64_vnd_type (gdbarch); |
2490 | ||
34dcc7cf | 2491 | if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32) |
07b287a0 MS |
2492 | return aarch64_vns_type (gdbarch); |
2493 | ||
34dcc7cf | 2494 | if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32) |
07b287a0 MS |
2495 | return aarch64_vnh_type (gdbarch); |
2496 | ||
34dcc7cf | 2497 | if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32) |
07b287a0 MS |
2498 | return aarch64_vnb_type (gdbarch); |
2499 | ||
34dcc7cf AH |
2500 | if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM |
2501 | && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM) | |
63bad7b6 AH |
2502 | return aarch64_vnv_type (gdbarch); |
2503 | ||
34dcc7cf AH |
2504 | if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum) |
2505 | return builtin_type (gdbarch)->builtin_uint64; | |
2506 | ||
07b287a0 MS |
2507 | internal_error (__FILE__, __LINE__, |
2508 | _("aarch64_pseudo_register_type: bad register number %d"), | |
34dcc7cf | 2509 | p_regnum); |
07b287a0 MS |
2510 | } |
2511 | ||
2512 | /* Implement the "pseudo_register_reggroup_p" gdbarch method. */ | |
2513 | ||
2514 | static int | |
2515 | aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum, | |
2516 | struct reggroup *group) | |
2517 | { | |
63bad7b6 AH |
2518 | struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch); |
2519 | ||
34dcc7cf | 2520 | int p_regnum = regnum - gdbarch_num_regs (gdbarch); |
07b287a0 | 2521 | |
34dcc7cf | 2522 | if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32) |
07b287a0 | 2523 | return group == all_reggroup || group == vector_reggroup; |
34dcc7cf | 2524 | else if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32) |
07b287a0 MS |
2525 | return (group == all_reggroup || group == vector_reggroup |
2526 | || group == float_reggroup); | |
34dcc7cf | 2527 | else if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32) |
07b287a0 MS |
2528 | return (group == all_reggroup || group == vector_reggroup |
2529 | || group == float_reggroup); | |
34dcc7cf | 2530 | else if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32) |
07b287a0 | 2531 | return group == all_reggroup || group == vector_reggroup; |
34dcc7cf | 2532 | else if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32) |
07b287a0 | 2533 | return group == all_reggroup || group == vector_reggroup; |
34dcc7cf AH |
2534 | else if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM |
2535 | && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM) | |
63bad7b6 | 2536 | return group == all_reggroup || group == vector_reggroup; |
34dcc7cf AH |
2537 | /* RA_STATE is used for unwinding only. Do not assign it to any groups. */ |
2538 | if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum) | |
2539 | return 0; | |
07b287a0 MS |
2540 | |
2541 | return group == all_reggroup; | |
2542 | } | |
2543 | ||
3c5cd5c3 AH |
2544 | /* Helper for aarch64_pseudo_read_value. */ |
2545 | ||
2546 | static struct value * | |
63bad7b6 AH |
2547 | aarch64_pseudo_read_value_1 (struct gdbarch *gdbarch, |
2548 | readable_regcache *regcache, int regnum_offset, | |
3c5cd5c3 AH |
2549 | int regsize, struct value *result_value) |
2550 | { | |
3c5cd5c3 AH |
2551 | unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset; |
2552 | ||
63bad7b6 AH |
2553 | /* Enough space for a full vector register. */ |
2554 | gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)]; | |
2555 | gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM); | |
2556 | ||
3c5cd5c3 AH |
2557 | if (regcache->raw_read (v_regnum, reg_buf) != REG_VALID) |
2558 | mark_value_bytes_unavailable (result_value, 0, | |
2559 | TYPE_LENGTH (value_type (result_value))); | |
2560 | else | |
2561 | memcpy (value_contents_raw (result_value), reg_buf, regsize); | |
63bad7b6 | 2562 | |
3c5cd5c3 AH |
2563 | return result_value; |
2564 | } | |
2565 | ||
07b287a0 MS |
2566 | /* Implement the "pseudo_register_read_value" gdbarch method. */ |
2567 | ||
2568 | static struct value * | |
3c5cd5c3 | 2569 | aarch64_pseudo_read_value (struct gdbarch *gdbarch, readable_regcache *regcache, |
07b287a0 MS |
2570 | int regnum) |
2571 | { | |
63bad7b6 | 2572 | struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch); |
3c5cd5c3 | 2573 | struct value *result_value = allocate_value (register_type (gdbarch, regnum)); |
07b287a0 | 2574 | |
07b287a0 MS |
2575 | VALUE_LVAL (result_value) = lval_register; |
2576 | VALUE_REGNUM (result_value) = regnum; | |
07b287a0 MS |
2577 | |
2578 | regnum -= gdbarch_num_regs (gdbarch); | |
2579 | ||
2580 | if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32) | |
63bad7b6 AH |
2581 | return aarch64_pseudo_read_value_1 (gdbarch, regcache, |
2582 | regnum - AARCH64_Q0_REGNUM, | |
3c5cd5c3 | 2583 | Q_REGISTER_SIZE, result_value); |
07b287a0 MS |
2584 | |
2585 | if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32) | |
63bad7b6 AH |
2586 | return aarch64_pseudo_read_value_1 (gdbarch, regcache, |
2587 | regnum - AARCH64_D0_REGNUM, | |
3c5cd5c3 | 2588 | D_REGISTER_SIZE, result_value); |
07b287a0 MS |
2589 | |
2590 | if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32) | |
63bad7b6 AH |
2591 | return aarch64_pseudo_read_value_1 (gdbarch, regcache, |
2592 | regnum - AARCH64_S0_REGNUM, | |
3c5cd5c3 | 2593 | S_REGISTER_SIZE, result_value); |
07b287a0 MS |
2594 | |
2595 | if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32) | |
63bad7b6 AH |
2596 | return aarch64_pseudo_read_value_1 (gdbarch, regcache, |
2597 | regnum - AARCH64_H0_REGNUM, | |
3c5cd5c3 | 2598 | H_REGISTER_SIZE, result_value); |
07b287a0 MS |
2599 | |
2600 | if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32) | |
63bad7b6 AH |
2601 | return aarch64_pseudo_read_value_1 (gdbarch, regcache, |
2602 | regnum - AARCH64_B0_REGNUM, | |
3c5cd5c3 | 2603 | B_REGISTER_SIZE, result_value); |
07b287a0 | 2604 | |
63bad7b6 AH |
2605 | if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM |
2606 | && regnum < AARCH64_SVE_V0_REGNUM + 32) | |
2607 | return aarch64_pseudo_read_value_1 (gdbarch, regcache, | |
2608 | regnum - AARCH64_SVE_V0_REGNUM, | |
2609 | V_REGISTER_SIZE, result_value); | |
2610 | ||
07b287a0 MS |
2611 | gdb_assert_not_reached ("regnum out of bound"); |
2612 | } | |
2613 | ||
3c5cd5c3 | 2614 | /* Helper for aarch64_pseudo_write. */ |
07b287a0 MS |
2615 | |
2616 | static void | |
63bad7b6 AH |
2617 | aarch64_pseudo_write_1 (struct gdbarch *gdbarch, struct regcache *regcache, |
2618 | int regnum_offset, int regsize, const gdb_byte *buf) | |
07b287a0 | 2619 | { |
3c5cd5c3 | 2620 | unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset; |
07b287a0 | 2621 | |
63bad7b6 AH |
2622 | /* Enough space for a full vector register. */ |
2623 | gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)]; | |
2624 | gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM); | |
2625 | ||
07b287a0 MS |
2626 | /* Ensure the register buffer is zero; we want GDB writes of the |
2627 | various 'scalar' pseudo registers to behave like architectural |
2628 | writes: register width bytes are written, the remainder are set to |
2629 | zero. */ | |
63bad7b6 | 2630 | memset (reg_buf, 0, register_size (gdbarch, AARCH64_V0_REGNUM)); |
07b287a0 | 2631 | |
3c5cd5c3 AH |
2632 | memcpy (reg_buf, buf, regsize); |
2633 | regcache->raw_write (v_regnum, reg_buf); | |
2634 | } | |
2635 | ||
2636 | /* Implement the "pseudo_register_write" gdbarch method. */ | |
2637 | ||
2638 | static void | |
2639 | aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache, | |
2640 | int regnum, const gdb_byte *buf) | |
2641 | { | |
63bad7b6 | 2642 | struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch); |
07b287a0 MS |
2643 | regnum -= gdbarch_num_regs (gdbarch); |
2644 | ||
2645 | if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32) | |
63bad7b6 AH |
2646 | return aarch64_pseudo_write_1 (gdbarch, regcache, |
2647 | regnum - AARCH64_Q0_REGNUM, Q_REGISTER_SIZE, | |
2648 | buf); | |
07b287a0 MS |
2649 | |
2650 | if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32) | |
63bad7b6 AH |
2651 | return aarch64_pseudo_write_1 (gdbarch, regcache, |
2652 | regnum - AARCH64_D0_REGNUM, D_REGISTER_SIZE, | |
2653 | buf); | |
07b287a0 MS |
2654 | |
2655 | if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32) | |
63bad7b6 AH |
2656 | return aarch64_pseudo_write_1 (gdbarch, regcache, |
2657 | regnum - AARCH64_S0_REGNUM, S_REGISTER_SIZE, | |
2658 | buf); | |
07b287a0 MS |
2659 | |
2660 | if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32) | |
63bad7b6 AH |
2661 | return aarch64_pseudo_write_1 (gdbarch, regcache, |
2662 | regnum - AARCH64_H0_REGNUM, H_REGISTER_SIZE, | |
2663 | buf); | |
07b287a0 MS |
2664 | |
2665 | if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32) | |
63bad7b6 AH |
2666 | return aarch64_pseudo_write_1 (gdbarch, regcache, |
2667 | regnum - AARCH64_B0_REGNUM, B_REGISTER_SIZE, | |
2668 | buf); | |
2669 | ||
2670 | if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM | |
2671 | && regnum < AARCH64_SVE_V0_REGNUM + 32) | |
2672 | return aarch64_pseudo_write_1 (gdbarch, regcache, | |
2673 | regnum - AARCH64_SVE_V0_REGNUM, | |
2674 | V_REGISTER_SIZE, buf); | |
07b287a0 MS |
2675 | |
2676 | gdb_assert_not_reached ("regnum out of bound"); | |
2677 | } | |
2678 | ||
07b287a0 MS |
2679 | /* Callback function for user_reg_add. */ |
2680 | ||
2681 | static struct value * | |
2682 | value_of_aarch64_user_reg (struct frame_info *frame, const void *baton) | |
2683 | { | |
9a3c8263 | 2684 | const int *reg_p = (const int *) baton; |
07b287a0 MS |
2685 | |
2686 | return value_of_register (*reg_p, frame); | |
2687 | } | |
2688 | \f | |
2689 | ||
9404b58f KM |
2690 | /* Implement the "software_single_step" gdbarch method, needed to |
2691 | single step through atomic sequences on AArch64. */ | |
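/* The sequence is expected to begin with a load-exclusive instruction and
   to end with the matching store-exclusive within the next 16 instructions;
   breakpoints are placed just past the store-exclusive and at the target of
   any conditional branch found inside the sequence. */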
2692 | ||
a0ff9e1a | 2693 | static std::vector<CORE_ADDR> |
f5ea389a | 2694 | aarch64_software_single_step (struct regcache *regcache) |
9404b58f | 2695 | { |
ac7936df | 2696 | struct gdbarch *gdbarch = regcache->arch (); |
9404b58f KM |
2697 | enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch); |
2698 | const int insn_size = 4; | |
2699 | const int atomic_sequence_length = 16; /* Instruction sequence length. */ | |
0187a92f | 2700 | CORE_ADDR pc = regcache_read_pc (regcache); |
70ab8ccd | 2701 | CORE_ADDR breaks[2] = { CORE_ADDR_MAX, CORE_ADDR_MAX }; |
9404b58f KM |
2702 | CORE_ADDR loc = pc; |
2703 | CORE_ADDR closing_insn = 0; | |
2704 | uint32_t insn = read_memory_unsigned_integer (loc, insn_size, | |
2705 | byte_order_for_code); | |
2706 | int index; | |
2707 | int insn_count; | |
2708 | int bc_insn_count = 0; /* Conditional branch instruction count. */ | |
2709 | int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed). */ | |
f77ee802 YQ |
2710 | aarch64_inst inst; |
2711 | ||
561a72d4 | 2712 | if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0) |
a0ff9e1a | 2713 | return {}; |
9404b58f KM |
2714 | |
2715 | /* Look for a Load Exclusive instruction which begins the sequence. */ | |
f77ee802 | 2716 | if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0) |
a0ff9e1a | 2717 | return {}; |
9404b58f KM |
2718 | |
2719 | for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count) | |
2720 | { | |
9404b58f KM |
2721 | loc += insn_size; |
2722 | insn = read_memory_unsigned_integer (loc, insn_size, | |
2723 | byte_order_for_code); | |
2724 | ||
561a72d4 | 2725 | if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0) |
a0ff9e1a | 2726 | return {}; |
9404b58f | 2727 | /* Check if the instruction is a conditional branch. */ |
f77ee802 | 2728 | if (inst.opcode->iclass == condbranch) |
9404b58f | 2729 | { |
f77ee802 YQ |
2730 | gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19); |
2731 | ||
9404b58f | 2732 | if (bc_insn_count >= 1) |
a0ff9e1a | 2733 | return {}; |
9404b58f KM |
2734 | |
2735 | /* It is, so we'll try to set a breakpoint at the destination. */ | |
f77ee802 | 2736 | breaks[1] = loc + inst.operands[0].imm.value; |
9404b58f KM |
2737 | |
2738 | bc_insn_count++; | |
2739 | last_breakpoint++; | |
2740 | } | |
2741 | ||
2742 | /* Look for the Store Exclusive which closes the atomic sequence. */ | |
f77ee802 | 2743 | if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0) |
9404b58f KM |
2744 | { |
2745 | closing_insn = loc; | |
2746 | break; | |
2747 | } | |
2748 | } | |
2749 | ||
2750 | /* We didn't find a closing Store Exclusive instruction, fall back. */ | |
2751 | if (!closing_insn) | |
a0ff9e1a | 2752 | return {}; |
9404b58f KM |
2753 | |
2754 | /* Insert breakpoint after the end of the atomic sequence. */ | |
2755 | breaks[0] = loc + insn_size; | |
2756 | ||
2757 | /* Check for duplicated breakpoints, and also check that the second | |
2758 | breakpoint is not within the atomic sequence. */ | |
2759 | if (last_breakpoint | |
2760 | && (breaks[1] == breaks[0] | |
2761 | || (breaks[1] >= pc && breaks[1] <= closing_insn))) | |
2762 | last_breakpoint = 0; | |
2763 | ||
a0ff9e1a SM |
2764 | std::vector<CORE_ADDR> next_pcs; |
2765 | ||
9404b58f KM |
2766 | /* Insert the breakpoint at the end of the sequence, and one at the |
2767 | destination of the conditional branch, if it exists. */ | |
2768 | for (index = 0; index <= last_breakpoint; index++) | |
a0ff9e1a | 2769 | next_pcs.push_back (breaks[index]); |
9404b58f | 2770 | |
93f9a11f | 2771 | return next_pcs; |
9404b58f KM |
2772 | } |
2773 | ||
cfba9872 | 2774 | struct aarch64_displaced_step_closure : public displaced_step_closure |
b6542f81 YQ |
2775 | { |
2776 | /* It is true when condition instruction, such as B.CON, TBZ, etc, | |
2777 | is being displaced stepping. */ | |
cfba9872 | 2778 | int cond = 0; |
b6542f81 YQ |
2779 | |
2780 | /* PC adjustment offset after displaced stepping. */ | |
cfba9872 | 2781 | int32_t pc_adjust = 0; |
b6542f81 YQ |
2782 | }; |
2783 | ||
2784 | /* Data when visiting instructions for displaced stepping. */ | |
2785 | ||
2786 | struct aarch64_displaced_step_data | |
2787 | { | |
2788 | struct aarch64_insn_data base; | |
2789 | ||
2791 | /* The address where the instruction will be executed. */ | |
2791 | CORE_ADDR new_addr; | |
2792 | /* Buffer of instructions to be copied to NEW_ADDR to execute. */ | |
2793 | uint32_t insn_buf[DISPLACED_MODIFIED_INSNS]; | |
2794 | /* Number of instructions in INSN_BUF. */ | |
2795 | unsigned insn_count; | |
2796 | /* Registers when doing displaced stepping. */ | |
2797 | struct regcache *regs; | |
2798 | ||
cfba9872 | 2799 | aarch64_displaced_step_closure *dsc; |
b6542f81 YQ |
2800 | }; |
2801 | ||
2802 | /* Implementation of aarch64_insn_visitor method "b". */ | |
2803 | ||
2804 | static void | |
2805 | aarch64_displaced_step_b (const int is_bl, const int32_t offset, | |
2806 | struct aarch64_insn_data *data) | |
2807 | { | |
2808 | struct aarch64_displaced_step_data *dsd | |
2809 | = (struct aarch64_displaced_step_data *) data; | |
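/* The original instruction branches to insn_addr + offset; executed from
   the scratch area at new_addr, the equivalent displacement is
   (insn_addr + offset) - new_addr. */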
2ac09a5b | 2810 | int64_t new_offset = data->insn_addr - dsd->new_addr + offset; |
b6542f81 YQ |
2811 | |
2812 | if (can_encode_int32 (new_offset, 28)) | |
2813 | { | |
2814 | /* Emit B rather than BL, because executing BL on a new address | |
2815 | will get the wrong address into LR. In order to avoid this, | |
2816 | we emit B, and update LR if the instruction is BL. */ | |
2817 | emit_b (dsd->insn_buf, 0, new_offset); | |
2818 | dsd->insn_count++; | |
2819 | } | |
2820 | else | |
2821 | { | |
2822 | /* Write NOP. */ | |
2823 | emit_nop (dsd->insn_buf); | |
2824 | dsd->insn_count++; | |
2825 | dsd->dsc->pc_adjust = offset; | |
2826 | } | |
2827 | ||
2828 | if (is_bl) | |
2829 | { | |
2830 | /* Update LR. */ | |
2831 | regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM, | |
2832 | data->insn_addr + 4); | |
2833 | } | |
2834 | } | |
2835 | ||
2836 | /* Implementation of aarch64_insn_visitor method "b_cond". */ | |
2837 | ||
2838 | static void | |
2839 | aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset, | |
2840 | struct aarch64_insn_data *data) | |
2841 | { | |
2842 | struct aarch64_displaced_step_data *dsd | |
2843 | = (struct aarch64_displaced_step_data *) data; | |
b6542f81 YQ |
2844 | |
2845 | /* GDB has to fix up PC after displaced stepping this instruction | |
2846 | differently according to whether the condition is true or false.  Instead | |
2847 | of checking COND against conditional flags, we can use | |
2848 | the following instructions, and GDB can tell how to fix up PC | |
2849 | according to the PC value. | |
2850 | ||
2851 | B.COND TAKEN ; If cond is true, then jump to TAKEN. | |
2852 | INSN1 ; | |
2853 | TAKEN: | |
2854 | INSN2 | |
2855 | */ | |
2856 | ||
2857 | emit_bcond (dsd->insn_buf, cond, 8); | |
2858 | dsd->dsc->cond = 1; | |
2859 | dsd->dsc->pc_adjust = offset; | |
2860 | dsd->insn_count = 1; | |
2861 | } | |
2862 | ||
2863 | /* Dynamically allocate a new register. If we know the register | |
2864 | statically, we should make it a global as above instead of using this | |
2865 | helper function. */ | |
2866 | ||
2867 | static struct aarch64_register | |
2868 | aarch64_register (unsigned num, int is64) | |
2869 | { | |
2870 | return (struct aarch64_register) { num, is64 }; | |
2871 | } | |
2872 | ||
2873 | /* Implementation of aarch64_insn_visitor method "cb". */ | |
2874 | ||
2875 | static void | |
2876 | aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz, | |
2877 | const unsigned rn, int is64, | |
2878 | struct aarch64_insn_data *data) | |
2879 | { | |
2880 | struct aarch64_displaced_step_data *dsd | |
2881 | = (struct aarch64_displaced_step_data *) data; | |
b6542f81 YQ |
2882 | |
2883 | /* The offset is out of range for a compare and branch | |
2884 | instruction. We can use the following instructions instead: | |
2885 | ||
2886 | CBZ xn, TAKEN ; xn == 0, then jump to TAKEN. | |
2887 | INSN1 ; | |
2888 | TAKEN: | |
2889 | INSN2 | |
2890 | */ | |
2891 | emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8); | |
2892 | dsd->insn_count = 1; | |
2893 | dsd->dsc->cond = 1; | |
2894 | dsd->dsc->pc_adjust = offset; | |
2895 | } | |
2896 | ||
2897 | /* Implementation of aarch64_insn_visitor method "tb". */ | |
2898 | ||
2899 | static void | |
2900 | aarch64_displaced_step_tb (const int32_t offset, int is_tbnz, | |
2901 | const unsigned rt, unsigned bit, | |
2902 | struct aarch64_insn_data *data) | |
2903 | { | |
2904 | struct aarch64_displaced_step_data *dsd | |
2905 | = (struct aarch64_displaced_step_data *) data; | |
b6542f81 YQ |
2906 | |
2907 | /* The offset is out of range for a test bit and branch | |
2908 | instruction.  We can use the following instructions instead: | |
2909 | ||
2910 | TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN. | |
2911 | INSN1 ; | |
2912 | TAKEN: | |
2913 | INSN2 | |
2914 | ||
2915 | */ | |
2916 | emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8); | |
2917 | dsd->insn_count = 1; | |
2918 | dsd->dsc->cond = 1; | |
2919 | dsd->dsc->pc_adjust = offset; | |
2920 | } | |
2921 | ||
2922 | /* Implementation of aarch64_insn_visitor method "adr". */ | |
2923 | ||
2924 | static void | |
2925 | aarch64_displaced_step_adr (const int32_t offset, const unsigned rd, | |
2926 | const int is_adrp, struct aarch64_insn_data *data) | |
2927 | { | |
2928 | struct aarch64_displaced_step_data *dsd | |
2929 | = (struct aarch64_displaced_step_data *) data; | |
2930 | /* We know exactly the address the ADR{P,} instruction will compute. | |
2931 | We can just write it to the destination register. */ | |
2932 | CORE_ADDR address = data->insn_addr + offset; | |
2933 | ||
2934 | if (is_adrp) | |
2935 | { | |
2936 | /* Clear the lower 12 bits of the offset to get the 4K page. */ | |
2937 | regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd, | |
2938 | address & ~0xfff); | |
2939 | } | |
2940 | else | |
2941 | regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd, | |
2942 | address); | |
2943 | ||
2944 | dsd->dsc->pc_adjust = 4; | |
2945 | emit_nop (dsd->insn_buf); | |
2946 | dsd->insn_count = 1; | |
2947 | } | |
2948 | ||
2949 | /* Implementation of aarch64_insn_visitor method "ldr_literal". */ | |
2950 | ||
2951 | static void | |
2952 | aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw, | |
2953 | const unsigned rt, const int is64, | |
2954 | struct aarch64_insn_data *data) | |
2955 | { | |
2956 | struct aarch64_displaced_step_data *dsd | |
2957 | = (struct aarch64_displaced_step_data *) data; | |
2958 | CORE_ADDR address = data->insn_addr + offset; | |
2959 | struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 }; | |
2960 | ||
2961 | regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt, | |
2962 | address); | |
2963 | ||
2964 | if (is_sw) | |
2965 | dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1), | |
2966 | aarch64_register (rt, 1), zero); | |
2967 | else | |
2968 | dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64), | |
2969 | aarch64_register (rt, 1), zero); | |
2970 | ||
2971 | dsd->dsc->pc_adjust = 4; | |
2972 | } | |
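| /* The PC-relative load is rewritten in two steps: the literal's | |
| absolute address is computed here and written straight into Rt via | |
| the regcache, and the relocated instruction is a plain LDR (or | |
| LDRSW) from [Rt, #0], so the data is still fetched from the original | |
| literal pool rather than from the scratch pad. */ | |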
2973 | ||
2974 | /* Implementation of aarch64_insn_visitor method "others". */ | |
2975 | ||
2976 | static void | |
2977 | aarch64_displaced_step_others (const uint32_t insn, | |
2978 | struct aarch64_insn_data *data) | |
2979 | { | |
2980 | struct aarch64_displaced_step_data *dsd | |
2981 | = (struct aarch64_displaced_step_data *) data; | |
2982 | ||
e1c587c3 | 2983 | aarch64_emit_insn (dsd->insn_buf, insn); |
b6542f81 YQ |
2984 | dsd->insn_count = 1; |
2985 | ||
2986 | if ((insn & 0xfffffc1f) == 0xd65f0000) | |
2987 | { | |
2988 | /* RET */ | |
2989 | dsd->dsc->pc_adjust = 0; | |
2990 | } | |
2991 | else | |
2992 | dsd->dsc->pc_adjust = 4; | |
2993 | } | |
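| /* 0xd65f0000 with mask 0xfffffc1f matches RET {Xn}: only the Rn | |
| field (bits [9:5]) may vary.  A RET writes the PC itself, so no | |
| pc_adjust is needed; every other relocated instruction falls through | |
| and the PC is advanced by 4 in the fixup phase. */ | |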
2994 | ||
2995 | static const struct aarch64_insn_visitor visitor = | |
2996 | { | |
2997 | aarch64_displaced_step_b, | |
2998 | aarch64_displaced_step_b_cond, | |
2999 | aarch64_displaced_step_cb, | |
3000 | aarch64_displaced_step_tb, | |
3001 | aarch64_displaced_step_adr, | |
3002 | aarch64_displaced_step_ldr_literal, | |
3003 | aarch64_displaced_step_others, | |
3004 | }; | |
3005 | ||
3006 | /* Implement the "displaced_step_copy_insn" gdbarch method. */ | |
3007 | ||
3008 | struct displaced_step_closure * | |
3009 | aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch, | |
3010 | CORE_ADDR from, CORE_ADDR to, | |
3011 | struct regcache *regs) | |
3012 | { | |
b6542f81 YQ |
3013 | enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch); |
3014 | uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code); | |
3015 | struct aarch64_displaced_step_data dsd; | |
c86a40c6 YQ |
3016 | aarch64_inst inst; |
3017 | ||
561a72d4 | 3018 | if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0) |
c86a40c6 | 3019 | return NULL; |
b6542f81 YQ |
3020 | |
3021 | /* Look for a Load Exclusive instruction which begins the sequence. */ | |
c86a40c6 | 3022 | if (inst.opcode->iclass == ldstexcl && bit (insn, 22)) |
b6542f81 YQ |
3023 | { |
3024 | /* We can't displaced-step atomic sequences. */ | |
3025 | return NULL; | |
3026 | } | |
3027 | ||
cfba9872 SM |
3028 | std::unique_ptr<aarch64_displaced_step_closure> dsc |
3029 | (new aarch64_displaced_step_closure); | |
b6542f81 YQ |
3030 | dsd.base.insn_addr = from; |
3031 | dsd.new_addr = to; | |
3032 | dsd.regs = regs; | |
cfba9872 | 3033 | dsd.dsc = dsc.get (); |
034f1a81 | 3034 | dsd.insn_count = 0; |
b6542f81 YQ |
3035 | aarch64_relocate_instruction (insn, &visitor, |
3036 | (struct aarch64_insn_data *) &dsd); | |
3037 | gdb_assert (dsd.insn_count <= DISPLACED_MODIFIED_INSNS); | |
3038 | ||
3039 | if (dsd.insn_count != 0) | |
3040 | { | |
3041 | int i; | |
3042 | ||
3043 | /* Instruction can be relocated to scratch pad. Copy | |
3044 | relocated instruction(s) there. */ | |
3045 | for (i = 0; i < dsd.insn_count; i++) | |
3046 | { | |
3047 | if (debug_displaced) | |
3048 | { | |
3049 | debug_printf ("displaced: writing insn "); | |
3050 | debug_printf ("%.8x", dsd.insn_buf[i]); | |
3051 | debug_printf (" at %s\n", paddress (gdbarch, to + i * 4)); | |
3052 | } | |
3053 | write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code, | |
3054 | (ULONGEST) dsd.insn_buf[i]); | |
3055 | } | |
3056 | } | |
3057 | else | |
3058 | { | |
b6542f81 YQ |
3059 | dsc = NULL; |
3060 | } | |
3061 | ||
cfba9872 | 3062 | return dsc.release (); |
b6542f81 YQ |
3063 | } |
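| /* A NULL return above (undecodable instruction, a load-exclusive | |
| sequence, or a visitor that emitted nothing) tells the core that | |
| this instruction cannot be displaced-stepped, and it is expected to | |
| be single-stepped in its original location instead. */ | |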
3064 | ||
3065 | /* Implement the "displaced_step_fixup" gdbarch method. */ | |
3066 | ||
3067 | void | |
3068 | aarch64_displaced_step_fixup (struct gdbarch *gdbarch, | |
cfba9872 | 3069 | struct displaced_step_closure *dsc_, |
b6542f81 YQ |
3070 | CORE_ADDR from, CORE_ADDR to, |
3071 | struct regcache *regs) | |
3072 | { | |
cfba9872 SM |
3073 | aarch64_displaced_step_closure *dsc = (aarch64_displaced_step_closure *) dsc_; |
3074 | ||
b6542f81 YQ |
3075 | if (dsc->cond) |
3076 | { | |
3077 | ULONGEST pc; | |
3078 | ||
3079 | regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc); | |
3080 | if (pc - to == 8) | |
3081 | { | |
3082 | /* Condition is true. */ | |
3083 | } | |
3084 | else if (pc - to == 4) | |
3085 | { | |
3086 | /* Condition is false. */ | |
3087 | dsc->pc_adjust = 4; | |
3088 | } | |
3089 | else | |
3090 | gdb_assert_not_reached ("Unexpected PC value after displaced stepping"); | |
3091 | } | |
3092 | ||
3093 | if (dsc->pc_adjust != 0) | |
3094 | { | |
3095 | if (debug_displaced) | |
3096 | { | |
3097 | debug_printf ("displaced: fixup: set PC to %s:%d\n", | |
3098 | paddress (gdbarch, from), dsc->pc_adjust); | |
3099 | } | |
3100 | regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM, | |
3101 | from + dsc->pc_adjust); | |
3102 | } | |
3103 | } | |
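| /* As a worked example with made-up numbers: for "cbz x0, .+0x40" | |
| copied from FROM to TO, the visitor left pc_adjust == 0x40.  If the | |
| step ends with PC == TO + 8 the condition held and the PC is set to | |
| FROM + 0x40; if it ends with PC == TO + 4 the condition failed, | |
| pc_adjust is forced to 4 and execution resumes at FROM + 4. */ | |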
3104 | ||
3105 | /* Implement the "displaced_step_hw_singlestep" gdbarch method. */ | |
3106 | ||
3107 | int | |
3108 | aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch, | |
3109 | struct displaced_step_closure *closure) | |
3110 | { | |
3111 | return 1; | |
3112 | } | |
3113 | ||
95228a0d AH |
3114 | /* Get the correct target description for the given VQ value. |
3115 | If VQ is zero then it is assumed SVE is not supported. | |
3116 | (It is not possible to set VQ to zero on an SVE system).  PAUTH_P selects whether the pointer authentication feature is included. */ | |
da434ccb AH |
3117 | |
3118 | const target_desc * | |
6dc0ebde | 3119 | aarch64_read_description (uint64_t vq, bool pauth_p) |
da434ccb | 3120 | { |
95228a0d | 3121 | if (vq > AARCH64_MAX_SVE_VQ) |
39bfb937 | 3122 | error (_("VQ is %" PRIu64 ", maximum supported value is %d"), vq, |
95228a0d AH |
3123 | AARCH64_MAX_SVE_VQ); |
3124 | ||
6dc0ebde | 3125 | struct target_desc *tdesc = tdesc_aarch64_list[vq][pauth_p]; |
da434ccb | 3126 | |
95228a0d AH |
3127 | if (tdesc == NULL) |
3128 | { | |
6dc0ebde AH |
3129 | tdesc = aarch64_create_target_description (vq, pauth_p); |
3130 | tdesc_aarch64_list[vq][pauth_p] = tdesc; | |
95228a0d | 3131 | } |
da434ccb | 3132 | |
95228a0d | 3133 | return tdesc; |
da434ccb AH |
3134 | } |
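| /* For instance, aarch64_gdbarch_init below calls | |
| aarch64_read_description (0, false) as a fallback when no register | |
| description is supplied, while a call such as (2, true) would ask | |
| for 256-bit SVE vectors plus pointer authentication (illustrative | |
| values only); the table above caches one description per | |
| (vq, pauth) pair. */ | |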
3135 | ||
ba2d2bb2 AH |
3136 | /* Return the VQ used when creating the target description TDESC. */ |
3137 | ||
1332a140 | 3138 | static uint64_t |
ba2d2bb2 AH |
3139 | aarch64_get_tdesc_vq (const struct target_desc *tdesc) |
3140 | { | |
3141 | const struct tdesc_feature *feature_sve; | |
3142 | ||
3143 | if (!tdesc_has_registers (tdesc)) | |
3144 | return 0; | |
3145 | ||
3146 | feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve"); | |
3147 | ||
3148 | if (feature_sve == nullptr) | |
3149 | return 0; | |
3150 | ||
12863263 AH |
3151 | uint64_t vl = tdesc_register_bitsize (feature_sve, |
3152 | aarch64_sve_register_names[0]) / 8; | |
ba2d2bb2 AH |
3153 | return sve_vq_from_vl (vl); |
3154 | } | |
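| /* The bitsize of the first SVE register (Z0) in the description | |
| gives the vector length VL in bytes, and VQ counts VL in 128-bit | |
| quadwords, so e.g. a 512-bit Z register gives VL = 64 and VQ = 4 | |
| (sve_vq_from_vl is effectively VL / 16). */ | |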
3155 | ||
0ef8a082 AH |
3156 | /* Add all the expected register groups into GDBARCH. */ | |
3157 | ||
3158 | static void | |
3159 | aarch64_add_reggroups (struct gdbarch *gdbarch) | |
3160 | { | |
3161 | reggroup_add (gdbarch, general_reggroup); | |
3162 | reggroup_add (gdbarch, float_reggroup); | |
3163 | reggroup_add (gdbarch, system_reggroup); | |
3164 | reggroup_add (gdbarch, vector_reggroup); | |
3165 | reggroup_add (gdbarch, all_reggroup); | |
3166 | reggroup_add (gdbarch, save_reggroup); | |
3167 | reggroup_add (gdbarch, restore_reggroup); | |
3168 | } | |
ba2d2bb2 | 3169 | |
76bed0fd AH |
3170 | /* Implement the "cannot_store_register" gdbarch method. */ |
3171 | ||
3172 | static int | |
3173 | aarch64_cannot_store_register (struct gdbarch *gdbarch, int regnum) | |
3174 | { | |
3175 | struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch); | |
3176 | ||
3177 | if (!tdep->has_pauth ()) | |
3178 | return 0; | |
3179 | ||
3180 | /* Pointer authentication registers are read-only. */ | |
3181 | return (regnum == AARCH64_PAUTH_DMASK_REGNUM (tdep->pauth_reg_base) | |
3182 | || regnum == AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base)); | |
3183 | } | |
3184 | ||
07b287a0 MS |
3185 | /* Initialize the current architecture based on INFO. If possible, |
3186 | re-use an architecture from ARCHES, which is a list of | |
3187 | architectures already created during this debugging session. | |
3188 | ||
3189 | Called e.g. at program startup, when reading a core file, and when | |
3190 | reading a binary file. */ | |
3191 | ||
3192 | static struct gdbarch * | |
3193 | aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches) | |
3194 | { | |
3195 | struct gdbarch_tdep *tdep; | |
3196 | struct gdbarch *gdbarch; | |
3197 | struct gdbarch_list *best_arch; | |
3198 | struct tdesc_arch_data *tdesc_data = NULL; | |
3199 | const struct target_desc *tdesc = info.target_desc; | |
3200 | int i; | |
07b287a0 | 3201 | int valid_p = 1; |
ba2d2bb2 AH |
3202 | const struct tdesc_feature *feature_core; |
3203 | const struct tdesc_feature *feature_fpu; | |
3204 | const struct tdesc_feature *feature_sve; | |
76bed0fd | 3205 | const struct tdesc_feature *feature_pauth; |
07b287a0 MS |
3206 | int num_regs = 0; |
3207 | int num_pseudo_regs = 0; | |
76bed0fd | 3208 | int first_pauth_regnum = -1; |
34dcc7cf | 3209 | int pauth_ra_state_offset = -1; |
07b287a0 | 3210 | |
ba2d2bb2 | 3211 | /* Ensure we always have a target description. */ |
07b287a0 | 3212 | if (!tdesc_has_registers (tdesc)) |
6dc0ebde | 3213 | tdesc = aarch64_read_description (0, false); |
07b287a0 MS |
3214 | gdb_assert (tdesc); |
3215 | ||
ba2d2bb2 AH |
3216 | feature_core = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core"); |
3217 | feature_fpu = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu"); | |
3218 | feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve"); | |
76bed0fd | 3219 | feature_pauth = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.pauth"); |
07b287a0 | 3220 | |
ba2d2bb2 | 3221 | if (feature_core == NULL) |
07b287a0 MS |
3222 | return NULL; |
3223 | ||
3224 | tdesc_data = tdesc_data_alloc (); | |
3225 | ||
ba2d2bb2 | 3226 | /* Validate the description provides the mandatory core R registers |
07b287a0 MS |
3227 | and allocate their numbers. */ |
3228 | for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++) | |
ba2d2bb2 AH |
3229 | valid_p &= tdesc_numbered_register (feature_core, tdesc_data, |
3230 | AARCH64_X0_REGNUM + i, | |
3231 | aarch64_r_register_names[i]); | |
07b287a0 MS |
3232 | |
3233 | num_regs = AARCH64_X0_REGNUM + i; | |
3234 | ||
ba2d2bb2 AH |
3235 | /* Add the V registers. */ |
3236 | if (feature_fpu != NULL) | |
07b287a0 | 3237 | { |
ba2d2bb2 AH |
3238 | if (feature_sve != NULL) |
3239 | error (_("Program contains both fpu and SVE features.")); | |
3240 | ||
3241 | /* Validate the description provides the mandatory V registers | |
3242 | and allocate their numbers. */ | |
07b287a0 | 3243 | for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++) |
ba2d2bb2 AH |
3244 | valid_p &= tdesc_numbered_register (feature_fpu, tdesc_data, |
3245 | AARCH64_V0_REGNUM + i, | |
3246 | aarch64_v_register_names[i]); | |
07b287a0 MS |
3247 | |
3248 | num_regs = AARCH64_V0_REGNUM + i; | |
ba2d2bb2 | 3249 | } |
07b287a0 | 3250 | |
ba2d2bb2 AH |
3251 | /* Add the SVE registers. */ |
3252 | if (feature_sve != NULL) | |
3253 | { | |
3254 | /* Validate the description provides the mandatory SVE registers | |
3255 | and allocate their numbers. */ | |
3256 | for (i = 0; i < ARRAY_SIZE (aarch64_sve_register_names); i++) | |
3257 | valid_p &= tdesc_numbered_register (feature_sve, tdesc_data, | |
3258 | AARCH64_SVE_Z0_REGNUM + i, | |
3259 | aarch64_sve_register_names[i]); | |
3260 | ||
3261 | num_regs = AARCH64_SVE_Z0_REGNUM + i; | |
3262 | num_pseudo_regs += 32; /* add the Vn register pseudos. */ | |
3263 | } | |
3264 | ||
3265 | if (feature_fpu != NULL || feature_sve != NULL) | |
3266 | { | |
07b287a0 MS |
3267 | num_pseudo_regs += 32; /* add the Qn scalar register pseudos */ |
3268 | num_pseudo_regs += 32; /* add the Dn scalar register pseudos */ | |
3269 | num_pseudo_regs += 32; /* add the Sn scalar register pseudos */ | |
3270 | num_pseudo_regs += 32; /* add the Hn scalar register pseudos */ | |
3271 | num_pseudo_regs += 32; /* add the Bn scalar register pseudos */ | |
3272 | } | |
3273 | ||
76bed0fd AH |
3274 | /* Add the pauth registers. */ |
3275 | if (feature_pauth != NULL) | |
3276 | { | |
3277 | first_pauth_regnum = num_regs; | |
34dcc7cf | 3278 | pauth_ra_state_offset = num_pseudo_regs; |
76bed0fd AH |
3279 | /* Validate the description provides the mandatory PAUTH registers and | |
3280 | allocate their numbers. */ | |
3281 | for (i = 0; i < ARRAY_SIZE (aarch64_pauth_register_names); i++) | |
3282 | valid_p &= tdesc_numbered_register (feature_pauth, tdesc_data, | |
3283 | first_pauth_regnum + i, | |
3284 | aarch64_pauth_register_names[i]); | |
3285 | ||
3286 | num_regs += i; | |
34dcc7cf | 3287 | num_pseudo_regs += 1; /* Count RA_STATE pseudo register. */ |
76bed0fd AH |
3288 | } |
3289 | ||
07b287a0 MS |
3290 | if (!valid_p) |
3291 | { | |
3292 | tdesc_data_cleanup (tdesc_data); | |
3293 | return NULL; | |
3294 | } | |
3295 | ||
3296 | /* AArch64 code is always little-endian. */ | |
3297 | info.byte_order_for_code = BFD_ENDIAN_LITTLE; | |
3298 | ||
3299 | /* If there is already a candidate, use it. */ | |
3300 | for (best_arch = gdbarch_list_lookup_by_info (arches, &info); | |
3301 | best_arch != NULL; | |
3302 | best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info)) | |
3303 | { | |
3304 | /* Found a match. */ | |
3305 | break; | |
3306 | } | |
3307 | ||
3308 | if (best_arch != NULL) | |
3309 | { | |
3310 | if (tdesc_data != NULL) | |
3311 | tdesc_data_cleanup (tdesc_data); | |
3312 | return best_arch->gdbarch; | |
3313 | } | |
3314 | ||
8d749320 | 3315 | tdep = XCNEW (struct gdbarch_tdep); |
07b287a0 MS |
3316 | gdbarch = gdbarch_alloc (&info, tdep); |
3317 | ||
3318 | /* This should be low enough for everything. */ | |
3319 | tdep->lowest_pc = 0x20; | |
3320 | tdep->jb_pc = -1; /* Longjump support not enabled by default. */ | |
3321 | tdep->jb_elt_size = 8; | |
ba2d2bb2 | 3322 | tdep->vq = aarch64_get_tdesc_vq (tdesc); |
76bed0fd | 3323 | tdep->pauth_reg_base = first_pauth_regnum; |
34dcc7cf AH |
3324 | tdep->pauth_ra_state_regnum = (feature_pauth == NULL) ? -1 |
3325 | : pauth_ra_state_offset + num_regs; | |
3326 | ||
07b287a0 MS |
3327 | |
3328 | set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call); | |
3329 | set_gdbarch_frame_align (gdbarch, aarch64_frame_align); | |
3330 | ||
07b287a0 MS |
3331 | /* Advance PC across function entry code. */ |
3332 | set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue); | |
3333 | ||
3334 | /* The stack grows downward. */ | |
3335 | set_gdbarch_inner_than (gdbarch, core_addr_lessthan); | |
3336 | ||
3337 | /* Breakpoint manipulation. */ | |
04180708 YQ |
3338 | set_gdbarch_breakpoint_kind_from_pc (gdbarch, |
3339 | aarch64_breakpoint::kind_from_pc); | |
3340 | set_gdbarch_sw_breakpoint_from_kind (gdbarch, | |
3341 | aarch64_breakpoint::bp_from_kind); | |
07b287a0 | 3342 | set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1); |
9404b58f | 3343 | set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step); |
07b287a0 MS |
3344 | |
3345 | /* Information about registers, etc. */ | |
3346 | set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM); | |
3347 | set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM); | |
3348 | set_gdbarch_num_regs (gdbarch, num_regs); | |
3349 | ||
3350 | set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs); | |
3351 | set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value); | |
3352 | set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write); | |
3353 | set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name); | |
3354 | set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type); | |
3355 | set_tdesc_pseudo_register_reggroup_p (gdbarch, | |
3356 | aarch64_pseudo_register_reggroup_p); | |
76bed0fd | 3357 | set_gdbarch_cannot_store_register (gdbarch, aarch64_cannot_store_register); |
07b287a0 MS |
3358 | |
3359 | /* ABI */ | |
3360 | set_gdbarch_short_bit (gdbarch, 16); | |
3361 | set_gdbarch_int_bit (gdbarch, 32); | |
3362 | set_gdbarch_float_bit (gdbarch, 32); | |
3363 | set_gdbarch_double_bit (gdbarch, 64); | |
3364 | set_gdbarch_long_double_bit (gdbarch, 128); | |
3365 | set_gdbarch_long_bit (gdbarch, 64); | |
3366 | set_gdbarch_long_long_bit (gdbarch, 64); | |
3367 | set_gdbarch_ptr_bit (gdbarch, 64); | |
3368 | set_gdbarch_char_signed (gdbarch, 0); | |
53375380 | 3369 | set_gdbarch_wchar_signed (gdbarch, 0); |
07b287a0 MS |
3370 | set_gdbarch_float_format (gdbarch, floatformats_ieee_single); |
3371 | set_gdbarch_double_format (gdbarch, floatformats_ieee_double); | |
3372 | set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad); | |
3373 | ||
3374 | /* Internal <-> external register number maps. */ | |
3375 | set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum); | |
3376 | ||
3377 | /* Returning results. */ | |
3378 | set_gdbarch_return_value (gdbarch, aarch64_return_value); | |
3379 | ||
3380 | /* Disassembly. */ | |
3381 | set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn); | |
3382 | ||
3383 | /* Virtual tables. */ | |
3384 | set_gdbarch_vbit_in_delta (gdbarch, 1); | |
3385 | ||
0ef8a082 AH |
3386 | /* Register architecture. */ |
3387 | aarch64_add_reggroups (gdbarch); | |
3388 | ||
07b287a0 MS |
3389 | /* Hook in the ABI-specific overrides, if they have been registered. */ |
3390 | info.target_desc = tdesc; | |
0dba2a6c | 3391 | info.tdesc_data = tdesc_data; |
07b287a0 MS |
3392 | gdbarch_init_osabi (info, gdbarch); |
3393 | ||
3394 | dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg); | |
11e1b75f AH |
3395 | /* Register DWARF CFA vendor handler. */ |
3396 | set_gdbarch_execute_dwarf_cfa_vendor_op (gdbarch, | |
3397 | aarch64_execute_dwarf_cfa_vendor_op); | |
07b287a0 MS |
3398 | |
3399 | /* Add some default predicates. */ | |
3400 | frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind); | |
3401 | dwarf2_append_unwinders (gdbarch); | |
3402 | frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind); | |
3403 | ||
3404 | frame_base_set_default (gdbarch, &aarch64_normal_base); | |
3405 | ||
3406 | /* Now we have tuned the configuration, set a few final things, | |
3407 | based on what the OS ABI has told us. */ | |
3408 | ||
3409 | if (tdep->jb_pc >= 0) | |
3410 | set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target); | |
3411 | ||
ea873d8e PL |
3412 | set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address); |
3413 | ||
07b287a0 MS |
3414 | tdesc_use_registers (gdbarch, tdesc, tdesc_data); |
3415 | ||
3416 | /* Add standard register aliases. */ | |
3417 | for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++) | |
3418 | user_reg_add (gdbarch, aarch64_register_aliases[i].name, | |
3419 | value_of_aarch64_user_reg, | |
3420 | &aarch64_register_aliases[i].regnum); | |
3421 | ||
e8bf1ce4 JB |
3422 | register_aarch64_ravenscar_ops (gdbarch); |
3423 | ||
07b287a0 MS |
3424 | return gdbarch; |
3425 | } | |
3426 | ||
3427 | static void | |
3428 | aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file) | |
3429 | { | |
3430 | struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch); | |
3431 | ||
3432 | if (tdep == NULL) | |
3433 | return; | |
3434 | ||
3435 | fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"), | |
3436 | paddress (gdbarch, tdep->lowest_pc)); | |
3437 | } | |
3438 | ||
0d4c07af | 3439 | #if GDB_SELF_TEST |
1e2b521d YQ |
3440 | namespace selftests |
3441 | { | |
3442 | static void aarch64_process_record_test (void); | |
3443 | } | |
0d4c07af | 3444 | #endif |
1e2b521d | 3445 | |
07b287a0 MS |
3446 | void |
3447 | _initialize_aarch64_tdep (void) | |
3448 | { | |
3449 | gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init, | |
3450 | aarch64_dump_tdep); | |
3451 | ||
07b287a0 MS |
3452 | /* Debug this file's internals. */ |
3453 | add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\ | |
3454 | Set AArch64 debugging."), _("\ | |
3455 | Show AArch64 debugging."), _("\ | |
3456 | When on, AArch64 specific debugging is enabled."), | |
3457 | NULL, | |
3458 | show_aarch64_debug, | |
3459 | &setdebuglist, &showdebuglist); | |
4d9a9006 YQ |
3460 | |
3461 | #if GDB_SELF_TEST | |
1526853e SM |
3462 | selftests::register_test ("aarch64-analyze-prologue", |
3463 | selftests::aarch64_analyze_prologue_test); | |
3464 | selftests::register_test ("aarch64-process-record", | |
3465 | selftests::aarch64_process_record_test); | |
6654d750 | 3466 | selftests::record_xml_tdesc ("aarch64.xml", |
6dc0ebde | 3467 | aarch64_create_target_description (0, false)); |
4d9a9006 | 3468 | #endif |
07b287a0 | 3469 | } |
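| /* The maintenance switch registered above is reached as | |
| "set debug aarch64 on" / "show debug aarch64" from the CLI; when | |
| enabled, aarch64_debug guards the AArch64-specific debug output in | |
| this file. */ | |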
99afc88b OJ |
3470 | |
3471 | /* AArch64 process record-replay related structures, defines etc. */ | |
3472 | ||
99afc88b OJ |
3473 | #define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \ |
3474 | do \ | |
3475 | { \ | |
3476 | unsigned int reg_len = LENGTH; \ | |
3477 | if (reg_len) \ | |
3478 | { \ | |
3479 | REGS = XNEWVEC (uint32_t, reg_len); \ | |
3480 | memcpy(®S[0], &RECORD_BUF[0], sizeof(uint32_t)*LENGTH); \ | |
3481 | } \ | |
3482 | } \ | |
3483 | while (0) | |
3484 | ||
3485 | #define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \ | |
3486 | do \ | |
3487 | { \ | |
3488 | unsigned int mem_len = LENGTH; \ | |
3489 | if (mem_len) \ | |
3490 | { \ | |
3491 | MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \ | |
3492 | memcpy(&MEMS->len, &RECORD_BUF[0], \ | |
3493 | sizeof(struct aarch64_mem_r) * LENGTH); \ | |
3494 | } \ | |
3495 | } \ | |
3496 | while (0) | |
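| /* Both macros copy the caller's local record_buf / record_buf_mem | |
| arrays into heap buffers (XNEWVEC) hung off the insn_decode_record; | |
| record_buf_mem is laid out as {length, address} pairs matching | |
| struct aarch64_mem_r.  The matching frees happen in | |
| deallocate_reg_mem at the end of aarch64_process_record. */ | |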
3497 | ||
3498 | /* AArch64 record/replay structures and enumerations. */ | |
3499 | ||
3500 | struct aarch64_mem_r | |
3501 | { | |
3502 | uint64_t len; /* Record length. */ | |
3503 | uint64_t addr; /* Memory address. */ | |
3504 | }; | |
3505 | ||
3506 | enum aarch64_record_result | |
3507 | { | |
3508 | AARCH64_RECORD_SUCCESS, | |
99afc88b OJ |
3509 | AARCH64_RECORD_UNSUPPORTED, |
3510 | AARCH64_RECORD_UNKNOWN | |
3511 | }; | |
3512 | ||
3513 | typedef struct insn_decode_record_t | |
3514 | { | |
3515 | struct gdbarch *gdbarch; | |
3516 | struct regcache *regcache; | |
3517 | CORE_ADDR this_addr; /* Address of insn to be recorded. */ | |
3518 | uint32_t aarch64_insn; /* Insn to be recorded. */ | |
3519 | uint32_t mem_rec_count; /* Count of memory records. */ | |
3520 | uint32_t reg_rec_count; /* Count of register records. */ | |
3521 | uint32_t *aarch64_regs; /* Registers to be recorded. */ | |
3522 | struct aarch64_mem_r *aarch64_mems; /* Memory locations to be recorded. */ | |
3523 | } insn_decode_record; | |
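| /* An insn_decode_record is filled in by aarch64_process_record, | |
| dispatched through aarch64_record_decode_insn_handler to one of the | |
| per-class handlers below, and finally drained into the record-full | |
| lists before deallocate_reg_mem releases its buffers. */ | |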
3524 | ||
3525 | /* Record handler for data processing - register instructions. */ | |
3526 | ||
3527 | static unsigned int | |
3528 | aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r) | |
3529 | { | |
3530 | uint8_t reg_rd, insn_bits24_27, insn_bits21_23; | |
3531 | uint32_t record_buf[4]; | |
3532 | ||
3533 | reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4); | |
3534 | insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27); | |
3535 | insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23); | |
3536 | ||
3537 | if (!bit (aarch64_insn_r->aarch64_insn, 28)) | |
3538 | { | |
3539 | uint8_t setflags; | |
3540 | ||
3541 | /* Logical (shifted register). */ | |
3542 | if (insn_bits24_27 == 0x0a) | |
3543 | setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03); | |
3544 | /* Add/subtract. */ | |
3545 | else if (insn_bits24_27 == 0x0b) | |
3546 | setflags = bit (aarch64_insn_r->aarch64_insn, 29); | |
3547 | else | |
3548 | return AARCH64_RECORD_UNKNOWN; | |
3549 | ||
3550 | record_buf[0] = reg_rd; | |
3551 | aarch64_insn_r->reg_rec_count = 1; | |
3552 | if (setflags) | |
3553 | record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM; | |
3554 | } | |
3555 | else | |
3556 | { | |
3557 | if (insn_bits24_27 == 0x0b) | |
3558 | { | |
3559 | /* Data-processing (3 source). */ | |
3560 | record_buf[0] = reg_rd; | |
3561 | aarch64_insn_r->reg_rec_count = 1; | |
3562 | } | |
3563 | else if (insn_bits24_27 == 0x0a) | |
3564 | { | |
3565 | if (insn_bits21_23 == 0x00) | |
3566 | { | |
3567 | /* Add/subtract (with carry). */ | |
3568 | record_buf[0] = reg_rd; | |
3569 | aarch64_insn_r->reg_rec_count = 1; | |
3570 | if (bit (aarch64_insn_r->aarch64_insn, 29)) | |
3571 | { | |
3572 | record_buf[1] = AARCH64_CPSR_REGNUM; | |
3573 | aarch64_insn_r->reg_rec_count = 2; | |
3574 | } | |
3575 | } | |
3576 | else if (insn_bits21_23 == 0x02) | |
3577 | { | |
3578 | /* Conditional compare (register) and conditional compare | |
3579 | (immediate) instructions. */ | |
3580 | record_buf[0] = AARCH64_CPSR_REGNUM; | |
3581 | aarch64_insn_r->reg_rec_count = 1; | |
3582 | } | |
3583 | else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06) | |
3584 | { | |
3585 | /* Conditional select. */ | |
3586 | /* Data-processing (2 source). */ | |
3587 | /* Data-processing (1 source). */ | |
3588 | record_buf[0] = reg_rd; | |
3589 | aarch64_insn_r->reg_rec_count = 1; | |
3590 | } | |
3591 | else | |
3592 | return AARCH64_RECORD_UNKNOWN; | |
3593 | } | |
3594 | } | |
3595 | ||
3596 | REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count, | |
3597 | record_buf); | |
3598 | return AARCH64_RECORD_SUCCESS; | |
3599 | } | |
3600 | ||
3601 | /* Record handler for data processing - immediate instructions. */ | |
3602 | ||
3603 | static unsigned int | |
3604 | aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r) | |
3605 | { | |
78cc6c2d | 3606 | uint8_t reg_rd, insn_bit23, insn_bits24_27, setflags; |
99afc88b OJ |
3607 | uint32_t record_buf[4]; |
3608 | ||
3609 | reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4); | |
99afc88b OJ |
3610 | insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23); |
3611 | insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27); | |
3612 | ||
3613 | if (insn_bits24_27 == 0x00 /* PC rel addressing. */ | |
3614 | || insn_bits24_27 == 0x03 /* Bitfield and Extract. */ | |
3615 | || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */ | |
3616 | { | |
3617 | record_buf[0] = reg_rd; | |
3618 | aarch64_insn_r->reg_rec_count = 1; | |
3619 | } | |
3620 | else if (insn_bits24_27 == 0x01) | |
3621 | { | |
3622 | /* Add/Subtract (immediate). */ | |
3623 | setflags = bit (aarch64_insn_r->aarch64_insn, 29); | |
3624 | record_buf[0] = reg_rd; | |
3625 | aarch64_insn_r->reg_rec_count = 1; | |
3626 | if (setflags) | |
3627 | record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM; | |
3628 | } | |
3629 | else if (insn_bits24_27 == 0x02 && !insn_bit23) | |
3630 | { | |
3631 | /* Logical (immediate). */ | |
3632 | setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03; | |
3633 | record_buf[0] = reg_rd; | |
3634 | aarch64_insn_r->reg_rec_count = 1; | |
3635 | if (setflags) | |
3636 | record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM; | |
3637 | } | |
3638 | else | |
3639 | return AARCH64_RECORD_UNKNOWN; | |
3640 | ||
3641 | REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count, | |
3642 | record_buf); | |
3643 | return AARCH64_RECORD_SUCCESS; | |
3644 | } | |
3645 | ||
3646 | /* Record handler for branch, exception generation and system instructions. */ | |
3647 | ||
3648 | static unsigned int | |
3649 | aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r) | |
3650 | { | |
3651 | struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch); | |
3652 | uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23; | |
3653 | uint32_t record_buf[4]; | |
3654 | ||
3655 | insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27); | |
3656 | insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31); | |
3657 | insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23); | |
3658 | ||
3659 | if (insn_bits28_31 == 0x0d) | |
3660 | { | |
3661 | /* Exception generation instructions. */ | |
3662 | if (insn_bits24_27 == 0x04) | |
3663 | { | |
5d98d3cd YQ |
3664 | if (!bits (aarch64_insn_r->aarch64_insn, 2, 4) |
3665 | && !bits (aarch64_insn_r->aarch64_insn, 21, 23) | |
3666 | && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01) | |
99afc88b OJ |
3667 | { |
3668 | ULONGEST svc_number; | |
3669 | ||
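| /* For SVC the syscall number is taken from X8 (register 8), per the | |
| usual AArch64 Linux convention; the OS-ABI supplied | |
| aarch64_syscall_record hook then records the side effects. */ | |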
3670 | regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8, | |
3671 | &svc_number); | |
3672 | return tdep->aarch64_syscall_record (aarch64_insn_r->regcache, | |
3673 | svc_number); | |
3674 | } | |
3675 | else | |
3676 | return AARCH64_RECORD_UNSUPPORTED; | |
3677 | } | |
3678 | /* System instructions. */ | |
3679 | else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00) | |
3680 | { | |
3681 | uint32_t reg_rt, reg_crn; | |
3682 | ||
3683 | reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4); | |
3684 | reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15); | |
3685 | ||
3686 | /* Record rt in case of sysl and mrs instructions. */ | |
3687 | if (bit (aarch64_insn_r->aarch64_insn, 21)) | |
3688 | { | |
3689 | record_buf[0] = reg_rt; | |
3690 | aarch64_insn_r->reg_rec_count = 1; | |
3691 | } | |
3692 | /* Record cpsr for hint and msr(immediate) instructions. */ | |
3693 | else if (reg_crn == 0x02 || reg_crn == 0x04) | |
3694 | { | |
3695 | record_buf[0] = AARCH64_CPSR_REGNUM; | |
3696 | aarch64_insn_r->reg_rec_count = 1; | |
3697 | } | |
3698 | } | |
3699 | /* Unconditional branch (register). */ | |
3700 | else if ((insn_bits24_27 & 0x0e) == 0x06) | |
3701 | { | |
3702 | record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM; | |
3703 | if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01) | |
3704 | record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM; | |
3705 | } | |
3706 | else | |
3707 | return AARCH64_RECORD_UNKNOWN; | |
3708 | } | |
3709 | /* Unconditional branch (immediate). */ | |
3710 | else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04) | |
3711 | { | |
3712 | record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM; | |
3713 | if (bit (aarch64_insn_r->aarch64_insn, 31)) | |
3714 | record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM; | |
3715 | } | |
3716 | else | |
3717 | /* Compare & branch (immediate), Test & branch (immediate) and | |
3718 | Conditional branch (immediate). */ | |
3719 | record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM; | |
3720 | ||
3721 | REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count, | |
3722 | record_buf); | |
3723 | return AARCH64_RECORD_SUCCESS; | |
3724 | } | |
3725 | ||
3726 | /* Record handler for advanced SIMD load and store instructions. */ | |
3727 | ||
3728 | static unsigned int | |
3729 | aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r) | |
3730 | { | |
3731 | CORE_ADDR address; | |
3732 | uint64_t addr_offset = 0; | |
3733 | uint32_t record_buf[24]; | |
3734 | uint64_t record_buf_mem[24]; | |
3735 | uint32_t reg_rn, reg_rt; | |
3736 | uint32_t reg_index = 0, mem_index = 0; | |
3737 | uint8_t opcode_bits, size_bits; | |
3738 | ||
3739 | reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4); | |
3740 | reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9); | |
3741 | size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11); | |
3742 | opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15); | |
3743 | regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address); | |
3744 | ||
3745 | if (record_debug) | |
b277c936 | 3746 | debug_printf ("Process record: Advanced SIMD load/store\n"); |
99afc88b OJ |
3747 | |
3748 | /* Load/store single structure. */ | |
3749 | if (bit (aarch64_insn_r->aarch64_insn, 24)) | |
3750 | { | |
3751 | uint8_t sindex, scale, selem, esize, replicate = 0; | |
3752 | scale = opcode_bits >> 2; | |
3753 | selem = ((opcode_bits & 0x02) | | |
3754 | bit (aarch64_insn_r->aarch64_insn, 21)) + 1; | |
3755 | switch (scale) | |
3756 | { | |
3757 | case 1: | |
3758 | if (size_bits & 0x01) | |
3759 | return AARCH64_RECORD_UNKNOWN; | |
3760 | break; | |
3761 | case 2: | |
3762 | if ((size_bits >> 1) & 0x01) | |
3763 | return AARCH64_RECORD_UNKNOWN; | |
3764 | if (size_bits & 0x01) | |
3765 | { | |
3766 | if (!((opcode_bits >> 1) & 0x01)) | |
3767 | scale = 3; | |
3768 | else | |
3769 | return AARCH64_RECORD_UNKNOWN; | |
3770 | } | |
3771 | break; | |
3772 | case 3: | |
3773 | if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01)) | |
3774 | { | |
3775 | scale = size_bits; | |
3776 | replicate = 1; | |
3777 | break; | |
3778 | } | |
3779 | else | |
3780 | return AARCH64_RECORD_UNKNOWN; | |
3781 | default: | |
3782 | break; | |
3783 | } | |
3784 | esize = 8 << scale; | |
3785 | if (replicate) | |
3786 | for (sindex = 0; sindex < selem; sindex++) | |
3787 | { | |
3788 | record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM; | |
3789 | reg_rt = (reg_rt + 1) % 32; | |
3790 | } | |
3791 | else | |
3792 | { | |
3793 | for (sindex = 0; sindex < selem; sindex++) | |
a2e3e93f SM |
3794 | { |
3795 | if (bit (aarch64_insn_r->aarch64_insn, 22)) | |
3796 | record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM; | |
3797 | else | |
3798 | { | |
3799 | record_buf_mem[mem_index++] = esize / 8; | |
3800 | record_buf_mem[mem_index++] = address + addr_offset; | |
3801 | } | |
3802 | addr_offset = addr_offset + (esize / 8); | |
3803 | reg_rt = (reg_rt + 1) % 32; | |
3804 | } | |
99afc88b OJ |
3805 | } |
3806 | } | |
3807 | /* Load/store multiple structure. */ | |
3808 | else | |
3809 | { | |
3810 | uint8_t selem, esize, rpt, elements; | |
3811 | uint8_t eindex, rindex; | |
3812 | ||
3813 | esize = 8 << size_bits; | |
3814 | if (bit (aarch64_insn_r->aarch64_insn, 30)) | |
3815 | elements = 128 / esize; | |
3816 | else | |
3817 | elements = 64 / esize; | |
3818 | ||
3819 | switch (opcode_bits) | |
3820 | { | |
3821 | /* LD/ST4 (4 Registers). */ | |
3822 | case 0: | |
3823 | rpt = 1; | |
3824 | selem = 4; | |
3825 | break; | |
3826 | /* LD/ST1 (4 Registers). */ | |
3827 | case 2: | |
3828 | rpt = 4; | |
3829 | selem = 1; | |
3830 | break; | |
3831 | /* LD/ST3 (3 Registers). */ | |
3832 | case 4: | |
3833 | rpt = 1; | |
3834 | selem = 3; | |
3835 | break; | |
3836 | /* LD/ST1 (3 Registers). */ | |
3837 | case 6: | |
3838 | rpt = 3; | |
3839 | selem = 1; | |
3840 | break; | |
3841 | /* LD/ST1 (1 Register). */ | |
3842 | case 7: | |
3843 | rpt = 1; | |
3844 | selem = 1; | |
3845 | break; | |
3846 | /* LD/ST2 (2 Registers). */ | |
3847 | case 8: | |
3848 | rpt = 1; | |
3849 | selem = 2; | |
3850 | break; | |
3851 | /* LD/ST1 (2 Registers). */ | |
3852 | case 10: | |
3853 | rpt = 2; | |
3854 | selem = 1; | |
3855 | break; | |
3856 | default: | |
3857 | return AARCH64_RECORD_UNSUPPORTED; | |
3858 | break; | |
3859 | } | |
3860 | for (rindex = 0; rindex < rpt; rindex++) | |
3861 | for (eindex = 0; eindex < elements; eindex++) | |
3862 | { | |
3863 | uint8_t reg_tt, sindex; | |
3864 | reg_tt = (reg_rt + rindex) % 32; | |
3865 | for (sindex = 0; sindex < selem; sindex++) | |
3866 | { | |
3867 | if (bit (aarch64_insn_r->aarch64_insn, 22)) | |
3868 | record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM; | |
3869 | else | |
3870 | { | |
3871 | record_buf_mem[mem_index++] = esize / 8; | |
3872 | record_buf_mem[mem_index++] = address + addr_offset; | |
3873 | } | |
3874 | addr_offset = addr_offset + (esize / 8); | |
3875 | reg_tt = (reg_tt + 1) % 32; | |
3876 | } | |
3877 | } | |
3878 | } | |
3879 | ||
3880 | if (bit (aarch64_insn_r->aarch64_insn, 23)) | |
3881 | record_buf[reg_index++] = reg_rn; | |
3882 | ||
3883 | aarch64_insn_r->reg_rec_count = reg_index; | |
3884 | aarch64_insn_r->mem_rec_count = mem_index / 2; | |
3885 | MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count, | |
3886 | record_buf_mem); | |
3887 | REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count, | |
3888 | record_buf); | |
3889 | return AARCH64_RECORD_SUCCESS; | |
3890 | } | |
3891 | ||
3892 | /* Record handler for load and store instructions. */ | |
3893 | ||
3894 | static unsigned int | |
3895 | aarch64_record_load_store (insn_decode_record *aarch64_insn_r) | |
3896 | { | |
3897 | uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11; | |
3898 | uint8_t insn_bit23, insn_bit21; | |
3899 | uint8_t opc, size_bits, ld_flag, vector_flag; | |
3900 | uint32_t reg_rn, reg_rt, reg_rt2; | |
3901 | uint64_t datasize, offset; | |
3902 | uint32_t record_buf[8]; | |
3903 | uint64_t record_buf_mem[8]; | |
3904 | CORE_ADDR address; | |
3905 | ||
3906 | insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11); | |
3907 | insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27); | |
3908 | insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29); | |
3909 | insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21); | |
3910 | insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23); | |
3911 | ld_flag = bit (aarch64_insn_r->aarch64_insn, 22); | |
3912 | vector_flag = bit (aarch64_insn_r->aarch64_insn, 26); | |
3913 | reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4); | |
3914 | reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9); | |
3915 | reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14); | |
3916 | size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31); | |
3917 | ||
3918 | /* Load/store exclusive. */ | |
3919 | if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00) | |
3920 | { | |
3921 | if (record_debug) | |
b277c936 | 3922 | debug_printf ("Process record: load/store exclusive\n"); |
99afc88b OJ |
3923 | |
3924 | if (ld_flag) | |
3925 | { | |
3926 | record_buf[0] = reg_rt; | |
3927 | aarch64_insn_r->reg_rec_count = 1; | |
3928 | if (insn_bit21) | |
3929 | { | |
3930 | record_buf[1] = reg_rt2; | |
3931 | aarch64_insn_r->reg_rec_count = 2; | |
3932 | } | |
3933 | } | |
3934 | else | |
3935 | { | |
3936 | if (insn_bit21) | |
3937 | datasize = (8 << size_bits) * 2; | |
3938 | else | |
3939 | datasize = (8 << size_bits); | |
3940 | regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, | |
3941 | &address); | |
3942 | record_buf_mem[0] = datasize / 8; | |
3943 | record_buf_mem[1] = address; | |
3944 | aarch64_insn_r->mem_rec_count = 1; | |
3945 | if (!insn_bit23) | |
3946 | { | |
3947 | /* Save register rs. */ | |
3948 | record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20); | |
3949 | aarch64_insn_r->reg_rec_count = 1; | |
3950 | } | |
3951 | } | |
3952 | } | |
3953 | /* Decode load register (literal) instructions. */ | |
3954 | else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01) | |
3955 | { | |
3956 | if (record_debug) | |
b277c936 | 3957 | debug_printf ("Process record: load register (literal)\n"); |
99afc88b OJ |
3958 | if (vector_flag) |
3959 | record_buf[0] = reg_rt + AARCH64_V0_REGNUM; | |
3960 | else | |
3961 | record_buf[0] = reg_rt; | |
3962 | aarch64_insn_r->reg_rec_count = 1; | |
3963 | } | |
3964 | /* Decode all types of load/store pair instructions. */ | |
3965 | else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02) | |
3966 | { | |
3967 | if (record_debug) | |
b277c936 | 3968 | debug_printf ("Process record: load/store pair\n"); |
99afc88b OJ |
3969 | |
3970 | if (ld_flag) | |
3971 | { | |
3972 | if (vector_flag) | |
3973 | { | |
3974 | record_buf[0] = reg_rt + AARCH64_V0_REGNUM; | |
3975 | record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM; | |
3976 | } | |
3977 | else | |
3978 | { | |
3979 | record_buf[0] = reg_rt; | |
3980 | record_buf[1] = reg_rt2; | |
3981 | } | |
3982 | aarch64_insn_r->reg_rec_count = 2; | |
3983 | } | |
3984 | else | |
3985 | { | |
3986 | uint16_t imm7_off; | |
3987 | imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21); | |
3988 | if (!vector_flag) | |
3989 | size_bits = size_bits >> 1; | |
3990 | datasize = 8 << (2 + size_bits); | |
3991 | offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off; | |
3992 | offset = offset << (2 + size_bits); | |
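| /* As an illustration, for "stp x0, x1, [sp, #-16]!" imm7 is 0x7e | |
| (i.e. -2); its magnitude 2 is shifted left by 2 + size_bits == 3 | |
| (size_bits was halved above for the non-vector case), giving 16, | |
| and bit 6 of imm7 selects the subtraction in the address update | |
| below. */ | |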
3993 | regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, | |
3994 | &address); | |
3995 | if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23)) | |
3996 | { | |
3997 | if (imm7_off & 0x40) | |
3998 | address = address - offset; | |
3999 | else | |
4000 | address = address + offset; | |
4001 | } | |
4002 | ||
4003 | record_buf_mem[0] = datasize / 8; | |
4004 | record_buf_mem[1] = address; | |
4005 | record_buf_mem[2] = datasize / 8; | |
4006 | record_buf_mem[3] = address + (datasize / 8); | |
4007 | aarch64_insn_r->mem_rec_count = 2; | |
4008 | } | |
4009 | if (bit (aarch64_insn_r->aarch64_insn, 23)) | |
4010 | record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn; | |
4011 | } | |
4012 | /* Load/store register (unsigned immediate) instructions. */ | |
4013 | else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03) | |
4014 | { | |
4015 | opc = bits (aarch64_insn_r->aarch64_insn, 22, 23); | |
4016 | if (!(opc >> 1)) | |
33877125 YQ |
4017 | { |
4018 | if (opc & 0x01) | |
4019 | ld_flag = 0x01; | |
4020 | else | |
4021 | ld_flag = 0x0; | |
4022 | } | |
99afc88b | 4023 | else |
33877125 | 4024 | { |
1e2b521d YQ |
4025 | if (size_bits == 0x3 && vector_flag == 0x0 && opc == 0x2) |
4026 | { | |
4027 | /* PRFM (immediate) */ | |
4028 | return AARCH64_RECORD_SUCCESS; | |
4029 | } | |
4030 | else if (size_bits == 0x2 && vector_flag == 0x0 && opc == 0x2) | |
4031 | { | |
4032 | /* LDRSW (immediate) */ | |
4033 | ld_flag = 0x1; | |
4034 | } | |
33877125 | 4035 | else |
1e2b521d YQ |
4036 | { |
4037 | if (opc & 0x01) | |
4038 | ld_flag = 0x01; | |
4039 | else | |
4040 | ld_flag = 0x0; | |
4041 | } | |
33877125 | 4042 | } |
99afc88b OJ |
4043 | |
4044 | if (record_debug) | |
4045 | { | |
b277c936 PL |
4046 | debug_printf ("Process record: load/store (unsigned immediate):" |
4047 | " size %x V %d opc %x\n", size_bits, vector_flag, | |
4048 | opc); | |
99afc88b OJ |
4049 | } |
4050 | ||
4051 | if (!ld_flag) | |
4052 | { | |
4053 | offset = bits (aarch64_insn_r->aarch64_insn, 10, 21); | |
4054 | datasize = 8 << size_bits; | |
4055 | regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, | |
4056 | &address); | |
4057 | offset = offset << size_bits; | |
4058 | address = address + offset; | |
4059 | ||
4060 | record_buf_mem[0] = datasize >> 3; | |
4061 | record_buf_mem[1] = address; | |
4062 | aarch64_insn_r->mem_rec_count = 1; | |
4063 | } | |
4064 | else | |
4065 | { | |
4066 | if (vector_flag) | |
4067 | record_buf[0] = reg_rt + AARCH64_V0_REGNUM; | |
4068 | else | |
4069 | record_buf[0] = reg_rt; | |
4070 | aarch64_insn_r->reg_rec_count = 1; | |
4071 | } | |
4072 | } | |
4073 | /* Load/store register (register offset) instructions. */ | |
5d98d3cd YQ |
4074 | else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03 |
4075 | && insn_bits10_11 == 0x02 && insn_bit21) | |
99afc88b OJ |
4076 | { |
4077 | if (record_debug) | |
b277c936 | 4078 | debug_printf ("Process record: load/store (register offset)\n"); |
99afc88b OJ |
4079 | opc = bits (aarch64_insn_r->aarch64_insn, 22, 23); |
4080 | if (!(opc >> 1)) | |
4081 | if (opc & 0x01) | |
4082 | ld_flag = 0x01; | |
4083 | else | |
4084 | ld_flag = 0x0; | |
4085 | else | |
4086 | if (size_bits != 0x03) | |
4087 | ld_flag = 0x01; | |
4088 | else | |
4089 | return AARCH64_RECORD_UNKNOWN; | |
4090 | ||
4091 | if (!ld_flag) | |
4092 | { | |
d9436c7c PA |
4093 | ULONGEST reg_rm_val; |
4094 | ||
99afc88b OJ |
4095 | regcache_raw_read_unsigned (aarch64_insn_r->regcache, |
4096 | bits (aarch64_insn_r->aarch64_insn, 16, 20), ®_rm_val); | |
4097 | if (bit (aarch64_insn_r->aarch64_insn, 12)) | |
4098 | offset = reg_rm_val << size_bits; | |
4099 | else | |
4100 | offset = reg_rm_val; | |
4101 | datasize = 8 << size_bits; | |
4102 | regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, | |
4103 | &address); | |
4104 | address = address + offset; | |
4105 | record_buf_mem[0] = datasize >> 3; | |
4106 | record_buf_mem[1] = address; | |
4107 | aarch64_insn_r->mem_rec_count = 1; | |
4108 | } | |
4109 | else | |
4110 | { | |
4111 | if (vector_flag) | |
4112 | record_buf[0] = reg_rt + AARCH64_V0_REGNUM; | |
4113 | else | |
4114 | record_buf[0] = reg_rt; | |
4115 | aarch64_insn_r->reg_rec_count = 1; | |
4116 | } | |
4117 | } | |
4118 | /* Load/store register (immediate and unprivileged) instructions. */ | |
5d98d3cd YQ |
4119 | else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03 |
4120 | && !insn_bit21) | |
99afc88b OJ |
4121 | { |
4122 | if (record_debug) | |
4123 | { | |
b277c936 PL |
4124 | debug_printf ("Process record: load/store " |
4125 | "(immediate and unprivileged)\n"); | |
99afc88b OJ |
4126 | } |
4127 | opc = bits (aarch64_insn_r->aarch64_insn, 22, 23); | |
4128 | if (!(opc >> 1)) | |
4129 | if (opc & 0x01) | |
4130 | ld_flag = 0x01; | |
4131 | else | |
4132 | ld_flag = 0x0; | |
4133 | else | |
4134 | if (size_bits != 0x03) | |
4135 | ld_flag = 0x01; | |
4136 | else | |
4137 | return AARCH64_RECORD_UNKNOWN; | |
4138 | ||
4139 | if (!ld_flag) | |
4140 | { | |
4141 | uint16_t imm9_off; | |
4142 | imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20); | |
4143 | offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off; | |
4144 | datasize = 8 << size_bits; | |
4145 | regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, | |
4146 | &address); | |
4147 | if (insn_bits10_11 != 0x01) | |
4148 | { | |
4149 | if (imm9_off & 0x0100) | |
4150 | address = address - offset; | |
4151 | else | |
4152 | address = address + offset; | |
4153 | } | |
4154 | record_buf_mem[0] = datasize >> 3; | |
4155 | record_buf_mem[1] = address; | |
4156 | aarch64_insn_r->mem_rec_count = 1; | |
4157 | } | |
4158 | else | |
4159 | { | |
4160 | if (vector_flag) | |
4161 | record_buf[0] = reg_rt + AARCH64_V0_REGNUM; | |
4162 | else | |
4163 | record_buf[0] = reg_rt; | |
4164 | aarch64_insn_r->reg_rec_count = 1; | |
4165 | } | |
4166 | if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03) | |
4167 | record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn; | |
4168 | } | |
4169 | /* Advanced SIMD load/store instructions. */ | |
4170 | else | |
4171 | return aarch64_record_asimd_load_store (aarch64_insn_r); | |
4172 | ||
4173 | MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count, | |
4174 | record_buf_mem); | |
4175 | REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count, | |
4176 | record_buf); | |
4177 | return AARCH64_RECORD_SUCCESS; | |
4178 | } | |
4179 | ||
4180 | /* Record handler for data processing SIMD and floating point instructions. */ | |
4181 | ||
4182 | static unsigned int | |
4183 | aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r) | |
4184 | { | |
4185 | uint8_t insn_bit21, opcode, rmode, reg_rd; | |
4186 | uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15; | |
4187 | uint8_t insn_bits11_14; | |
4188 | uint32_t record_buf[2]; | |
4189 | ||
4190 | insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27); | |
4191 | insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31); | |
4192 | insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11); | |
4193 | insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15); | |
4194 | insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14); | |
4195 | opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18); | |
4196 | rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20); | |
4197 | reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4); | |
4198 | insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21); | |
4199 | ||
4200 | if (record_debug) | |
b277c936 | 4201 | debug_printf ("Process record: data processing SIMD/FP: "); |
99afc88b OJ |
4202 | |
4203 | if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e) | |
4204 | { | |
4205 | /* Floating point - fixed point conversion instructions. */ | |
4206 | if (!insn_bit21) | |
4207 | { | |
4208 | if (record_debug) | |
b277c936 | 4209 | debug_printf ("FP - fixed point conversion"); |
99afc88b OJ |
4210 | |
4211 | if ((opcode >> 1) == 0x0 && rmode == 0x03) | |
4212 | record_buf[0] = reg_rd; | |
4213 | else | |
4214 | record_buf[0] = reg_rd + AARCH64_V0_REGNUM; | |
4215 | } | |
4216 | /* Floating point - conditional compare instructions. */ | |
4217 | else if (insn_bits10_11 == 0x01) | |
4218 | { | |
4219 | if (record_debug) | |
b277c936 | 4220 | debug_printf ("FP - conditional compare"); |
99afc88b OJ |
4221 | |
4222 | record_buf[0] = AARCH64_CPSR_REGNUM; | |
4223 | } | |
4224 | /* Floating point - data processing (2-source) and | |
4225 | conditional select instructions. */ | |
4226 | else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03) | |
4227 | { | |
4228 | if (record_debug) | |
b277c936 | 4229 | debug_printf ("FP - DP (2-source)"); |
99afc88b OJ |
4230 | |
4231 | record_buf[0] = reg_rd + AARCH64_V0_REGNUM; | |
4232 | } | |
4233 | else if (insn_bits10_11 == 0x00) | |
4234 | { | |
4235 | /* Floating point - immediate instructions. */ | |
4236 | if ((insn_bits12_15 & 0x01) == 0x01 | |
4237 | || (insn_bits12_15 & 0x07) == 0x04) | |
4238 | { | |
4239 | if (record_debug) | |
b277c936 | 4240 | debug_printf ("FP - immediate"); |
99afc88b OJ |
4241 | record_buf[0] = reg_rd + AARCH64_V0_REGNUM; |
4242 | } | |
4243 | /* Floating point - compare instructions. */ | |
4244 | else if ((insn_bits12_15 & 0x03) == 0x02) | |
4245 | { | |
4246 | if (record_debug) | |
b277c936 | 4247 | debug_printf ("FP - compare"); | |
99afc88b OJ |
4248 | record_buf[0] = AARCH64_CPSR_REGNUM; |
4249 | } | |
4250 | /* Floating point - integer conversions instructions. */ | |
f62fce35 | 4251 | else if (insn_bits12_15 == 0x00) |
99afc88b OJ |
4252 | { |
4253 | /* Convert float to integer instruction. */ | |
4254 | if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode)) | |
4255 | { | |
4256 | if (record_debug) | |
b277c936 | 4257 | debug_printf ("float to int conversion"); |
99afc88b OJ |
4258 | |
4259 | record_buf[0] = reg_rd + AARCH64_X0_REGNUM; | |
4260 | } | |
4261 | /* Convert integer to float instruction. */ | |
4262 | else if ((opcode >> 1) == 0x01 && !rmode) | |
4263 | { | |
4264 | if (record_debug) | |
b277c936 | 4265 | debug_printf ("int to float conversion"); |
99afc88b OJ |
4266 | |
4267 | record_buf[0] = reg_rd + AARCH64_V0_REGNUM; | |
4268 | } | |
4269 | /* Move float to integer instruction. */ | |
4270 | else if ((opcode >> 1) == 0x03) | |
4271 | { | |
4272 | if (record_debug) | |
b277c936 | 4273 | debug_printf ("move float to int"); |
99afc88b OJ |
4274 | |
4275 | if (!(opcode & 0x01)) | |
4276 | record_buf[0] = reg_rd + AARCH64_X0_REGNUM; | |
4277 | else | |
4278 | record_buf[0] = reg_rd + AARCH64_V0_REGNUM; | |
4279 | } | |
f62fce35 YQ |
4280 | else |
4281 | return AARCH64_RECORD_UNKNOWN; | |
99afc88b | 4282 | } |
f62fce35 YQ |
4283 | else |
4284 | return AARCH64_RECORD_UNKNOWN; | |
99afc88b | 4285 | } |
f62fce35 YQ |
4286 | else |
4287 | return AARCH64_RECORD_UNKNOWN; | |
99afc88b OJ |
4288 | } |
4289 | else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e) | |
4290 | { | |
4291 | if (record_debug) | |
b277c936 | 4292 | debug_printf ("SIMD copy"); |
99afc88b OJ |
4293 | |
4294 | /* Advanced SIMD copy instructions. */ | |
4295 | if (!bits (aarch64_insn_r->aarch64_insn, 21, 23) | |
4296 | && !bit (aarch64_insn_r->aarch64_insn, 15) | |
4297 | && bit (aarch64_insn_r->aarch64_insn, 10)) | |
4298 | { | |
4299 | if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07) | |
4300 | record_buf[0] = reg_rd + AARCH64_X0_REGNUM; | |
4301 | else | |
4302 | record_buf[0] = reg_rd + AARCH64_V0_REGNUM; | |
4303 | } | |
4304 | else | |
4305 | record_buf[0] = reg_rd + AARCH64_V0_REGNUM; | |
4306 | } | |
4307 | /* All remaining floating point or advanced SIMD instructions. */ | |
4308 | else | |
4309 | { | |
4310 | if (record_debug) | |
b277c936 | 4311 | debug_printf ("all remain"); |
99afc88b OJ |
4312 | |
4313 | record_buf[0] = reg_rd + AARCH64_V0_REGNUM; | |
4314 | } | |
4315 | ||
4316 | if (record_debug) | |
b277c936 | 4317 | debug_printf ("\n"); |
99afc88b OJ |
4318 | |
4319 | aarch64_insn_r->reg_rec_count++; | |
4320 | gdb_assert (aarch64_insn_r->reg_rec_count == 1); | |
4321 | REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count, | |
4322 | record_buf); | |
4323 | return AARCH64_RECORD_SUCCESS; | |
4324 | } | |
4325 | ||
4326 | /* Decode the instruction type and invoke its record handler. */ | |
4327 | ||
4328 | static unsigned int | |
4329 | aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r) | |
4330 | { | |
4331 | uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28; | |
4332 | ||
4333 | ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25); | |
4334 | ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26); | |
4335 | ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27); | |
4336 | ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28); | |
4337 | ||
4338 | /* Data processing - immediate instructions. */ | |
4339 | if (!ins_bit26 && !ins_bit27 && ins_bit28) | |
4340 | return aarch64_record_data_proc_imm (aarch64_insn_r); | |
4341 | ||
4342 | /* Branch, exception generation and system instructions. */ | |
4343 | if (ins_bit26 && !ins_bit27 && ins_bit28) | |
4344 | return aarch64_record_branch_except_sys (aarch64_insn_r); | |
4345 | ||
4346 | /* Load and store instructions. */ | |
4347 | if (!ins_bit25 && ins_bit27) | |
4348 | return aarch64_record_load_store (aarch64_insn_r); | |
4349 | ||
4350 | /* Data processing - register instructions. */ | |
4351 | if (ins_bit25 && !ins_bit26 && ins_bit27) | |
4352 | return aarch64_record_data_proc_reg (aarch64_insn_r); | |
4353 | ||
4354 | /* Data processing - SIMD and floating point instructions. */ | |
4355 | if (ins_bit25 && ins_bit26 && ins_bit27) | |
4356 | return aarch64_record_data_proc_simd_fp (aarch64_insn_r); | |
4357 | ||
4358 | return AARCH64_RECORD_UNSUPPORTED; | |
4359 | } | |
4360 | ||
4361 | /* Free the register and memory buffers allocated for RECORD. */ | |
4362 | ||
4363 | static void | |
4364 | deallocate_reg_mem (insn_decode_record *record) | |
4365 | { | |
4366 | xfree (record->aarch64_regs); | |
4367 | xfree (record->aarch64_mems); | |
4368 | } | |
4369 | ||
1e2b521d YQ |
4370 | #if GDB_SELF_TEST |
4371 | namespace selftests { | |
4372 | ||
4373 | static void | |
4374 | aarch64_process_record_test (void) | |
4375 | { | |
4376 | struct gdbarch_info info; | |
4377 | uint32_t ret; | |
4378 | ||
4379 | gdbarch_info_init (&info); | |
4380 | info.bfd_arch_info = bfd_scan_arch ("aarch64"); | |
4381 | ||
4382 | struct gdbarch *gdbarch = gdbarch_find_by_info (info); | |
4383 | SELF_CHECK (gdbarch != NULL); | |
4384 | ||
4385 | insn_decode_record aarch64_record; | |
4386 | ||
4387 | memset (&aarch64_record, 0, sizeof (insn_decode_record)); | |
4388 | aarch64_record.regcache = NULL; | |
4389 | aarch64_record.this_addr = 0; | |
4390 | aarch64_record.gdbarch = gdbarch; | |
4391 | ||
4392 | /* 20 00 80 f9 prfm pldl1keep, [x1] */ | |
4393 | aarch64_record.aarch64_insn = 0xf9800020; | |
4394 | ret = aarch64_record_decode_insn_handler (&aarch64_record); | |
4395 | SELF_CHECK (ret == AARCH64_RECORD_SUCCESS); | |
4396 | SELF_CHECK (aarch64_record.reg_rec_count == 0); | |
4397 | SELF_CHECK (aarch64_record.mem_rec_count == 0); | |
4398 | ||
4399 | deallocate_reg_mem (&aarch64_record); | |
4400 | } | |
4401 | ||
4402 | } // namespace selftests | |
4403 | #endif /* GDB_SELF_TEST */ | |
4404 | ||
99afc88b OJ |
4405 | /* Parse the current instruction and record the values of the registers and |
4406 | memory that will be changed by the current instruction to | |
4407 | record_arch_list.  Return -1 if something is wrong. */ | |
4408 | ||
4409 | int | |
4410 | aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache, | |
4411 | CORE_ADDR insn_addr) | |
4412 | { | |
4413 | uint32_t rec_no = 0; | |
4414 | uint8_t insn_size = 4; | |
4415 | uint32_t ret = 0; | |
99afc88b OJ |
4416 | gdb_byte buf[insn_size]; |
4417 | insn_decode_record aarch64_record; | |
4418 | ||
4419 | memset (&buf[0], 0, insn_size); | |
4420 | memset (&aarch64_record, 0, sizeof (insn_decode_record)); | |
4421 | target_read_memory (insn_addr, &buf[0], insn_size); | |
4422 | aarch64_record.aarch64_insn | |
4423 | = (uint32_t) extract_unsigned_integer (&buf[0], | |
4424 | insn_size, | |
4425 | gdbarch_byte_order (gdbarch)); | |
4426 | aarch64_record.regcache = regcache; | |
4427 | aarch64_record.this_addr = insn_addr; | |
4428 | aarch64_record.gdbarch = gdbarch; | |
4429 | ||
4430 | ret = aarch64_record_decode_insn_handler (&aarch64_record); | |
4431 | if (ret == AARCH64_RECORD_UNSUPPORTED) | |
4432 | { | |
4433 | printf_unfiltered (_("Process record does not support instruction " | |
4434 | "0x%0x at address %s.\n"), | |
4435 | aarch64_record.aarch64_insn, | |
4436 | paddress (gdbarch, insn_addr)); | |
4437 | ret = -1; | |
4438 | } | |
4439 | ||
4440 | if (0 == ret) | |
4441 | { | |
4442 | /* Record registers. */ | |
4443 | record_full_arch_list_add_reg (aarch64_record.regcache, | |
4444 | AARCH64_PC_REGNUM); | |
4445 | /* Always record register CPSR. */ | |
4446 | record_full_arch_list_add_reg (aarch64_record.regcache, | |
4447 | AARCH64_CPSR_REGNUM); | |
4448 | if (aarch64_record.aarch64_regs) | |
4449 | for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++) | |
4450 | if (record_full_arch_list_add_reg (aarch64_record.regcache, | |
4451 | aarch64_record.aarch64_regs[rec_no])) | |
4452 | ret = -1; | |
4453 | ||
4454 | /* Record memories. */ | |
4455 | if (aarch64_record.aarch64_mems) | |
4456 | for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++) | |
4457 | if (record_full_arch_list_add_mem | |
4458 | ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr, | |
4459 | aarch64_record.aarch64_mems[rec_no].len)) | |
4460 | ret = -1; | |
4461 | ||
4462 | if (record_full_arch_list_add_end ()) | |
4463 | ret = -1; | |
4464 | } | |
4465 | ||
4466 | deallocate_reg_mem (&aarch64_record); | |
4467 | return ret; | |
4468 | } |