Automatic date update in version.in
[deliverable/binutils-gdb.git] / gdb / aarch64-tdep.c
CommitLineData
07b287a0
MS
1/* Common target dependent code for GDB on AArch64 systems.
2
32d0add0 3 Copyright (C) 2009-2015 Free Software Foundation, Inc.
07b287a0
MS
4 Contributed by ARM Ltd.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21#include "defs.h"
22
23#include "frame.h"
24#include "inferior.h"
25#include "gdbcmd.h"
26#include "gdbcore.h"
07b287a0
MS
27#include "dis-asm.h"
28#include "regcache.h"
29#include "reggroups.h"
30#include "doublest.h"
31#include "value.h"
32#include "arch-utils.h"
33#include "osabi.h"
34#include "frame-unwind.h"
35#include "frame-base.h"
36#include "trad-frame.h"
37#include "objfiles.h"
38#include "dwarf2-frame.h"
39#include "gdbtypes.h"
40#include "prologue-value.h"
41#include "target-descriptions.h"
42#include "user-regs.h"
43#include "language.h"
44#include "infcall.h"
ea873d8e
PL
45#include "ax.h"
46#include "ax-gdb.h"
07b287a0
MS
47
48#include "aarch64-tdep.h"
49
50#include "elf-bfd.h"
51#include "elf/aarch64.h"
52
07b287a0
MS
53#include "vec.h"
54
99afc88b
OJ
55#include "record.h"
56#include "record-full.h"
57
07b287a0 58#include "features/aarch64.c"
07b287a0 59
787749ea
PL
60#include "arch/aarch64-insn.h"
61
07b287a0
MS
62/* Pseudo register base numbers. */
63#define AARCH64_Q0_REGNUM 0
64#define AARCH64_D0_REGNUM (AARCH64_Q0_REGNUM + 32)
65#define AARCH64_S0_REGNUM (AARCH64_D0_REGNUM + 32)
66#define AARCH64_H0_REGNUM (AARCH64_S0_REGNUM + 32)
67#define AARCH64_B0_REGNUM (AARCH64_H0_REGNUM + 32)
68
/* The standard register names, and all the valid aliases for them.
   Note that the 32-bit "w" names are mapped onto the raw 64-bit X
   register numbers; "ip0"/"ip1" are additional aliases for x16/x17.  */
static const struct
{
  const char *const name;
  int regnum;
} aarch64_register_aliases[] =
{
  /* 64-bit register names.  */
  {"fp", AARCH64_FP_REGNUM},
  {"lr", AARCH64_LR_REGNUM},
  {"sp", AARCH64_SP_REGNUM},

  /* 32-bit register names.  */
  {"w0", AARCH64_X0_REGNUM + 0},
  {"w1", AARCH64_X0_REGNUM + 1},
  {"w2", AARCH64_X0_REGNUM + 2},
  {"w3", AARCH64_X0_REGNUM + 3},
  {"w4", AARCH64_X0_REGNUM + 4},
  {"w5", AARCH64_X0_REGNUM + 5},
  {"w6", AARCH64_X0_REGNUM + 6},
  {"w7", AARCH64_X0_REGNUM + 7},
  {"w8", AARCH64_X0_REGNUM + 8},
  {"w9", AARCH64_X0_REGNUM + 9},
  {"w10", AARCH64_X0_REGNUM + 10},
  {"w11", AARCH64_X0_REGNUM + 11},
  {"w12", AARCH64_X0_REGNUM + 12},
  {"w13", AARCH64_X0_REGNUM + 13},
  {"w14", AARCH64_X0_REGNUM + 14},
  {"w15", AARCH64_X0_REGNUM + 15},
  {"w16", AARCH64_X0_REGNUM + 16},
  {"w17", AARCH64_X0_REGNUM + 17},
  {"w18", AARCH64_X0_REGNUM + 18},
  {"w19", AARCH64_X0_REGNUM + 19},
  {"w20", AARCH64_X0_REGNUM + 20},
  {"w21", AARCH64_X0_REGNUM + 21},
  {"w22", AARCH64_X0_REGNUM + 22},
  {"w23", AARCH64_X0_REGNUM + 23},
  {"w24", AARCH64_X0_REGNUM + 24},
  {"w25", AARCH64_X0_REGNUM + 25},
  {"w26", AARCH64_X0_REGNUM + 26},
  {"w27", AARCH64_X0_REGNUM + 27},
  {"w28", AARCH64_X0_REGNUM + 28},
  {"w29", AARCH64_X0_REGNUM + 29},
  {"w30", AARCH64_X0_REGNUM + 30},

  /* specials */
  {"ip0", AARCH64_X0_REGNUM + 16},
  {"ip1", AARCH64_X0_REGNUM + 17}
};
118
/* The required core 'R' registers.  The trailing "pc" and "cpsr"
   entries follow the 31 general registers and "sp".  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM! */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  "pc", "cpsr"
};
134
/* The FP/SIMD 'V' registers.  The trailing "fpsr"/"fpcr" status and
   control registers follow the 32 vector registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM! */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  "fpsr",
  "fpcr"
};
151
/* AArch64 prologue cache structure.  Holds everything the frame
   unwinders learn about one frame, filled in lazily by the
   aarch64_make_*_cache functions.  */
struct aarch64_prologue_cache
{
  /* The program counter at the start of the function.  It is used to
     identify this frame as a prologue frame.  */
  CORE_ADDR func;

  /* The program counter at the time this frame was created; i.e. where
     this function was called from.  It is used to identify this frame as a
     stub frame.  */
  CORE_ADDR prev_pc;

  /* The stack pointer at the time this frame was created; i.e. the
     caller's stack pointer when this function was called.  It is used
     to identify this frame.  */
  CORE_ADDR prev_sp;

  /* Is the target available to read from?  Left zero when filling the
     cache hit unavailable registers or memory.  */
  int available_p;

  /* The frame base for this frame is just prev_sp - frame size.
     FRAMESIZE is the distance from the frame pointer to the
     initial stack pointer.  */
  int framesize;

  /* The register used to hold the frame pointer for this frame, or -1
     when no frame could be identified.  */
  int framereg;

  /* Saved register offsets.  */
  struct trad_frame_saved_reg *saved_regs;
};
183
07b287a0
MS
/* "show" callback for the AArch64 debugging flag; VALUE is the current
   setting already rendered as a string.  */

static void
show_aarch64_debug (struct ui_file *file, int from_tty,
		    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
}
190
/* Extract a signed value from a bit field within an instruction
   encoding.

   INSN is the instruction opcode.

   WIDTH specifies the width of the bit field to extract (in bits);
   it must satisfy 0 < WIDTH < 32.

   OFFSET specifies the least significant bit of the field where bits
   are numbered zero counting from least to most significant.  */

static int32_t
extract_signed_bitfield (uint32_t insn, unsigned width, unsigned offset)
{
  /* Mask out the field, then sign-extend it using the xor/subtract
     trick.  The previous implementation left-shifted a signed value,
     which is undefined behavior in C when bits are shifted into or
     past the sign bit; all arithmetic here is done on uint32_t, which
     wraps modulo 2^32, and only the final conversion is signed.  */
  uint32_t field = (insn >> offset) & (((uint32_t) 1 << width) - 1);
  uint32_t sign_bit = (uint32_t) 1 << (width - 1);

  return (int32_t) ((field ^ sign_bit) - sign_bit);
}
209
/* Determine if specified bits within an instruction opcode matches a
   specific pattern.

   INSN is the instruction opcode.

   MASK specifies the bits within the opcode that are to be tested
   against for a match with PATTERN.  */

static int
decode_masked_match (uint32_t insn, uint32_t mask, uint32_t pattern)
{
  /* Keep only the bits selected by MASK, then compare against the
     expected PATTERN.  */
  uint32_t selected = insn & mask;

  return selected == pattern ? 1 : 0;
}
223
224/* Decode an opcode if it represents an immediate ADD or SUB instruction.
225
226 ADDR specifies the address of the opcode.
227 INSN specifies the opcode to test.
228 RD receives the 'rd' field from the decoded instruction.
229 RN receives the 'rn' field from the decoded instruction.
230
231 Return 1 if the opcodes matches and is decoded, otherwise 0. */
232static int
787749ea
PL
233aarch64_decode_add_sub_imm (CORE_ADDR addr, uint32_t insn, unsigned *rd,
234 unsigned *rn, int32_t *imm)
07b287a0
MS
235{
236 if ((insn & 0x9f000000) == 0x91000000)
237 {
238 unsigned shift;
239 unsigned op_is_sub;
240
241 *rd = (insn >> 0) & 0x1f;
242 *rn = (insn >> 5) & 0x1f;
243 *imm = (insn >> 10) & 0xfff;
244 shift = (insn >> 22) & 0x3;
245 op_is_sub = (insn >> 30) & 0x1;
246
247 switch (shift)
248 {
249 case 0:
250 break;
251 case 1:
252 *imm <<= 12;
253 break;
254 default:
255 /* UNDEFINED */
256 return 0;
257 }
258
259 if (op_is_sub)
260 *imm = -*imm;
261
262 if (aarch64_debug)
b277c936
PL
263 {
264 debug_printf ("decode: 0x%s 0x%x add x%u, x%u, #%d\n",
265 core_addr_to_string_nz (addr), insn, *rd, *rn,
266 *imm);
267 }
07b287a0
MS
268 return 1;
269 }
270 return 0;
271}
272
07b287a0
MS
273/* Decode an opcode if it represents a branch via register instruction.
274
275 ADDR specifies the address of the opcode.
276 INSN specifies the opcode to test.
0ea6402e 277 IS_BLR receives the 'op' bit from the decoded instruction.
07b287a0
MS
278 RN receives the 'rn' field from the decoded instruction.
279
280 Return 1 if the opcodes matches and is decoded, otherwise 0. */
281
282static int
787749ea
PL
283aarch64_decode_br (CORE_ADDR addr, uint32_t insn, int *is_blr,
284 unsigned *rn)
07b287a0
MS
285{
286 /* 8 4 0 6 2 8 4 0 */
287 /* blr 110101100011111100000000000rrrrr */
288 /* br 110101100001111100000000000rrrrr */
289 if (decode_masked_match (insn, 0xffdffc1f, 0xd61f0000))
290 {
0ea6402e 291 *is_blr = (insn >> 21) & 1;
07b287a0
MS
292 *rn = (insn >> 5) & 0x1f;
293
294 if (aarch64_debug)
b277c936
PL
295 {
296 debug_printf ("decode: 0x%s 0x%x %s 0x%x\n",
297 core_addr_to_string_nz (addr), insn,
298 *is_blr ? "blr" : "br", *rn);
299 }
07b287a0
MS
300
301 return 1;
302 }
303 return 0;
304}
305
07b287a0
MS
306/* Decode an opcode if it represents a ERET instruction.
307
308 ADDR specifies the address of the opcode.
309 INSN specifies the opcode to test.
310
311 Return 1 if the opcodes matches and is decoded, otherwise 0. */
312
313static int
787749ea 314aarch64_decode_eret (CORE_ADDR addr, uint32_t insn)
07b287a0
MS
315{
316 /* eret 1101 0110 1001 1111 0000 0011 1110 0000 */
317 if (insn == 0xd69f03e0)
318 {
319 if (aarch64_debug)
b277c936
PL
320 {
321 debug_printf ("decode: 0x%s 0x%x eret\n",
322 core_addr_to_string_nz (addr), insn);
323 }
07b287a0
MS
324 return 1;
325 }
326 return 0;
327}
328
329/* Decode an opcode if it represents a MOVZ instruction.
330
331 ADDR specifies the address of the opcode.
332 INSN specifies the opcode to test.
333 RD receives the 'rd' field from the decoded instruction.
334
335 Return 1 if the opcodes matches and is decoded, otherwise 0. */
336
337static int
787749ea 338aarch64_decode_movz (CORE_ADDR addr, uint32_t insn, unsigned *rd)
07b287a0
MS
339{
340 if (decode_masked_match (insn, 0xff800000, 0x52800000))
341 {
342 *rd = (insn >> 0) & 0x1f;
343
344 if (aarch64_debug)
b277c936
PL
345 {
346 debug_printf ("decode: 0x%s 0x%x movz x%u, #?\n",
347 core_addr_to_string_nz (addr), insn, *rd);
348 }
07b287a0
MS
349 return 1;
350 }
351 return 0;
352}
353
354/* Decode an opcode if it represents a ORR (shifted register)
355 instruction.
356
357 ADDR specifies the address of the opcode.
358 INSN specifies the opcode to test.
359 RD receives the 'rd' field from the decoded instruction.
360 RN receives the 'rn' field from the decoded instruction.
361 RM receives the 'rm' field from the decoded instruction.
362 IMM receives the 'imm6' field from the decoded instruction.
363
364 Return 1 if the opcodes matches and is decoded, otherwise 0. */
365
366static int
787749ea
PL
367aarch64_decode_orr_shifted_register_x (CORE_ADDR addr, uint32_t insn,
368 unsigned *rd, unsigned *rn,
369 unsigned *rm, int32_t *imm)
07b287a0
MS
370{
371 if (decode_masked_match (insn, 0xff200000, 0xaa000000))
372 {
373 *rd = (insn >> 0) & 0x1f;
374 *rn = (insn >> 5) & 0x1f;
375 *rm = (insn >> 16) & 0x1f;
376 *imm = (insn >> 10) & 0x3f;
377
378 if (aarch64_debug)
b277c936
PL
379 {
380 debug_printf ("decode: 0x%s 0x%x orr x%u, x%u, x%u, #%u\n",
381 core_addr_to_string_nz (addr), insn, *rd, *rn,
382 *rm, *imm);
383 }
07b287a0
MS
384 return 1;
385 }
386 return 0;
387}
388
389/* Decode an opcode if it represents a RET instruction.
390
391 ADDR specifies the address of the opcode.
392 INSN specifies the opcode to test.
393 RN receives the 'rn' field from the decoded instruction.
394
395 Return 1 if the opcodes matches and is decoded, otherwise 0. */
396
397static int
787749ea 398aarch64_decode_ret (CORE_ADDR addr, uint32_t insn, unsigned *rn)
07b287a0
MS
399{
400 if (decode_masked_match (insn, 0xfffffc1f, 0xd65f0000))
401 {
402 *rn = (insn >> 5) & 0x1f;
403 if (aarch64_debug)
b277c936
PL
404 {
405 debug_printf ("decode: 0x%s 0x%x ret x%u\n",
406 core_addr_to_string_nz (addr), insn, *rn);
407 }
07b287a0
MS
408 return 1;
409 }
410 return 0;
411}
412
413/* Decode an opcode if it represents the following instruction:
414 STP rt, rt2, [rn, #imm]
415
416 ADDR specifies the address of the opcode.
417 INSN specifies the opcode to test.
418 RT1 receives the 'rt' field from the decoded instruction.
419 RT2 receives the 'rt2' field from the decoded instruction.
420 RN receives the 'rn' field from the decoded instruction.
421 IMM receives the 'imm' field from the decoded instruction.
422
423 Return 1 if the opcodes matches and is decoded, otherwise 0. */
424
425static int
787749ea
PL
426aarch64_decode_stp_offset (CORE_ADDR addr, uint32_t insn, unsigned *rt1,
427 unsigned *rt2, unsigned *rn, int32_t *imm)
07b287a0
MS
428{
429 if (decode_masked_match (insn, 0xffc00000, 0xa9000000))
430 {
431 *rt1 = (insn >> 0) & 0x1f;
432 *rn = (insn >> 5) & 0x1f;
433 *rt2 = (insn >> 10) & 0x1f;
434 *imm = extract_signed_bitfield (insn, 7, 15);
435 *imm <<= 3;
436
437 if (aarch64_debug)
b277c936
PL
438 {
439 debug_printf ("decode: 0x%s 0x%x stp x%u, x%u, [x%u + #%d]\n",
440 core_addr_to_string_nz (addr), insn, *rt1, *rt2,
441 *rn, *imm);
442 }
07b287a0
MS
443 return 1;
444 }
445 return 0;
446}
447
448/* Decode an opcode if it represents the following instruction:
449 STP rt, rt2, [rn, #imm]!
450
451 ADDR specifies the address of the opcode.
452 INSN specifies the opcode to test.
453 RT1 receives the 'rt' field from the decoded instruction.
454 RT2 receives the 'rt2' field from the decoded instruction.
455 RN receives the 'rn' field from the decoded instruction.
456 IMM receives the 'imm' field from the decoded instruction.
457
458 Return 1 if the opcodes matches and is decoded, otherwise 0. */
459
460static int
787749ea
PL
461aarch64_decode_stp_offset_wb (CORE_ADDR addr, uint32_t insn, unsigned *rt1,
462 unsigned *rt2, unsigned *rn, int32_t *imm)
07b287a0
MS
463{
464 if (decode_masked_match (insn, 0xffc00000, 0xa9800000))
465 {
466 *rt1 = (insn >> 0) & 0x1f;
467 *rn = (insn >> 5) & 0x1f;
468 *rt2 = (insn >> 10) & 0x1f;
469 *imm = extract_signed_bitfield (insn, 7, 15);
470 *imm <<= 3;
471
472 if (aarch64_debug)
b277c936
PL
473 {
474 debug_printf ("decode: 0x%s 0x%x stp x%u, x%u, [x%u + #%d]!\n",
475 core_addr_to_string_nz (addr), insn, *rt1, *rt2,
476 *rn, *imm);
477 }
07b287a0
MS
478 return 1;
479 }
480 return 0;
481}
482
483/* Decode an opcode if it represents the following instruction:
484 STUR rt, [rn, #imm]
485
486 ADDR specifies the address of the opcode.
487 INSN specifies the opcode to test.
488 IS64 receives size field from the decoded instruction.
489 RT receives the 'rt' field from the decoded instruction.
490 RN receives the 'rn' field from the decoded instruction.
491 IMM receives the 'imm' field from the decoded instruction.
492
493 Return 1 if the opcodes matches and is decoded, otherwise 0. */
494
495static int
787749ea
PL
496aarch64_decode_stur (CORE_ADDR addr, uint32_t insn, int *is64,
497 unsigned *rt, unsigned *rn, int32_t *imm)
07b287a0
MS
498{
499 if (decode_masked_match (insn, 0xbfe00c00, 0xb8000000))
500 {
501 *is64 = (insn >> 30) & 1;
502 *rt = (insn >> 0) & 0x1f;
503 *rn = (insn >> 5) & 0x1f;
504 *imm = extract_signed_bitfield (insn, 9, 12);
505
506 if (aarch64_debug)
b277c936
PL
507 {
508 debug_printf ("decode: 0x%s 0x%x stur %c%u, [x%u + #%d]\n",
509 core_addr_to_string_nz (addr), insn,
510 *is64 ? 'x' : 'w', *rt, *rn, *imm);
511 }
07b287a0
MS
512 return 1;
513 }
514 return 0;
515}
516
07b287a0
MS
517/* Analyze a prologue, looking for a recognizable stack frame
518 and frame pointer. Scan until we encounter a store that could
519 clobber the stack frame unexpectedly, or an unknown instruction. */
520
521static CORE_ADDR
522aarch64_analyze_prologue (struct gdbarch *gdbarch,
523 CORE_ADDR start, CORE_ADDR limit,
524 struct aarch64_prologue_cache *cache)
525{
526 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
527 int i;
528 pv_t regs[AARCH64_X_REGISTER_COUNT];
529 struct pv_area *stack;
530 struct cleanup *back_to;
531
532 for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
533 regs[i] = pv_register (i, 0);
534 stack = make_pv_area (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));
535 back_to = make_cleanup_free_pv_area (stack);
536
537 for (; start < limit; start += 4)
538 {
539 uint32_t insn;
540 unsigned rd;
541 unsigned rn;
542 unsigned rm;
543 unsigned rt;
544 unsigned rt1;
545 unsigned rt2;
546 int op_is_sub;
547 int32_t imm;
548 unsigned cond;
96b32e50 549 int is64;
0ea6402e
PL
550 int is_link;
551 int is_cbnz;
552 int is_tbnz;
07b287a0 553 unsigned bit;
6ec5f4be 554 int is_adrp;
07b287a0
MS
555 int32_t offset;
556
557 insn = read_memory_unsigned_integer (start, 4, byte_order_for_code);
558
787749ea 559 if (aarch64_decode_add_sub_imm (start, insn, &rd, &rn, &imm))
07b287a0 560 regs[rd] = pv_add_constant (regs[rn], imm);
6ec5f4be
PL
561 else if (aarch64_decode_adr (start, insn, &is_adrp, &rd, &offset)
562 && is_adrp)
07b287a0 563 regs[rd] = pv_unknown ();
787749ea 564 else if (aarch64_decode_b (start, insn, &is_link, &offset))
07b287a0
MS
565 {
566 /* Stop analysis on branch. */
567 break;
568 }
787749ea 569 else if (aarch64_decode_bcond (start, insn, &cond, &offset))
07b287a0
MS
570 {
571 /* Stop analysis on branch. */
572 break;
573 }
787749ea 574 else if (aarch64_decode_br (start, insn, &is_link, &rn))
07b287a0
MS
575 {
576 /* Stop analysis on branch. */
577 break;
578 }
787749ea
PL
579 else if (aarch64_decode_cb (start, insn, &is64, &is_cbnz, &rn,
580 &offset))
07b287a0
MS
581 {
582 /* Stop analysis on branch. */
583 break;
584 }
787749ea 585 else if (aarch64_decode_eret (start, insn))
07b287a0
MS
586 {
587 /* Stop analysis on branch. */
588 break;
589 }
787749ea 590 else if (aarch64_decode_movz (start, insn, &rd))
07b287a0 591 regs[rd] = pv_unknown ();
787749ea
PL
592 else if (aarch64_decode_orr_shifted_register_x (start, insn, &rd,
593 &rn, &rm, &imm))
07b287a0
MS
594 {
595 if (imm == 0 && rn == 31)
596 regs[rd] = regs[rm];
597 else
598 {
599 if (aarch64_debug)
b277c936
PL
600 {
601 debug_printf ("aarch64: prologue analysis gave up "
602 "addr=0x%s opcode=0x%x (orr x register)\n",
603 core_addr_to_string_nz (start), insn);
604 }
07b287a0
MS
605 break;
606 }
607 }
787749ea 608 else if (aarch64_decode_ret (start, insn, &rn))
07b287a0
MS
609 {
610 /* Stop analysis on branch. */
611 break;
612 }
787749ea 613 else if (aarch64_decode_stur (start, insn, &is64, &rt, &rn, &offset))
07b287a0
MS
614 {
615 pv_area_store (stack, pv_add_constant (regs[rn], offset),
616 is64 ? 8 : 4, regs[rt]);
617 }
787749ea
PL
618 else if (aarch64_decode_stp_offset (start, insn, &rt1, &rt2, &rn,
619 &imm))
07b287a0
MS
620 {
621 /* If recording this store would invalidate the store area
622 (perhaps because rn is not known) then we should abandon
623 further prologue analysis. */
624 if (pv_area_store_would_trash (stack,
625 pv_add_constant (regs[rn], imm)))
626 break;
627
628 if (pv_area_store_would_trash (stack,
629 pv_add_constant (regs[rn], imm + 8)))
630 break;
631
632 pv_area_store (stack, pv_add_constant (regs[rn], imm), 8,
633 regs[rt1]);
634 pv_area_store (stack, pv_add_constant (regs[rn], imm + 8), 8,
635 regs[rt2]);
636 }
787749ea
PL
637 else if (aarch64_decode_stp_offset_wb (start, insn, &rt1, &rt2, &rn,
638 &imm))
07b287a0
MS
639 {
640 /* If recording this store would invalidate the store area
641 (perhaps because rn is not known) then we should abandon
642 further prologue analysis. */
643 if (pv_area_store_would_trash (stack,
14ac654f
MS
644 pv_add_constant (regs[rn], imm)))
645 break;
646
647 if (pv_area_store_would_trash (stack,
07b287a0
MS
648 pv_add_constant (regs[rn], imm + 8)))
649 break;
650
651 pv_area_store (stack, pv_add_constant (regs[rn], imm), 8,
652 regs[rt1]);
653 pv_area_store (stack, pv_add_constant (regs[rn], imm + 8), 8,
654 regs[rt2]);
655 regs[rn] = pv_add_constant (regs[rn], imm);
656 }
787749ea
PL
657 else if (aarch64_decode_tb (start, insn, &is_tbnz, &bit, &rn,
658 &offset))
07b287a0
MS
659 {
660 /* Stop analysis on branch. */
661 break;
662 }
663 else
664 {
665 if (aarch64_debug)
b277c936
PL
666 {
667 debug_printf ("aarch64: prologue analysis gave up addr=0x%s"
668 " opcode=0x%x\n",
669 core_addr_to_string_nz (start), insn);
670 }
07b287a0
MS
671 break;
672 }
673 }
674
675 if (cache == NULL)
676 {
677 do_cleanups (back_to);
678 return start;
679 }
680
681 if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
682 {
683 /* Frame pointer is fp. Frame size is constant. */
684 cache->framereg = AARCH64_FP_REGNUM;
685 cache->framesize = -regs[AARCH64_FP_REGNUM].k;
686 }
687 else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
688 {
689 /* Try the stack pointer. */
690 cache->framesize = -regs[AARCH64_SP_REGNUM].k;
691 cache->framereg = AARCH64_SP_REGNUM;
692 }
693 else
694 {
695 /* We're just out of luck. We don't know where the frame is. */
696 cache->framereg = -1;
697 cache->framesize = 0;
698 }
699
700 for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
701 {
702 CORE_ADDR offset;
703
704 if (pv_area_find_reg (stack, gdbarch, i, &offset))
705 cache->saved_regs[i].addr = offset;
706 }
707
708 do_cleanups (back_to);
709 return start;
710}
711
712/* Implement the "skip_prologue" gdbarch method. */
713
714static CORE_ADDR
715aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
716{
717 unsigned long inst;
718 CORE_ADDR skip_pc;
719 CORE_ADDR func_addr, limit_pc;
720 struct symtab_and_line sal;
721
722 /* See if we can determine the end of the prologue via the symbol
723 table. If so, then return either PC, or the PC after the
724 prologue, whichever is greater. */
725 if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
726 {
727 CORE_ADDR post_prologue_pc
728 = skip_prologue_using_sal (gdbarch, func_addr);
729
730 if (post_prologue_pc != 0)
731 return max (pc, post_prologue_pc);
732 }
733
734 /* Can't determine prologue from the symbol table, need to examine
735 instructions. */
736
737 /* Find an upper limit on the function prologue using the debug
738 information. If the debug information could not be used to
739 provide that bound, then use an arbitrary large number as the
740 upper bound. */
741 limit_pc = skip_prologue_using_sal (gdbarch, pc);
742 if (limit_pc == 0)
743 limit_pc = pc + 128; /* Magic. */
744
745 /* Try disassembling prologue. */
746 return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
747}
748
749/* Scan the function prologue for THIS_FRAME and populate the prologue
750 cache CACHE. */
751
752static void
753aarch64_scan_prologue (struct frame_info *this_frame,
754 struct aarch64_prologue_cache *cache)
755{
756 CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
757 CORE_ADDR prologue_start;
758 CORE_ADDR prologue_end;
759 CORE_ADDR prev_pc = get_frame_pc (this_frame);
760 struct gdbarch *gdbarch = get_frame_arch (this_frame);
761
db634143
PL
762 cache->prev_pc = prev_pc;
763
07b287a0
MS
764 /* Assume we do not find a frame. */
765 cache->framereg = -1;
766 cache->framesize = 0;
767
768 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
769 &prologue_end))
770 {
771 struct symtab_and_line sal = find_pc_line (prologue_start, 0);
772
773 if (sal.line == 0)
774 {
775 /* No line info so use the current PC. */
776 prologue_end = prev_pc;
777 }
778 else if (sal.end < prologue_end)
779 {
780 /* The next line begins after the function end. */
781 prologue_end = sal.end;
782 }
783
784 prologue_end = min (prologue_end, prev_pc);
785 aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
786 }
787 else
788 {
789 CORE_ADDR frame_loc;
790 LONGEST saved_fp;
791 LONGEST saved_lr;
792 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
793
794 frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
795 if (frame_loc == 0)
796 return;
797
798 cache->framereg = AARCH64_FP_REGNUM;
799 cache->framesize = 16;
800 cache->saved_regs[29].addr = 0;
801 cache->saved_regs[30].addr = 8;
802 }
803}
804
7dfa3edc
PL
/* Fill in *CACHE with information about the prologue of *THIS_FRAME.  This
   function may throw an exception if the inferior's registers or memory is
   not available.  */

static void
aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
			       struct aarch64_prologue_cache *cache)
{
  CORE_ADDR unwound_fp;
  int reg;

  aarch64_scan_prologue (this_frame, cache);

  /* No identifiable frame register: leave the cache marked
     unavailable (available_p stays 0).  */
  if (cache->framereg == -1)
    return;

  unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
  if (unwound_fp == 0)
    return;

  /* The caller's SP is the frame base plus the frame size.  */
  cache->prev_sp = unwound_fp + cache->framesize;

  /* Calculate actual addresses of saved registers using offsets
     determined by aarch64_analyze_prologue.  */
  for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
    if (trad_frame_addr_p (cache->saved_regs, reg))
      cache->saved_regs[reg].addr += cache->prev_sp;

  cache->func = get_frame_func (this_frame);

  /* Everything above succeeded without throwing; the cache is
     usable.  */
  cache->available_p = 1;
}
837
/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this is if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  /* Already cached on a previous call: reuse it.  */
  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  /* Swallow NOT_AVAILABLE_ERROR only: the cache then stays with
     available_p == 0.  Any other error propagates.  */
  TRY
    {
      aarch64_make_prologue_cache_1 (this_frame, cache);
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
	throw_exception (ex);
    }
  END_CATCH

  return cache;
}
868
7dfa3edc
PL
869/* Implement the "stop_reason" frame_unwind method. */
870
871static enum unwind_stop_reason
872aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
873 void **this_cache)
874{
875 struct aarch64_prologue_cache *cache
876 = aarch64_make_prologue_cache (this_frame, this_cache);
877
878 if (!cache->available_p)
879 return UNWIND_UNAVAILABLE;
880
881 /* Halt the backtrace at "_start". */
882 if (cache->prev_pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
883 return UNWIND_OUTERMOST;
884
885 /* We've hit a wall, stop. */
886 if (cache->prev_sp == 0)
887 return UNWIND_OUTERMOST;
888
889 return UNWIND_NO_REASON;
890}
891
07b287a0
MS
892/* Our frame ID for a normal frame is the current function's starting
893 PC and the caller's SP when we were called. */
894
895static void
896aarch64_prologue_this_id (struct frame_info *this_frame,
897 void **this_cache, struct frame_id *this_id)
898{
7c8edfae
PL
899 struct aarch64_prologue_cache *cache
900 = aarch64_make_prologue_cache (this_frame, this_cache);
07b287a0 901
7dfa3edc
PL
902 if (!cache->available_p)
903 *this_id = frame_id_build_unavailable_stack (cache->func);
904 else
905 *this_id = frame_id_build (cache->prev_sp, cache->func);
07b287a0
MS
906}
907
908/* Implement the "prev_register" frame_unwind method. */
909
910static struct value *
911aarch64_prologue_prev_register (struct frame_info *this_frame,
912 void **this_cache, int prev_regnum)
913{
914 struct gdbarch *gdbarch = get_frame_arch (this_frame);
7c8edfae
PL
915 struct aarch64_prologue_cache *cache
916 = aarch64_make_prologue_cache (this_frame, this_cache);
07b287a0
MS
917
918 /* If we are asked to unwind the PC, then we need to return the LR
919 instead. The prologue may save PC, but it will point into this
920 frame's prologue, not the next frame's resume location. */
921 if (prev_regnum == AARCH64_PC_REGNUM)
922 {
923 CORE_ADDR lr;
924
925 lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
926 return frame_unwind_got_constant (this_frame, prev_regnum, lr);
927 }
928
929 /* SP is generally not saved to the stack, but this frame is
930 identified by the next frame's stack pointer at the time of the
931 call. The value was already reconstructed into PREV_SP. */
932 /*
933 +----------+ ^
934 | saved lr | |
935 +->| saved fp |--+
936 | | |
937 | | | <- Previous SP
938 | +----------+
939 | | saved lr |
940 +--| saved fp |<- FP
941 | |
942 | |<- SP
943 +----------+ */
944 if (prev_regnum == AARCH64_SP_REGNUM)
945 return frame_unwind_got_constant (this_frame, prev_regnum,
946 cache->prev_sp);
947
948 return trad_frame_get_prev_register (this_frame, cache->saved_regs,
949 prev_regnum);
950}
951
/* AArch64 prologue unwinder.  Unwinds normal frames using the
   prologue-scan cache built above.  */
struct frame_unwind aarch64_prologue_unwind =
{
  NORMAL_FRAME,
  aarch64_prologue_frame_unwind_stop_reason,
  aarch64_prologue_this_id,
  aarch64_prologue_prev_register,
  NULL,
  default_frame_sniffer
};
962
8b61f75d
PL
/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this is if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  Used by the stub unwinder: no prologue scan is
   performed, only the current SP and PC are recorded.  */

static struct aarch64_prologue_cache *
aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  /* Already cached on a previous call: reuse it.  */
  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  /* Swallow NOT_AVAILABLE_ERROR only: the cache then stays with
     available_p == 0.  Any other error propagates.  */
  TRY
    {
      cache->prev_sp = get_frame_register_unsigned (this_frame,
						    AARCH64_SP_REGNUM);
      cache->prev_pc = get_frame_pc (this_frame);
      cache->available_p = 1;
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
	throw_exception (ex);
    }
  END_CATCH

  return cache;
}
996
02a2a705
PL
997/* Implement the "stop_reason" frame_unwind method. */
998
999static enum unwind_stop_reason
1000aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame,
1001 void **this_cache)
1002{
1003 struct aarch64_prologue_cache *cache
1004 = aarch64_make_stub_cache (this_frame, this_cache);
1005
1006 if (!cache->available_p)
1007 return UNWIND_UNAVAILABLE;
1008
1009 return UNWIND_NO_REASON;
1010}
1011
07b287a0
MS
1012/* Our frame ID for a stub frame is the current SP and LR. */
1013
1014static void
1015aarch64_stub_this_id (struct frame_info *this_frame,
1016 void **this_cache, struct frame_id *this_id)
1017{
8b61f75d
PL
1018 struct aarch64_prologue_cache *cache
1019 = aarch64_make_stub_cache (this_frame, this_cache);
07b287a0 1020
02a2a705
PL
1021 if (cache->available_p)
1022 *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
1023 else
1024 *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
07b287a0
MS
1025}
1026
1027/* Implement the "sniffer" frame_unwind method. */
1028
1029static int
1030aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
1031 struct frame_info *this_frame,
1032 void **this_prologue_cache)
1033{
1034 CORE_ADDR addr_in_block;
1035 gdb_byte dummy[4];
1036
1037 addr_in_block = get_frame_address_in_block (this_frame);
3e5d3a5a 1038 if (in_plt_section (addr_in_block)
07b287a0
MS
1039 /* We also use the stub winder if the target memory is unreadable
1040 to avoid having the prologue unwinder trying to read it. */
1041 || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
1042 return 1;
1043
1044 return 0;
1045}
1046
/* AArch64 stub unwinder.  Used for frames with no usable symbol
   information (e.g. PLT stubs) or whose code is unreadable; see
   aarch64_stub_unwind_sniffer.  */
struct frame_unwind aarch64_stub_unwind =
{
  NORMAL_FRAME,				/* type */
  aarch64_stub_frame_unwind_stop_reason, /* stop_reason */
  aarch64_stub_this_id,			/* this_id */
  aarch64_prologue_prev_register,	/* prev_register */
  NULL,					/* unwind_data */
  aarch64_stub_unwind_sniffer		/* sniffer */
};
1057
1058/* Return the frame base address of *THIS_FRAME. */
1059
1060static CORE_ADDR
1061aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
1062{
7c8edfae
PL
1063 struct aarch64_prologue_cache *cache
1064 = aarch64_make_prologue_cache (this_frame, this_cache);
07b287a0
MS
1065
1066 return cache->prev_sp - cache->framesize;
1067}
1068
/* AArch64 default frame base information.  The frame base, locals
   base and arguments base all resolve to the same address.  */
struct frame_base aarch64_normal_base =
{
  &aarch64_prologue_unwind,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base
};
1077
1078/* Assuming THIS_FRAME is a dummy, return the frame ID of that
1079 dummy frame. The frame ID's base needs to match the TOS value
1080 saved by save_dummy_frame_tos () and returned from
1081 aarch64_push_dummy_call, and the PC needs to match the dummy
1082 frame's breakpoint. */
1083
1084static struct frame_id
1085aarch64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
1086{
1087 return frame_id_build (get_frame_register_unsigned (this_frame,
1088 AARCH64_SP_REGNUM),
1089 get_frame_pc (this_frame));
1090}
1091
1092/* Implement the "unwind_pc" gdbarch method. */
1093
1094static CORE_ADDR
1095aarch64_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
1096{
1097 CORE_ADDR pc
1098 = frame_unwind_register_unsigned (this_frame, AARCH64_PC_REGNUM);
1099
1100 return pc;
1101}
1102
1103/* Implement the "unwind_sp" gdbarch method. */
1104
1105static CORE_ADDR
1106aarch64_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
1107{
1108 return frame_unwind_register_unsigned (this_frame, AARCH64_SP_REGNUM);
1109}
1110
1111/* Return the value of the REGNUM register in the previous frame of
1112 *THIS_FRAME. */
1113
1114static struct value *
1115aarch64_dwarf2_prev_register (struct frame_info *this_frame,
1116 void **this_cache, int regnum)
1117{
1118 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1119 CORE_ADDR lr;
1120
1121 switch (regnum)
1122 {
1123 case AARCH64_PC_REGNUM:
1124 lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
1125 return frame_unwind_got_constant (this_frame, regnum, lr);
1126
1127 default:
1128 internal_error (__FILE__, __LINE__,
1129 _("Unexpected register %d"), regnum);
1130 }
1131}
1132
1133/* Implement the "init_reg" dwarf2_frame_ops method. */
1134
1135static void
1136aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
1137 struct dwarf2_frame_state_reg *reg,
1138 struct frame_info *this_frame)
1139{
1140 switch (regnum)
1141 {
1142 case AARCH64_PC_REGNUM:
1143 reg->how = DWARF2_FRAME_REG_FN;
1144 reg->loc.fn = aarch64_dwarf2_prev_register;
1145 break;
1146 case AARCH64_SP_REGNUM:
1147 reg->how = DWARF2_FRAME_REG_CFA;
1148 break;
1149 }
1150}
1151
/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

typedef struct
{
  /* Value to pass on stack.  Points at the caller's copy of the
     bytes; this struct does not own the storage.  */
  const void *data;

  /* Size in bytes of value to pass on stack.  */
  int len;
} stack_item_t;

DEF_VEC_O (stack_item_t);
1165
1166/* Return the alignment (in bytes) of the given type. */
1167
1168static int
1169aarch64_type_align (struct type *t)
1170{
1171 int n;
1172 int align;
1173 int falign;
1174
1175 t = check_typedef (t);
1176 switch (TYPE_CODE (t))
1177 {
1178 default:
1179 /* Should never happen. */
1180 internal_error (__FILE__, __LINE__, _("unknown type alignment"));
1181 return 4;
1182
1183 case TYPE_CODE_PTR:
1184 case TYPE_CODE_ENUM:
1185 case TYPE_CODE_INT:
1186 case TYPE_CODE_FLT:
1187 case TYPE_CODE_SET:
1188 case TYPE_CODE_RANGE:
1189 case TYPE_CODE_BITSTRING:
1190 case TYPE_CODE_REF:
1191 case TYPE_CODE_CHAR:
1192 case TYPE_CODE_BOOL:
1193 return TYPE_LENGTH (t);
1194
1195 case TYPE_CODE_ARRAY:
1196 case TYPE_CODE_COMPLEX:
1197 return aarch64_type_align (TYPE_TARGET_TYPE (t));
1198
1199 case TYPE_CODE_STRUCT:
1200 case TYPE_CODE_UNION:
1201 align = 1;
1202 for (n = 0; n < TYPE_NFIELDS (t); n++)
1203 {
1204 falign = aarch64_type_align (TYPE_FIELD_TYPE (t, n));
1205 if (falign > align)
1206 align = falign;
1207 }
1208 return align;
1209 }
1210}
1211
/* Return 1 if *TY is a homogeneous floating-point aggregate as
   defined in the AAPCS64 ABI document; otherwise return 0.  An HFA
   is an aggregate of one to four members, all with the same
   floating-point type.  */

static int
is_hfa (struct type *ty)
{
  switch (TYPE_CODE (ty))
    {
    case TYPE_CODE_ARRAY:
      {
	struct type *target_ty = TYPE_TARGET_TYPE (ty);
	/* NOTE(review): TYPE_LENGTH (ty) is the array's total size in
	   bytes, not its element count, so this only accepts float
	   arrays of at most 4 bytes total (i.e. a single float); an
	   array of e.g. two doubles is rejected.  The AAPCS64 limit is
	   4 *elements*.  Looks like a bug, but callers derive the
	   element count via TYPE_NFIELDS (which is 0 for arrays), so
	   widening this test needs coordinated changes -- confirm
	   before fixing.  */
	if (TYPE_CODE (target_ty) == TYPE_CODE_FLT && TYPE_LENGTH (ty) <= 4)
	  return 1;
	break;
      }

    case TYPE_CODE_UNION:
    case TYPE_CODE_STRUCT:
      {
	if (TYPE_NFIELDS (ty) > 0 && TYPE_NFIELDS (ty) <= 4)
	  {
	    struct type *member0_type;

	    member0_type = check_typedef (TYPE_FIELD_TYPE (ty, 0));
	    if (TYPE_CODE (member0_type) == TYPE_CODE_FLT)
	      {
		int i;

		/* Every member must match the first member's type code
		   and length exactly.  */
		for (i = 0; i < TYPE_NFIELDS (ty); i++)
		  {
		    struct type *member1_type;

		    member1_type = check_typedef (TYPE_FIELD_TYPE (ty, i));
		    if (TYPE_CODE (member0_type) != TYPE_CODE (member1_type)
			|| (TYPE_LENGTH (member0_type)
			    != TYPE_LENGTH (member1_type)))
		      return 0;
		  }
		return 1;
	      }
	  }
	return 0;
      }

    default:
      break;
    }

  return 0;
}
1262
/* AArch64 function call information structure.  Tracks the state of
   the AAPCS64 argument-marshalling algorithm while setting up one
   call.  */
struct aarch64_call_info
{
  /* The current argument number.  Incremented as each argument is
     marshalled; used for debug output.  */
  unsigned argnum;

  /* The next general purpose register number, equivalent to NGRN as
     described in the AArch64 Procedure Call Standard.  */
  unsigned ngrn;

  /* The next SIMD and floating point register number, equivalent to
     NSRN as described in the AArch64 Procedure Call Standard.  */
  unsigned nsrn;

  /* The next stacked argument address, equivalent to NSAA as
     described in the AArch64 Procedure Call Standard.  */
  unsigned nsaa;

  /* Stack item vector.  Arguments destined for the stack are pushed
     here in call order and written out in reverse.  */
  VEC(stack_item_t) *si;
};
1284
1285/* Pass a value in a sequence of consecutive X registers. The caller
1286 is responsbile for ensuring sufficient registers are available. */
1287
1288static void
1289pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
1290 struct aarch64_call_info *info, struct type *type,
1291 const bfd_byte *buf)
1292{
1293 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1294 int len = TYPE_LENGTH (type);
1295 enum type_code typecode = TYPE_CODE (type);
1296 int regnum = AARCH64_X0_REGNUM + info->ngrn;
1297
1298 info->argnum++;
1299
1300 while (len > 0)
1301 {
1302 int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
1303 CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
1304 byte_order);
1305
1306
1307 /* Adjust sub-word struct/union args when big-endian. */
1308 if (byte_order == BFD_ENDIAN_BIG
1309 && partial_len < X_REGISTER_SIZE
1310 && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
1311 regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);
1312
1313 if (aarch64_debug)
b277c936
PL
1314 {
1315 debug_printf ("arg %d in %s = 0x%s\n", info->argnum,
1316 gdbarch_register_name (gdbarch, regnum),
1317 phex (regval, X_REGISTER_SIZE));
1318 }
07b287a0
MS
1319 regcache_cooked_write_unsigned (regcache, regnum, regval);
1320 len -= partial_len;
1321 buf += partial_len;
1322 regnum++;
1323 }
1324}
1325
1326/* Attempt to marshall a value in a V register. Return 1 if
1327 successful, or 0 if insufficient registers are available. This
1328 function, unlike the equivalent pass_in_x() function does not
1329 handle arguments spread across multiple registers. */
1330
1331static int
1332pass_in_v (struct gdbarch *gdbarch,
1333 struct regcache *regcache,
1334 struct aarch64_call_info *info,
1335 const bfd_byte *buf)
1336{
1337 if (info->nsrn < 8)
1338 {
1339 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1340 int regnum = AARCH64_V0_REGNUM + info->nsrn;
1341
1342 info->argnum++;
1343 info->nsrn++;
1344
1345 regcache_cooked_write (regcache, regnum, buf);
1346 if (aarch64_debug)
b277c936
PL
1347 {
1348 debug_printf ("arg %d in %s\n", info->argnum,
1349 gdbarch_register_name (gdbarch, regnum));
1350 }
07b287a0
MS
1351 return 1;
1352 }
1353 info->nsrn = 8;
1354 return 0;
1355}
1356
1357/* Marshall an argument onto the stack. */
1358
1359static void
1360pass_on_stack (struct aarch64_call_info *info, struct type *type,
1361 const bfd_byte *buf)
1362{
1363 int len = TYPE_LENGTH (type);
1364 int align;
1365 stack_item_t item;
1366
1367 info->argnum++;
1368
1369 align = aarch64_type_align (type);
1370
1371 /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
1372 Natural alignment of the argument's type. */
1373 align = align_up (align, 8);
1374
1375 /* The AArch64 PCS requires at most doubleword alignment. */
1376 if (align > 16)
1377 align = 16;
1378
1379 if (aarch64_debug)
b277c936
PL
1380 {
1381 debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
1382 info->nsaa);
1383 }
07b287a0
MS
1384
1385 item.len = len;
1386 item.data = buf;
1387 VEC_safe_push (stack_item_t, info->si, &item);
1388
1389 info->nsaa += len;
1390 if (info->nsaa & (align - 1))
1391 {
1392 /* Push stack alignment padding. */
1393 int pad = align - (info->nsaa & (align - 1));
1394
1395 item.len = pad;
1396 item.data = buf;
1397
1398 VEC_safe_push (stack_item_t, info->si, &item);
1399 info->nsaa += pad;
1400 }
1401}
1402
1403/* Marshall an argument into a sequence of one or more consecutive X
1404 registers or, if insufficient X registers are available then onto
1405 the stack. */
1406
1407static void
1408pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
1409 struct aarch64_call_info *info, struct type *type,
1410 const bfd_byte *buf)
1411{
1412 int len = TYPE_LENGTH (type);
1413 int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;
1414
1415 /* PCS C.13 - Pass in registers if we have enough spare */
1416 if (info->ngrn + nregs <= 8)
1417 {
1418 pass_in_x (gdbarch, regcache, info, type, buf);
1419 info->ngrn += nregs;
1420 }
1421 else
1422 {
1423 info->ngrn = 8;
1424 pass_on_stack (info, type, buf);
1425 }
1426}
1427
1428/* Pass a value in a V register, or on the stack if insufficient are
1429 available. */
1430
1431static void
1432pass_in_v_or_stack (struct gdbarch *gdbarch,
1433 struct regcache *regcache,
1434 struct aarch64_call_info *info,
1435 struct type *type,
1436 const bfd_byte *buf)
1437{
1438 if (!pass_in_v (gdbarch, regcache, info, buf))
1439 pass_on_stack (info, type, buf);
1440}
1441
1442/* Implement the "push_dummy_call" gdbarch method. */
1443
1444static CORE_ADDR
1445aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
1446 struct regcache *regcache, CORE_ADDR bp_addr,
1447 int nargs,
1448 struct value **args, CORE_ADDR sp, int struct_return,
1449 CORE_ADDR struct_addr)
1450{
1451 int nstack = 0;
1452 int argnum;
1453 int x_argreg;
1454 int v_argreg;
1455 struct aarch64_call_info info;
1456 struct type *func_type;
1457 struct type *return_type;
1458 int lang_struct_return;
1459
1460 memset (&info, 0, sizeof (info));
1461
1462 /* We need to know what the type of the called function is in order
1463 to determine the number of named/anonymous arguments for the
1464 actual argument placement, and the return type in order to handle
1465 return value correctly.
1466
1467 The generic code above us views the decision of return in memory
1468 or return in registers as a two stage processes. The language
1469 handler is consulted first and may decide to return in memory (eg
1470 class with copy constructor returned by value), this will cause
1471 the generic code to allocate space AND insert an initial leading
1472 argument.
1473
1474 If the language code does not decide to pass in memory then the
1475 target code is consulted.
1476
1477 If the language code decides to pass in memory we want to move
1478 the pointer inserted as the initial argument from the argument
1479 list and into X8, the conventional AArch64 struct return pointer
1480 register.
1481
1482 This is slightly awkward, ideally the flag "lang_struct_return"
1483 would be passed to the targets implementation of push_dummy_call.
1484 Rather that change the target interface we call the language code
1485 directly ourselves. */
1486
1487 func_type = check_typedef (value_type (function));
1488
1489 /* Dereference function pointer types. */
1490 if (TYPE_CODE (func_type) == TYPE_CODE_PTR)
1491 func_type = TYPE_TARGET_TYPE (func_type);
1492
1493 gdb_assert (TYPE_CODE (func_type) == TYPE_CODE_FUNC
1494 || TYPE_CODE (func_type) == TYPE_CODE_METHOD);
1495
1496 /* If language_pass_by_reference () returned true we will have been
1497 given an additional initial argument, a hidden pointer to the
1498 return slot in memory. */
1499 return_type = TYPE_TARGET_TYPE (func_type);
1500 lang_struct_return = language_pass_by_reference (return_type);
1501
1502 /* Set the return address. For the AArch64, the return breakpoint
1503 is always at BP_ADDR. */
1504 regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);
1505
1506 /* If we were given an initial argument for the return slot because
1507 lang_struct_return was true, lose it. */
1508 if (lang_struct_return)
1509 {
1510 args++;
1511 nargs--;
1512 }
1513
1514 /* The struct_return pointer occupies X8. */
1515 if (struct_return || lang_struct_return)
1516 {
1517 if (aarch64_debug)
b277c936
PL
1518 {
1519 debug_printf ("struct return in %s = 0x%s\n",
1520 gdbarch_register_name (gdbarch,
1521 AARCH64_STRUCT_RETURN_REGNUM),
1522 paddress (gdbarch, struct_addr));
1523 }
07b287a0
MS
1524 regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
1525 struct_addr);
1526 }
1527
1528 for (argnum = 0; argnum < nargs; argnum++)
1529 {
1530 struct value *arg = args[argnum];
1531 struct type *arg_type;
1532 int len;
1533
1534 arg_type = check_typedef (value_type (arg));
1535 len = TYPE_LENGTH (arg_type);
1536
1537 switch (TYPE_CODE (arg_type))
1538 {
1539 case TYPE_CODE_INT:
1540 case TYPE_CODE_BOOL:
1541 case TYPE_CODE_CHAR:
1542 case TYPE_CODE_RANGE:
1543 case TYPE_CODE_ENUM:
1544 if (len < 4)
1545 {
1546 /* Promote to 32 bit integer. */
1547 if (TYPE_UNSIGNED (arg_type))
1548 arg_type = builtin_type (gdbarch)->builtin_uint32;
1549 else
1550 arg_type = builtin_type (gdbarch)->builtin_int32;
1551 arg = value_cast (arg_type, arg);
1552 }
1553 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type,
1554 value_contents (arg));
1555 break;
1556
1557 case TYPE_CODE_COMPLEX:
1558 if (info.nsrn <= 6)
1559 {
1560 const bfd_byte *buf = value_contents (arg);
1561 struct type *target_type =
1562 check_typedef (TYPE_TARGET_TYPE (arg_type));
1563
1564 pass_in_v (gdbarch, regcache, &info, buf);
1565 pass_in_v (gdbarch, regcache, &info,
1566 buf + TYPE_LENGTH (target_type));
1567 }
1568 else
1569 {
1570 info.nsrn = 8;
1571 pass_on_stack (&info, arg_type, value_contents (arg));
1572 }
1573 break;
1574 case TYPE_CODE_FLT:
1575 pass_in_v_or_stack (gdbarch, regcache, &info, arg_type,
1576 value_contents (arg));
1577 break;
1578
1579 case TYPE_CODE_STRUCT:
1580 case TYPE_CODE_ARRAY:
1581 case TYPE_CODE_UNION:
1582 if (is_hfa (arg_type))
1583 {
1584 int elements = TYPE_NFIELDS (arg_type);
1585
1586 /* Homogeneous Aggregates */
1587 if (info.nsrn + elements < 8)
1588 {
1589 int i;
1590
1591 for (i = 0; i < elements; i++)
1592 {
1593 /* We know that we have sufficient registers
1594 available therefore this will never fallback
1595 to the stack. */
1596 struct value *field =
1597 value_primitive_field (arg, 0, i, arg_type);
1598 struct type *field_type =
1599 check_typedef (value_type (field));
1600
1601 pass_in_v_or_stack (gdbarch, regcache, &info, field_type,
1602 value_contents_writeable (field));
1603 }
1604 }
1605 else
1606 {
1607 info.nsrn = 8;
1608 pass_on_stack (&info, arg_type, value_contents (arg));
1609 }
1610 }
1611 else if (len > 16)
1612 {
1613 /* PCS B.7 Aggregates larger than 16 bytes are passed by
1614 invisible reference. */
1615
1616 /* Allocate aligned storage. */
1617 sp = align_down (sp - len, 16);
1618
1619 /* Write the real data into the stack. */
1620 write_memory (sp, value_contents (arg), len);
1621
1622 /* Construct the indirection. */
1623 arg_type = lookup_pointer_type (arg_type);
1624 arg = value_from_pointer (arg_type, sp);
1625 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type,
1626 value_contents (arg));
1627 }
1628 else
1629 /* PCS C.15 / C.18 multiple values pass. */
1630 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type,
1631 value_contents (arg));
1632 break;
1633
1634 default:
1635 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type,
1636 value_contents (arg));
1637 break;
1638 }
1639 }
1640
1641 /* Make sure stack retains 16 byte alignment. */
1642 if (info.nsaa & 15)
1643 sp -= 16 - (info.nsaa & 15);
1644
1645 while (!VEC_empty (stack_item_t, info.si))
1646 {
1647 stack_item_t *si = VEC_last (stack_item_t, info.si);
1648
1649 sp -= si->len;
1650 write_memory (sp, si->data, si->len);
1651 VEC_pop (stack_item_t, info.si);
1652 }
1653
1654 VEC_free (stack_item_t, info.si);
1655
1656 /* Finally, update the SP register. */
1657 regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);
1658
1659 return sp;
1660}
1661
1662/* Implement the "frame_align" gdbarch method. */
1663
1664static CORE_ADDR
1665aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
1666{
1667 /* Align the stack to sixteen bytes. */
1668 return sp & ~(CORE_ADDR) 15;
1669}
1670
1671/* Return the type for an AdvSISD Q register. */
1672
1673static struct type *
1674aarch64_vnq_type (struct gdbarch *gdbarch)
1675{
1676 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1677
1678 if (tdep->vnq_type == NULL)
1679 {
1680 struct type *t;
1681 struct type *elem;
1682
1683 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
1684 TYPE_CODE_UNION);
1685
1686 elem = builtin_type (gdbarch)->builtin_uint128;
1687 append_composite_type_field (t, "u", elem);
1688
1689 elem = builtin_type (gdbarch)->builtin_int128;
1690 append_composite_type_field (t, "s", elem);
1691
1692 tdep->vnq_type = t;
1693 }
1694
1695 return tdep->vnq_type;
1696}
1697
1698/* Return the type for an AdvSISD D register. */
1699
1700static struct type *
1701aarch64_vnd_type (struct gdbarch *gdbarch)
1702{
1703 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1704
1705 if (tdep->vnd_type == NULL)
1706 {
1707 struct type *t;
1708 struct type *elem;
1709
1710 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
1711 TYPE_CODE_UNION);
1712
1713 elem = builtin_type (gdbarch)->builtin_double;
1714 append_composite_type_field (t, "f", elem);
1715
1716 elem = builtin_type (gdbarch)->builtin_uint64;
1717 append_composite_type_field (t, "u", elem);
1718
1719 elem = builtin_type (gdbarch)->builtin_int64;
1720 append_composite_type_field (t, "s", elem);
1721
1722 tdep->vnd_type = t;
1723 }
1724
1725 return tdep->vnd_type;
1726}
1727
1728/* Return the type for an AdvSISD S register. */
1729
1730static struct type *
1731aarch64_vns_type (struct gdbarch *gdbarch)
1732{
1733 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1734
1735 if (tdep->vns_type == NULL)
1736 {
1737 struct type *t;
1738 struct type *elem;
1739
1740 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
1741 TYPE_CODE_UNION);
1742
1743 elem = builtin_type (gdbarch)->builtin_float;
1744 append_composite_type_field (t, "f", elem);
1745
1746 elem = builtin_type (gdbarch)->builtin_uint32;
1747 append_composite_type_field (t, "u", elem);
1748
1749 elem = builtin_type (gdbarch)->builtin_int32;
1750 append_composite_type_field (t, "s", elem);
1751
1752 tdep->vns_type = t;
1753 }
1754
1755 return tdep->vns_type;
1756}
1757
1758/* Return the type for an AdvSISD H register. */
1759
1760static struct type *
1761aarch64_vnh_type (struct gdbarch *gdbarch)
1762{
1763 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1764
1765 if (tdep->vnh_type == NULL)
1766 {
1767 struct type *t;
1768 struct type *elem;
1769
1770 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
1771 TYPE_CODE_UNION);
1772
1773 elem = builtin_type (gdbarch)->builtin_uint16;
1774 append_composite_type_field (t, "u", elem);
1775
1776 elem = builtin_type (gdbarch)->builtin_int16;
1777 append_composite_type_field (t, "s", elem);
1778
1779 tdep->vnh_type = t;
1780 }
1781
1782 return tdep->vnh_type;
1783}
1784
1785/* Return the type for an AdvSISD B register. */
1786
1787static struct type *
1788aarch64_vnb_type (struct gdbarch *gdbarch)
1789{
1790 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1791
1792 if (tdep->vnb_type == NULL)
1793 {
1794 struct type *t;
1795 struct type *elem;
1796
1797 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
1798 TYPE_CODE_UNION);
1799
1800 elem = builtin_type (gdbarch)->builtin_uint8;
1801 append_composite_type_field (t, "u", elem);
1802
1803 elem = builtin_type (gdbarch)->builtin_int8;
1804 append_composite_type_field (t, "s", elem);
1805
1806 tdep->vnb_type = t;
1807 }
1808
1809 return tdep->vnb_type;
1810}
1811
1812/* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
1813
1814static int
1815aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
1816{
1817 if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
1818 return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;
1819
1820 if (reg == AARCH64_DWARF_SP)
1821 return AARCH64_SP_REGNUM;
1822
1823 if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
1824 return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;
1825
1826 return -1;
1827}
1828\f
1829
/* Implement the "print_insn" gdbarch method.  */

static int
aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
{
  /* Clear any symbol state left in INFO before delegating to the
     opcodes disassembler.  */
  info->symbols = NULL;
  return print_insn_aarch64 (memaddr, info);
}
1838
1839/* AArch64 BRK software debug mode instruction.
1840 Note that AArch64 code is always little-endian.
1841 1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000. */
948f8e3d 1842static const gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
07b287a0
MS
1843
1844/* Implement the "breakpoint_from_pc" gdbarch method. */
1845
948f8e3d 1846static const gdb_byte *
07b287a0
MS
1847aarch64_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr,
1848 int *lenptr)
1849{
1850 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1851
1852 *lenptr = sizeof (aarch64_default_breakpoint);
1853 return aarch64_default_breakpoint;
1854}
1855
1856/* Extract from an array REGS containing the (raw) register state a
1857 function return value of type TYPE, and copy that, in virtual
1858 format, into VALBUF. */
1859
1860static void
1861aarch64_extract_return_value (struct type *type, struct regcache *regs,
1862 gdb_byte *valbuf)
1863{
1864 struct gdbarch *gdbarch = get_regcache_arch (regs);
1865 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1866
1867 if (TYPE_CODE (type) == TYPE_CODE_FLT)
1868 {
1869 bfd_byte buf[V_REGISTER_SIZE];
1870 int len = TYPE_LENGTH (type);
1871
1872 regcache_cooked_read (regs, AARCH64_V0_REGNUM, buf);
1873 memcpy (valbuf, buf, len);
1874 }
1875 else if (TYPE_CODE (type) == TYPE_CODE_INT
1876 || TYPE_CODE (type) == TYPE_CODE_CHAR
1877 || TYPE_CODE (type) == TYPE_CODE_BOOL
1878 || TYPE_CODE (type) == TYPE_CODE_PTR
1879 || TYPE_CODE (type) == TYPE_CODE_REF
1880 || TYPE_CODE (type) == TYPE_CODE_ENUM)
1881 {
1882 /* If the the type is a plain integer, then the access is
1883 straight-forward. Otherwise we have to play around a bit
1884 more. */
1885 int len = TYPE_LENGTH (type);
1886 int regno = AARCH64_X0_REGNUM;
1887 ULONGEST tmp;
1888
1889 while (len > 0)
1890 {
1891 /* By using store_unsigned_integer we avoid having to do
1892 anything special for small big-endian values. */
1893 regcache_cooked_read_unsigned (regs, regno++, &tmp);
1894 store_unsigned_integer (valbuf,
1895 (len > X_REGISTER_SIZE
1896 ? X_REGISTER_SIZE : len), byte_order, tmp);
1897 len -= X_REGISTER_SIZE;
1898 valbuf += X_REGISTER_SIZE;
1899 }
1900 }
1901 else if (TYPE_CODE (type) == TYPE_CODE_COMPLEX)
1902 {
1903 int regno = AARCH64_V0_REGNUM;
1904 bfd_byte buf[V_REGISTER_SIZE];
1905 struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
1906 int len = TYPE_LENGTH (target_type);
1907
1908 regcache_cooked_read (regs, regno, buf);
1909 memcpy (valbuf, buf, len);
1910 valbuf += len;
1911 regcache_cooked_read (regs, regno + 1, buf);
1912 memcpy (valbuf, buf, len);
1913 valbuf += len;
1914 }
1915 else if (is_hfa (type))
1916 {
1917 int elements = TYPE_NFIELDS (type);
1918 struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
1919 int len = TYPE_LENGTH (member_type);
1920 int i;
1921
1922 for (i = 0; i < elements; i++)
1923 {
1924 int regno = AARCH64_V0_REGNUM + i;
1925 bfd_byte buf[X_REGISTER_SIZE];
1926
1927 if (aarch64_debug)
b277c936
PL
1928 {
1929 debug_printf ("read HFA return value element %d from %s\n",
1930 i + 1,
1931 gdbarch_register_name (gdbarch, regno));
1932 }
07b287a0
MS
1933 regcache_cooked_read (regs, regno, buf);
1934
1935 memcpy (valbuf, buf, len);
1936 valbuf += len;
1937 }
1938 }
1939 else
1940 {
1941 /* For a structure or union the behaviour is as if the value had
1942 been stored to word-aligned memory and then loaded into
1943 registers with 64-bit load instruction(s). */
1944 int len = TYPE_LENGTH (type);
1945 int regno = AARCH64_X0_REGNUM;
1946 bfd_byte buf[X_REGISTER_SIZE];
1947
1948 while (len > 0)
1949 {
1950 regcache_cooked_read (regs, regno++, buf);
1951 memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
1952 len -= X_REGISTER_SIZE;
1953 valbuf += X_REGISTER_SIZE;
1954 }
1955 }
1956}
1957
1958
1959/* Will a function return an aggregate type in memory or in a
1960 register? Return 0 if an aggregate type can be returned in a
1961 register, 1 if it must be returned in memory. */
1962
1963static int
1964aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
1965{
1966 int nRc;
1967 enum type_code code;
1968
f168693b 1969 type = check_typedef (type);
07b287a0
MS
1970
1971 /* In the AArch64 ABI, "integer" like aggregate types are returned
1972 in registers. For an aggregate type to be integer like, its size
1973 must be less than or equal to 4 * X_REGISTER_SIZE. */
1974
1975 if (is_hfa (type))
1976 {
1977 /* PCS B.5 If the argument is a Named HFA, then the argument is
1978 used unmodified. */
1979 return 0;
1980 }
1981
1982 if (TYPE_LENGTH (type) > 16)
1983 {
1984 /* PCS B.6 Aggregates larger than 16 bytes are passed by
1985 invisible reference. */
1986
1987 return 1;
1988 }
1989
1990 return 0;
1991}
1992
/* Write into appropriate registers a function return value of type
   TYPE, given in virtual format.  */

static void
aarch64_store_return_value (struct type *type, struct regcache *regs,
			    const gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = get_regcache_arch (regs);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  if (TYPE_CODE (type) == TYPE_CODE_FLT)
    {
      /* Scalar floats go in the low bytes of V0.  NOTE(review): only
	 LEN bytes of BUF are initialized before the full-register
	 write, so the upper bytes of V0 receive indeterminate stack
	 bytes -- harmless for the value itself, but worth
	 confirming/zeroing.  */
      bfd_byte buf[V_REGISTER_SIZE];
      int len = TYPE_LENGTH (type);

      memcpy (buf, valbuf, len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
      regcache_cooked_write (regs, AARCH64_V0_REGNUM, buf);
    }
  else if (TYPE_CODE (type) == TYPE_CODE_INT
	   || TYPE_CODE (type) == TYPE_CODE_CHAR
	   || TYPE_CODE (type) == TYPE_CODE_BOOL
	   || TYPE_CODE (type) == TYPE_CODE_PTR
	   || TYPE_CODE (type) == TYPE_CODE_REF
	   || TYPE_CODE (type) == TYPE_CODE_ENUM)
    {
      if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
	{
	  /* Values of one word or less are zero/sign-extended and
	     returned in r0.  */
	  bfd_byte tmpbuf[X_REGISTER_SIZE];
	  LONGEST val = unpack_long (type, valbuf);

	  store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
	  regcache_cooked_write (regs, AARCH64_X0_REGNUM, tmpbuf);
	}
      else
	{
	  /* Integral values greater than one word are stored in
	     consecutive registers starting with r0.  This will always
	     be a multiple of the register size.  */
	  int len = TYPE_LENGTH (type);
	  int regno = AARCH64_X0_REGNUM;

	  while (len > 0)
	    {
	      regcache_cooked_write (regs, regno++, valbuf);
	      len -= X_REGISTER_SIZE;
	      valbuf += X_REGISTER_SIZE;
	    }
	}
    }
  else if (is_hfa (type))
    {
      /* Homogeneous floating-point aggregate: one member per V
	 register, starting at V0.  */
      int elements = TYPE_NFIELDS (type);
      struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
      int len = TYPE_LENGTH (member_type);
      int i;

      for (i = 0; i < elements; i++)
	{
	  int regno = AARCH64_V0_REGNUM + i;
	  bfd_byte tmpbuf[MAX_REGISTER_SIZE];

	  if (aarch64_debug)
	    {
	      debug_printf ("write HFA return value element %d to %s\n",
			    i + 1,
			    gdbarch_register_name (gdbarch, regno));
	    }

	  memcpy (tmpbuf, valbuf, len);
	  regcache_cooked_write (regs, regno, tmpbuf);
	  valbuf += len;
	}
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
	 been stored to word-aligned memory and then loaded into
	 registers with 64-bit load instruction(s).  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      bfd_byte tmpbuf[X_REGISTER_SIZE];

      while (len > 0)
	{
	  memcpy (tmpbuf, valbuf,
		  len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
	  regcache_cooked_write (regs, regno++, tmpbuf);
	  len -= X_REGISTER_SIZE;
	  valbuf += X_REGISTER_SIZE;
	}
    }
}
2087
2088/* Implement the "return_value" gdbarch method. */
2089
2090static enum return_value_convention
2091aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
2092 struct type *valtype, struct regcache *regcache,
2093 gdb_byte *readbuf, const gdb_byte *writebuf)
2094{
2095 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2096
2097 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
2098 || TYPE_CODE (valtype) == TYPE_CODE_UNION
2099 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
2100 {
2101 if (aarch64_return_in_memory (gdbarch, valtype))
2102 {
2103 if (aarch64_debug)
b277c936 2104 debug_printf ("return value in memory\n");
07b287a0
MS
2105 return RETURN_VALUE_STRUCT_CONVENTION;
2106 }
2107 }
2108
2109 if (writebuf)
2110 aarch64_store_return_value (valtype, regcache, writebuf);
2111
2112 if (readbuf)
2113 aarch64_extract_return_value (valtype, regcache, readbuf);
2114
2115 if (aarch64_debug)
b277c936 2116 debug_printf ("return value in registers\n");
07b287a0
MS
2117
2118 return RETURN_VALUE_REGISTER_CONVENTION;
2119}
2120
2121/* Implement the "get_longjmp_target" gdbarch method. */
2122
2123static int
2124aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2125{
2126 CORE_ADDR jb_addr;
2127 gdb_byte buf[X_REGISTER_SIZE];
2128 struct gdbarch *gdbarch = get_frame_arch (frame);
2129 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2130 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2131
2132 jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
2133
2134 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
2135 X_REGISTER_SIZE))
2136 return 0;
2137
2138 *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
2139 return 1;
2140}
ea873d8e
PL
2141
2142/* Implement the "gen_return_address" gdbarch method. */
2143
2144static void
2145aarch64_gen_return_address (struct gdbarch *gdbarch,
2146 struct agent_expr *ax, struct axs_value *value,
2147 CORE_ADDR scope)
2148{
2149 value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
2150 value->kind = axs_lvalue_register;
2151 value->u.reg = AARCH64_LR_REGNUM;
2152}
07b287a0
MS
2153\f
2154
2155/* Return the pseudo register name corresponding to register regnum. */
2156
2157static const char *
2158aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
2159{
2160 static const char *const q_name[] =
2161 {
2162 "q0", "q1", "q2", "q3",
2163 "q4", "q5", "q6", "q7",
2164 "q8", "q9", "q10", "q11",
2165 "q12", "q13", "q14", "q15",
2166 "q16", "q17", "q18", "q19",
2167 "q20", "q21", "q22", "q23",
2168 "q24", "q25", "q26", "q27",
2169 "q28", "q29", "q30", "q31",
2170 };
2171
2172 static const char *const d_name[] =
2173 {
2174 "d0", "d1", "d2", "d3",
2175 "d4", "d5", "d6", "d7",
2176 "d8", "d9", "d10", "d11",
2177 "d12", "d13", "d14", "d15",
2178 "d16", "d17", "d18", "d19",
2179 "d20", "d21", "d22", "d23",
2180 "d24", "d25", "d26", "d27",
2181 "d28", "d29", "d30", "d31",
2182 };
2183
2184 static const char *const s_name[] =
2185 {
2186 "s0", "s1", "s2", "s3",
2187 "s4", "s5", "s6", "s7",
2188 "s8", "s9", "s10", "s11",
2189 "s12", "s13", "s14", "s15",
2190 "s16", "s17", "s18", "s19",
2191 "s20", "s21", "s22", "s23",
2192 "s24", "s25", "s26", "s27",
2193 "s28", "s29", "s30", "s31",
2194 };
2195
2196 static const char *const h_name[] =
2197 {
2198 "h0", "h1", "h2", "h3",
2199 "h4", "h5", "h6", "h7",
2200 "h8", "h9", "h10", "h11",
2201 "h12", "h13", "h14", "h15",
2202 "h16", "h17", "h18", "h19",
2203 "h20", "h21", "h22", "h23",
2204 "h24", "h25", "h26", "h27",
2205 "h28", "h29", "h30", "h31",
2206 };
2207
2208 static const char *const b_name[] =
2209 {
2210 "b0", "b1", "b2", "b3",
2211 "b4", "b5", "b6", "b7",
2212 "b8", "b9", "b10", "b11",
2213 "b12", "b13", "b14", "b15",
2214 "b16", "b17", "b18", "b19",
2215 "b20", "b21", "b22", "b23",
2216 "b24", "b25", "b26", "b27",
2217 "b28", "b29", "b30", "b31",
2218 };
2219
2220 regnum -= gdbarch_num_regs (gdbarch);
2221
2222 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2223 return q_name[regnum - AARCH64_Q0_REGNUM];
2224
2225 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2226 return d_name[regnum - AARCH64_D0_REGNUM];
2227
2228 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2229 return s_name[regnum - AARCH64_S0_REGNUM];
2230
2231 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2232 return h_name[regnum - AARCH64_H0_REGNUM];
2233
2234 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2235 return b_name[regnum - AARCH64_B0_REGNUM];
2236
2237 internal_error (__FILE__, __LINE__,
2238 _("aarch64_pseudo_register_name: bad register number %d"),
2239 regnum);
2240}
2241
2242/* Implement the "pseudo_register_type" tdesc_arch_data method. */
2243
2244static struct type *
2245aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
2246{
2247 regnum -= gdbarch_num_regs (gdbarch);
2248
2249 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2250 return aarch64_vnq_type (gdbarch);
2251
2252 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2253 return aarch64_vnd_type (gdbarch);
2254
2255 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2256 return aarch64_vns_type (gdbarch);
2257
2258 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2259 return aarch64_vnh_type (gdbarch);
2260
2261 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2262 return aarch64_vnb_type (gdbarch);
2263
2264 internal_error (__FILE__, __LINE__,
2265 _("aarch64_pseudo_register_type: bad register number %d"),
2266 regnum);
2267}
2268
2269/* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2270
2271static int
2272aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
2273 struct reggroup *group)
2274{
2275 regnum -= gdbarch_num_regs (gdbarch);
2276
2277 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2278 return group == all_reggroup || group == vector_reggroup;
2279 else if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2280 return (group == all_reggroup || group == vector_reggroup
2281 || group == float_reggroup);
2282 else if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2283 return (group == all_reggroup || group == vector_reggroup
2284 || group == float_reggroup);
2285 else if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2286 return group == all_reggroup || group == vector_reggroup;
2287 else if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2288 return group == all_reggroup || group == vector_reggroup;
2289
2290 return group == all_reggroup;
2291}
2292
2293/* Implement the "pseudo_register_read_value" gdbarch method. */
2294
2295static struct value *
2296aarch64_pseudo_read_value (struct gdbarch *gdbarch,
2297 struct regcache *regcache,
2298 int regnum)
2299{
2300 gdb_byte reg_buf[MAX_REGISTER_SIZE];
2301 struct value *result_value;
2302 gdb_byte *buf;
2303
2304 result_value = allocate_value (register_type (gdbarch, regnum));
2305 VALUE_LVAL (result_value) = lval_register;
2306 VALUE_REGNUM (result_value) = regnum;
2307 buf = value_contents_raw (result_value);
2308
2309 regnum -= gdbarch_num_regs (gdbarch);
2310
2311 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2312 {
2313 enum register_status status;
2314 unsigned v_regnum;
2315
2316 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2317 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2318 if (status != REG_VALID)
2319 mark_value_bytes_unavailable (result_value, 0,
2320 TYPE_LENGTH (value_type (result_value)));
2321 else
2322 memcpy (buf, reg_buf, Q_REGISTER_SIZE);
2323 return result_value;
2324 }
2325
2326 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2327 {
2328 enum register_status status;
2329 unsigned v_regnum;
2330
2331 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2332 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2333 if (status != REG_VALID)
2334 mark_value_bytes_unavailable (result_value, 0,
2335 TYPE_LENGTH (value_type (result_value)));
2336 else
2337 memcpy (buf, reg_buf, D_REGISTER_SIZE);
2338 return result_value;
2339 }
2340
2341 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2342 {
2343 enum register_status status;
2344 unsigned v_regnum;
2345
2346 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2347 status = regcache_raw_read (regcache, v_regnum, reg_buf);
4bcddace
PL
2348 if (status != REG_VALID)
2349 mark_value_bytes_unavailable (result_value, 0,
2350 TYPE_LENGTH (value_type (result_value)));
2351 else
2352 memcpy (buf, reg_buf, S_REGISTER_SIZE);
07b287a0
MS
2353 return result_value;
2354 }
2355
2356 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2357 {
2358 enum register_status status;
2359 unsigned v_regnum;
2360
2361 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2362 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2363 if (status != REG_VALID)
2364 mark_value_bytes_unavailable (result_value, 0,
2365 TYPE_LENGTH (value_type (result_value)));
2366 else
2367 memcpy (buf, reg_buf, H_REGISTER_SIZE);
2368 return result_value;
2369 }
2370
2371 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2372 {
2373 enum register_status status;
2374 unsigned v_regnum;
2375
2376 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2377 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2378 if (status != REG_VALID)
2379 mark_value_bytes_unavailable (result_value, 0,
2380 TYPE_LENGTH (value_type (result_value)));
2381 else
2382 memcpy (buf, reg_buf, B_REGISTER_SIZE);
2383 return result_value;
2384 }
2385
2386 gdb_assert_not_reached ("regnum out of bound");
2387}
2388
2389/* Implement the "pseudo_register_write" gdbarch method. */
2390
2391static void
2392aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2393 int regnum, const gdb_byte *buf)
2394{
2395 gdb_byte reg_buf[MAX_REGISTER_SIZE];
2396
2397 /* Ensure the register buffer is zero, we want gdb writes of the
2398 various 'scalar' pseudo registers to behavior like architectural
2399 writes, register width bytes are written the remainder are set to
2400 zero. */
2401 memset (reg_buf, 0, sizeof (reg_buf));
2402
2403 regnum -= gdbarch_num_regs (gdbarch);
2404
2405 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2406 {
2407 /* pseudo Q registers */
2408 unsigned v_regnum;
2409
2410 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2411 memcpy (reg_buf, buf, Q_REGISTER_SIZE);
2412 regcache_raw_write (regcache, v_regnum, reg_buf);
2413 return;
2414 }
2415
2416 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2417 {
2418 /* pseudo D registers */
2419 unsigned v_regnum;
2420
2421 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2422 memcpy (reg_buf, buf, D_REGISTER_SIZE);
2423 regcache_raw_write (regcache, v_regnum, reg_buf);
2424 return;
2425 }
2426
2427 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2428 {
2429 unsigned v_regnum;
2430
2431 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2432 memcpy (reg_buf, buf, S_REGISTER_SIZE);
2433 regcache_raw_write (regcache, v_regnum, reg_buf);
2434 return;
2435 }
2436
2437 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2438 {
2439 /* pseudo H registers */
2440 unsigned v_regnum;
2441
2442 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2443 memcpy (reg_buf, buf, H_REGISTER_SIZE);
2444 regcache_raw_write (regcache, v_regnum, reg_buf);
2445 return;
2446 }
2447
2448 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2449 {
2450 /* pseudo B registers */
2451 unsigned v_regnum;
2452
2453 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2454 memcpy (reg_buf, buf, B_REGISTER_SIZE);
2455 regcache_raw_write (regcache, v_regnum, reg_buf);
2456 return;
2457 }
2458
2459 gdb_assert_not_reached ("regnum out of bound");
2460}
2461
07b287a0
MS
/* Callback function for user_reg_add.  BATON points at the register
   number that was registered for this alias.  */

static struct value *
value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
{
  return value_of_register (*(const int *) baton, frame);
}
2471\f
2472
9404b58f
KM
/* Implement the "software_single_step" gdbarch method, needed to
   single step through atomic sequences on AArch64.

   A load-exclusive/store-exclusive sequence must not be interrupted,
   or the exclusive monitor is lost and the sequence never completes.
   So instead of stepping instruction by instruction, place a
   breakpoint past the closing store-exclusive (and one at the target
   of an optional conditional branch inside the sequence) and run to
   it.  Returns 1 if breakpoints were placed, 0 to fall back to normal
   single-stepping.  */

static int
aarch64_software_single_step (struct frame_info *frame)
{
  struct gdbarch *gdbarch = get_frame_arch (frame);
  struct address_space *aspace = get_frame_address_space (frame);
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  const int insn_size = 4;
  const int atomic_sequence_length = 16; /* Instruction sequence length.  */
  CORE_ADDR pc = get_frame_pc (frame);
  CORE_ADDR breaks[2] = { -1, -1 };
  CORE_ADDR loc = pc;
  CORE_ADDR closing_insn = 0;
  uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
						byte_order_for_code);
  int index;
  int insn_count;
  int bc_insn_count = 0; /* Conditional branch instruction count.  */
  int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed).  */

  /* Look for a Load Exclusive instruction which begins the sequence.
     The mask/value pair selects the load-exclusive encoding class;
     see the A64 load/store exclusive encodings in the ARM ARM.  */
  if (!decode_masked_match (insn, 0x3fc00000, 0x08400000))
    return 0;

  /* Scan forward, at most ATOMIC_SEQUENCE_LENGTH instructions, for
     the matching store-exclusive.  */
  for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
    {
      int32_t offset;
      unsigned cond;

      loc += insn_size;
      insn = read_memory_unsigned_integer (loc, insn_size,
					   byte_order_for_code);

      /* Check if the instruction is a conditional branch.  */
      if (aarch64_decode_bcond (loc, insn, &cond, &offset))
	{
	  /* More than one conditional branch in the sequence is not
	     supported; fall back to normal stepping.  */
	  if (bc_insn_count >= 1)
	    return 0;

	  /* It is, so we'll try to set a breakpoint at the destination.  */
	  breaks[1] = loc + offset;

	  bc_insn_count++;
	  last_breakpoint++;
	}

      /* Look for the Store Exclusive which closes the atomic sequence.  */
      if (decode_masked_match (insn, 0x3fc00000, 0x08000000))
	{
	  closing_insn = loc;
	  break;
	}
    }

  /* We didn't find a closing Store Exclusive instruction, fall back.  */
  if (!closing_insn)
    return 0;

  /* Insert breakpoint after the end of the atomic sequence.  */
  breaks[0] = loc + insn_size;

  /* Check for duplicated breakpoints, and also check that the second
     breakpoint is not within the atomic sequence.  */
  if (last_breakpoint
      && (breaks[1] == breaks[0]
	  || (breaks[1] >= pc && breaks[1] <= closing_insn)))
    last_breakpoint = 0;

  /* Insert the breakpoint at the end of the sequence, and one at the
     destination of the conditional branch, if it exists.  */
  for (index = 0; index <= last_breakpoint; index++)
    insert_single_step_breakpoint (gdbarch, aspace, breaks[index]);

  return 1;
}
2550
07b287a0
MS
2551/* Initialize the current architecture based on INFO. If possible,
2552 re-use an architecture from ARCHES, which is a list of
2553 architectures already created during this debugging session.
2554
2555 Called e.g. at program startup, when reading a core file, and when
2556 reading a binary file. */
2557
2558static struct gdbarch *
2559aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
2560{
2561 struct gdbarch_tdep *tdep;
2562 struct gdbarch *gdbarch;
2563 struct gdbarch_list *best_arch;
2564 struct tdesc_arch_data *tdesc_data = NULL;
2565 const struct target_desc *tdesc = info.target_desc;
2566 int i;
2567 int have_fpa_registers = 1;
2568 int valid_p = 1;
2569 const struct tdesc_feature *feature;
2570 int num_regs = 0;
2571 int num_pseudo_regs = 0;
2572
2573 /* Ensure we always have a target descriptor. */
2574 if (!tdesc_has_registers (tdesc))
2575 tdesc = tdesc_aarch64;
2576
2577 gdb_assert (tdesc);
2578
2579 feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");
2580
2581 if (feature == NULL)
2582 return NULL;
2583
2584 tdesc_data = tdesc_data_alloc ();
2585
2586 /* Validate the descriptor provides the mandatory core R registers
2587 and allocate their numbers. */
2588 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
2589 valid_p &=
2590 tdesc_numbered_register (feature, tdesc_data, AARCH64_X0_REGNUM + i,
2591 aarch64_r_register_names[i]);
2592
2593 num_regs = AARCH64_X0_REGNUM + i;
2594
2595 /* Look for the V registers. */
2596 feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
2597 if (feature)
2598 {
2599 /* Validate the descriptor provides the mandatory V registers
2600 and allocate their numbers. */
2601 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
2602 valid_p &=
2603 tdesc_numbered_register (feature, tdesc_data, AARCH64_V0_REGNUM + i,
2604 aarch64_v_register_names[i]);
2605
2606 num_regs = AARCH64_V0_REGNUM + i;
2607
2608 num_pseudo_regs += 32; /* add the Qn scalar register pseudos */
2609 num_pseudo_regs += 32; /* add the Dn scalar register pseudos */
2610 num_pseudo_regs += 32; /* add the Sn scalar register pseudos */
2611 num_pseudo_regs += 32; /* add the Hn scalar register pseudos */
2612 num_pseudo_regs += 32; /* add the Bn scalar register pseudos */
2613 }
2614
2615 if (!valid_p)
2616 {
2617 tdesc_data_cleanup (tdesc_data);
2618 return NULL;
2619 }
2620
2621 /* AArch64 code is always little-endian. */
2622 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
2623
2624 /* If there is already a candidate, use it. */
2625 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
2626 best_arch != NULL;
2627 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
2628 {
2629 /* Found a match. */
2630 break;
2631 }
2632
2633 if (best_arch != NULL)
2634 {
2635 if (tdesc_data != NULL)
2636 tdesc_data_cleanup (tdesc_data);
2637 return best_arch->gdbarch;
2638 }
2639
8d749320 2640 tdep = XCNEW (struct gdbarch_tdep);
07b287a0
MS
2641 gdbarch = gdbarch_alloc (&info, tdep);
2642
2643 /* This should be low enough for everything. */
2644 tdep->lowest_pc = 0x20;
2645 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
2646 tdep->jb_elt_size = 8;
2647
2648 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
2649 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
2650
07b287a0
MS
2651 /* Frame handling. */
2652 set_gdbarch_dummy_id (gdbarch, aarch64_dummy_id);
2653 set_gdbarch_unwind_pc (gdbarch, aarch64_unwind_pc);
2654 set_gdbarch_unwind_sp (gdbarch, aarch64_unwind_sp);
2655
2656 /* Advance PC across function entry code. */
2657 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
2658
2659 /* The stack grows downward. */
2660 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
2661
2662 /* Breakpoint manipulation. */
2663 set_gdbarch_breakpoint_from_pc (gdbarch, aarch64_breakpoint_from_pc);
07b287a0 2664 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
9404b58f 2665 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
07b287a0
MS
2666
2667 /* Information about registers, etc. */
2668 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
2669 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
2670 set_gdbarch_num_regs (gdbarch, num_regs);
2671
2672 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
2673 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
2674 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
2675 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
2676 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
2677 set_tdesc_pseudo_register_reggroup_p (gdbarch,
2678 aarch64_pseudo_register_reggroup_p);
2679
2680 /* ABI */
2681 set_gdbarch_short_bit (gdbarch, 16);
2682 set_gdbarch_int_bit (gdbarch, 32);
2683 set_gdbarch_float_bit (gdbarch, 32);
2684 set_gdbarch_double_bit (gdbarch, 64);
2685 set_gdbarch_long_double_bit (gdbarch, 128);
2686 set_gdbarch_long_bit (gdbarch, 64);
2687 set_gdbarch_long_long_bit (gdbarch, 64);
2688 set_gdbarch_ptr_bit (gdbarch, 64);
2689 set_gdbarch_char_signed (gdbarch, 0);
2690 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
2691 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
2692 set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);
2693
2694 /* Internal <-> external register number maps. */
2695 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
2696
2697 /* Returning results. */
2698 set_gdbarch_return_value (gdbarch, aarch64_return_value);
2699
2700 /* Disassembly. */
2701 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
2702
2703 /* Virtual tables. */
2704 set_gdbarch_vbit_in_delta (gdbarch, 1);
2705
2706 /* Hook in the ABI-specific overrides, if they have been registered. */
2707 info.target_desc = tdesc;
2708 info.tdep_info = (void *) tdesc_data;
2709 gdbarch_init_osabi (info, gdbarch);
2710
2711 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
2712
2713 /* Add some default predicates. */
2714 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
2715 dwarf2_append_unwinders (gdbarch);
2716 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
2717
2718 frame_base_set_default (gdbarch, &aarch64_normal_base);
2719
2720 /* Now we have tuned the configuration, set a few final things,
2721 based on what the OS ABI has told us. */
2722
2723 if (tdep->jb_pc >= 0)
2724 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
2725
ea873d8e
PL
2726 set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);
2727
07b287a0
MS
2728 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
2729
2730 /* Add standard register aliases. */
2731 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
2732 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
2733 value_of_aarch64_user_reg,
2734 &aarch64_register_aliases[i].regnum);
2735
2736 return gdbarch;
2737}
2738
2739static void
2740aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
2741{
2742 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2743
2744 if (tdep == NULL)
2745 return;
2746
2747 fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
2748 paddress (gdbarch, tdep->lowest_pc));
2749}
2750
/* Suppress warning from -Wmissing-prototypes.  */
extern initialize_file_ftype _initialize_aarch64_tdep;

/* Module initializer: register the AArch64 gdbarch, its default
   target description, and the "set/show debug aarch64" commands.  */

void
_initialize_aarch64_tdep (void)
{
  gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
		    aarch64_dump_tdep);

  /* Built-in target description generated from features/aarch64.c.  */
  initialize_tdesc_aarch64 ();

  /* Debug this file's internals.  */
  add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
Set AArch64 debugging."), _("\
Show AArch64 debugging."), _("\
When on, AArch64 specific debugging is enabled."),
			   NULL,
			   show_aarch64_debug,
			   &setdebuglist, &showdebuglist);
}
99afc88b
OJ
2771
/* AArch64 process record-replay related structures, defines etc.  */

/* Bit-field helpers over an instruction word.  submask(x) yields a
   mask of the low x+1 bits; bit(obj,st) extracts a single bit;
   bits(obj,st,fn) extracts bits st..fn inclusive.

   Use an unsigned 64-bit constant for the shift: `1L << ((x) + 1)'
   shifts into (or past) the sign bit of long for x >= 30 on ILP32
   hosts (x >= 62 on LP64), which is undefined behavior in C.  */
#define submask(x) ((1ULL << ((x) + 1)) - 1)
#define bit(obj,st) (((obj) >> (st)) & 1)
#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))

/* Allocate REGS and copy LENGTH register numbers from RECORD_BUF into
   it.  No-op when LENGTH is zero.  */
#define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
        do  \
          { \
            unsigned int reg_len = LENGTH; \
            if (reg_len) \
              { \
                REGS = XNEWVEC (uint32_t, reg_len); \
                memcpy(&REGS[0], &RECORD_BUF[0], sizeof(uint32_t)*LENGTH); \
              } \
          } \
        while (0)

/* Allocate MEMS and copy LENGTH (len, addr) pairs from RECORD_BUF
   into it.  RECORD_BUF is laid out as alternating uint64_t len/addr
   values matching struct aarch64_mem_r.  No-op when LENGTH is zero.  */
#define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
        do  \
          { \
            unsigned int mem_len = LENGTH; \
            if (mem_len) \
            { \
              MEMS =  XNEWVEC (struct aarch64_mem_r, mem_len);  \
              memcpy(&MEMS->len, &RECORD_BUF[0], \
                     sizeof(struct aarch64_mem_r) * LENGTH); \
            } \
          } \
          while (0)
2802
/* AArch64 record/replay structures and enumerations.  */

/* One recorded memory access: LEN bytes at ADDR will be modified by
   the instruction being recorded.  Layout must stay in sync with the
   alternating len/addr uint64_t pairs built up in record_buf_mem and
   copied by MEM_ALLOC.  */

struct aarch64_mem_r
{
  uint64_t len;    /* Record length.  */
  uint64_t addr;   /* Memory address.  */
};

/* Result codes returned by the aarch64_record_* decode handlers.  */

enum aarch64_record_result
{
  AARCH64_RECORD_SUCCESS,
  AARCH64_RECORD_FAILURE,
  AARCH64_RECORD_UNSUPPORTED,
  AARCH64_RECORD_UNKNOWN
};

/* Decode state threaded through the aarch64_record_* handlers for a
   single instruction: the instruction word plus the register and
   memory side effects discovered so far.  */

typedef struct insn_decode_record_t
{
  struct gdbarch *gdbarch;
  struct regcache *regcache;
  CORE_ADDR this_addr;                 /* Address of insn to be recorded.  */
  uint32_t aarch64_insn;               /* Insn to be recorded.  */
  uint32_t mem_rec_count;              /* Count of memory records.  */
  uint32_t reg_rec_count;              /* Count of register records.  */
  uint32_t *aarch64_regs;              /* Registers to be recorded.  */
  struct aarch64_mem_r *aarch64_mems;  /* Memory locations to be recorded.  */
} insn_decode_record;
2830
/* Record handler for data processing - register instructions.
   Records the destination register (and CPSR when the instruction
   sets flags) into AARCH64_INSN_R.  Returns AARCH64_RECORD_SUCCESS or
   AARCH64_RECORD_UNKNOWN for unrecognized encodings.  */

static unsigned int
aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
{
  uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
  uint32_t record_buf[4];

  reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);

  /* Bit 28 clear: shifted-register forms.  */
  if (!bit (aarch64_insn_r->aarch64_insn, 28))
    {
      uint8_t setflags;

      /* Logical (shifted register).  */
      if (insn_bits24_27 == 0x0a)
	setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
      /* Add/subtract.  */
      else if (insn_bits24_27 == 0x0b)
	setflags = bit (aarch64_insn_r->aarch64_insn, 29);
      else
	return AARCH64_RECORD_UNKNOWN;

      record_buf[0] = reg_rd;
      aarch64_insn_r->reg_rec_count = 1;
      if (setflags)
	record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
    }
  else
    {
      if (insn_bits24_27 == 0x0b)
	{
	  /* Data-processing (3 source).  */
	  record_buf[0] = reg_rd;
	  aarch64_insn_r->reg_rec_count = 1;
	}
      else if (insn_bits24_27 == 0x0a)
	{
	  if (insn_bits21_23 == 0x00)
	    {
	      /* Add/subtract (with carry).  */
	      record_buf[0] = reg_rd;
	      aarch64_insn_r->reg_rec_count = 1;
	      /* Bit 29 set means the flags are updated too.  */
	      if (bit (aarch64_insn_r->aarch64_insn, 29))
		{
		  record_buf[1] = AARCH64_CPSR_REGNUM;
		  aarch64_insn_r->reg_rec_count = 2;
		}
	    }
	  else if (insn_bits21_23 == 0x02)
	    {
	      /* Conditional compare (register) and conditional compare
		 (immediate) instructions.  */
	      record_buf[0] = AARCH64_CPSR_REGNUM;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	  else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
	    {
	      /* Conditional select.  */
	      /* Data-processing (2 source).  */
	      /* Data-processing (1 source).  */
	      record_buf[0] = reg_rd;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	  else
	    return AARCH64_RECORD_UNKNOWN;
	}
    }

  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}
2906
2907/* Record handler for data processing - immediate instructions. */
2908
2909static unsigned int
2910aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
2911{
2912 uint8_t reg_rd, insn_bit28, insn_bit23, insn_bits24_27, setflags;
2913 uint32_t record_buf[4];
2914
2915 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
2916 insn_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
2917 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
2918 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
2919
2920 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
2921 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
2922 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
2923 {
2924 record_buf[0] = reg_rd;
2925 aarch64_insn_r->reg_rec_count = 1;
2926 }
2927 else if (insn_bits24_27 == 0x01)
2928 {
2929 /* Add/Subtract (immediate). */
2930 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
2931 record_buf[0] = reg_rd;
2932 aarch64_insn_r->reg_rec_count = 1;
2933 if (setflags)
2934 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
2935 }
2936 else if (insn_bits24_27 == 0x02 && !insn_bit23)
2937 {
2938 /* Logical (immediate). */
2939 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
2940 record_buf[0] = reg_rd;
2941 aarch64_insn_r->reg_rec_count = 1;
2942 if (setflags)
2943 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
2944 }
2945 else
2946 return AARCH64_RECORD_UNKNOWN;
2947
2948 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
2949 record_buf);
2950 return AARCH64_RECORD_SUCCESS;
2951}
2952
/* Record handler for branch, exception generation and system
   instructions.  Records the registers each variant clobbers (PC, LR,
   CPSR, or a transfer register), and defers SVC to the OS-ABI's
   syscall record hook.  */

static unsigned int
aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
  uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
  uint32_t record_buf[4];

  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
  insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);

  if (insn_bits28_31 == 0x0d)
    {
      /* Exception generation instructions.  */
      if (insn_bits24_27 == 0x04)
	{
	  /* Only SVC is handled; the remaining bit pattern selects it.  */
	  if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
	      && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
	      && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
	    {
	      ULONGEST svc_number;

	      /* The syscall number is in X8; hand off to the OS-ABI
		 specific recorder.  */
	      regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
					  &svc_number);
	      return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
						   svc_number);
	    }
	  else
	    return AARCH64_RECORD_UNSUPPORTED;
	}
      /* System instructions.  */
      else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
	{
	  uint32_t reg_rt, reg_crn;

	  reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
	  reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);

	  /* Record rt in case of sysl and mrs instructions.  */
	  if (bit (aarch64_insn_r->aarch64_insn, 21))
	    {
	      record_buf[0] = reg_rt;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	  /* Record cpsr for hint and msr(immediate) instructions.  */
	  else if (reg_crn == 0x02 || reg_crn == 0x04)
	    {
	      record_buf[0] = AARCH64_CPSR_REGNUM;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	}
      /* Unconditional branch (register).  */
      else if ((insn_bits24_27 & 0x0e) == 0x06)
	{
	  record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
	  /* BLR additionally writes the link register.  */
	  if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
	    record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
	}
      else
	return AARCH64_RECORD_UNKNOWN;
    }
  /* Unconditional branch (immediate).  */
  else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
    {
      record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
      /* BL (bit 31 set) additionally writes the link register.  */
      if (bit (aarch64_insn_r->aarch64_insn, 31))
	record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
    }
  else
    /* Compare & branch (immediate), Test & branch (immediate) and
       Conditional branch (immediate).  */
    record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;

  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}
3032
/* Record handler for advanced SIMD load and store instructions.
   Walks the structure being transferred element by element: loads
   record the destination V registers, stores record the target memory
   ranges.  When write-back is selected the base register is recorded
   too.  */

static unsigned int
aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
{
  CORE_ADDR address;
  uint64_t addr_offset = 0;
  uint32_t record_buf[24];
  uint64_t record_buf_mem[24];
  uint32_t reg_rn, reg_rt;
  uint32_t reg_index = 0, mem_index = 0;
  uint8_t opcode_bits, size_bits;

  reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
  size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
  opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
  /* Base address for the transfer comes from register Rn.  */
  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);

  if (record_debug)
    debug_printf ("Process record: Advanced SIMD load/store\n");

  /* Load/store single structure.  */
  if (bit (aarch64_insn_r->aarch64_insn, 24))
    {
      uint8_t sindex, scale, selem, esize, replicate = 0;
      scale = opcode_bits >> 2;
      /* Number of structure elements per transfer.  */
      selem = ((opcode_bits & 0x02) |
              bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
      /* Validate the element-size encoding and adjust SCALE.  */
      switch (scale)
	{
	case 1:
	  if (size_bits & 0x01)
	    return AARCH64_RECORD_UNKNOWN;
	  break;
	case 2:
	  if ((size_bits >> 1) & 0x01)
	    return AARCH64_RECORD_UNKNOWN;
	  if (size_bits & 0x01)
	    {
	      if (!((opcode_bits >> 1) & 0x01))
		scale = 3;
	      else
		return AARCH64_RECORD_UNKNOWN;
	    }
	  break;
	case 3:
	  /* Load-and-replicate forms (LD1R etc.).  */
	  if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
	    {
	      scale = size_bits;
	      replicate = 1;
	      break;
	    }
	  else
	    return AARCH64_RECORD_UNKNOWN;
	default:
	  break;
	}
      esize = 8 << scale;	/* Element size in bits.  */
      if (replicate)
	for (sindex = 0; sindex < selem; sindex++)
	  {
	    record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
	    reg_rt = (reg_rt + 1) % 32;
	  }
      else
	{
	  for (sindex = 0; sindex < selem; sindex++)
	    /* Bit 22 set means a load (record registers), clear means
	       a store (record memory).  */
	    if (bit (aarch64_insn_r->aarch64_insn, 22))
	      record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
	    else
	      {
		record_buf_mem[mem_index++] = esize / 8;
		record_buf_mem[mem_index++] = address + addr_offset;
	      }
	    addr_offset = addr_offset + (esize / 8);
	    reg_rt = (reg_rt + 1) % 32;
	}
    }
  /* Load/store multiple structure.  */
  else
    {
      uint8_t selem, esize, rpt, elements;
      uint8_t eindex, rindex;

      esize = 8 << size_bits;	/* Element size in bits.  */
      if (bit (aarch64_insn_r->aarch64_insn, 30))
	elements = 128 / esize;
      else
	elements = 64 / esize;

      /* RPT = register repeat count, SELEM = structure elements.  */
      switch (opcode_bits)
	{
	/*LD/ST4 (4 Registers).  */
	case 0:
	  rpt = 1;
	  selem = 4;
	  break;
	/*LD/ST1 (4 Registers).  */
	case 2:
	  rpt = 4;
	  selem = 1;
	  break;
	/*LD/ST3 (3 Registers).  */
	case 4:
	  rpt = 1;
	  selem = 3;
	  break;
	/*LD/ST1 (3 Registers).  */
	case 6:
	  rpt = 3;
	  selem = 1;
	  break;
	/*LD/ST1 (1 Register).  */
	case 7:
	  rpt = 1;
	  selem = 1;
	  break;
	/*LD/ST2 (2 Registers).  */
	case 8:
	  rpt = 1;
	  selem = 2;
	  break;
	/*LD/ST1 (2 Registers).  */
	case 10:
	  rpt = 2;
	  selem = 1;
	  break;
	default:
	  return AARCH64_RECORD_UNSUPPORTED;
	  break;
	}
      for (rindex = 0; rindex < rpt; rindex++)
	for (eindex = 0; eindex < elements; eindex++)
	  {
	    uint8_t reg_tt, sindex;
	    reg_tt = (reg_rt + rindex) % 32;
	    for (sindex = 0; sindex < selem; sindex++)
	      {
		/* Bit 22 set means a load (record registers), clear
		   means a store (record memory).  */
		if (bit (aarch64_insn_r->aarch64_insn, 22))
		  record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
		else
		  {
		    record_buf_mem[mem_index++] = esize / 8;
		    record_buf_mem[mem_index++] = address + addr_offset;
		  }
		addr_offset = addr_offset + (esize / 8);
		reg_tt = (reg_tt + 1) % 32;
	      }
	  }
    }

  /* Bit 23 set selects the post-index (write-back) form, which also
     updates the base register Rn.  */
  if (bit (aarch64_insn_r->aarch64_insn, 23))
    record_buf[reg_index++] = reg_rn;

  aarch64_insn_r->reg_rec_count = reg_index;
  aarch64_insn_r->mem_rec_count = mem_index / 2;
  MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
	     record_buf_mem);
  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}
3196
/* Record handler for load and store instructions.

   Decodes the load/store instruction currently held in AARCH64_INSN_R and
   records its side effects: for loads, the destination register numbers are
   pushed into record_buf; for stores, (length, address) pairs describing the
   memory about to be clobbered are pushed into record_buf_mem.  The results
   are handed off to the process-record machinery via REG_ALLOC/MEM_ALLOC.
   Returns AARCH64_RECORD_SUCCESS, or AARCH64_RECORD_UNKNOWN for encodings
   this handler cannot decode.  */

static unsigned int
aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
{
  uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
  uint8_t insn_bit23, insn_bit21;
  uint8_t opc, size_bits, ld_flag, vector_flag;
  uint32_t reg_rn, reg_rt, reg_rt2;
  uint64_t datasize, offset;
  uint32_t record_buf[8];      /* Registers written by this insn.  */
  uint64_t record_buf_mem[8];  /* Flattened (length, address) pairs.  */
  CORE_ADDR address;

  /* Fields common to the load/store instruction class.  */
  insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
  insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
  insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
  ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);     /* 1 = load, 0 = store
							   (most encodings).  */
  vector_flag = bit (aarch64_insn_r->aarch64_insn, 26); /* 1 = SIMD&FP reg.  */
  reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);   /* Transfer reg.  */
  reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);   /* Base address reg.  */
  reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14); /* Second reg (pairs).  */
  size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);

  /* Load/store exclusive.  */
  if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
    {
      if (record_debug)
	debug_printf ("Process record: load/store exclusive\n");

      if (ld_flag)
	{
	  record_buf[0] = reg_rt;
	  aarch64_insn_r->reg_rec_count = 1;
	  if (insn_bit21)
	    {
	      /* Load-exclusive pair also writes Rt2.  */
	      record_buf[1] = reg_rt2;
	      aarch64_insn_r->reg_rec_count = 2;
	    }
	}
      else
	{
	  if (insn_bit21)
	    datasize = (8 << size_bits) * 2;  /* Store-exclusive pair.  */
	  else
	    datasize = (8 << size_bits);
	  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
				      &address);
	  record_buf_mem[0] = datasize / 8;
	  record_buf_mem[1] = address;
	  aarch64_insn_r->mem_rec_count = 1;
	  if (!insn_bit23)
	    {
	      /* Save register rs (the exclusive-status result register).  */
	      record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	}
    }
  /* Load register (literal) instructions decoding.  */
  else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
    {
      if (record_debug)
	debug_printf ("Process record: load register (literal)\n");
      if (vector_flag)
	record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
      else
	record_buf[0] = reg_rt;
      aarch64_insn_r->reg_rec_count = 1;
    }
  /* All types of load/store pair instructions decoding.  */
  else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
    {
      if (record_debug)
	debug_printf ("Process record: load/store pair\n");

      if (ld_flag)
	{
	  if (vector_flag)
	    {
	      record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
	      record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
	    }
	  else
	    {
	      record_buf[0] = reg_rt;
	      record_buf[1] = reg_rt2;
	    }
	  aarch64_insn_r->reg_rec_count = 2;
	}
      else
	{
	  uint16_t imm7_off;
	  imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
	  /* For GPR pairs size_bits<1> selects 32/64-bit; scale down.  */
	  if (!vector_flag)
	    size_bits = size_bits >> 1;
	  datasize = 8 << (2 + size_bits);
	  /* imm7 is signed; take two's complement magnitude if negative.  */
	  offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
	  offset = offset << (2 + size_bits);
	  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
				      &address);
	  /* Apply the offset now unless this is a post-indexed form,
	     where the base is used unmodified for the access.  */
	  if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
	    {
	      if (imm7_off & 0x40)
		address = address - offset;
	      else
		address = address + offset;
	    }

	  /* Two consecutive memory regions, one per register stored.  */
	  record_buf_mem[0] = datasize / 8;
	  record_buf_mem[1] = address;
	  record_buf_mem[2] = datasize / 8;
	  record_buf_mem[3] = address + (datasize / 8);
	  aarch64_insn_r->mem_rec_count = 2;
	}
      /* Bit 23 set here indicates writeback of the base register.  */
      if (bit (aarch64_insn_r->aarch64_insn, 23))
	record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
    }
  /* Load/store register (unsigned immediate) instructions.  */
  else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
    {
      opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
      /* opc<1> clear: opc<0> distinguishes plain load (1) from store (0).
	 opc<1> set: sign-extending load, unless size==3 (undecoded here).  */
      if (!(opc >> 1))
	if (opc & 0x01)
	  ld_flag = 0x01;
	else
	  ld_flag = 0x0;
      else
	if (size_bits != 0x03)
	  ld_flag = 0x01;
	else
	  return AARCH64_RECORD_UNKNOWN;

      if (record_debug)
	{
	  debug_printf ("Process record: load/store (unsigned immediate):"
			" size %x V %d opc %x\n", size_bits, vector_flag,
			opc);
	}

      if (!ld_flag)
	{
	  offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
	  datasize = 8 << size_bits;
	  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
				      &address);
	  /* imm12 is scaled by the access size.  */
	  offset = offset << size_bits;
	  address = address + offset;

	  record_buf_mem[0] = datasize >> 3;
	  record_buf_mem[1] = address;
	  aarch64_insn_r->mem_rec_count = 1;
	}
      else
	{
	  if (vector_flag)
	    record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
	  else
	    record_buf[0] = reg_rt;
	  aarch64_insn_r->reg_rec_count = 1;
	}
    }
  /* Load/store register (register offset) instructions.  */
  else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
	   && insn_bits10_11 == 0x02 && insn_bit21)
    {
      if (record_debug)
	debug_printf ("Process record: load/store (register offset)\n");
      opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
      /* Same opc decoding as the unsigned-immediate form above.  */
      if (!(opc >> 1))
	if (opc & 0x01)
	  ld_flag = 0x01;
	else
	  ld_flag = 0x0;
      else
	if (size_bits != 0x03)
	  ld_flag = 0x01;
	else
	  return AARCH64_RECORD_UNKNOWN;

      if (!ld_flag)
	{
	  uint64_t reg_rm_val;
	  /* Offset comes from register Rm, optionally scaled (bit 12).
	     NOTE(review): the extend option (bits 13-15) is not applied
	     here, so the recorded address assumes a plain (possibly
	     shifted) 64-bit offset.  */
	  regcache_raw_read_unsigned (aarch64_insn_r->regcache,
		     bits (aarch64_insn_r->aarch64_insn, 16, 20), &reg_rm_val);
	  if (bit (aarch64_insn_r->aarch64_insn, 12))
	    offset = reg_rm_val << size_bits;
	  else
	    offset = reg_rm_val;
	  datasize = 8 << size_bits;
	  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
				      &address);
	  address = address + offset;
	  record_buf_mem[0] = datasize >> 3;
	  record_buf_mem[1] = address;
	  aarch64_insn_r->mem_rec_count = 1;
	}
      else
	{
	  if (vector_flag)
	    record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
	  else
	    record_buf[0] = reg_rt;
	  aarch64_insn_r->reg_rec_count = 1;
	}
    }
  /* Load/store register (immediate and unprivileged) instructions.  */
  else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
	   && !insn_bit21)
    {
      if (record_debug)
	{
	  debug_printf ("Process record: load/store "
			"(immediate and unprivileged)\n");
	}
      opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
      /* Same opc decoding as the unsigned-immediate form above.  */
      if (!(opc >> 1))
	if (opc & 0x01)
	  ld_flag = 0x01;
	else
	  ld_flag = 0x0;
      else
	if (size_bits != 0x03)
	  ld_flag = 0x01;
	else
	  return AARCH64_RECORD_UNKNOWN;

      if (!ld_flag)
	{
	  uint16_t imm9_off;
	  imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
	  /* imm9 is signed; take two's complement magnitude if negative.  */
	  offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
	  datasize = 8 << size_bits;
	  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
				      &address);
	  /* Post-indexed (bits10_11 == 1) uses the unmodified base.  */
	  if (insn_bits10_11 != 0x01)
	    {
	      if (imm9_off & 0x0100)
		address = address - offset;
	      else
		address = address + offset;
	    }
	  record_buf_mem[0] = datasize >> 3;
	  record_buf_mem[1] = address;
	  aarch64_insn_r->mem_rec_count = 1;
	}
      else
	{
	  if (vector_flag)
	    record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
	  else
	    record_buf[0] = reg_rt;
	  aarch64_insn_r->reg_rec_count = 1;
	}
      /* Pre/post-indexed forms write back the base register.  */
      if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
	record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
    }
  /* Advanced SIMD load/store instructions.  */
  else
    return aarch64_record_asimd_load_store (aarch64_insn_r);

  MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
	     record_buf_mem);
  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}
3466
3467/* Record handler for data processing SIMD and floating point instructions. */
3468
3469static unsigned int
3470aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
3471{
3472 uint8_t insn_bit21, opcode, rmode, reg_rd;
3473 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
3474 uint8_t insn_bits11_14;
3475 uint32_t record_buf[2];
3476
3477 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3478 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3479 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3480 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3481 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
3482 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
3483 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
3484 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3485 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3486
3487 if (record_debug)
b277c936 3488 debug_printf ("Process record: data processing SIMD/FP: ");
99afc88b
OJ
3489
3490 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
3491 {
3492 /* Floating point - fixed point conversion instructions. */
3493 if (!insn_bit21)
3494 {
3495 if (record_debug)
b277c936 3496 debug_printf ("FP - fixed point conversion");
99afc88b
OJ
3497
3498 if ((opcode >> 1) == 0x0 && rmode == 0x03)
3499 record_buf[0] = reg_rd;
3500 else
3501 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3502 }
3503 /* Floating point - conditional compare instructions. */
3504 else if (insn_bits10_11 == 0x01)
3505 {
3506 if (record_debug)
b277c936 3507 debug_printf ("FP - conditional compare");
99afc88b
OJ
3508
3509 record_buf[0] = AARCH64_CPSR_REGNUM;
3510 }
3511 /* Floating point - data processing (2-source) and
3512 conditional select instructions. */
3513 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
3514 {
3515 if (record_debug)
b277c936 3516 debug_printf ("FP - DP (2-source)");
99afc88b
OJ
3517
3518 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3519 }
3520 else if (insn_bits10_11 == 0x00)
3521 {
3522 /* Floating point - immediate instructions. */
3523 if ((insn_bits12_15 & 0x01) == 0x01
3524 || (insn_bits12_15 & 0x07) == 0x04)
3525 {
3526 if (record_debug)
b277c936 3527 debug_printf ("FP - immediate");
99afc88b
OJ
3528 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3529 }
3530 /* Floating point - compare instructions. */
3531 else if ((insn_bits12_15 & 0x03) == 0x02)
3532 {
3533 if (record_debug)
b277c936 3534 debug_printf ("FP - immediate");
99afc88b
OJ
3535 record_buf[0] = AARCH64_CPSR_REGNUM;
3536 }
3537 /* Floating point - integer conversions instructions. */
f62fce35 3538 else if (insn_bits12_15 == 0x00)
99afc88b
OJ
3539 {
3540 /* Convert float to integer instruction. */
3541 if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
3542 {
3543 if (record_debug)
b277c936 3544 debug_printf ("float to int conversion");
99afc88b
OJ
3545
3546 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3547 }
3548 /* Convert integer to float instruction. */
3549 else if ((opcode >> 1) == 0x01 && !rmode)
3550 {
3551 if (record_debug)
b277c936 3552 debug_printf ("int to float conversion");
99afc88b
OJ
3553
3554 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3555 }
3556 /* Move float to integer instruction. */
3557 else if ((opcode >> 1) == 0x03)
3558 {
3559 if (record_debug)
b277c936 3560 debug_printf ("move float to int");
99afc88b
OJ
3561
3562 if (!(opcode & 0x01))
3563 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3564 else
3565 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3566 }
f62fce35
YQ
3567 else
3568 return AARCH64_RECORD_UNKNOWN;
99afc88b 3569 }
f62fce35
YQ
3570 else
3571 return AARCH64_RECORD_UNKNOWN;
99afc88b 3572 }
f62fce35
YQ
3573 else
3574 return AARCH64_RECORD_UNKNOWN;
99afc88b
OJ
3575 }
3576 else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
3577 {
3578 if (record_debug)
b277c936 3579 debug_printf ("SIMD copy");
99afc88b
OJ
3580
3581 /* Advanced SIMD copy instructions. */
3582 if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
3583 && !bit (aarch64_insn_r->aarch64_insn, 15)
3584 && bit (aarch64_insn_r->aarch64_insn, 10))
3585 {
3586 if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
3587 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3588 else
3589 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3590 }
3591 else
3592 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3593 }
3594 /* All remaining floating point or advanced SIMD instructions. */
3595 else
3596 {
3597 if (record_debug)
b277c936 3598 debug_printf ("all remain");
99afc88b
OJ
3599
3600 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3601 }
3602
3603 if (record_debug)
b277c936 3604 debug_printf ("\n");
99afc88b
OJ
3605
3606 aarch64_insn_r->reg_rec_count++;
3607 gdb_assert (aarch64_insn_r->reg_rec_count == 1);
3608 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3609 record_buf);
3610 return AARCH64_RECORD_SUCCESS;
3611}
3612
3613/* Decodes insns type and invokes its record handler. */
3614
3615static unsigned int
3616aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
3617{
3618 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
3619
3620 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
3621 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
3622 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
3623 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
3624
3625 /* Data processing - immediate instructions. */
3626 if (!ins_bit26 && !ins_bit27 && ins_bit28)
3627 return aarch64_record_data_proc_imm (aarch64_insn_r);
3628
3629 /* Branch, exception generation and system instructions. */
3630 if (ins_bit26 && !ins_bit27 && ins_bit28)
3631 return aarch64_record_branch_except_sys (aarch64_insn_r);
3632
3633 /* Load and store instructions. */
3634 if (!ins_bit25 && ins_bit27)
3635 return aarch64_record_load_store (aarch64_insn_r);
3636
3637 /* Data processing - register instructions. */
3638 if (ins_bit25 && !ins_bit26 && ins_bit27)
3639 return aarch64_record_data_proc_reg (aarch64_insn_r);
3640
3641 /* Data processing - SIMD and floating point instructions. */
3642 if (ins_bit25 && ins_bit26 && ins_bit27)
3643 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
3644
3645 return AARCH64_RECORD_UNSUPPORTED;
3646}
3647
3648/* Cleans up local record registers and memory allocations. */
3649
3650static void
3651deallocate_reg_mem (insn_decode_record *record)
3652{
3653 xfree (record->aarch64_regs);
3654 xfree (record->aarch64_mems);
3655}
3656
3657/* Parse the current instruction and record the values of the registers and
3658 memory that will be changed in current instruction to record_arch_list
3659 return -1 if something is wrong. */
3660
3661int
3662aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
3663 CORE_ADDR insn_addr)
3664{
3665 uint32_t rec_no = 0;
3666 uint8_t insn_size = 4;
3667 uint32_t ret = 0;
3668 ULONGEST t_bit = 0, insn_id = 0;
3669 gdb_byte buf[insn_size];
3670 insn_decode_record aarch64_record;
3671
3672 memset (&buf[0], 0, insn_size);
3673 memset (&aarch64_record, 0, sizeof (insn_decode_record));
3674 target_read_memory (insn_addr, &buf[0], insn_size);
3675 aarch64_record.aarch64_insn
3676 = (uint32_t) extract_unsigned_integer (&buf[0],
3677 insn_size,
3678 gdbarch_byte_order (gdbarch));
3679 aarch64_record.regcache = regcache;
3680 aarch64_record.this_addr = insn_addr;
3681 aarch64_record.gdbarch = gdbarch;
3682
3683 ret = aarch64_record_decode_insn_handler (&aarch64_record);
3684 if (ret == AARCH64_RECORD_UNSUPPORTED)
3685 {
3686 printf_unfiltered (_("Process record does not support instruction "
3687 "0x%0x at address %s.\n"),
3688 aarch64_record.aarch64_insn,
3689 paddress (gdbarch, insn_addr));
3690 ret = -1;
3691 }
3692
3693 if (0 == ret)
3694 {
3695 /* Record registers. */
3696 record_full_arch_list_add_reg (aarch64_record.regcache,
3697 AARCH64_PC_REGNUM);
3698 /* Always record register CPSR. */
3699 record_full_arch_list_add_reg (aarch64_record.regcache,
3700 AARCH64_CPSR_REGNUM);
3701 if (aarch64_record.aarch64_regs)
3702 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
3703 if (record_full_arch_list_add_reg (aarch64_record.regcache,
3704 aarch64_record.aarch64_regs[rec_no]))
3705 ret = -1;
3706
3707 /* Record memories. */
3708 if (aarch64_record.aarch64_mems)
3709 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
3710 if (record_full_arch_list_add_mem
3711 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
3712 aarch64_record.aarch64_mems[rec_no].len))
3713 ret = -1;
3714
3715 if (record_full_arch_list_add_end ())
3716 ret = -1;
3717 }
3718
3719 deallocate_reg_mem (&aarch64_record);
3720 return ret;
3721}
This page took 0.420522 seconds and 4 git commands to generate.