PR c++/18141, c++/18417.
[deliverable/binutils-gdb.git] / gdb / aarch64-tdep.c
CommitLineData
07b287a0
MS
1/* Common target dependent code for GDB on AArch64 systems.
2
32d0add0 3 Copyright (C) 2009-2015 Free Software Foundation, Inc.
07b287a0
MS
4 Contributed by ARM Ltd.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21#include "defs.h"
22
23#include "frame.h"
24#include "inferior.h"
25#include "gdbcmd.h"
26#include "gdbcore.h"
07b287a0
MS
27#include "dis-asm.h"
28#include "regcache.h"
29#include "reggroups.h"
30#include "doublest.h"
31#include "value.h"
32#include "arch-utils.h"
33#include "osabi.h"
34#include "frame-unwind.h"
35#include "frame-base.h"
36#include "trad-frame.h"
37#include "objfiles.h"
38#include "dwarf2-frame.h"
39#include "gdbtypes.h"
40#include "prologue-value.h"
41#include "target-descriptions.h"
42#include "user-regs.h"
43#include "language.h"
44#include "infcall.h"
45
46#include "aarch64-tdep.h"
47
48#include "elf-bfd.h"
49#include "elf/aarch64.h"
50
07b287a0
MS
51#include "vec.h"
52
99afc88b
OJ
53#include "record.h"
54#include "record-full.h"
55
07b287a0 56#include "features/aarch64.c"
07b287a0
MS
57
58/* Pseudo register base numbers. */
59#define AARCH64_Q0_REGNUM 0
60#define AARCH64_D0_REGNUM (AARCH64_Q0_REGNUM + 32)
61#define AARCH64_S0_REGNUM (AARCH64_D0_REGNUM + 32)
62#define AARCH64_H0_REGNUM (AARCH64_S0_REGNUM + 32)
63#define AARCH64_B0_REGNUM (AARCH64_H0_REGNUM + 32)
64
/* The standard register names, and all the valid aliases for them.
   Each entry maps a user-visible alias onto the raw register number
   GDB uses internally.  Note the "wN" names alias the same raw
   register numbers as the corresponding "xN" registers.  */
static const struct
{
  /* Alias as the user may type it, e.g. "fp" or "w0".  */
  const char *const name;
  /* Raw register number the alias resolves to.  */
  int regnum;
} aarch64_register_aliases[] =
{
  /* 64-bit register names.  */
  {"fp", AARCH64_FP_REGNUM},
  {"lr", AARCH64_LR_REGNUM},
  {"sp", AARCH64_SP_REGNUM},

  /* 32-bit register names.  */
  {"w0", AARCH64_X0_REGNUM + 0},
  {"w1", AARCH64_X0_REGNUM + 1},
  {"w2", AARCH64_X0_REGNUM + 2},
  {"w3", AARCH64_X0_REGNUM + 3},
  {"w4", AARCH64_X0_REGNUM + 4},
  {"w5", AARCH64_X0_REGNUM + 5},
  {"w6", AARCH64_X0_REGNUM + 6},
  {"w7", AARCH64_X0_REGNUM + 7},
  {"w8", AARCH64_X0_REGNUM + 8},
  {"w9", AARCH64_X0_REGNUM + 9},
  {"w10", AARCH64_X0_REGNUM + 10},
  {"w11", AARCH64_X0_REGNUM + 11},
  {"w12", AARCH64_X0_REGNUM + 12},
  {"w13", AARCH64_X0_REGNUM + 13},
  {"w14", AARCH64_X0_REGNUM + 14},
  {"w15", AARCH64_X0_REGNUM + 15},
  {"w16", AARCH64_X0_REGNUM + 16},
  {"w17", AARCH64_X0_REGNUM + 17},
  {"w18", AARCH64_X0_REGNUM + 18},
  {"w19", AARCH64_X0_REGNUM + 19},
  {"w20", AARCH64_X0_REGNUM + 20},
  {"w21", AARCH64_X0_REGNUM + 21},
  {"w22", AARCH64_X0_REGNUM + 22},
  {"w23", AARCH64_X0_REGNUM + 23},
  {"w24", AARCH64_X0_REGNUM + 24},
  {"w25", AARCH64_X0_REGNUM + 25},
  {"w26", AARCH64_X0_REGNUM + 26},
  {"w27", AARCH64_X0_REGNUM + 27},
  {"w28", AARCH64_X0_REGNUM + 28},
  {"w29", AARCH64_X0_REGNUM + 29},
  {"w30", AARCH64_X0_REGNUM + 30},

  /* specials: ip0/ip1 are the procedure-call scratch registers,
     aliases for x16/x17.  */
  {"ip0", AARCH64_X0_REGNUM + 16},
  {"ip1", AARCH64_X0_REGNUM + 17}
};
114
/* The required core 'R' registers.  Used to build the target
   description for the general-purpose register set.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM!  */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  "pc", "cpsr"
};
130
/* The FP/SIMD 'V' registers, followed by the floating-point status
   and control registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM!  */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  "fpsr",
  "fpcr"
};
147
/* AArch64 prologue cache structure.  Holds everything the unwinders
   learn from scanning a function's prologue.  */
struct aarch64_prologue_cache
{
  /* The stack pointer at the time this frame was created; i.e. the
     caller's stack pointer when this function was called.  It is used
     to identify this frame.  */
  CORE_ADDR prev_sp;

  /* The frame base for this frame is just prev_sp - frame size.
     FRAMESIZE is the distance from the frame pointer to the
     initial stack pointer.  */
  int framesize;

  /* The register used to hold the frame pointer for this frame, or
     -1 if no frame could be identified.  */
  int framereg;

  /* Saved register offsets (frame-relative until converted to
     absolute addresses by the cache builder).  */
  struct trad_frame_saved_reg *saved_regs;
};
167
/* Toggle this file's internal debugging dump.  Non-zero enables the
   "decode:" trace output in the instruction decoders below.  */
static int aarch64_debug;

/* "show" callback for the aarch64 debug setting; VALUE is the
   rendered setting string supplied by the command machinery.  */

static void
show_aarch64_debug (struct ui_file *file, int from_tty,
		    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
}
177
/* Extract a signed value from a bit field within an instruction
   encoding.

   INSN is the instruction opcode.

   WIDTH specifies the width of the bit field to extract (in bits);
   it must be in the range 1..32, with OFFSET + WIDTH <= 32.

   OFFSET specifies the least significant bit of the field where bits
   are numbered zero counting from least to most significant.  */

static int32_t
extract_signed_bitfield (uint32_t insn, unsigned width, unsigned offset)
{
  /* Isolate the WIDTH-bit field.  All shifting happens in the
     unsigned domain: the previous implementation left-shifted a
     signed value, which is undefined behaviour when the shifted
     value is negative or the sign bit is shifted into (C11 6.5.7).  */
  uint32_t field = (insn >> offset) & (0xffffffffu >> (32 - width));
  /* Sign-extend by toggling and subtracting the field's sign bit;
     this is well-defined for all WIDTHs up to 32.  */
  uint32_t sign = 1u << (width - 1);

  return (int32_t) ((field ^ sign) - sign);
}
196
/* Test whether the bits of INSN selected by MASK equal PATTERN.

   INSN is the instruction opcode.
   MASK selects the opcode bits to compare against PATTERN.

   Return non-zero when the masked bits match, zero otherwise.  */

static int
decode_masked_match (uint32_t insn, uint32_t mask, uint32_t pattern)
{
  uint32_t selected = insn & mask;

  return selected == pattern ? 1 : 0;
}
210
211/* Decode an opcode if it represents an immediate ADD or SUB instruction.
212
213 ADDR specifies the address of the opcode.
214 INSN specifies the opcode to test.
215 RD receives the 'rd' field from the decoded instruction.
216 RN receives the 'rn' field from the decoded instruction.
217
218 Return 1 if the opcodes matches and is decoded, otherwise 0. */
219static int
220decode_add_sub_imm (CORE_ADDR addr, uint32_t insn, unsigned *rd, unsigned *rn,
221 int32_t *imm)
222{
223 if ((insn & 0x9f000000) == 0x91000000)
224 {
225 unsigned shift;
226 unsigned op_is_sub;
227
228 *rd = (insn >> 0) & 0x1f;
229 *rn = (insn >> 5) & 0x1f;
230 *imm = (insn >> 10) & 0xfff;
231 shift = (insn >> 22) & 0x3;
232 op_is_sub = (insn >> 30) & 0x1;
233
234 switch (shift)
235 {
236 case 0:
237 break;
238 case 1:
239 *imm <<= 12;
240 break;
241 default:
242 /* UNDEFINED */
243 return 0;
244 }
245
246 if (op_is_sub)
247 *imm = -*imm;
248
249 if (aarch64_debug)
250 fprintf_unfiltered (gdb_stdlog,
251 "decode: 0x%s 0x%x add x%u, x%u, #%d\n",
252 core_addr_to_string_nz (addr), insn, *rd, *rn,
253 *imm);
254 return 1;
255 }
256 return 0;
257}
258
259/* Decode an opcode if it represents an ADRP instruction.
260
261 ADDR specifies the address of the opcode.
262 INSN specifies the opcode to test.
263 RD receives the 'rd' field from the decoded instruction.
264
265 Return 1 if the opcodes matches and is decoded, otherwise 0. */
266
267static int
268decode_adrp (CORE_ADDR addr, uint32_t insn, unsigned *rd)
269{
270 if (decode_masked_match (insn, 0x9f000000, 0x90000000))
271 {
272 *rd = (insn >> 0) & 0x1f;
273
274 if (aarch64_debug)
275 fprintf_unfiltered (gdb_stdlog,
276 "decode: 0x%s 0x%x adrp x%u, #?\n",
277 core_addr_to_string_nz (addr), insn, *rd);
278 return 1;
279 }
280 return 0;
281}
282
283/* Decode an opcode if it represents an branch immediate or branch
284 and link immediate instruction.
285
286 ADDR specifies the address of the opcode.
287 INSN specifies the opcode to test.
288 LINK receives the 'link' bit from the decoded instruction.
289 OFFSET receives the immediate offset from the decoded instruction.
290
291 Return 1 if the opcodes matches and is decoded, otherwise 0. */
292
293static int
294decode_b (CORE_ADDR addr, uint32_t insn, unsigned *link, int32_t *offset)
295{
296 /* b 0001 01ii iiii iiii iiii iiii iiii iiii */
297 /* bl 1001 01ii iiii iiii iiii iiii iiii iiii */
298 if (decode_masked_match (insn, 0x7c000000, 0x14000000))
299 {
300 *link = insn >> 31;
301 *offset = extract_signed_bitfield (insn, 26, 0) << 2;
302
303 if (aarch64_debug)
304 fprintf_unfiltered (gdb_stdlog,
305 "decode: 0x%s 0x%x %s 0x%s\n",
306 core_addr_to_string_nz (addr), insn,
307 *link ? "bl" : "b",
308 core_addr_to_string_nz (addr + *offset));
309
310 return 1;
311 }
312 return 0;
313}
314
315/* Decode an opcode if it represents a conditional branch instruction.
316
317 ADDR specifies the address of the opcode.
318 INSN specifies the opcode to test.
319 COND receives the branch condition field from the decoded
320 instruction.
321 OFFSET receives the immediate offset from the decoded instruction.
322
323 Return 1 if the opcodes matches and is decoded, otherwise 0. */
324
325static int
326decode_bcond (CORE_ADDR addr, uint32_t insn, unsigned *cond, int32_t *offset)
327{
328 if (decode_masked_match (insn, 0xfe000000, 0x54000000))
329 {
330 *cond = (insn >> 0) & 0xf;
331 *offset = extract_signed_bitfield (insn, 19, 5) << 2;
332
333 if (aarch64_debug)
334 fprintf_unfiltered (gdb_stdlog,
335 "decode: 0x%s 0x%x b<%u> 0x%s\n",
336 core_addr_to_string_nz (addr), insn, *cond,
337 core_addr_to_string_nz (addr + *offset));
338 return 1;
339 }
340 return 0;
341}
342
343/* Decode an opcode if it represents a branch via register instruction.
344
345 ADDR specifies the address of the opcode.
346 INSN specifies the opcode to test.
347 LINK receives the 'link' bit from the decoded instruction.
348 RN receives the 'rn' field from the decoded instruction.
349
350 Return 1 if the opcodes matches and is decoded, otherwise 0. */
351
352static int
353decode_br (CORE_ADDR addr, uint32_t insn, unsigned *link, unsigned *rn)
354{
355 /* 8 4 0 6 2 8 4 0 */
356 /* blr 110101100011111100000000000rrrrr */
357 /* br 110101100001111100000000000rrrrr */
358 if (decode_masked_match (insn, 0xffdffc1f, 0xd61f0000))
359 {
360 *link = (insn >> 21) & 1;
361 *rn = (insn >> 5) & 0x1f;
362
363 if (aarch64_debug)
364 fprintf_unfiltered (gdb_stdlog,
365 "decode: 0x%s 0x%x %s 0x%x\n",
366 core_addr_to_string_nz (addr), insn,
367 *link ? "blr" : "br", *rn);
368
369 return 1;
370 }
371 return 0;
372}
373
374/* Decode an opcode if it represents a CBZ or CBNZ instruction.
375
376 ADDR specifies the address of the opcode.
377 INSN specifies the opcode to test.
378 IS64 receives the 'sf' field from the decoded instruction.
379 OP receives the 'op' field from the decoded instruction.
380 RN receives the 'rn' field from the decoded instruction.
381 OFFSET receives the 'imm19' field from the decoded instruction.
382
383 Return 1 if the opcodes matches and is decoded, otherwise 0. */
384
385static int
386decode_cb (CORE_ADDR addr,
387 uint32_t insn, int *is64, unsigned *op, unsigned *rn,
388 int32_t *offset)
389{
390 if (decode_masked_match (insn, 0x7e000000, 0x34000000))
391 {
392 /* cbz T011 010o iiii iiii iiii iiii iiir rrrr */
393 /* cbnz T011 010o iiii iiii iiii iiii iiir rrrr */
394
395 *rn = (insn >> 0) & 0x1f;
396 *is64 = (insn >> 31) & 0x1;
397 *op = (insn >> 24) & 0x1;
398 *offset = extract_signed_bitfield (insn, 19, 5) << 2;
399
400 if (aarch64_debug)
401 fprintf_unfiltered (gdb_stdlog,
402 "decode: 0x%s 0x%x %s 0x%s\n",
403 core_addr_to_string_nz (addr), insn,
404 *op ? "cbnz" : "cbz",
405 core_addr_to_string_nz (addr + *offset));
406 return 1;
407 }
408 return 0;
409}
410
411/* Decode an opcode if it represents a ERET instruction.
412
413 ADDR specifies the address of the opcode.
414 INSN specifies the opcode to test.
415
416 Return 1 if the opcodes matches and is decoded, otherwise 0. */
417
418static int
419decode_eret (CORE_ADDR addr, uint32_t insn)
420{
421 /* eret 1101 0110 1001 1111 0000 0011 1110 0000 */
422 if (insn == 0xd69f03e0)
423 {
424 if (aarch64_debug)
425 fprintf_unfiltered (gdb_stdlog, "decode: 0x%s 0x%x eret\n",
426 core_addr_to_string_nz (addr), insn);
427 return 1;
428 }
429 return 0;
430}
431
432/* Decode an opcode if it represents a MOVZ instruction.
433
434 ADDR specifies the address of the opcode.
435 INSN specifies the opcode to test.
436 RD receives the 'rd' field from the decoded instruction.
437
438 Return 1 if the opcodes matches and is decoded, otherwise 0. */
439
440static int
441decode_movz (CORE_ADDR addr, uint32_t insn, unsigned *rd)
442{
443 if (decode_masked_match (insn, 0xff800000, 0x52800000))
444 {
445 *rd = (insn >> 0) & 0x1f;
446
447 if (aarch64_debug)
448 fprintf_unfiltered (gdb_stdlog,
449 "decode: 0x%s 0x%x movz x%u, #?\n",
450 core_addr_to_string_nz (addr), insn, *rd);
451 return 1;
452 }
453 return 0;
454}
455
456/* Decode an opcode if it represents a ORR (shifted register)
457 instruction.
458
459 ADDR specifies the address of the opcode.
460 INSN specifies the opcode to test.
461 RD receives the 'rd' field from the decoded instruction.
462 RN receives the 'rn' field from the decoded instruction.
463 RM receives the 'rm' field from the decoded instruction.
464 IMM receives the 'imm6' field from the decoded instruction.
465
466 Return 1 if the opcodes matches and is decoded, otherwise 0. */
467
468static int
469decode_orr_shifted_register_x (CORE_ADDR addr,
470 uint32_t insn, unsigned *rd, unsigned *rn,
471 unsigned *rm, int32_t *imm)
472{
473 if (decode_masked_match (insn, 0xff200000, 0xaa000000))
474 {
475 *rd = (insn >> 0) & 0x1f;
476 *rn = (insn >> 5) & 0x1f;
477 *rm = (insn >> 16) & 0x1f;
478 *imm = (insn >> 10) & 0x3f;
479
480 if (aarch64_debug)
481 fprintf_unfiltered (gdb_stdlog,
482 "decode: 0x%s 0x%x orr x%u, x%u, x%u, #%u\n",
483 core_addr_to_string_nz (addr), insn, *rd,
484 *rn, *rm, *imm);
485 return 1;
486 }
487 return 0;
488}
489
490/* Decode an opcode if it represents a RET instruction.
491
492 ADDR specifies the address of the opcode.
493 INSN specifies the opcode to test.
494 RN receives the 'rn' field from the decoded instruction.
495
496 Return 1 if the opcodes matches and is decoded, otherwise 0. */
497
498static int
499decode_ret (CORE_ADDR addr, uint32_t insn, unsigned *rn)
500{
501 if (decode_masked_match (insn, 0xfffffc1f, 0xd65f0000))
502 {
503 *rn = (insn >> 5) & 0x1f;
504 if (aarch64_debug)
505 fprintf_unfiltered (gdb_stdlog,
506 "decode: 0x%s 0x%x ret x%u\n",
507 core_addr_to_string_nz (addr), insn, *rn);
508 return 1;
509 }
510 return 0;
511}
512
513/* Decode an opcode if it represents the following instruction:
514 STP rt, rt2, [rn, #imm]
515
516 ADDR specifies the address of the opcode.
517 INSN specifies the opcode to test.
518 RT1 receives the 'rt' field from the decoded instruction.
519 RT2 receives the 'rt2' field from the decoded instruction.
520 RN receives the 'rn' field from the decoded instruction.
521 IMM receives the 'imm' field from the decoded instruction.
522
523 Return 1 if the opcodes matches and is decoded, otherwise 0. */
524
525static int
526decode_stp_offset (CORE_ADDR addr,
527 uint32_t insn,
528 unsigned *rt1, unsigned *rt2, unsigned *rn, int32_t *imm)
529{
530 if (decode_masked_match (insn, 0xffc00000, 0xa9000000))
531 {
532 *rt1 = (insn >> 0) & 0x1f;
533 *rn = (insn >> 5) & 0x1f;
534 *rt2 = (insn >> 10) & 0x1f;
535 *imm = extract_signed_bitfield (insn, 7, 15);
536 *imm <<= 3;
537
538 if (aarch64_debug)
539 fprintf_unfiltered (gdb_stdlog,
540 "decode: 0x%s 0x%x stp x%u, x%u, [x%u + #%d]\n",
541 core_addr_to_string_nz (addr), insn,
542 *rt1, *rt2, *rn, *imm);
543 return 1;
544 }
545 return 0;
546}
547
548/* Decode an opcode if it represents the following instruction:
549 STP rt, rt2, [rn, #imm]!
550
551 ADDR specifies the address of the opcode.
552 INSN specifies the opcode to test.
553 RT1 receives the 'rt' field from the decoded instruction.
554 RT2 receives the 'rt2' field from the decoded instruction.
555 RN receives the 'rn' field from the decoded instruction.
556 IMM receives the 'imm' field from the decoded instruction.
557
558 Return 1 if the opcodes matches and is decoded, otherwise 0. */
559
560static int
561decode_stp_offset_wb (CORE_ADDR addr,
562 uint32_t insn,
563 unsigned *rt1, unsigned *rt2, unsigned *rn,
564 int32_t *imm)
565{
566 if (decode_masked_match (insn, 0xffc00000, 0xa9800000))
567 {
568 *rt1 = (insn >> 0) & 0x1f;
569 *rn = (insn >> 5) & 0x1f;
570 *rt2 = (insn >> 10) & 0x1f;
571 *imm = extract_signed_bitfield (insn, 7, 15);
572 *imm <<= 3;
573
574 if (aarch64_debug)
575 fprintf_unfiltered (gdb_stdlog,
576 "decode: 0x%s 0x%x stp x%u, x%u, [x%u + #%d]!\n",
577 core_addr_to_string_nz (addr), insn,
578 *rt1, *rt2, *rn, *imm);
579 return 1;
580 }
581 return 0;
582}
583
584/* Decode an opcode if it represents the following instruction:
585 STUR rt, [rn, #imm]
586
587 ADDR specifies the address of the opcode.
588 INSN specifies the opcode to test.
589 IS64 receives size field from the decoded instruction.
590 RT receives the 'rt' field from the decoded instruction.
591 RN receives the 'rn' field from the decoded instruction.
592 IMM receives the 'imm' field from the decoded instruction.
593
594 Return 1 if the opcodes matches and is decoded, otherwise 0. */
595
596static int
597decode_stur (CORE_ADDR addr, uint32_t insn, int *is64, unsigned *rt,
598 unsigned *rn, int32_t *imm)
599{
600 if (decode_masked_match (insn, 0xbfe00c00, 0xb8000000))
601 {
602 *is64 = (insn >> 30) & 1;
603 *rt = (insn >> 0) & 0x1f;
604 *rn = (insn >> 5) & 0x1f;
605 *imm = extract_signed_bitfield (insn, 9, 12);
606
607 if (aarch64_debug)
608 fprintf_unfiltered (gdb_stdlog,
609 "decode: 0x%s 0x%x stur %c%u, [x%u + #%d]\n",
610 core_addr_to_string_nz (addr), insn,
611 *is64 ? 'x' : 'w', *rt, *rn, *imm);
612 return 1;
613 }
614 return 0;
615}
616
617/* Decode an opcode if it represents a TB or TBNZ instruction.
618
619 ADDR specifies the address of the opcode.
620 INSN specifies the opcode to test.
621 OP receives the 'op' field from the decoded instruction.
622 BIT receives the bit position field from the decoded instruction.
623 RT receives 'rt' field from the decoded instruction.
624 IMM receives 'imm' field from the decoded instruction.
625
626 Return 1 if the opcodes matches and is decoded, otherwise 0. */
627
628static int
629decode_tb (CORE_ADDR addr,
630 uint32_t insn, unsigned *op, unsigned *bit, unsigned *rt,
631 int32_t *imm)
632{
633 if (decode_masked_match (insn, 0x7e000000, 0x36000000))
634 {
635 /* tbz b011 0110 bbbb biii iiii iiii iiir rrrr */
636 /* tbnz B011 0111 bbbb biii iiii iiii iiir rrrr */
637
638 *rt = (insn >> 0) & 0x1f;
639 *op = insn & (1 << 24);
640 *bit = ((insn >> (31 - 4)) & 0x20) | ((insn >> 19) & 0x1f);
641 *imm = extract_signed_bitfield (insn, 14, 5) << 2;
642
643 if (aarch64_debug)
644 fprintf_unfiltered (gdb_stdlog,
645 "decode: 0x%s 0x%x %s x%u, #%u, 0x%s\n",
646 core_addr_to_string_nz (addr), insn,
647 *op ? "tbnz" : "tbz", *rt, *bit,
648 core_addr_to_string_nz (addr + *imm));
649 return 1;
650 }
651 return 0;
652}
653
/* Analyze a prologue, looking for a recognizable stack frame
   and frame pointer.  Scan until we encounter a store that could
   clobber the stack frame unexpectedly, or an unknown instruction.

   GDBARCH is the architecture in use.
   START and LIMIT bound the instruction range to scan.
   CACHE, if non-NULL, receives the discovered frame register, frame
   size and frame-relative saved-register offsets.

   Returns the address of the first instruction past the recognized
   prologue.  */

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
			  CORE_ADDR start, CORE_ADDR limit,
			  struct aarch64_prologue_cache *cache)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  int i;
  /* Symbolic (prologue-value) state of each X register as we step
     through the prologue.  */
  pv_t regs[AARCH64_X_REGISTER_COUNT];
  struct pv_area *stack;
  struct cleanup *back_to;

  /* Every register starts out holding its entry value.  */
  for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
    regs[i] = pv_register (i, 0);
  stack = make_pv_area (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));
  back_to = make_cleanup_free_pv_area (stack);

  /* Walk forward one 4-byte instruction at a time.  */
  for (; start < limit; start += 4)
    {
      uint32_t insn;
      unsigned rd;
      unsigned rn;
      unsigned rm;
      unsigned rt;
      unsigned rt1;
      unsigned rt2;
      int op_is_sub;	/* NOTE(review): declared but never used here.  */
      int32_t imm;
      unsigned cond;
      int is64;
      unsigned is_link;
      unsigned op;
      unsigned bit;
      int32_t offset;

      insn = read_memory_unsigned_integer (start, 4, byte_order_for_code);

      if (decode_add_sub_imm (start, insn, &rd, &rn, &imm))
	regs[rd] = pv_add_constant (regs[rn], imm);
      else if (decode_adrp (start, insn, &rd))
	regs[rd] = pv_unknown ();
      else if (decode_b (start, insn, &is_link, &offset))
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (decode_bcond (start, insn, &cond, &offset))
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (decode_br (start, insn, &is_link, &rn))
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (decode_cb (start, insn, &is64, &op, &rn, &offset))
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (decode_eret (start, insn))
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (decode_movz (start, insn, &rd))
	regs[rd] = pv_unknown ();
      else
	if (decode_orr_shifted_register_x (start, insn, &rd, &rn, &rm, &imm))
	{
	  /* ORR xd, xzr, xm with no shift is a plain register move;
	     any other ORR defeats the analysis.  */
	  if (imm == 0 && rn == 31)
	    regs[rd] = regs[rm];
	  else
	    {
	      if (aarch64_debug)
		fprintf_unfiltered
		  (gdb_stdlog,
		   "aarch64: prologue analysis gave up addr=0x%s "
		   "opcode=0x%x (orr x register)\n",
		   core_addr_to_string_nz (start),
		   insn);
	      break;
	    }
	}
      else if (decode_ret (start, insn, &rn))
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else if (decode_stur (start, insn, &is64, &rt, &rn, &offset))
	{
	  /* Record the store in the symbolic stack area.  */
	  pv_area_store (stack, pv_add_constant (regs[rn], offset),
			 is64 ? 8 : 4, regs[rt]);
	}
      else if (decode_stp_offset (start, insn, &rt1, &rt2, &rn, &imm))
	{
	  /* If recording this store would invalidate the store area
	     (perhaps because rn is not known) then we should abandon
	     further prologue analysis.  */
	  if (pv_area_store_would_trash (stack,
					 pv_add_constant (regs[rn], imm)))
	    break;

	  if (pv_area_store_would_trash (stack,
					 pv_add_constant (regs[rn], imm + 8)))
	    break;

	  pv_area_store (stack, pv_add_constant (regs[rn], imm), 8,
			 regs[rt1]);
	  pv_area_store (stack, pv_add_constant (regs[rn], imm + 8), 8,
			 regs[rt2]);
	}
      else if (decode_stp_offset_wb (start, insn, &rt1, &rt2, &rn, &imm))
	{
	  /* If recording this store would invalidate the store area
	     (perhaps because rn is not known) then we should abandon
	     further prologue analysis.  */
	  if (pv_area_store_would_trash (stack,
					 pv_add_constant (regs[rn], imm)))
	    break;

	  if (pv_area_store_would_trash (stack,
					 pv_add_constant (regs[rn], imm + 8)))
	    break;

	  pv_area_store (stack, pv_add_constant (regs[rn], imm), 8,
			 regs[rt1]);
	  pv_area_store (stack, pv_add_constant (regs[rn], imm + 8), 8,
			 regs[rt2]);
	  /* Pre-index writeback: the base register is updated too.  */
	  regs[rn] = pv_add_constant (regs[rn], imm);
	}
      else if (decode_tb (start, insn, &op, &bit, &rn, &offset))
	{
	  /* Stop analysis on branch.  */
	  break;
	}
      else
	{
	  if (aarch64_debug)
	    fprintf_unfiltered (gdb_stdlog,
				"aarch64: prologue analysis gave up addr=0x%s"
				" opcode=0x%x\n",
				core_addr_to_string_nz (start), insn);
	  break;
	}
    }

  /* With no cache to fill in, only the prologue-end address is
     wanted.  */
  if (cache == NULL)
    {
      do_cleanups (back_to);
      return start;
    }

  if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Frame pointer is fp.  Frame size is constant.  */
      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = -regs[AARCH64_FP_REGNUM].k;
    }
  else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Try the stack pointer.  */
      cache->framesize = -regs[AARCH64_SP_REGNUM].k;
      cache->framereg = AARCH64_SP_REGNUM;
    }
  else
    {
      /* We're just out of luck.  We don't know where the frame is.  */
      cache->framereg = -1;
      cache->framesize = 0;
    }

  /* Record the frame-relative offset at which each register was
     saved, if any.  */
  for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
    {
      CORE_ADDR offset;

      if (pv_area_find_reg (stack, gdbarch, i, &offset))
	cache->saved_regs[i].addr = offset;
    }

  do_cleanups (back_to);
  return start;
}
841
842/* Implement the "skip_prologue" gdbarch method. */
843
844static CORE_ADDR
845aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
846{
847 unsigned long inst;
848 CORE_ADDR skip_pc;
849 CORE_ADDR func_addr, limit_pc;
850 struct symtab_and_line sal;
851
852 /* See if we can determine the end of the prologue via the symbol
853 table. If so, then return either PC, or the PC after the
854 prologue, whichever is greater. */
855 if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
856 {
857 CORE_ADDR post_prologue_pc
858 = skip_prologue_using_sal (gdbarch, func_addr);
859
860 if (post_prologue_pc != 0)
861 return max (pc, post_prologue_pc);
862 }
863
864 /* Can't determine prologue from the symbol table, need to examine
865 instructions. */
866
867 /* Find an upper limit on the function prologue using the debug
868 information. If the debug information could not be used to
869 provide that bound, then use an arbitrary large number as the
870 upper bound. */
871 limit_pc = skip_prologue_using_sal (gdbarch, pc);
872 if (limit_pc == 0)
873 limit_pc = pc + 128; /* Magic. */
874
875 /* Try disassembling prologue. */
876 return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
877}
878
879/* Scan the function prologue for THIS_FRAME and populate the prologue
880 cache CACHE. */
881
882static void
883aarch64_scan_prologue (struct frame_info *this_frame,
884 struct aarch64_prologue_cache *cache)
885{
886 CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
887 CORE_ADDR prologue_start;
888 CORE_ADDR prologue_end;
889 CORE_ADDR prev_pc = get_frame_pc (this_frame);
890 struct gdbarch *gdbarch = get_frame_arch (this_frame);
891
892 /* Assume we do not find a frame. */
893 cache->framereg = -1;
894 cache->framesize = 0;
895
896 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
897 &prologue_end))
898 {
899 struct symtab_and_line sal = find_pc_line (prologue_start, 0);
900
901 if (sal.line == 0)
902 {
903 /* No line info so use the current PC. */
904 prologue_end = prev_pc;
905 }
906 else if (sal.end < prologue_end)
907 {
908 /* The next line begins after the function end. */
909 prologue_end = sal.end;
910 }
911
912 prologue_end = min (prologue_end, prev_pc);
913 aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
914 }
915 else
916 {
917 CORE_ADDR frame_loc;
918 LONGEST saved_fp;
919 LONGEST saved_lr;
920 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
921
922 frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
923 if (frame_loc == 0)
924 return;
925
926 cache->framereg = AARCH64_FP_REGNUM;
927 cache->framesize = 16;
928 cache->saved_regs[29].addr = 0;
929 cache->saved_regs[30].addr = 8;
930 }
931}
932
933/* Allocate an aarch64_prologue_cache and fill it with information
934 about the prologue of *THIS_FRAME. */
935
936static struct aarch64_prologue_cache *
937aarch64_make_prologue_cache (struct frame_info *this_frame)
938{
939 struct aarch64_prologue_cache *cache;
940 CORE_ADDR unwound_fp;
941 int reg;
942
943 cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
944 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
945
946 aarch64_scan_prologue (this_frame, cache);
947
948 if (cache->framereg == -1)
949 return cache;
950
951 unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
952 if (unwound_fp == 0)
953 return cache;
954
955 cache->prev_sp = unwound_fp + cache->framesize;
956
957 /* Calculate actual addresses of saved registers using offsets
958 determined by aarch64_analyze_prologue. */
959 for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
960 if (trad_frame_addr_p (cache->saved_regs, reg))
961 cache->saved_regs[reg].addr += cache->prev_sp;
962
963 return cache;
964}
965
966/* Our frame ID for a normal frame is the current function's starting
967 PC and the caller's SP when we were called. */
968
969static void
970aarch64_prologue_this_id (struct frame_info *this_frame,
971 void **this_cache, struct frame_id *this_id)
972{
973 struct aarch64_prologue_cache *cache;
974 struct frame_id id;
975 CORE_ADDR pc, func;
976
977 if (*this_cache == NULL)
978 *this_cache = aarch64_make_prologue_cache (this_frame);
979 cache = *this_cache;
980
981 /* This is meant to halt the backtrace at "_start". */
982 pc = get_frame_pc (this_frame);
983 if (pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
984 return;
985
986 /* If we've hit a wall, stop. */
987 if (cache->prev_sp == 0)
988 return;
989
990 func = get_frame_func (this_frame);
991 id = frame_id_build (cache->prev_sp, func);
992 *this_id = id;
993}
994
/* Implement the "prev_register" frame_unwind method.

   Returns the value the previous frame had in register PREV_REGNUM
   at the point THIS_FRAME was created.  */

static struct value *
aarch64_prologue_prev_register (struct frame_info *this_frame,
				void **this_cache, int prev_regnum)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  struct aarch64_prologue_cache *cache;

  /* Build the prologue cache lazily on first use.  */
  if (*this_cache == NULL)
    *this_cache = aarch64_make_prologue_cache (this_frame);
  cache = *this_cache;

  /* If we are asked to unwind the PC, then we need to return the LR
     instead.  The prologue may save PC, but it will point into this
     frame's prologue, not the next frame's resume location.  */
  if (prev_regnum == AARCH64_PC_REGNUM)
    {
      CORE_ADDR lr;

      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      return frame_unwind_got_constant (this_frame, prev_regnum, lr);
    }

  /* SP is generally not saved to the stack, but this frame is
     identified by the next frame's stack pointer at the time of the
     call.  The value was already reconstructed into PREV_SP.  */
  /*
         +----------+  ^
         | saved lr |  |
      +->| saved fp |--+
      |  |          |
      |  |          |     <- Previous SP
      |  +----------+
      |  | saved lr |
      +--| saved fp |<- FP
         |          |
         |          |<- SP
         +----------+  */
  if (prev_regnum == AARCH64_SP_REGNUM)
    return frame_unwind_got_constant (this_frame, prev_regnum,
				      cache->prev_sp);

  /* Everything else comes from the saved-register table built by the
     prologue scanner.  */
  return trad_frame_get_prev_register (this_frame, cache->saved_regs,
				       prev_regnum);
}
1041
/* AArch64 prologue unwinder.  The frame_unwind vector driven by the
   prologue scanner above; accepted for any normal frame by
   default_frame_sniffer.  */
struct frame_unwind aarch64_prologue_unwind =
{
  NORMAL_FRAME,
  default_frame_unwind_stop_reason,
  aarch64_prologue_this_id,
  aarch64_prologue_prev_register,
  NULL,
  default_frame_sniffer
};
1052
1053/* Allocate an aarch64_prologue_cache and fill it with information
1054 about the prologue of *THIS_FRAME. */
1055
1056static struct aarch64_prologue_cache *
1057aarch64_make_stub_cache (struct frame_info *this_frame)
1058{
1059 int reg;
1060 struct aarch64_prologue_cache *cache;
1061 CORE_ADDR unwound_fp;
1062
1063 cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
1064 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
1065
1066 cache->prev_sp
1067 = get_frame_register_unsigned (this_frame, AARCH64_SP_REGNUM);
1068
1069 return cache;
1070}
1071
/* Our frame ID for a stub frame is the current SP and LR.
   Implements the frame_unwind "this_id" method for the stub
   unwinder.  */

static void
aarch64_stub_this_id (struct frame_info *this_frame,
		      void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache;

  /* Build the stub cache lazily on first use.  */
  if (*this_cache == NULL)
    *this_cache = aarch64_make_stub_cache (this_frame);
  cache = *this_cache;

  *this_id = frame_id_build (cache->prev_sp, get_frame_pc (this_frame));
}
1086
1087/* Implement the "sniffer" frame_unwind method. */
1088
1089static int
1090aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
1091 struct frame_info *this_frame,
1092 void **this_prologue_cache)
1093{
1094 CORE_ADDR addr_in_block;
1095 gdb_byte dummy[4];
1096
1097 addr_in_block = get_frame_address_in_block (this_frame);
3e5d3a5a 1098 if (in_plt_section (addr_in_block)
07b287a0
MS
1099 /* We also use the stub winder if the target memory is unreadable
1100 to avoid having the prologue unwinder trying to read it. */
1101 || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
1102 return 1;
1103
1104 return 0;
1105}
1106
/* AArch64 stub unwinder.  Handles frames with no prologue (PLT stubs)
   or with unreadable code; register recovery is shared with the
   prologue unwinder.  */
struct frame_unwind aarch64_stub_unwind =
{
  NORMAL_FRAME,				/* type */
  default_frame_unwind_stop_reason,	/* stop_reason */
  aarch64_stub_this_id,			/* this_id */
  aarch64_prologue_prev_register,	/* prev_register */
  NULL,					/* unwind_data */
  aarch64_stub_unwind_sniffer		/* sniffer */
};
1117
1118/* Return the frame base address of *THIS_FRAME. */
1119
1120static CORE_ADDR
1121aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
1122{
1123 struct aarch64_prologue_cache *cache;
1124
1125 if (*this_cache == NULL)
1126 *this_cache = aarch64_make_prologue_cache (this_frame);
1127 cache = *this_cache;
1128
1129 return cache->prev_sp - cache->framesize;
1130}
1131
/* AArch64 default frame base information.  Uses the same base address
   for the generic frame base, locals and arguments.  */
struct frame_base aarch64_normal_base =
{
  &aarch64_prologue_unwind,
  aarch64_normal_frame_base,	/* this_base */
  aarch64_normal_frame_base,	/* this_locals */
  aarch64_normal_frame_base	/* this_args */
};
1140
1141/* Assuming THIS_FRAME is a dummy, return the frame ID of that
1142 dummy frame. The frame ID's base needs to match the TOS value
1143 saved by save_dummy_frame_tos () and returned from
1144 aarch64_push_dummy_call, and the PC needs to match the dummy
1145 frame's breakpoint. */
1146
1147static struct frame_id
1148aarch64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
1149{
1150 return frame_id_build (get_frame_register_unsigned (this_frame,
1151 AARCH64_SP_REGNUM),
1152 get_frame_pc (this_frame));
1153}
1154
1155/* Implement the "unwind_pc" gdbarch method. */
1156
1157static CORE_ADDR
1158aarch64_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
1159{
1160 CORE_ADDR pc
1161 = frame_unwind_register_unsigned (this_frame, AARCH64_PC_REGNUM);
1162
1163 return pc;
1164}
1165
1166/* Implement the "unwind_sp" gdbarch method. */
1167
1168static CORE_ADDR
1169aarch64_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
1170{
1171 return frame_unwind_register_unsigned (this_frame, AARCH64_SP_REGNUM);
1172}
1173
1174/* Return the value of the REGNUM register in the previous frame of
1175 *THIS_FRAME. */
1176
1177static struct value *
1178aarch64_dwarf2_prev_register (struct frame_info *this_frame,
1179 void **this_cache, int regnum)
1180{
1181 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1182 CORE_ADDR lr;
1183
1184 switch (regnum)
1185 {
1186 case AARCH64_PC_REGNUM:
1187 lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
1188 return frame_unwind_got_constant (this_frame, regnum, lr);
1189
1190 default:
1191 internal_error (__FILE__, __LINE__,
1192 _("Unexpected register %d"), regnum);
1193 }
1194}
1195
1196/* Implement the "init_reg" dwarf2_frame_ops method. */
1197
1198static void
1199aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
1200 struct dwarf2_frame_state_reg *reg,
1201 struct frame_info *this_frame)
1202{
1203 switch (regnum)
1204 {
1205 case AARCH64_PC_REGNUM:
1206 reg->how = DWARF2_FRAME_REG_FN;
1207 reg->loc.fn = aarch64_dwarf2_prev_register;
1208 break;
1209 case AARCH64_SP_REGNUM:
1210 reg->how = DWARF2_FRAME_REG_CFA;
1211 break;
1212 }
1213}
1214
/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

typedef struct
{
  /* Value to pass on stack.  Points into caller-owned storage; the
     bytes are copied out to target memory when the item is popped in
     aarch64_push_dummy_call.  */
  const void *data;

  /* Size in bytes of value to pass on stack.  */
  int len;
} stack_item_t;

DEF_VEC_O (stack_item_t);
1228
1229/* Return the alignment (in bytes) of the given type. */
1230
1231static int
1232aarch64_type_align (struct type *t)
1233{
1234 int n;
1235 int align;
1236 int falign;
1237
1238 t = check_typedef (t);
1239 switch (TYPE_CODE (t))
1240 {
1241 default:
1242 /* Should never happen. */
1243 internal_error (__FILE__, __LINE__, _("unknown type alignment"));
1244 return 4;
1245
1246 case TYPE_CODE_PTR:
1247 case TYPE_CODE_ENUM:
1248 case TYPE_CODE_INT:
1249 case TYPE_CODE_FLT:
1250 case TYPE_CODE_SET:
1251 case TYPE_CODE_RANGE:
1252 case TYPE_CODE_BITSTRING:
1253 case TYPE_CODE_REF:
1254 case TYPE_CODE_CHAR:
1255 case TYPE_CODE_BOOL:
1256 return TYPE_LENGTH (t);
1257
1258 case TYPE_CODE_ARRAY:
1259 case TYPE_CODE_COMPLEX:
1260 return aarch64_type_align (TYPE_TARGET_TYPE (t));
1261
1262 case TYPE_CODE_STRUCT:
1263 case TYPE_CODE_UNION:
1264 align = 1;
1265 for (n = 0; n < TYPE_NFIELDS (t); n++)
1266 {
1267 falign = aarch64_type_align (TYPE_FIELD_TYPE (t, n));
1268 if (falign > align)
1269 align = falign;
1270 }
1271 return align;
1272 }
1273}
1274
1275/* Return 1 if *TY is a homogeneous floating-point aggregate as
1276 defined in the AAPCS64 ABI document; otherwise return 0. */
1277
1278static int
1279is_hfa (struct type *ty)
1280{
1281 switch (TYPE_CODE (ty))
1282 {
1283 case TYPE_CODE_ARRAY:
1284 {
1285 struct type *target_ty = TYPE_TARGET_TYPE (ty);
1286 if (TYPE_CODE (target_ty) == TYPE_CODE_FLT && TYPE_LENGTH (ty) <= 4)
1287 return 1;
1288 break;
1289 }
1290
1291 case TYPE_CODE_UNION:
1292 case TYPE_CODE_STRUCT:
1293 {
1294 if (TYPE_NFIELDS (ty) > 0 && TYPE_NFIELDS (ty) <= 4)
1295 {
1296 struct type *member0_type;
1297
1298 member0_type = check_typedef (TYPE_FIELD_TYPE (ty, 0));
1299 if (TYPE_CODE (member0_type) == TYPE_CODE_FLT)
1300 {
1301 int i;
1302
1303 for (i = 0; i < TYPE_NFIELDS (ty); i++)
1304 {
1305 struct type *member1_type;
1306
1307 member1_type = check_typedef (TYPE_FIELD_TYPE (ty, i));
1308 if (TYPE_CODE (member0_type) != TYPE_CODE (member1_type)
1309 || (TYPE_LENGTH (member0_type)
1310 != TYPE_LENGTH (member1_type)))
1311 return 0;
1312 }
1313 return 1;
1314 }
1315 }
1316 return 0;
1317 }
1318
1319 default:
1320 break;
1321 }
1322
1323 return 0;
1324}
1325
/* AArch64 function call information structure.  Tracks the AAPCS64
   register/stack allocation state while marshalling arguments in
   aarch64_push_dummy_call.  */
struct aarch64_call_info
{
  /* The current argument number, used for debug output only.  */
  unsigned argnum;

  /* The next general purpose register number, equivalent to NGRN as
     described in the AArch64 Procedure Call Standard.  */
  unsigned ngrn;

  /* The next SIMD and floating point register number, equivalent to
     NSRN as described in the AArch64 Procedure Call Standard.  */
  unsigned nsrn;

  /* The next stacked argument address, equivalent to NSAA as
     described in the AArch64 Procedure Call Standard.  */
  unsigned nsaa;

  /* Stack item vector (FILO of stacked arguments).  */
  VEC(stack_item_t) *si;
};
1347
/* Pass a value, pointed to by BUF and described by TYPE, in a
   sequence of consecutive X registers starting at INFO->ngrn.  The
   caller is responsible for ensuring sufficient registers are
   available, and for advancing INFO->ngrn afterwards (see
   pass_in_x_or_stack).  */

static void
pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
	   struct aarch64_call_info *info, struct type *type,
	   const bfd_byte *buf)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int len = TYPE_LENGTH (type);
  enum type_code typecode = TYPE_CODE (type);
  int regnum = AARCH64_X0_REGNUM + info->ngrn;

  info->argnum++;

  while (len > 0)
    {
      /* Consume at most one register's worth of BUF per iteration.  */
      int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
      CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
						   byte_order);

      /* Adjust sub-word struct/union args when big-endian: shift the
	 bytes into the most significant end of the register.  */
      if (byte_order == BFD_ENDIAN_BIG
	  && partial_len < X_REGISTER_SIZE
	  && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
	regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);

      if (aarch64_debug)
	fprintf_unfiltered (gdb_stdlog, "arg %d in %s = 0x%s\n",
			    info->argnum,
			    gdbarch_register_name (gdbarch, regnum),
			    phex (regval, X_REGISTER_SIZE));
      regcache_cooked_write_unsigned (regcache, regnum, regval);
      len -= partial_len;
      buf += partial_len;
      regnum++;
    }
}
1387
1388/* Attempt to marshall a value in a V register. Return 1 if
1389 successful, or 0 if insufficient registers are available. This
1390 function, unlike the equivalent pass_in_x() function does not
1391 handle arguments spread across multiple registers. */
1392
1393static int
1394pass_in_v (struct gdbarch *gdbarch,
1395 struct regcache *regcache,
1396 struct aarch64_call_info *info,
1397 const bfd_byte *buf)
1398{
1399 if (info->nsrn < 8)
1400 {
1401 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1402 int regnum = AARCH64_V0_REGNUM + info->nsrn;
1403
1404 info->argnum++;
1405 info->nsrn++;
1406
1407 regcache_cooked_write (regcache, regnum, buf);
1408 if (aarch64_debug)
1409 fprintf_unfiltered (gdb_stdlog, "arg %d in %s\n",
1410 info->argnum,
1411 gdbarch_register_name (gdbarch, regnum));
1412 return 1;
1413 }
1414 info->nsrn = 8;
1415 return 0;
1416}
1417
/* Marshall an argument onto the stack: queue its bytes (and any
   alignment padding) on INFO->si and advance INFO->nsaa.  The queued
   items are written to target memory by aarch64_push_dummy_call.  */

static void
pass_on_stack (struct aarch64_call_info *info, struct type *type,
	       const bfd_byte *buf)
{
  int len = TYPE_LENGTH (type);
  int align;
  stack_item_t item;

  info->argnum++;

  align = aarch64_type_align (type);

  /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
     natural alignment of the argument's type.  */
  align = align_up (align, 8);

  /* The AArch64 PCS requires at most doubleword alignment.  */
  if (align > 16)
    align = 16;

  if (aarch64_debug)
    fprintf_unfiltered (gdb_stdlog, "arg %d len=%d @ sp + %d\n",
			info->argnum, len, info->nsaa);

  item.len = len;
  item.data = buf;
  VEC_safe_push (stack_item_t, info->si, &item);

  info->nsaa += len;
  if (info->nsaa & (align - 1))
    {
      /* Push stack alignment padding.  The padding item reuses BUF as
	 a byte source; the padding contents are never meaningful.
	 NOTE(review): if PAD can exceed LEN this over-reads BUF --
	 confirm callers always pass buffers of at least PAD bytes.  */
      int pad = align - (info->nsaa & (align - 1));

      item.len = pad;
      item.data = buf;

      VEC_safe_push (stack_item_t, info->si, &item);
      info->nsaa += pad;
    }
}
1461
1462/* Marshall an argument into a sequence of one or more consecutive X
1463 registers or, if insufficient X registers are available then onto
1464 the stack. */
1465
1466static void
1467pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
1468 struct aarch64_call_info *info, struct type *type,
1469 const bfd_byte *buf)
1470{
1471 int len = TYPE_LENGTH (type);
1472 int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;
1473
1474 /* PCS C.13 - Pass in registers if we have enough spare */
1475 if (info->ngrn + nregs <= 8)
1476 {
1477 pass_in_x (gdbarch, regcache, info, type, buf);
1478 info->ngrn += nregs;
1479 }
1480 else
1481 {
1482 info->ngrn = 8;
1483 pass_on_stack (info, type, buf);
1484 }
1485}
1486
1487/* Pass a value in a V register, or on the stack if insufficient are
1488 available. */
1489
1490static void
1491pass_in_v_or_stack (struct gdbarch *gdbarch,
1492 struct regcache *regcache,
1493 struct aarch64_call_info *info,
1494 struct type *type,
1495 const bfd_byte *buf)
1496{
1497 if (!pass_in_v (gdbarch, regcache, info, buf))
1498 pass_on_stack (info, type, buf);
1499}
1500
1501/* Implement the "push_dummy_call" gdbarch method. */
1502
1503static CORE_ADDR
1504aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
1505 struct regcache *regcache, CORE_ADDR bp_addr,
1506 int nargs,
1507 struct value **args, CORE_ADDR sp, int struct_return,
1508 CORE_ADDR struct_addr)
1509{
1510 int nstack = 0;
1511 int argnum;
1512 int x_argreg;
1513 int v_argreg;
1514 struct aarch64_call_info info;
1515 struct type *func_type;
1516 struct type *return_type;
1517 int lang_struct_return;
1518
1519 memset (&info, 0, sizeof (info));
1520
1521 /* We need to know what the type of the called function is in order
1522 to determine the number of named/anonymous arguments for the
1523 actual argument placement, and the return type in order to handle
1524 return value correctly.
1525
1526 The generic code above us views the decision of return in memory
1527 or return in registers as a two stage processes. The language
1528 handler is consulted first and may decide to return in memory (eg
1529 class with copy constructor returned by value), this will cause
1530 the generic code to allocate space AND insert an initial leading
1531 argument.
1532
1533 If the language code does not decide to pass in memory then the
1534 target code is consulted.
1535
1536 If the language code decides to pass in memory we want to move
1537 the pointer inserted as the initial argument from the argument
1538 list and into X8, the conventional AArch64 struct return pointer
1539 register.
1540
1541 This is slightly awkward, ideally the flag "lang_struct_return"
1542 would be passed to the targets implementation of push_dummy_call.
1543 Rather that change the target interface we call the language code
1544 directly ourselves. */
1545
1546 func_type = check_typedef (value_type (function));
1547
1548 /* Dereference function pointer types. */
1549 if (TYPE_CODE (func_type) == TYPE_CODE_PTR)
1550 func_type = TYPE_TARGET_TYPE (func_type);
1551
1552 gdb_assert (TYPE_CODE (func_type) == TYPE_CODE_FUNC
1553 || TYPE_CODE (func_type) == TYPE_CODE_METHOD);
1554
1555 /* If language_pass_by_reference () returned true we will have been
1556 given an additional initial argument, a hidden pointer to the
1557 return slot in memory. */
1558 return_type = TYPE_TARGET_TYPE (func_type);
1559 lang_struct_return = language_pass_by_reference (return_type);
1560
1561 /* Set the return address. For the AArch64, the return breakpoint
1562 is always at BP_ADDR. */
1563 regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);
1564
1565 /* If we were given an initial argument for the return slot because
1566 lang_struct_return was true, lose it. */
1567 if (lang_struct_return)
1568 {
1569 args++;
1570 nargs--;
1571 }
1572
1573 /* The struct_return pointer occupies X8. */
1574 if (struct_return || lang_struct_return)
1575 {
1576 if (aarch64_debug)
1577 fprintf_unfiltered (gdb_stdlog, "struct return in %s = 0x%s\n",
1578 gdbarch_register_name
1579 (gdbarch,
1580 AARCH64_STRUCT_RETURN_REGNUM),
1581 paddress (gdbarch, struct_addr));
1582 regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
1583 struct_addr);
1584 }
1585
1586 for (argnum = 0; argnum < nargs; argnum++)
1587 {
1588 struct value *arg = args[argnum];
1589 struct type *arg_type;
1590 int len;
1591
1592 arg_type = check_typedef (value_type (arg));
1593 len = TYPE_LENGTH (arg_type);
1594
1595 switch (TYPE_CODE (arg_type))
1596 {
1597 case TYPE_CODE_INT:
1598 case TYPE_CODE_BOOL:
1599 case TYPE_CODE_CHAR:
1600 case TYPE_CODE_RANGE:
1601 case TYPE_CODE_ENUM:
1602 if (len < 4)
1603 {
1604 /* Promote to 32 bit integer. */
1605 if (TYPE_UNSIGNED (arg_type))
1606 arg_type = builtin_type (gdbarch)->builtin_uint32;
1607 else
1608 arg_type = builtin_type (gdbarch)->builtin_int32;
1609 arg = value_cast (arg_type, arg);
1610 }
1611 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type,
1612 value_contents (arg));
1613 break;
1614
1615 case TYPE_CODE_COMPLEX:
1616 if (info.nsrn <= 6)
1617 {
1618 const bfd_byte *buf = value_contents (arg);
1619 struct type *target_type =
1620 check_typedef (TYPE_TARGET_TYPE (arg_type));
1621
1622 pass_in_v (gdbarch, regcache, &info, buf);
1623 pass_in_v (gdbarch, regcache, &info,
1624 buf + TYPE_LENGTH (target_type));
1625 }
1626 else
1627 {
1628 info.nsrn = 8;
1629 pass_on_stack (&info, arg_type, value_contents (arg));
1630 }
1631 break;
1632 case TYPE_CODE_FLT:
1633 pass_in_v_or_stack (gdbarch, regcache, &info, arg_type,
1634 value_contents (arg));
1635 break;
1636
1637 case TYPE_CODE_STRUCT:
1638 case TYPE_CODE_ARRAY:
1639 case TYPE_CODE_UNION:
1640 if (is_hfa (arg_type))
1641 {
1642 int elements = TYPE_NFIELDS (arg_type);
1643
1644 /* Homogeneous Aggregates */
1645 if (info.nsrn + elements < 8)
1646 {
1647 int i;
1648
1649 for (i = 0; i < elements; i++)
1650 {
1651 /* We know that we have sufficient registers
1652 available therefore this will never fallback
1653 to the stack. */
1654 struct value *field =
1655 value_primitive_field (arg, 0, i, arg_type);
1656 struct type *field_type =
1657 check_typedef (value_type (field));
1658
1659 pass_in_v_or_stack (gdbarch, regcache, &info, field_type,
1660 value_contents_writeable (field));
1661 }
1662 }
1663 else
1664 {
1665 info.nsrn = 8;
1666 pass_on_stack (&info, arg_type, value_contents (arg));
1667 }
1668 }
1669 else if (len > 16)
1670 {
1671 /* PCS B.7 Aggregates larger than 16 bytes are passed by
1672 invisible reference. */
1673
1674 /* Allocate aligned storage. */
1675 sp = align_down (sp - len, 16);
1676
1677 /* Write the real data into the stack. */
1678 write_memory (sp, value_contents (arg), len);
1679
1680 /* Construct the indirection. */
1681 arg_type = lookup_pointer_type (arg_type);
1682 arg = value_from_pointer (arg_type, sp);
1683 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type,
1684 value_contents (arg));
1685 }
1686 else
1687 /* PCS C.15 / C.18 multiple values pass. */
1688 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type,
1689 value_contents (arg));
1690 break;
1691
1692 default:
1693 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type,
1694 value_contents (arg));
1695 break;
1696 }
1697 }
1698
1699 /* Make sure stack retains 16 byte alignment. */
1700 if (info.nsaa & 15)
1701 sp -= 16 - (info.nsaa & 15);
1702
1703 while (!VEC_empty (stack_item_t, info.si))
1704 {
1705 stack_item_t *si = VEC_last (stack_item_t, info.si);
1706
1707 sp -= si->len;
1708 write_memory (sp, si->data, si->len);
1709 VEC_pop (stack_item_t, info.si);
1710 }
1711
1712 VEC_free (stack_item_t, info.si);
1713
1714 /* Finally, update the SP register. */
1715 regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);
1716
1717 return sp;
1718}
1719
1720/* Implement the "frame_align" gdbarch method. */
1721
1722static CORE_ADDR
1723aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
1724{
1725 /* Align the stack to sixteen bytes. */
1726 return sp & ~(CORE_ADDR) 15;
1727}
1728
1729/* Return the type for an AdvSISD Q register. */
1730
1731static struct type *
1732aarch64_vnq_type (struct gdbarch *gdbarch)
1733{
1734 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1735
1736 if (tdep->vnq_type == NULL)
1737 {
1738 struct type *t;
1739 struct type *elem;
1740
1741 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
1742 TYPE_CODE_UNION);
1743
1744 elem = builtin_type (gdbarch)->builtin_uint128;
1745 append_composite_type_field (t, "u", elem);
1746
1747 elem = builtin_type (gdbarch)->builtin_int128;
1748 append_composite_type_field (t, "s", elem);
1749
1750 tdep->vnq_type = t;
1751 }
1752
1753 return tdep->vnq_type;
1754}
1755
1756/* Return the type for an AdvSISD D register. */
1757
1758static struct type *
1759aarch64_vnd_type (struct gdbarch *gdbarch)
1760{
1761 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1762
1763 if (tdep->vnd_type == NULL)
1764 {
1765 struct type *t;
1766 struct type *elem;
1767
1768 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
1769 TYPE_CODE_UNION);
1770
1771 elem = builtin_type (gdbarch)->builtin_double;
1772 append_composite_type_field (t, "f", elem);
1773
1774 elem = builtin_type (gdbarch)->builtin_uint64;
1775 append_composite_type_field (t, "u", elem);
1776
1777 elem = builtin_type (gdbarch)->builtin_int64;
1778 append_composite_type_field (t, "s", elem);
1779
1780 tdep->vnd_type = t;
1781 }
1782
1783 return tdep->vnd_type;
1784}
1785
1786/* Return the type for an AdvSISD S register. */
1787
1788static struct type *
1789aarch64_vns_type (struct gdbarch *gdbarch)
1790{
1791 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1792
1793 if (tdep->vns_type == NULL)
1794 {
1795 struct type *t;
1796 struct type *elem;
1797
1798 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
1799 TYPE_CODE_UNION);
1800
1801 elem = builtin_type (gdbarch)->builtin_float;
1802 append_composite_type_field (t, "f", elem);
1803
1804 elem = builtin_type (gdbarch)->builtin_uint32;
1805 append_composite_type_field (t, "u", elem);
1806
1807 elem = builtin_type (gdbarch)->builtin_int32;
1808 append_composite_type_field (t, "s", elem);
1809
1810 tdep->vns_type = t;
1811 }
1812
1813 return tdep->vns_type;
1814}
1815
1816/* Return the type for an AdvSISD H register. */
1817
1818static struct type *
1819aarch64_vnh_type (struct gdbarch *gdbarch)
1820{
1821 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1822
1823 if (tdep->vnh_type == NULL)
1824 {
1825 struct type *t;
1826 struct type *elem;
1827
1828 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
1829 TYPE_CODE_UNION);
1830
1831 elem = builtin_type (gdbarch)->builtin_uint16;
1832 append_composite_type_field (t, "u", elem);
1833
1834 elem = builtin_type (gdbarch)->builtin_int16;
1835 append_composite_type_field (t, "s", elem);
1836
1837 tdep->vnh_type = t;
1838 }
1839
1840 return tdep->vnh_type;
1841}
1842
1843/* Return the type for an AdvSISD B register. */
1844
1845static struct type *
1846aarch64_vnb_type (struct gdbarch *gdbarch)
1847{
1848 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1849
1850 if (tdep->vnb_type == NULL)
1851 {
1852 struct type *t;
1853 struct type *elem;
1854
1855 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
1856 TYPE_CODE_UNION);
1857
1858 elem = builtin_type (gdbarch)->builtin_uint8;
1859 append_composite_type_field (t, "u", elem);
1860
1861 elem = builtin_type (gdbarch)->builtin_int8;
1862 append_composite_type_field (t, "s", elem);
1863
1864 tdep->vnb_type = t;
1865 }
1866
1867 return tdep->vnb_type;
1868}
1869
1870/* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
1871
1872static int
1873aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
1874{
1875 if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
1876 return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;
1877
1878 if (reg == AARCH64_DWARF_SP)
1879 return AARCH64_SP_REGNUM;
1880
1881 if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
1882 return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;
1883
1884 return -1;
1885}
1886\f
1887
/* Implement the "print_insn" gdbarch method.  */

static int
aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
{
  /* Clear the symbol table hint before disassembling; presumably GDB
     handles symbol annotation itself -- NOTE(review): confirm.  */
  info->symbols = NULL;
  return print_insn_aarch64 (memaddr, info);
}
1896
/* AArch64 BRK software debug mode instruction.
   Note that AArch64 code is always little-endian.
   1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000.  */
static const gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
1901
1902/* Implement the "breakpoint_from_pc" gdbarch method. */
1903
948f8e3d 1904static const gdb_byte *
07b287a0
MS
1905aarch64_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr,
1906 int *lenptr)
1907{
1908 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1909
1910 *lenptr = sizeof (aarch64_default_breakpoint);
1911 return aarch64_default_breakpoint;
1912}
1913
1914/* Extract from an array REGS containing the (raw) register state a
1915 function return value of type TYPE, and copy that, in virtual
1916 format, into VALBUF. */
1917
1918static void
1919aarch64_extract_return_value (struct type *type, struct regcache *regs,
1920 gdb_byte *valbuf)
1921{
1922 struct gdbarch *gdbarch = get_regcache_arch (regs);
1923 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1924
1925 if (TYPE_CODE (type) == TYPE_CODE_FLT)
1926 {
1927 bfd_byte buf[V_REGISTER_SIZE];
1928 int len = TYPE_LENGTH (type);
1929
1930 regcache_cooked_read (regs, AARCH64_V0_REGNUM, buf);
1931 memcpy (valbuf, buf, len);
1932 }
1933 else if (TYPE_CODE (type) == TYPE_CODE_INT
1934 || TYPE_CODE (type) == TYPE_CODE_CHAR
1935 || TYPE_CODE (type) == TYPE_CODE_BOOL
1936 || TYPE_CODE (type) == TYPE_CODE_PTR
1937 || TYPE_CODE (type) == TYPE_CODE_REF
1938 || TYPE_CODE (type) == TYPE_CODE_ENUM)
1939 {
1940 /* If the the type is a plain integer, then the access is
1941 straight-forward. Otherwise we have to play around a bit
1942 more. */
1943 int len = TYPE_LENGTH (type);
1944 int regno = AARCH64_X0_REGNUM;
1945 ULONGEST tmp;
1946
1947 while (len > 0)
1948 {
1949 /* By using store_unsigned_integer we avoid having to do
1950 anything special for small big-endian values. */
1951 regcache_cooked_read_unsigned (regs, regno++, &tmp);
1952 store_unsigned_integer (valbuf,
1953 (len > X_REGISTER_SIZE
1954 ? X_REGISTER_SIZE : len), byte_order, tmp);
1955 len -= X_REGISTER_SIZE;
1956 valbuf += X_REGISTER_SIZE;
1957 }
1958 }
1959 else if (TYPE_CODE (type) == TYPE_CODE_COMPLEX)
1960 {
1961 int regno = AARCH64_V0_REGNUM;
1962 bfd_byte buf[V_REGISTER_SIZE];
1963 struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
1964 int len = TYPE_LENGTH (target_type);
1965
1966 regcache_cooked_read (regs, regno, buf);
1967 memcpy (valbuf, buf, len);
1968 valbuf += len;
1969 regcache_cooked_read (regs, regno + 1, buf);
1970 memcpy (valbuf, buf, len);
1971 valbuf += len;
1972 }
1973 else if (is_hfa (type))
1974 {
1975 int elements = TYPE_NFIELDS (type);
1976 struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
1977 int len = TYPE_LENGTH (member_type);
1978 int i;
1979
1980 for (i = 0; i < elements; i++)
1981 {
1982 int regno = AARCH64_V0_REGNUM + i;
1983 bfd_byte buf[X_REGISTER_SIZE];
1984
1985 if (aarch64_debug)
1986 fprintf_unfiltered (gdb_stdlog,
1987 "read HFA return value element %d from %s\n",
1988 i + 1,
1989 gdbarch_register_name (gdbarch, regno));
1990 regcache_cooked_read (regs, regno, buf);
1991
1992 memcpy (valbuf, buf, len);
1993 valbuf += len;
1994 }
1995 }
1996 else
1997 {
1998 /* For a structure or union the behaviour is as if the value had
1999 been stored to word-aligned memory and then loaded into
2000 registers with 64-bit load instruction(s). */
2001 int len = TYPE_LENGTH (type);
2002 int regno = AARCH64_X0_REGNUM;
2003 bfd_byte buf[X_REGISTER_SIZE];
2004
2005 while (len > 0)
2006 {
2007 regcache_cooked_read (regs, regno++, buf);
2008 memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2009 len -= X_REGISTER_SIZE;
2010 valbuf += X_REGISTER_SIZE;
2011 }
2012 }
2013}
2014
2015
/* Will a function return an aggregate type in memory or in a
   register?  Return 0 if an aggregate type can be returned in a
   register, 1 if it must be returned in memory.  */

static int
aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
{
  CHECK_TYPEDEF (type);

  if (is_hfa (type))
    {
      /* PCS B.5 If the argument is a Named HFA, then the argument is
	 used unmodified.  */
      return 0;
    }

  if (TYPE_LENGTH (type) > 16)
    {
      /* PCS B.6 Aggregates larger than 16 bytes are passed by
	 invisible reference.  */
      return 1;
    }

  /* "Integer-like" aggregates of at most 16 bytes are returned in
     registers.  */
  return 0;
}
2049
/* Write into appropriate registers a function return value of type
   TYPE, given in virtual format in VALBUF.  */

static void
aarch64_store_return_value (struct type *type, struct regcache *regs,
			    const gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = get_regcache_arch (regs);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  if (TYPE_CODE (type) == TYPE_CODE_FLT)
    {
      /* Scalar float: stored in the low bytes of V0.  */
      bfd_byte buf[V_REGISTER_SIZE];
      int len = TYPE_LENGTH (type);

      memcpy (buf, valbuf, len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
      regcache_cooked_write (regs, AARCH64_V0_REGNUM, buf);
    }
  else if (TYPE_CODE (type) == TYPE_CODE_INT
	   || TYPE_CODE (type) == TYPE_CODE_CHAR
	   || TYPE_CODE (type) == TYPE_CODE_BOOL
	   || TYPE_CODE (type) == TYPE_CODE_PTR
	   || TYPE_CODE (type) == TYPE_CODE_REF
	   || TYPE_CODE (type) == TYPE_CODE_ENUM)
    {
      if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
	{
	  /* Values of one word or less are zero/sign-extended and
	     returned in r0.  */
	  bfd_byte tmpbuf[X_REGISTER_SIZE];
	  LONGEST val = unpack_long (type, valbuf);

	  store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
	  regcache_cooked_write (regs, AARCH64_X0_REGNUM, tmpbuf);
	}
      else
	{
	  /* Integral values greater than one word are stored in
	     consecutive registers starting with r0.  This will always
	     be a multiple of the register size.  */
	  int len = TYPE_LENGTH (type);
	  int regno = AARCH64_X0_REGNUM;

	  while (len > 0)
	    {
	      regcache_cooked_write (regs, regno++, valbuf);
	      len -= X_REGISTER_SIZE;
	      valbuf += X_REGISTER_SIZE;
	    }
	}
    }
  else if (is_hfa (type))
    {
      /* Homogeneous floating-point aggregate: one member per V
	 register, starting at V0.  */
      int elements = TYPE_NFIELDS (type);
      struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
      int len = TYPE_LENGTH (member_type);
      int i;

      for (i = 0; i < elements; i++)
	{
	  int regno = AARCH64_V0_REGNUM + i;
	  bfd_byte tmpbuf[MAX_REGISTER_SIZE];

	  if (aarch64_debug)
	    fprintf_unfiltered (gdb_stdlog,
				"write HFA return value element %d to %s\n",
				i + 1,
				gdbarch_register_name (gdbarch, regno));

	  memcpy (tmpbuf, valbuf, len);
	  regcache_cooked_write (regs, regno, tmpbuf);
	  valbuf += len;
	}
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
	 been stored to word-aligned memory and then loaded into
	 registers with 64-bit load instruction(s).  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      bfd_byte tmpbuf[X_REGISTER_SIZE];

      while (len > 0)
	{
	  memcpy (tmpbuf, valbuf,
		  len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
	  regcache_cooked_write (regs, regno++, tmpbuf);
	  len -= X_REGISTER_SIZE;
	  valbuf += X_REGISTER_SIZE;
	}
    }
}
2143
2144/* Implement the "return_value" gdbarch method. */
2145
2146static enum return_value_convention
2147aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
2148 struct type *valtype, struct regcache *regcache,
2149 gdb_byte *readbuf, const gdb_byte *writebuf)
2150{
2151 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2152
2153 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
2154 || TYPE_CODE (valtype) == TYPE_CODE_UNION
2155 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
2156 {
2157 if (aarch64_return_in_memory (gdbarch, valtype))
2158 {
2159 if (aarch64_debug)
2160 fprintf_unfiltered (gdb_stdlog, "return value in memory\n");
2161 return RETURN_VALUE_STRUCT_CONVENTION;
2162 }
2163 }
2164
2165 if (writebuf)
2166 aarch64_store_return_value (valtype, regcache, writebuf);
2167
2168 if (readbuf)
2169 aarch64_extract_return_value (valtype, regcache, readbuf);
2170
2171 if (aarch64_debug)
2172 fprintf_unfiltered (gdb_stdlog, "return value in registers\n");
2173
2174 return RETURN_VALUE_REGISTER_CONVENTION;
2175}
2176
/* Implement the "get_longjmp_target" gdbarch method.  Set *PC to the
   address longjmp will resume at, extracted from the jmp_buf whose
   address is in X0.  Return 1 on success, 0 if the jmp_buf could not
   be read.  */

static int
aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
{
  CORE_ADDR jb_addr;
  gdb_byte buf[X_REGISTER_SIZE];
  struct gdbarch *gdbarch = get_frame_arch (frame);
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  /* The jmp_buf pointer is the first argument (X0) to longjmp.  */
  jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);

  /* The saved PC lives at slot jb_pc (of jb_elt_size bytes each)
     within the jmp_buf; both offsets come from the OS ABI tdep.  */
  if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
			  X_REGISTER_SIZE))
    return 0;

  *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
  return 1;
}
2197\f
2198
2199/* Return the pseudo register name corresponding to register regnum. */
2200
2201static const char *
2202aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
2203{
2204 static const char *const q_name[] =
2205 {
2206 "q0", "q1", "q2", "q3",
2207 "q4", "q5", "q6", "q7",
2208 "q8", "q9", "q10", "q11",
2209 "q12", "q13", "q14", "q15",
2210 "q16", "q17", "q18", "q19",
2211 "q20", "q21", "q22", "q23",
2212 "q24", "q25", "q26", "q27",
2213 "q28", "q29", "q30", "q31",
2214 };
2215
2216 static const char *const d_name[] =
2217 {
2218 "d0", "d1", "d2", "d3",
2219 "d4", "d5", "d6", "d7",
2220 "d8", "d9", "d10", "d11",
2221 "d12", "d13", "d14", "d15",
2222 "d16", "d17", "d18", "d19",
2223 "d20", "d21", "d22", "d23",
2224 "d24", "d25", "d26", "d27",
2225 "d28", "d29", "d30", "d31",
2226 };
2227
2228 static const char *const s_name[] =
2229 {
2230 "s0", "s1", "s2", "s3",
2231 "s4", "s5", "s6", "s7",
2232 "s8", "s9", "s10", "s11",
2233 "s12", "s13", "s14", "s15",
2234 "s16", "s17", "s18", "s19",
2235 "s20", "s21", "s22", "s23",
2236 "s24", "s25", "s26", "s27",
2237 "s28", "s29", "s30", "s31",
2238 };
2239
2240 static const char *const h_name[] =
2241 {
2242 "h0", "h1", "h2", "h3",
2243 "h4", "h5", "h6", "h7",
2244 "h8", "h9", "h10", "h11",
2245 "h12", "h13", "h14", "h15",
2246 "h16", "h17", "h18", "h19",
2247 "h20", "h21", "h22", "h23",
2248 "h24", "h25", "h26", "h27",
2249 "h28", "h29", "h30", "h31",
2250 };
2251
2252 static const char *const b_name[] =
2253 {
2254 "b0", "b1", "b2", "b3",
2255 "b4", "b5", "b6", "b7",
2256 "b8", "b9", "b10", "b11",
2257 "b12", "b13", "b14", "b15",
2258 "b16", "b17", "b18", "b19",
2259 "b20", "b21", "b22", "b23",
2260 "b24", "b25", "b26", "b27",
2261 "b28", "b29", "b30", "b31",
2262 };
2263
2264 regnum -= gdbarch_num_regs (gdbarch);
2265
2266 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2267 return q_name[regnum - AARCH64_Q0_REGNUM];
2268
2269 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2270 return d_name[regnum - AARCH64_D0_REGNUM];
2271
2272 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2273 return s_name[regnum - AARCH64_S0_REGNUM];
2274
2275 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2276 return h_name[regnum - AARCH64_H0_REGNUM];
2277
2278 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2279 return b_name[regnum - AARCH64_B0_REGNUM];
2280
2281 internal_error (__FILE__, __LINE__,
2282 _("aarch64_pseudo_register_name: bad register number %d"),
2283 regnum);
2284}
2285
2286/* Implement the "pseudo_register_type" tdesc_arch_data method. */
2287
2288static struct type *
2289aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
2290{
2291 regnum -= gdbarch_num_regs (gdbarch);
2292
2293 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2294 return aarch64_vnq_type (gdbarch);
2295
2296 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2297 return aarch64_vnd_type (gdbarch);
2298
2299 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2300 return aarch64_vns_type (gdbarch);
2301
2302 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2303 return aarch64_vnh_type (gdbarch);
2304
2305 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2306 return aarch64_vnb_type (gdbarch);
2307
2308 internal_error (__FILE__, __LINE__,
2309 _("aarch64_pseudo_register_type: bad register number %d"),
2310 regnum);
2311}
2312
2313/* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2314
2315static int
2316aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
2317 struct reggroup *group)
2318{
2319 regnum -= gdbarch_num_regs (gdbarch);
2320
2321 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2322 return group == all_reggroup || group == vector_reggroup;
2323 else if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2324 return (group == all_reggroup || group == vector_reggroup
2325 || group == float_reggroup);
2326 else if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2327 return (group == all_reggroup || group == vector_reggroup
2328 || group == float_reggroup);
2329 else if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2330 return group == all_reggroup || group == vector_reggroup;
2331 else if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2332 return group == all_reggroup || group == vector_reggroup;
2333
2334 return group == all_reggroup;
2335}
2336
2337/* Implement the "pseudo_register_read_value" gdbarch method. */
2338
2339static struct value *
2340aarch64_pseudo_read_value (struct gdbarch *gdbarch,
2341 struct regcache *regcache,
2342 int regnum)
2343{
2344 gdb_byte reg_buf[MAX_REGISTER_SIZE];
2345 struct value *result_value;
2346 gdb_byte *buf;
2347
2348 result_value = allocate_value (register_type (gdbarch, regnum));
2349 VALUE_LVAL (result_value) = lval_register;
2350 VALUE_REGNUM (result_value) = regnum;
2351 buf = value_contents_raw (result_value);
2352
2353 regnum -= gdbarch_num_regs (gdbarch);
2354
2355 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2356 {
2357 enum register_status status;
2358 unsigned v_regnum;
2359
2360 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2361 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2362 if (status != REG_VALID)
2363 mark_value_bytes_unavailable (result_value, 0,
2364 TYPE_LENGTH (value_type (result_value)));
2365 else
2366 memcpy (buf, reg_buf, Q_REGISTER_SIZE);
2367 return result_value;
2368 }
2369
2370 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2371 {
2372 enum register_status status;
2373 unsigned v_regnum;
2374
2375 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2376 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2377 if (status != REG_VALID)
2378 mark_value_bytes_unavailable (result_value, 0,
2379 TYPE_LENGTH (value_type (result_value)));
2380 else
2381 memcpy (buf, reg_buf, D_REGISTER_SIZE);
2382 return result_value;
2383 }
2384
2385 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2386 {
2387 enum register_status status;
2388 unsigned v_regnum;
2389
2390 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2391 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2392 memcpy (buf, reg_buf, S_REGISTER_SIZE);
2393 return result_value;
2394 }
2395
2396 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2397 {
2398 enum register_status status;
2399 unsigned v_regnum;
2400
2401 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2402 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2403 if (status != REG_VALID)
2404 mark_value_bytes_unavailable (result_value, 0,
2405 TYPE_LENGTH (value_type (result_value)));
2406 else
2407 memcpy (buf, reg_buf, H_REGISTER_SIZE);
2408 return result_value;
2409 }
2410
2411 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2412 {
2413 enum register_status status;
2414 unsigned v_regnum;
2415
2416 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2417 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2418 if (status != REG_VALID)
2419 mark_value_bytes_unavailable (result_value, 0,
2420 TYPE_LENGTH (value_type (result_value)));
2421 else
2422 memcpy (buf, reg_buf, B_REGISTER_SIZE);
2423 return result_value;
2424 }
2425
2426 gdb_assert_not_reached ("regnum out of bound");
2427}
2428
2429/* Implement the "pseudo_register_write" gdbarch method. */
2430
2431static void
2432aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2433 int regnum, const gdb_byte *buf)
2434{
2435 gdb_byte reg_buf[MAX_REGISTER_SIZE];
2436
2437 /* Ensure the register buffer is zero, we want gdb writes of the
2438 various 'scalar' pseudo registers to behavior like architectural
2439 writes, register width bytes are written the remainder are set to
2440 zero. */
2441 memset (reg_buf, 0, sizeof (reg_buf));
2442
2443 regnum -= gdbarch_num_regs (gdbarch);
2444
2445 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2446 {
2447 /* pseudo Q registers */
2448 unsigned v_regnum;
2449
2450 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2451 memcpy (reg_buf, buf, Q_REGISTER_SIZE);
2452 regcache_raw_write (regcache, v_regnum, reg_buf);
2453 return;
2454 }
2455
2456 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2457 {
2458 /* pseudo D registers */
2459 unsigned v_regnum;
2460
2461 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2462 memcpy (reg_buf, buf, D_REGISTER_SIZE);
2463 regcache_raw_write (regcache, v_regnum, reg_buf);
2464 return;
2465 }
2466
2467 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2468 {
2469 unsigned v_regnum;
2470
2471 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2472 memcpy (reg_buf, buf, S_REGISTER_SIZE);
2473 regcache_raw_write (regcache, v_regnum, reg_buf);
2474 return;
2475 }
2476
2477 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2478 {
2479 /* pseudo H registers */
2480 unsigned v_regnum;
2481
2482 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2483 memcpy (reg_buf, buf, H_REGISTER_SIZE);
2484 regcache_raw_write (regcache, v_regnum, reg_buf);
2485 return;
2486 }
2487
2488 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2489 {
2490 /* pseudo B registers */
2491 unsigned v_regnum;
2492
2493 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2494 memcpy (reg_buf, buf, B_REGISTER_SIZE);
2495 regcache_raw_write (regcache, v_regnum, reg_buf);
2496 return;
2497 }
2498
2499 gdb_assert_not_reached ("regnum out of bound");
2500}
2501
07b287a0
MS
/* Callback function for user_reg_add.  BATON points at the raw
   register number the user-register alias refers to.  */

static struct value *
value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
{
  int regnum = *(const int *) baton;

  return value_of_register (regnum, frame);
}
2511\f
2512
9404b58f
KM
/* Implement the "software_single_step" gdbarch method, needed to
   single step through atomic sequences on AArch64.

   Hardware single-stepping through a load-exclusive/store-exclusive
   sequence would break the exclusive monitor, so the sequence could
   never succeed.  Instead, when the PC sits on a load-exclusive
   instruction, scan forward for the matching store-exclusive and
   place breakpoints past the sequence (and at the target of at most
   one conditional branch inside it), then let the sequence run.
   Returns 1 if breakpoints were placed, 0 to fall back to normal
   stepping.  */

static int
aarch64_software_single_step (struct frame_info *frame)
{
  struct gdbarch *gdbarch = get_frame_arch (frame);
  struct address_space *aspace = get_frame_address_space (frame);
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  const int insn_size = 4;
  const int atomic_sequence_length = 16; /* Instruction sequence length.  */
  CORE_ADDR pc = get_frame_pc (frame);
  CORE_ADDR breaks[2] = { -1, -1 };
  CORE_ADDR loc = pc;
  CORE_ADDR closing_insn = 0;
  uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
						byte_order_for_code);
  int index;
  int insn_count;
  int bc_insn_count = 0; /* Conditional branch instruction count.  */
  int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed).  */

  /* Look for a Load Exclusive instruction which begins the sequence.  */
  if (!decode_masked_match (insn, 0x3fc00000, 0x08400000))
    return 0;

  /* Scan at most atomic_sequence_length insns for the closing
     store-exclusive.  */
  for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
    {
      int32_t offset;
      unsigned cond;

      loc += insn_size;
      insn = read_memory_unsigned_integer (loc, insn_size,
					   byte_order_for_code);

      /* Check if the instruction is a conditional branch.  */
      if (decode_bcond (loc, insn, &cond, &offset))
	{
	  /* More than one conditional branch inside the sequence is
	     not supported; bail out.  */
	  if (bc_insn_count >= 1)
	    return 0;

	  /* It is, so we'll try to set a breakpoint at the destination.  */
	  breaks[1] = loc + offset;

	  bc_insn_count++;
	  last_breakpoint++;
	}

      /* Look for the Store Exclusive which closes the atomic sequence.  */
      if (decode_masked_match (insn, 0x3fc00000, 0x08000000))
	{
	  closing_insn = loc;
	  break;
	}
    }

  /* We didn't find a closing Store Exclusive instruction, fall back.  */
  if (!closing_insn)
    return 0;

  /* Insert breakpoint after the end of the atomic sequence.  */
  breaks[0] = loc + insn_size;

  /* Check for duplicated breakpoints, and also check that the second
     breakpoint is not within the atomic sequence.  */
  if (last_breakpoint
      && (breaks[1] == breaks[0]
	  || (breaks[1] >= pc && breaks[1] <= closing_insn)))
    last_breakpoint = 0;

  /* Insert the breakpoint at the end of the sequence, and one at the
     destination of the conditional branch, if it exists.  */
  for (index = 0; index <= last_breakpoint; index++)
    insert_single_step_breakpoint (gdbarch, aspace, breaks[index]);

  return 1;
}
2590
07b287a0
MS
2591/* Initialize the current architecture based on INFO. If possible,
2592 re-use an architecture from ARCHES, which is a list of
2593 architectures already created during this debugging session.
2594
2595 Called e.g. at program startup, when reading a core file, and when
2596 reading a binary file. */
2597
2598static struct gdbarch *
2599aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
2600{
2601 struct gdbarch_tdep *tdep;
2602 struct gdbarch *gdbarch;
2603 struct gdbarch_list *best_arch;
2604 struct tdesc_arch_data *tdesc_data = NULL;
2605 const struct target_desc *tdesc = info.target_desc;
2606 int i;
2607 int have_fpa_registers = 1;
2608 int valid_p = 1;
2609 const struct tdesc_feature *feature;
2610 int num_regs = 0;
2611 int num_pseudo_regs = 0;
2612
2613 /* Ensure we always have a target descriptor. */
2614 if (!tdesc_has_registers (tdesc))
2615 tdesc = tdesc_aarch64;
2616
2617 gdb_assert (tdesc);
2618
2619 feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");
2620
2621 if (feature == NULL)
2622 return NULL;
2623
2624 tdesc_data = tdesc_data_alloc ();
2625
2626 /* Validate the descriptor provides the mandatory core R registers
2627 and allocate their numbers. */
2628 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
2629 valid_p &=
2630 tdesc_numbered_register (feature, tdesc_data, AARCH64_X0_REGNUM + i,
2631 aarch64_r_register_names[i]);
2632
2633 num_regs = AARCH64_X0_REGNUM + i;
2634
2635 /* Look for the V registers. */
2636 feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
2637 if (feature)
2638 {
2639 /* Validate the descriptor provides the mandatory V registers
2640 and allocate their numbers. */
2641 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
2642 valid_p &=
2643 tdesc_numbered_register (feature, tdesc_data, AARCH64_V0_REGNUM + i,
2644 aarch64_v_register_names[i]);
2645
2646 num_regs = AARCH64_V0_REGNUM + i;
2647
2648 num_pseudo_regs += 32; /* add the Qn scalar register pseudos */
2649 num_pseudo_regs += 32; /* add the Dn scalar register pseudos */
2650 num_pseudo_regs += 32; /* add the Sn scalar register pseudos */
2651 num_pseudo_regs += 32; /* add the Hn scalar register pseudos */
2652 num_pseudo_regs += 32; /* add the Bn scalar register pseudos */
2653 }
2654
2655 if (!valid_p)
2656 {
2657 tdesc_data_cleanup (tdesc_data);
2658 return NULL;
2659 }
2660
2661 /* AArch64 code is always little-endian. */
2662 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
2663
2664 /* If there is already a candidate, use it. */
2665 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
2666 best_arch != NULL;
2667 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
2668 {
2669 /* Found a match. */
2670 break;
2671 }
2672
2673 if (best_arch != NULL)
2674 {
2675 if (tdesc_data != NULL)
2676 tdesc_data_cleanup (tdesc_data);
2677 return best_arch->gdbarch;
2678 }
2679
2680 tdep = xcalloc (1, sizeof (struct gdbarch_tdep));
2681 gdbarch = gdbarch_alloc (&info, tdep);
2682
2683 /* This should be low enough for everything. */
2684 tdep->lowest_pc = 0x20;
2685 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
2686 tdep->jb_elt_size = 8;
2687
2688 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
2689 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
2690
07b287a0
MS
2691 /* Frame handling. */
2692 set_gdbarch_dummy_id (gdbarch, aarch64_dummy_id);
2693 set_gdbarch_unwind_pc (gdbarch, aarch64_unwind_pc);
2694 set_gdbarch_unwind_sp (gdbarch, aarch64_unwind_sp);
2695
2696 /* Advance PC across function entry code. */
2697 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
2698
2699 /* The stack grows downward. */
2700 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
2701
2702 /* Breakpoint manipulation. */
2703 set_gdbarch_breakpoint_from_pc (gdbarch, aarch64_breakpoint_from_pc);
07b287a0 2704 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
9404b58f 2705 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
07b287a0
MS
2706
2707 /* Information about registers, etc. */
2708 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
2709 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
2710 set_gdbarch_num_regs (gdbarch, num_regs);
2711
2712 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
2713 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
2714 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
2715 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
2716 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
2717 set_tdesc_pseudo_register_reggroup_p (gdbarch,
2718 aarch64_pseudo_register_reggroup_p);
2719
2720 /* ABI */
2721 set_gdbarch_short_bit (gdbarch, 16);
2722 set_gdbarch_int_bit (gdbarch, 32);
2723 set_gdbarch_float_bit (gdbarch, 32);
2724 set_gdbarch_double_bit (gdbarch, 64);
2725 set_gdbarch_long_double_bit (gdbarch, 128);
2726 set_gdbarch_long_bit (gdbarch, 64);
2727 set_gdbarch_long_long_bit (gdbarch, 64);
2728 set_gdbarch_ptr_bit (gdbarch, 64);
2729 set_gdbarch_char_signed (gdbarch, 0);
2730 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
2731 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
2732 set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);
2733
2734 /* Internal <-> external register number maps. */
2735 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
2736
2737 /* Returning results. */
2738 set_gdbarch_return_value (gdbarch, aarch64_return_value);
2739
2740 /* Disassembly. */
2741 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
2742
2743 /* Virtual tables. */
2744 set_gdbarch_vbit_in_delta (gdbarch, 1);
2745
2746 /* Hook in the ABI-specific overrides, if they have been registered. */
2747 info.target_desc = tdesc;
2748 info.tdep_info = (void *) tdesc_data;
2749 gdbarch_init_osabi (info, gdbarch);
2750
2751 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
2752
2753 /* Add some default predicates. */
2754 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
2755 dwarf2_append_unwinders (gdbarch);
2756 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
2757
2758 frame_base_set_default (gdbarch, &aarch64_normal_base);
2759
2760 /* Now we have tuned the configuration, set a few final things,
2761 based on what the OS ABI has told us. */
2762
2763 if (tdep->jb_pc >= 0)
2764 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
2765
2766 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
2767
2768 /* Add standard register aliases. */
2769 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
2770 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
2771 value_of_aarch64_user_reg,
2772 &aarch64_register_aliases[i].regnum);
2773
2774 return gdbarch;
2775}
2776
2777static void
2778aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
2779{
2780 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2781
2782 if (tdep == NULL)
2783 return;
2784
2785 fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
2786 paddress (gdbarch, tdep->lowest_pc));
2787}
2788
/* Suppress warning from -Wmissing-prototypes.  */
extern initialize_file_ftype _initialize_aarch64_tdep;

/* Module initialization: register the AArch64 architecture with the
   gdbarch framework, build the builtin target description, and add
   the "set/show debug aarch64" maintenance commands.  */

void
_initialize_aarch64_tdep (void)
{
  gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
		    aarch64_dump_tdep);

  initialize_tdesc_aarch64 ();

  /* Debug this file's internals.  */
  add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
Set AArch64 debugging."), _("\
Show AArch64 debugging."), _("\
When on, AArch64 specific debugging is enabled."),
			   NULL,
			   show_aarch64_debug,
			   &setdebuglist, &showdebuglist);
}
99afc88b
OJ
2809
/* AArch64 process record-replay related structures, defines etc.  */

/* Bit-field helpers for decoding a 32-bit instruction word:
   submask(x) is a mask of bits [0, x]; bit(obj, st) extracts one
   bit; bits(obj, st, fn) extracts the field [st, fn].
   NOTE(review): submask expands with 1L, so the shift is undefined
   for x >= 31 on hosts with 32-bit long -- the visible callers only
   extract fields narrower than that, but confirm before widening.  */
#define submask(x) ((1L << ((x) + 1)) - 1)
#define bit(obj,st) (((obj) >> (st)) & 1)
#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))

/* Copy LENGTH register numbers from RECORD_BUF into a freshly
   allocated array assigned to REGS.  No-op when LENGTH is zero.
   The caller owns (and must free) the allocation.  */
#define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
        do  \
          { \
            unsigned int reg_len = LENGTH; \
            if (reg_len) \
              { \
                REGS = XNEWVEC (uint32_t, reg_len); \
                memcpy(&REGS[0], &RECORD_BUF[0], sizeof(uint32_t)*LENGTH); \
              } \
          } \
        while (0)

/* Copy LENGTH (length, address) pairs from RECORD_BUF into a freshly
   allocated array of struct aarch64_mem_r assigned to MEMS.  No-op
   when LENGTH is zero; the caller owns the allocation.  */
#define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
        do  \
          { \
            unsigned int mem_len = LENGTH; \
            if (mem_len) \
            { \
              MEMS =  XNEWVEC (struct aarch64_mem_r, mem_len);  \
              memcpy(&MEMS->len, &RECORD_BUF[0], \
                     sizeof(struct aarch64_mem_r) * LENGTH); \
            } \
          } \
          while (0)

/* AArch64 record/replay structures and enumerations.  */

/* One recorded memory write: LEN bytes at ADDR.  */
struct aarch64_mem_r
{
  uint64_t len;    /* Record length.  */
  uint64_t addr;   /* Memory address.  */
};

/* Result codes returned by the per-class record handlers.  */
enum aarch64_record_result
{
  AARCH64_RECORD_SUCCESS,
  AARCH64_RECORD_FAILURE,
  AARCH64_RECORD_UNSUPPORTED,
  AARCH64_RECORD_UNKNOWN
};

/* Working state while decoding and recording a single instruction.  */
typedef struct insn_decode_record_t
{
  struct gdbarch *gdbarch;
  struct regcache *regcache;
  CORE_ADDR this_addr;                 /* Address of insn to be recorded.  */
  uint32_t aarch64_insn;               /* Insn to be recorded.  */
  uint32_t mem_rec_count;              /* Count of memory records.  */
  uint32_t reg_rec_count;              /* Count of register records.  */
  uint32_t *aarch64_regs;              /* Registers to be recorded.  */
  struct aarch64_mem_r *aarch64_mems;  /* Memory locations to be recorded.  */
} insn_decode_record;
2868
/* Record handler for data processing - register instructions.

   Decodes the instruction class from bits 24-27 (and 21-23 for the
   bit-28-set forms) and records the destination register, plus the
   CPSR when the encoding sets the condition flags.  */

static unsigned int
aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
{
  uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
  uint32_t record_buf[4];

  reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);

  if (!bit (aarch64_insn_r->aarch64_insn, 28))
    {
      uint8_t setflags;

      /* Logical (shifted register).  */
      if (insn_bits24_27 == 0x0a)
	setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
      /* Add/subtract.  */
      else if (insn_bits24_27 == 0x0b)
	setflags = bit (aarch64_insn_r->aarch64_insn, 29);
      else
	return AARCH64_RECORD_UNKNOWN;

      record_buf[0] = reg_rd;
      aarch64_insn_r->reg_rec_count = 1;
      /* Flag-setting forms also clobber the CPSR.  */
      if (setflags)
	record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
    }
  else
    {
      if (insn_bits24_27 == 0x0b)
	{
	  /* Data-processing (3 source).  */
	  record_buf[0] = reg_rd;
	  aarch64_insn_r->reg_rec_count = 1;
	}
      else if (insn_bits24_27 == 0x0a)
	{
	  if (insn_bits21_23 == 0x00)
	    {
	      /* Add/subtract (with carry).  */
	      record_buf[0] = reg_rd;
	      aarch64_insn_r->reg_rec_count = 1;
	      if (bit (aarch64_insn_r->aarch64_insn, 29))
		{
		  record_buf[1] = AARCH64_CPSR_REGNUM;
		  aarch64_insn_r->reg_rec_count = 2;
		}
	    }
	  else if (insn_bits21_23 == 0x02)
	    {
	      /* Conditional compare (register) and conditional compare
		 (immediate) instructions.  */
	      record_buf[0] = AARCH64_CPSR_REGNUM;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	  else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
	    {
	      /* Conditional select.  */
	      /* Data-processing (2 source).  */
	      /* Data-processing (1 source).  */
	      record_buf[0] = reg_rd;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	  else
	    return AARCH64_RECORD_UNKNOWN;
	}
    }

  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}
2944
2945/* Record handler for data processing - immediate instructions. */
2946
2947static unsigned int
2948aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
2949{
2950 uint8_t reg_rd, insn_bit28, insn_bit23, insn_bits24_27, setflags;
2951 uint32_t record_buf[4];
2952
2953 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
2954 insn_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
2955 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
2956 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
2957
2958 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
2959 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
2960 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
2961 {
2962 record_buf[0] = reg_rd;
2963 aarch64_insn_r->reg_rec_count = 1;
2964 }
2965 else if (insn_bits24_27 == 0x01)
2966 {
2967 /* Add/Subtract (immediate). */
2968 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
2969 record_buf[0] = reg_rd;
2970 aarch64_insn_r->reg_rec_count = 1;
2971 if (setflags)
2972 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
2973 }
2974 else if (insn_bits24_27 == 0x02 && !insn_bit23)
2975 {
2976 /* Logical (immediate). */
2977 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
2978 record_buf[0] = reg_rd;
2979 aarch64_insn_r->reg_rec_count = 1;
2980 if (setflags)
2981 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
2982 }
2983 else
2984 return AARCH64_RECORD_UNKNOWN;
2985
2986 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
2987 record_buf);
2988 return AARCH64_RECORD_SUCCESS;
2989}
2990
/* Record handler for branch, exception generation and system instructions.

   Records the registers an instruction in this group can modify: the
   PC for branches (plus LR for linking forms), Rt for sysl/mrs, the
   CPSR for hint/msr(immediate) forms, and delegates SVC to the
   OS-specific syscall record hook.  */

static unsigned int
aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
  uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
  uint32_t record_buf[4];

  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
  insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);

  if (insn_bits28_31 == 0x0d)
    {
      /* Exception generation instructions. */
      if (insn_bits24_27 == 0x04)
	{
	  if (!bits (aarch64_insn_r->aarch64_insn, 2, 4) &&
	      !bits (aarch64_insn_r->aarch64_insn, 21, 23) &&
	      bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
	    {
	      ULONGEST svc_number;

	      /* SVC: read the syscall number from register 8 (x8) and
		 hand off to the OS-specific recorder.  */
	      regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
					  &svc_number);
	      return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
						   svc_number);
	    }
	  else
	    return AARCH64_RECORD_UNSUPPORTED;
	}
      /* System instructions. */
      else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
	{
	  uint32_t reg_rt, reg_crn;

	  reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
	  reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);

	  /* Record rt in case of sysl and mrs instructions.  */
	  if (bit (aarch64_insn_r->aarch64_insn, 21))
	    {
	      record_buf[0] = reg_rt;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	  /* Record cpsr for hint and msr(immediate) instructions.  */
	  else if (reg_crn == 0x02 || reg_crn == 0x04)
	    {
	      record_buf[0] = AARCH64_CPSR_REGNUM;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	}
      /* Unconditional branch (register).  */
      else if((insn_bits24_27 & 0x0e) == 0x06)
	{
	  record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
	  /* BLR also writes the link register.  */
	  if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
	    record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
	}
      else
	return AARCH64_RECORD_UNKNOWN;
    }
  /* Unconditional branch (immediate).  */
  else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
    {
      record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
      /* BL also writes the link register.  */
      if (bit (aarch64_insn_r->aarch64_insn, 31))
	record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
    }
  else
    /* Compare & branch (immediate), Test & branch (immediate) and
       Conditional branch (immediate).  */
    record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;

  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}
3070
3071/* Record handler for advanced SIMD load and store instructions. */
3072
3073static unsigned int
3074aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
3075{
3076 CORE_ADDR address;
3077 uint64_t addr_offset = 0;
3078 uint32_t record_buf[24];
3079 uint64_t record_buf_mem[24];
3080 uint32_t reg_rn, reg_rt;
3081 uint32_t reg_index = 0, mem_index = 0;
3082 uint8_t opcode_bits, size_bits;
3083
3084 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3085 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3086 size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3087 opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3088 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);
3089
3090 if (record_debug)
3091 {
3092 fprintf_unfiltered (gdb_stdlog,
3093 "Process record: Advanced SIMD load/store\n");
3094 }
3095
3096 /* Load/store single structure. */
3097 if (bit (aarch64_insn_r->aarch64_insn, 24))
3098 {
3099 uint8_t sindex, scale, selem, esize, replicate = 0;
3100 scale = opcode_bits >> 2;
3101 selem = ((opcode_bits & 0x02) |
3102 bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
3103 switch (scale)
3104 {
3105 case 1:
3106 if (size_bits & 0x01)
3107 return AARCH64_RECORD_UNKNOWN;
3108 break;
3109 case 2:
3110 if ((size_bits >> 1) & 0x01)
3111 return AARCH64_RECORD_UNKNOWN;
3112 if (size_bits & 0x01)
3113 {
3114 if (!((opcode_bits >> 1) & 0x01))
3115 scale = 3;
3116 else
3117 return AARCH64_RECORD_UNKNOWN;
3118 }
3119 break;
3120 case 3:
3121 if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
3122 {
3123 scale = size_bits;
3124 replicate = 1;
3125 break;
3126 }
3127 else
3128 return AARCH64_RECORD_UNKNOWN;
3129 default:
3130 break;
3131 }
3132 esize = 8 << scale;
3133 if (replicate)
3134 for (sindex = 0; sindex < selem; sindex++)
3135 {
3136 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3137 reg_rt = (reg_rt + 1) % 32;
3138 }
3139 else
3140 {
3141 for (sindex = 0; sindex < selem; sindex++)
3142 if (bit (aarch64_insn_r->aarch64_insn, 22))
3143 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3144 else
3145 {
3146 record_buf_mem[mem_index++] = esize / 8;
3147 record_buf_mem[mem_index++] = address + addr_offset;
3148 }
3149 addr_offset = addr_offset + (esize / 8);
3150 reg_rt = (reg_rt + 1) % 32;
3151 }
3152 }
3153 /* Load/store multiple structure. */
3154 else
3155 {
3156 uint8_t selem, esize, rpt, elements;
3157 uint8_t eindex, rindex;
3158
3159 esize = 8 << size_bits;
3160 if (bit (aarch64_insn_r->aarch64_insn, 30))
3161 elements = 128 / esize;
3162 else
3163 elements = 64 / esize;
3164
3165 switch (opcode_bits)
3166 {
3167 /*LD/ST4 (4 Registers). */
3168 case 0:
3169 rpt = 1;
3170 selem = 4;
3171 break;
3172 /*LD/ST1 (4 Registers). */
3173 case 2:
3174 rpt = 4;
3175 selem = 1;
3176 break;
3177 /*LD/ST3 (3 Registers). */
3178 case 4:
3179 rpt = 1;
3180 selem = 3;
3181 break;
3182 /*LD/ST1 (3 Registers). */
3183 case 6:
3184 rpt = 3;
3185 selem = 1;
3186 break;
3187 /*LD/ST1 (1 Register). */
3188 case 7:
3189 rpt = 1;
3190 selem = 1;
3191 break;
3192 /*LD/ST2 (2 Registers). */
3193 case 8:
3194 rpt = 1;
3195 selem = 2;
3196 break;
3197 /*LD/ST1 (2 Registers). */
3198 case 10:
3199 rpt = 2;
3200 selem = 1;
3201 break;
3202 default:
3203 return AARCH64_RECORD_UNSUPPORTED;
3204 break;
3205 }
3206 for (rindex = 0; rindex < rpt; rindex++)
3207 for (eindex = 0; eindex < elements; eindex++)
3208 {
3209 uint8_t reg_tt, sindex;
3210 reg_tt = (reg_rt + rindex) % 32;
3211 for (sindex = 0; sindex < selem; sindex++)
3212 {
3213 if (bit (aarch64_insn_r->aarch64_insn, 22))
3214 record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
3215 else
3216 {
3217 record_buf_mem[mem_index++] = esize / 8;
3218 record_buf_mem[mem_index++] = address + addr_offset;
3219 }
3220 addr_offset = addr_offset + (esize / 8);
3221 reg_tt = (reg_tt + 1) % 32;
3222 }
3223 }
3224 }
3225
3226 if (bit (aarch64_insn_r->aarch64_insn, 23))
3227 record_buf[reg_index++] = reg_rn;
3228
3229 aarch64_insn_r->reg_rec_count = reg_index;
3230 aarch64_insn_r->mem_rec_count = mem_index / 2;
3231 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3232 record_buf_mem);
3233 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3234 record_buf);
3235 return AARCH64_RECORD_SUCCESS;
3236}
3237
/* Record handler for load and store instructions.  Decodes the
   instruction in AARCH64_INSN_R and records what the instruction will
   clobber: the destination register(s) of a load, or the
   (length, address) of the memory a store will overwrite, so that
   process record can later undo it.  Advanced SIMD load/stores are
   delegated to aarch64_record_asimd_load_store.  Returns
   AARCH64_RECORD_SUCCESS, or AARCH64_RECORD_UNKNOWN for encodings that
   cannot be recorded.  */

static unsigned int
aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
{
  uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
  uint8_t insn_bit23, insn_bit21;
  uint8_t opc, size_bits, ld_flag, vector_flag;
  uint32_t reg_rn, reg_rt, reg_rt2;
  uint64_t datasize, offset;
  uint32_t record_buf[8];	/* Register numbers to record.  */
  uint64_t record_buf_mem[8];	/* (length, address) pairs to record.  */
  CORE_ADDR address;

  /* Fields shared by (most of) the load/store encoding classes.  */
  insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
  insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
  insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
  /* Bit 22 set => load (record registers), clear => store (record
     memory).  Some classes below recompute this from the opc field.  */
  ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
  /* Bit 26 set => SIMD&FP register file is the source/destination.  */
  vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
  reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);	/* Base register.  */
  reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);	/* 2nd reg of a pair.  */
  size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);

  /* Load/store exclusive.  */
  if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
    {
      if (record_debug)
	{
	  fprintf_unfiltered (gdb_stdlog,
			      "Process record: load/store exclusive\n");
	}

      if (ld_flag)
	{
	  /* Loads overwrite Rt, and Rt2 too for the pair form
	     (bit 21 set).  */
	  record_buf[0] = reg_rt;
	  aarch64_insn_r->reg_rec_count = 1;
	  if (insn_bit21)
	    {
	      record_buf[1] = reg_rt2;
	      aarch64_insn_r->reg_rec_count = 2;
	    }
	}
      else
	{
	  /* Stores: pair forms (bit 21) write twice the element size.  */
	  if (insn_bit21)
	    datasize = (8 << size_bits) * 2;
	  else
	    datasize = (8 << size_bits);
	  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
				      &address);
	  record_buf_mem[0] = datasize / 8;
	  record_buf_mem[1] = address;
	  aarch64_insn_r->mem_rec_count = 1;
	  if (!insn_bit23)
	    {
	      /* Save register rs.  */
	      record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	}
    }
  /* Load register (literal) instructions decoding.  */
  else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
    {
      if (record_debug)
	{
	  fprintf_unfiltered (gdb_stdlog,
			      "Process record: load register (literal)\n");
	}
      /* Always a load; only the destination register is clobbered.  */
      if (vector_flag)
	record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
      else
	record_buf[0] = reg_rt;
      aarch64_insn_r->reg_rec_count = 1;
    }
  /* All types of load/store pair instructions decoding.  */
  else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
    {
      if (record_debug)
	{
	  fprintf_unfiltered (gdb_stdlog,
			      "Process record: load/store pair\n");
	}

      if (ld_flag)
	{
	  /* Load pair: both Rt and Rt2 are overwritten.  */
	  if (vector_flag)
	    {
	      record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
	      record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
	    }
	  else
	    {
	      record_buf[0] = reg_rt;
	      record_buf[1] = reg_rt2;
	    }
	  aarch64_insn_r->reg_rec_count = 2;
	}
      else
	{
	  /* Store pair: compute the effective address and record the
	     two memory slots that will be written.  */
	  uint16_t imm7_off;
	  imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
	  if (!vector_flag)
	    size_bits = size_bits >> 1;
	  datasize = 8 << (2 + size_bits);
	  /* imm7 is signed; take two's complement magnitude when the
	     sign bit (0x40) is set.  */
	  offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
	  offset = offset << (2 + size_bits);	/* Scale by access size.  */
	  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
				      &address);
	  /* NOTE(review): this appears to skip applying the offset for
	     the post-indexed form, where the access uses the unmodified
	     base — confirm against the A64 encoding tables.  */
	  if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
	    {
	      if (imm7_off & 0x40)
		address = address - offset;
	      else
		address = address + offset;
	    }

	  record_buf_mem[0] = datasize / 8;
	  record_buf_mem[1] = address;
	  record_buf_mem[2] = datasize / 8;
	  record_buf_mem[3] = address + (datasize / 8);
	  aarch64_insn_r->mem_rec_count = 2;
	}
      /* Writeback forms (bit 23) also modify the base register.  */
      if (bit (aarch64_insn_r->aarch64_insn, 23))
	record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
    }
  /* Load/store register (unsigned immediate) instructions.  */
  else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
    {
      opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
      /* Unbraced nested if/else: each "else" binds to the nearest
	 "if", which is the intended parse here.  */
      if (!(opc >> 1))
	if (opc & 0x01)
	  ld_flag = 0x01;
	else
	  ld_flag = 0x0;
      else
	if (size_bits != 0x03)
	  ld_flag = 0x01;
	else
	  return AARCH64_RECORD_UNKNOWN;

      if (record_debug)
	{
	  fprintf_unfiltered (gdb_stdlog,
			      "Process record: load/store (unsigned immediate):"
			      " size %x V %d opc %x\n", size_bits, vector_flag,
			      opc);
	}

      if (!ld_flag)
	{
	  /* Store: address is base + (zero-extended imm12 scaled by
	     access size).  */
	  offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
	  datasize = 8 << size_bits;
	  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
				      &address);
	  offset = offset << size_bits;
	  address = address + offset;

	  record_buf_mem[0] = datasize >> 3;
	  record_buf_mem[1] = address;
	  aarch64_insn_r->mem_rec_count = 1;
	}
      else
	{
	  /* Load: only the destination register is clobbered.  */
	  if (vector_flag)
	    record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
	  else
	    record_buf[0] = reg_rt;
	  aarch64_insn_r->reg_rec_count = 1;
	}
    }
  /* Load/store register (register offset) instructions.  */
  else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03 &&
	   insn_bits10_11 == 0x02 && insn_bit21)
    {
      if (record_debug)
	{
	  fprintf_unfiltered (gdb_stdlog,
			      "Process record: load/store (register offset)\n");
	}
      opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
      /* Same nested dangling-else decode of the load flag as in the
	 unsigned-immediate class above.  */
      if (!(opc >> 1))
	if (opc & 0x01)
	  ld_flag = 0x01;
	else
	  ld_flag = 0x0;
      else
	if (size_bits != 0x03)
	  ld_flag = 0x01;
	else
	  return AARCH64_RECORD_UNKNOWN;

      if (!ld_flag)
	{
	  /* Store: address is base + Rm, optionally scaled by the
	     access size when bit 12 (the S bit) is set.  */
	  uint64_t reg_rm_val;
	  regcache_raw_read_unsigned (aarch64_insn_r->regcache,
		     bits (aarch64_insn_r->aarch64_insn, 16, 20), &reg_rm_val);
	  if (bit (aarch64_insn_r->aarch64_insn, 12))
	    offset = reg_rm_val << size_bits;
	  else
	    offset = reg_rm_val;
	  datasize = 8 << size_bits;
	  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
				      &address);
	  address = address + offset;
	  record_buf_mem[0] = datasize >> 3;
	  record_buf_mem[1] = address;
	  aarch64_insn_r->mem_rec_count = 1;
	}
      else
	{
	  if (vector_flag)
	    record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
	  else
	    record_buf[0] = reg_rt;
	  aarch64_insn_r->reg_rec_count = 1;
	}
    }
  /* Load/store register (immediate and unprivileged) instructions.  */
  else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03 &&
	   !insn_bit21)
    {
      if (record_debug)
	{
	  fprintf_unfiltered (gdb_stdlog,
			      "Process record: load/store (immediate and unprivileged)\n");
	}
      opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
      /* Same nested dangling-else decode of the load flag as above.  */
      if (!(opc >> 1))
	if (opc & 0x01)
	  ld_flag = 0x01;
	else
	  ld_flag = 0x0;
      else
	if (size_bits != 0x03)
	  ld_flag = 0x01;
	else
	  return AARCH64_RECORD_UNKNOWN;

      if (!ld_flag)
	{
	  /* Store: imm9 is signed; take two's complement magnitude
	     when the sign bit (0x0100) is set.  */
	  uint16_t imm9_off;
	  imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
	  offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
	  datasize = 8 << size_bits;
	  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
				      &address);
	  /* NOTE(review): bits 10-11 == 0x01 appears to be the
	     post-indexed form, which uses the unmodified base —
	     confirm against the A64 encoding tables.  */
	  if (insn_bits10_11 != 0x01)
	    {
	      if (imm9_off & 0x0100)
		address = address - offset;
	      else
		address = address + offset;
	    }
	  record_buf_mem[0] = datasize >> 3;
	  record_buf_mem[1] = address;
	  aarch64_insn_r->mem_rec_count = 1;
	}
      else
	{
	  if (vector_flag)
	    record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
	  else
	    record_buf[0] = reg_rt;
	  aarch64_insn_r->reg_rec_count = 1;
	}
      /* Pre-/post-indexed forms write back to the base register.  */
      if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
	record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
    }
  /* Advanced SIMD load/store instructions.  */
  else
    return aarch64_record_asimd_load_store (aarch64_insn_r);

  MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
	     record_buf_mem);
  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}
3520
3521/* Record handler for data processing SIMD and floating point instructions. */
3522
3523static unsigned int
3524aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
3525{
3526 uint8_t insn_bit21, opcode, rmode, reg_rd;
3527 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
3528 uint8_t insn_bits11_14;
3529 uint32_t record_buf[2];
3530
3531 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3532 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3533 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3534 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3535 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
3536 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
3537 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
3538 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3539 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3540
3541 if (record_debug)
3542 {
3543 fprintf_unfiltered (gdb_stdlog,
3544 "Process record: data processing SIMD/FP: ");
3545 }
3546
3547 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
3548 {
3549 /* Floating point - fixed point conversion instructions. */
3550 if (!insn_bit21)
3551 {
3552 if (record_debug)
3553 fprintf_unfiltered (gdb_stdlog, "FP - fixed point conversion");
3554
3555 if ((opcode >> 1) == 0x0 && rmode == 0x03)
3556 record_buf[0] = reg_rd;
3557 else
3558 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3559 }
3560 /* Floating point - conditional compare instructions. */
3561 else if (insn_bits10_11 == 0x01)
3562 {
3563 if (record_debug)
3564 fprintf_unfiltered (gdb_stdlog, "FP - conditional compare");
3565
3566 record_buf[0] = AARCH64_CPSR_REGNUM;
3567 }
3568 /* Floating point - data processing (2-source) and
3569 conditional select instructions. */
3570 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
3571 {
3572 if (record_debug)
3573 fprintf_unfiltered (gdb_stdlog, "FP - DP (2-source)");
3574
3575 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3576 }
3577 else if (insn_bits10_11 == 0x00)
3578 {
3579 /* Floating point - immediate instructions. */
3580 if ((insn_bits12_15 & 0x01) == 0x01
3581 || (insn_bits12_15 & 0x07) == 0x04)
3582 {
3583 if (record_debug)
3584 fprintf_unfiltered (gdb_stdlog, "FP - immediate");
3585 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3586 }
3587 /* Floating point - compare instructions. */
3588 else if ((insn_bits12_15 & 0x03) == 0x02)
3589 {
3590 if (record_debug)
3591 fprintf_unfiltered (gdb_stdlog, "FP - immediate");
3592 record_buf[0] = AARCH64_CPSR_REGNUM;
3593 }
3594 /* Floating point - integer conversions instructions. */
3595 if (insn_bits12_15 == 0x00)
3596 {
3597 /* Convert float to integer instruction. */
3598 if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
3599 {
3600 if (record_debug)
3601 fprintf_unfiltered (gdb_stdlog, "float to int conversion");
3602
3603 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3604 }
3605 /* Convert integer to float instruction. */
3606 else if ((opcode >> 1) == 0x01 && !rmode)
3607 {
3608 if (record_debug)
3609 fprintf_unfiltered (gdb_stdlog, "int to float conversion");
3610
3611 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3612 }
3613 /* Move float to integer instruction. */
3614 else if ((opcode >> 1) == 0x03)
3615 {
3616 if (record_debug)
3617 fprintf_unfiltered (gdb_stdlog, "move float to int");
3618
3619 if (!(opcode & 0x01))
3620 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3621 else
3622 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3623 }
3624 }
3625 }
3626 }
3627 else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
3628 {
3629 if (record_debug)
3630 fprintf_unfiltered (gdb_stdlog, "SIMD copy");
3631
3632 /* Advanced SIMD copy instructions. */
3633 if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
3634 && !bit (aarch64_insn_r->aarch64_insn, 15)
3635 && bit (aarch64_insn_r->aarch64_insn, 10))
3636 {
3637 if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
3638 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3639 else
3640 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3641 }
3642 else
3643 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3644 }
3645 /* All remaining floating point or advanced SIMD instructions. */
3646 else
3647 {
3648 if (record_debug)
3649 fprintf_unfiltered (gdb_stdlog, "all remain");
3650
3651 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3652 }
3653
3654 if (record_debug)
3655 fprintf_unfiltered (gdb_stdlog, "\n");
3656
3657 aarch64_insn_r->reg_rec_count++;
3658 gdb_assert (aarch64_insn_r->reg_rec_count == 1);
3659 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3660 record_buf);
3661 return AARCH64_RECORD_SUCCESS;
3662}
3663
3664/* Decodes insns type and invokes its record handler. */
3665
3666static unsigned int
3667aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
3668{
3669 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
3670
3671 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
3672 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
3673 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
3674 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
3675
3676 /* Data processing - immediate instructions. */
3677 if (!ins_bit26 && !ins_bit27 && ins_bit28)
3678 return aarch64_record_data_proc_imm (aarch64_insn_r);
3679
3680 /* Branch, exception generation and system instructions. */
3681 if (ins_bit26 && !ins_bit27 && ins_bit28)
3682 return aarch64_record_branch_except_sys (aarch64_insn_r);
3683
3684 /* Load and store instructions. */
3685 if (!ins_bit25 && ins_bit27)
3686 return aarch64_record_load_store (aarch64_insn_r);
3687
3688 /* Data processing - register instructions. */
3689 if (ins_bit25 && !ins_bit26 && ins_bit27)
3690 return aarch64_record_data_proc_reg (aarch64_insn_r);
3691
3692 /* Data processing - SIMD and floating point instructions. */
3693 if (ins_bit25 && ins_bit26 && ins_bit27)
3694 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
3695
3696 return AARCH64_RECORD_UNSUPPORTED;
3697}
3698
3699/* Cleans up local record registers and memory allocations. */
3700
3701static void
3702deallocate_reg_mem (insn_decode_record *record)
3703{
3704 xfree (record->aarch64_regs);
3705 xfree (record->aarch64_mems);
3706}
3707
3708/* Parse the current instruction and record the values of the registers and
3709 memory that will be changed in current instruction to record_arch_list
3710 return -1 if something is wrong. */
3711
3712int
3713aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
3714 CORE_ADDR insn_addr)
3715{
3716 uint32_t rec_no = 0;
3717 uint8_t insn_size = 4;
3718 uint32_t ret = 0;
3719 ULONGEST t_bit = 0, insn_id = 0;
3720 gdb_byte buf[insn_size];
3721 insn_decode_record aarch64_record;
3722
3723 memset (&buf[0], 0, insn_size);
3724 memset (&aarch64_record, 0, sizeof (insn_decode_record));
3725 target_read_memory (insn_addr, &buf[0], insn_size);
3726 aarch64_record.aarch64_insn
3727 = (uint32_t) extract_unsigned_integer (&buf[0],
3728 insn_size,
3729 gdbarch_byte_order (gdbarch));
3730 aarch64_record.regcache = regcache;
3731 aarch64_record.this_addr = insn_addr;
3732 aarch64_record.gdbarch = gdbarch;
3733
3734 ret = aarch64_record_decode_insn_handler (&aarch64_record);
3735 if (ret == AARCH64_RECORD_UNSUPPORTED)
3736 {
3737 printf_unfiltered (_("Process record does not support instruction "
3738 "0x%0x at address %s.\n"),
3739 aarch64_record.aarch64_insn,
3740 paddress (gdbarch, insn_addr));
3741 ret = -1;
3742 }
3743
3744 if (0 == ret)
3745 {
3746 /* Record registers. */
3747 record_full_arch_list_add_reg (aarch64_record.regcache,
3748 AARCH64_PC_REGNUM);
3749 /* Always record register CPSR. */
3750 record_full_arch_list_add_reg (aarch64_record.regcache,
3751 AARCH64_CPSR_REGNUM);
3752 if (aarch64_record.aarch64_regs)
3753 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
3754 if (record_full_arch_list_add_reg (aarch64_record.regcache,
3755 aarch64_record.aarch64_regs[rec_no]))
3756 ret = -1;
3757
3758 /* Record memories. */
3759 if (aarch64_record.aarch64_mems)
3760 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
3761 if (record_full_arch_list_add_mem
3762 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
3763 aarch64_record.aarch64_mems[rec_no].len))
3764 ret = -1;
3765
3766 if (record_full_arch_list_add_end ())
3767 ret = -1;
3768 }
3769
3770 deallocate_reg_mem (&aarch64_record);
3771 return ret;
3772}
This page took 0.310561 seconds and 4 git commands to generate.