/* Common target dependent code for GDB on AArch64 systems.

   Copyright (C) 2009-2015 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"

#include "frame.h"
#include "inferior.h"
#include "gdbcmd.h"
#include "gdbcore.h"
#include "dis-asm.h"
#include "regcache.h"
#include "reggroups.h"
#include "doublest.h"
#include "value.h"
#include "arch-utils.h"
#include "osabi.h"
#include "frame-unwind.h"
#include "frame-base.h"
#include "trad-frame.h"
#include "objfiles.h"
#include "dwarf2-frame.h"
#include "gdbtypes.h"
#include "prologue-value.h"
#include "target-descriptions.h"
#include "user-regs.h"
#include "language.h"
#include "infcall.h"
#include "ax.h"
#include "ax-gdb.h"

#include "aarch64-tdep.h"

#include "elf-bfd.h"
#include "elf/aarch64.h"

#include "vec.h"

#include "record.h"
#include "record-full.h"

#include "features/aarch64.c"

#include "arch/aarch64-insn.h"

#include "opcode/aarch64.h"

#define submask(x) ((1L << ((x) + 1)) - 1)
#define bit(obj,st) (((obj) >> (st)) & 1)
#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))

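/* Illustrative example (not from the original sources): the Rn field
   of an AArch64 instruction occupies bits 5 to 9, so
   bits (0xd65f03c0, 5, 9) expands to
   (0xd65f03c0 >> 5) & submask (4) = 0x6b2f81e & 0x1f = 30, i.e. the
   "ret" opcode 0xd65f03c0 names register x30.  */
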
/* Pseudo register base numbers.  */
#define AARCH64_Q0_REGNUM 0
#define AARCH64_D0_REGNUM (AARCH64_Q0_REGNUM + 32)
#define AARCH64_S0_REGNUM (AARCH64_D0_REGNUM + 32)
#define AARCH64_H0_REGNUM (AARCH64_S0_REGNUM + 32)
#define AARCH64_B0_REGNUM (AARCH64_H0_REGNUM + 32)

/* The standard register names, and all the valid aliases for them.  */
static const struct
{
  const char *const name;
  int regnum;
} aarch64_register_aliases[] =
{
  /* 64-bit register names.  */
  {"fp", AARCH64_FP_REGNUM},
  {"lr", AARCH64_LR_REGNUM},
  {"sp", AARCH64_SP_REGNUM},

  /* 32-bit register names.  */
  {"w0", AARCH64_X0_REGNUM + 0},
  {"w1", AARCH64_X0_REGNUM + 1},
  {"w2", AARCH64_X0_REGNUM + 2},
  {"w3", AARCH64_X0_REGNUM + 3},
  {"w4", AARCH64_X0_REGNUM + 4},
  {"w5", AARCH64_X0_REGNUM + 5},
  {"w6", AARCH64_X0_REGNUM + 6},
  {"w7", AARCH64_X0_REGNUM + 7},
  {"w8", AARCH64_X0_REGNUM + 8},
  {"w9", AARCH64_X0_REGNUM + 9},
  {"w10", AARCH64_X0_REGNUM + 10},
  {"w11", AARCH64_X0_REGNUM + 11},
  {"w12", AARCH64_X0_REGNUM + 12},
  {"w13", AARCH64_X0_REGNUM + 13},
  {"w14", AARCH64_X0_REGNUM + 14},
  {"w15", AARCH64_X0_REGNUM + 15},
  {"w16", AARCH64_X0_REGNUM + 16},
  {"w17", AARCH64_X0_REGNUM + 17},
  {"w18", AARCH64_X0_REGNUM + 18},
  {"w19", AARCH64_X0_REGNUM + 19},
  {"w20", AARCH64_X0_REGNUM + 20},
  {"w21", AARCH64_X0_REGNUM + 21},
  {"w22", AARCH64_X0_REGNUM + 22},
  {"w23", AARCH64_X0_REGNUM + 23},
  {"w24", AARCH64_X0_REGNUM + 24},
  {"w25", AARCH64_X0_REGNUM + 25},
  {"w26", AARCH64_X0_REGNUM + 26},
  {"w27", AARCH64_X0_REGNUM + 27},
  {"w28", AARCH64_X0_REGNUM + 28},
  {"w29", AARCH64_X0_REGNUM + 29},
  {"w30", AARCH64_X0_REGNUM + 30},

  /* specials */
  {"ip0", AARCH64_X0_REGNUM + 16},
  {"ip1", AARCH64_X0_REGNUM + 17}
};

/* The required core 'R' registers.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM!  */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  "pc", "cpsr"
};

/* The FP/SIMD 'V' registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM!  */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  "fpsr",
  "fpcr"
};

/* AArch64 prologue cache structure.  */
struct aarch64_prologue_cache
{
  /* The program counter at the start of the function.  It is used to
     identify this frame as a prologue frame.  */
  CORE_ADDR func;

  /* The program counter at the time this frame was created; i.e. where
     this function was called from.  It is used to identify this frame as a
     stub frame.  */
  CORE_ADDR prev_pc;

  /* The stack pointer at the time this frame was created; i.e. the
     caller's stack pointer when this function was called.  It is used
     to identify this frame.  */
  CORE_ADDR prev_sp;

  /* Is the target available to read from?  */
  int available_p;

  /* The frame base for this frame is just prev_sp - frame size.
     FRAMESIZE is the distance from the frame pointer to the
     initial stack pointer.  */
  int framesize;

  /* The register used to hold the frame pointer for this frame.  */
  int framereg;

  /* Saved register offsets.  */
  struct trad_frame_saved_reg *saved_regs;
};

static void
show_aarch64_debug (struct ui_file *file, int from_tty,
                    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
}

/* Extract a signed value from a bit field within an instruction
   encoding.

   INSN is the instruction opcode.

   WIDTH specifies the width of the bit field to extract (in bits).

   OFFSET specifies the least significant bit of the field where bits
   are numbered zero counting from least to most significant.  */

static int32_t
extract_signed_bitfield (uint32_t insn, unsigned width, unsigned offset)
{
  unsigned shift_l = sizeof (int32_t) * 8 - (offset + width);
  unsigned shift_r = sizeof (int32_t) * 8 - width;

  return ((int32_t) insn << shift_l) >> shift_r;
}

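/* Illustrative example (not from the original sources): for the
   pre-indexed store 0xa9be7bfd (stp x29, x30, [sp, #-32]!) the 7-bit
   immediate lives at bit 15.  extract_signed_bitfield (insn, 7, 15)
   shifts left by 32 - (15 + 7) = 10 so the field's sign bit lands in
   bit 31, then arithmetic-shifts right by 32 - 7 = 25, yielding the
   sign-extended value -4 (scaled by 8 elsewhere to give the byte
   offset -32).  */
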
/* Determine if specified bits within an instruction opcode match a
   specific pattern.

   INSN is the instruction opcode.

   MASK specifies the bits within the opcode that are to be tested
   against PATTERN for a match.  */

static int
decode_masked_match (uint32_t insn, uint32_t mask, uint32_t pattern)
{
  return (insn & mask) == pattern;
}

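/* Illustrative example (not from the original sources): RET is
   encoded as 0xd65f0000 with a 5-bit register field in bits 0-4, so
   "ret x30" (0xd65f03c0) satisfies
   decode_masked_match (0xd65f03c0, 0xfffffc1f, 0xd65f0000), because
   masking out the register bits leaves exactly the pattern.  */
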
/* Decode an opcode if it represents an immediate ADD or SUB instruction.

   ADDR specifies the address of the opcode.
   INSN specifies the opcode to test.
   RD receives the 'rd' field from the decoded instruction.
   RN receives the 'rn' field from the decoded instruction.
   IMM receives the 'imm' field from the decoded instruction.

   Return 1 if the opcode matches and is decoded, otherwise 0.  */

static int
aarch64_decode_add_sub_imm (CORE_ADDR addr, uint32_t insn, unsigned *rd,
                            unsigned *rn, int32_t *imm)
{
  if ((insn & 0x9f000000) == 0x91000000)
    {
      unsigned shift;
      unsigned op_is_sub;

      *rd = (insn >> 0) & 0x1f;
      *rn = (insn >> 5) & 0x1f;
      *imm = (insn >> 10) & 0xfff;
      shift = (insn >> 22) & 0x3;
      op_is_sub = (insn >> 30) & 0x1;

      switch (shift)
        {
        case 0:
          break;
        case 1:
          *imm <<= 12;
          break;
        default:
          /* UNDEFINED */
          return 0;
        }

      if (op_is_sub)
        *imm = -*imm;

      if (aarch64_debug)
        {
          debug_printf ("decode: 0x%s 0x%x add x%u, x%u, #%d\n",
                        core_addr_to_string_nz (addr), insn, *rd, *rn,
                        *imm);
        }
      return 1;
    }
  return 0;
}

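/* Illustrative example (not from the original sources): the typical
   frame-allocation instruction "sub sp, sp, #32" encodes as
   0xd10083ff.  It matches the test above (0xd10083ff & 0x9f000000
   == 0x91000000) and decodes to rd = 31, rn = 31, imm = 32 with
   op_is_sub set, so the reported immediate is -32.  */
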
/* Decode an opcode if it represents a branch via register instruction.

   ADDR specifies the address of the opcode.
   INSN specifies the opcode to test.
   IS_BLR receives the 'op' bit from the decoded instruction.
   RN receives the 'rn' field from the decoded instruction.

   Return 1 if the opcode matches and is decoded, otherwise 0.  */

static int
aarch64_decode_br (CORE_ADDR addr, uint32_t insn, int *is_blr,
                   unsigned *rn)
{
  /*         8   4   0   6   2   8   4   0 */
  /* blr  110101100011111100000000000rrrrr */
  /* br   110101100001111100000000000rrrrr */
  if (decode_masked_match (insn, 0xffdffc1f, 0xd61f0000))
    {
      *is_blr = (insn >> 21) & 1;
      *rn = (insn >> 5) & 0x1f;

      if (aarch64_debug)
        {
          debug_printf ("decode: 0x%s 0x%x %s 0x%x\n",
                        core_addr_to_string_nz (addr), insn,
                        *is_blr ? "blr" : "br", *rn);
        }

      return 1;
    }
  return 0;
}

/* Decode an opcode if it represents an ERET instruction.

   ADDR specifies the address of the opcode.
   INSN specifies the opcode to test.

   Return 1 if the opcode matches and is decoded, otherwise 0.  */

static int
aarch64_decode_eret (CORE_ADDR addr, uint32_t insn)
{
  /* eret 1101 0110 1001 1111 0000 0011 1110 0000 */
  if (insn == 0xd69f03e0)
    {
      if (aarch64_debug)
        {
          debug_printf ("decode: 0x%s 0x%x eret\n",
                        core_addr_to_string_nz (addr), insn);
        }
      return 1;
    }
  return 0;
}

/* Decode an opcode if it represents a MOVZ instruction.

   ADDR specifies the address of the opcode.
   INSN specifies the opcode to test.
   RD receives the 'rd' field from the decoded instruction.

   Return 1 if the opcode matches and is decoded, otherwise 0.  */

static int
aarch64_decode_movz (CORE_ADDR addr, uint32_t insn, unsigned *rd)
{
  if (decode_masked_match (insn, 0xff800000, 0x52800000))
    {
      *rd = (insn >> 0) & 0x1f;

      if (aarch64_debug)
        {
          debug_printf ("decode: 0x%s 0x%x movz x%u, #?\n",
                        core_addr_to_string_nz (addr), insn, *rd);
        }
      return 1;
    }
  return 0;
}

/* Decode an opcode if it represents an ORR (shifted register)
   instruction.

   ADDR specifies the address of the opcode.
   INSN specifies the opcode to test.
   RD receives the 'rd' field from the decoded instruction.
   RN receives the 'rn' field from the decoded instruction.
   RM receives the 'rm' field from the decoded instruction.
   IMM receives the 'imm6' field from the decoded instruction.

   Return 1 if the opcode matches and is decoded, otherwise 0.  */

static int
aarch64_decode_orr_shifted_register_x (CORE_ADDR addr, uint32_t insn,
                                       unsigned *rd, unsigned *rn,
                                       unsigned *rm, int32_t *imm)
{
  if (decode_masked_match (insn, 0xff200000, 0xaa000000))
    {
      *rd = (insn >> 0) & 0x1f;
      *rn = (insn >> 5) & 0x1f;
      *rm = (insn >> 16) & 0x1f;
      *imm = (insn >> 10) & 0x3f;

      if (aarch64_debug)
        {
          debug_printf ("decode: 0x%s 0x%x orr x%u, x%u, x%u, #%u\n",
                        core_addr_to_string_nz (addr), insn, *rd, *rn,
                        *rm, *imm);
        }
      return 1;
    }
  return 0;
}

/* Decode an opcode if it represents a RET instruction.

   ADDR specifies the address of the opcode.
   INSN specifies the opcode to test.
   RN receives the 'rn' field from the decoded instruction.

   Return 1 if the opcode matches and is decoded, otherwise 0.  */

static int
aarch64_decode_ret (CORE_ADDR addr, uint32_t insn, unsigned *rn)
{
  if (decode_masked_match (insn, 0xfffffc1f, 0xd65f0000))
    {
      *rn = (insn >> 5) & 0x1f;
      if (aarch64_debug)
        {
          debug_printf ("decode: 0x%s 0x%x ret x%u\n",
                        core_addr_to_string_nz (addr), insn, *rn);
        }
      return 1;
    }
  return 0;
}

/* Decode an opcode if it represents the following instruction:
   STP rt, rt2, [rn, #imm]

   ADDR specifies the address of the opcode.
   INSN specifies the opcode to test.
   RT1 receives the 'rt' field from the decoded instruction.
   RT2 receives the 'rt2' field from the decoded instruction.
   RN receives the 'rn' field from the decoded instruction.
   IMM receives the 'imm' field from the decoded instruction.

   Return 1 if the opcode matches and is decoded, otherwise 0.  */

static int
aarch64_decode_stp_offset (CORE_ADDR addr, uint32_t insn, unsigned *rt1,
                           unsigned *rt2, unsigned *rn, int32_t *imm)
{
  if (decode_masked_match (insn, 0xffc00000, 0xa9000000))
    {
      *rt1 = (insn >> 0) & 0x1f;
      *rn = (insn >> 5) & 0x1f;
      *rt2 = (insn >> 10) & 0x1f;
      *imm = extract_signed_bitfield (insn, 7, 15);
      *imm <<= 3;

      if (aarch64_debug)
        {
          debug_printf ("decode: 0x%s 0x%x stp x%u, x%u, [x%u + #%d]\n",
                        core_addr_to_string_nz (addr), insn, *rt1, *rt2,
                        *rn, *imm);
        }
      return 1;
    }
  return 0;
}

/* Decode an opcode if it represents the following instruction:
   STP rt, rt2, [rn, #imm]!

   ADDR specifies the address of the opcode.
   INSN specifies the opcode to test.
   RT1 receives the 'rt' field from the decoded instruction.
   RT2 receives the 'rt2' field from the decoded instruction.
   RN receives the 'rn' field from the decoded instruction.
   IMM receives the 'imm' field from the decoded instruction.

   Return 1 if the opcode matches and is decoded, otherwise 0.  */

static int
aarch64_decode_stp_offset_wb (CORE_ADDR addr, uint32_t insn, unsigned *rt1,
                              unsigned *rt2, unsigned *rn, int32_t *imm)
{
  if (decode_masked_match (insn, 0xffc00000, 0xa9800000))
    {
      *rt1 = (insn >> 0) & 0x1f;
      *rn = (insn >> 5) & 0x1f;
      *rt2 = (insn >> 10) & 0x1f;
      *imm = extract_signed_bitfield (insn, 7, 15);
      *imm <<= 3;

      if (aarch64_debug)
        {
          debug_printf ("decode: 0x%s 0x%x stp x%u, x%u, [x%u + #%d]!\n",
                        core_addr_to_string_nz (addr), insn, *rt1, *rt2,
                        *rn, *imm);
        }
      return 1;
    }
  return 0;
}

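/* Illustrative example (not from the original sources): the common
   prologue opener "stp x29, x30, [sp, #-32]!" (0xa9be7bfd) is
   matched here, since (0xa9be7bfd & 0xffc00000) == 0xa9800000, and
   decodes to rt1 = 29, rt2 = 30, rn = 31 (sp) and
   imm = -4 << 3 = -32.  */
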
/* Decode an opcode if it represents the following instruction:
   STUR rt, [rn, #imm]

   ADDR specifies the address of the opcode.
   INSN specifies the opcode to test.
   IS64 receives the size field from the decoded instruction.
   RT receives the 'rt' field from the decoded instruction.
   RN receives the 'rn' field from the decoded instruction.
   IMM receives the 'imm' field from the decoded instruction.

   Return 1 if the opcode matches and is decoded, otherwise 0.  */

static int
aarch64_decode_stur (CORE_ADDR addr, uint32_t insn, int *is64,
                     unsigned *rt, unsigned *rn, int32_t *imm)
{
  if (decode_masked_match (insn, 0xbfe00c00, 0xb8000000))
    {
      *is64 = (insn >> 30) & 1;
      *rt = (insn >> 0) & 0x1f;
      *rn = (insn >> 5) & 0x1f;
      *imm = extract_signed_bitfield (insn, 9, 12);

      if (aarch64_debug)
        {
          debug_printf ("decode: 0x%s 0x%x stur %c%u, [x%u + #%d]\n",
                        core_addr_to_string_nz (addr), insn,
                        *is64 ? 'x' : 'w', *rt, *rn, *imm);
        }
      return 1;
    }
  return 0;
}

/* Analyze a prologue, looking for a recognizable stack frame
   and frame pointer.  Scan until we encounter a store that could
   clobber the stack frame unexpectedly, or an unknown instruction.  */

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
                          CORE_ADDR start, CORE_ADDR limit,
                          struct aarch64_prologue_cache *cache)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  int i;
  pv_t regs[AARCH64_X_REGISTER_COUNT];
  struct pv_area *stack;
  struct cleanup *back_to;

  for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
    regs[i] = pv_register (i, 0);
  stack = make_pv_area (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));
  back_to = make_cleanup_free_pv_area (stack);

  for (; start < limit; start += 4)
    {
      uint32_t insn;
      unsigned rd;
      unsigned rn;
      unsigned rm;
      unsigned rt;
      unsigned rt1;
      unsigned rt2;
      int op_is_sub;
      int32_t imm;
      unsigned cond;
      int is64;
      int is_link;
      int is_cbnz;
      int is_tbnz;
      unsigned bit;
      int is_adrp;
      int32_t offset;

      insn = read_memory_unsigned_integer (start, 4, byte_order_for_code);

      if (aarch64_decode_add_sub_imm (start, insn, &rd, &rn, &imm))
        regs[rd] = pv_add_constant (regs[rn], imm);
      else if (aarch64_decode_adr (start, insn, &is_adrp, &rd, &offset)
               && is_adrp)
        regs[rd] = pv_unknown ();
      else if (aarch64_decode_b (start, insn, &is_link, &offset))
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (aarch64_decode_bcond (start, insn, &cond, &offset))
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (aarch64_decode_br (start, insn, &is_link, &rn))
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (aarch64_decode_cb (start, insn, &is64, &is_cbnz, &rn,
                                  &offset))
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (aarch64_decode_eret (start, insn))
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (aarch64_decode_movz (start, insn, &rd))
        regs[rd] = pv_unknown ();
      else if (aarch64_decode_orr_shifted_register_x (start, insn, &rd,
                                                      &rn, &rm, &imm))
        {
          if (imm == 0 && rn == 31)
            regs[rd] = regs[rm];
          else
            {
              if (aarch64_debug)
                {
                  debug_printf ("aarch64: prologue analysis gave up "
                                "addr=0x%s opcode=0x%x (orr x register)\n",
                                core_addr_to_string_nz (start), insn);
                }
              break;
            }
        }
      else if (aarch64_decode_ret (start, insn, &rn))
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (aarch64_decode_stur (start, insn, &is64, &rt, &rn, &offset))
        {
          pv_area_store (stack, pv_add_constant (regs[rn], offset),
                         is64 ? 8 : 4, regs[rt]);
        }
      else if (aarch64_decode_stp_offset (start, insn, &rt1, &rt2, &rn,
                                          &imm))
        {
          /* If recording this store would invalidate the store area
             (perhaps because rn is not known) then we should abandon
             further prologue analysis.  */
          if (pv_area_store_would_trash (stack,
                                         pv_add_constant (regs[rn], imm)))
            break;

          if (pv_area_store_would_trash (stack,
                                         pv_add_constant (regs[rn], imm + 8)))
            break;

          pv_area_store (stack, pv_add_constant (regs[rn], imm), 8,
                         regs[rt1]);
          pv_area_store (stack, pv_add_constant (regs[rn], imm + 8), 8,
                         regs[rt2]);
        }
      else if (aarch64_decode_stp_offset_wb (start, insn, &rt1, &rt2, &rn,
                                             &imm))
        {
          /* If recording this store would invalidate the store area
             (perhaps because rn is not known) then we should abandon
             further prologue analysis.  */
          if (pv_area_store_would_trash (stack,
                                         pv_add_constant (regs[rn], imm)))
            break;

          if (pv_area_store_would_trash (stack,
                                         pv_add_constant (regs[rn], imm + 8)))
            break;

          pv_area_store (stack, pv_add_constant (regs[rn], imm), 8,
                         regs[rt1]);
          pv_area_store (stack, pv_add_constant (regs[rn], imm + 8), 8,
                         regs[rt2]);
          regs[rn] = pv_add_constant (regs[rn], imm);
        }
      else if (aarch64_decode_tb (start, insn, &is_tbnz, &bit, &rn,
                                  &offset))
        {
          /* Stop analysis on branch.  */
          break;
        }
      else
        {
          if (aarch64_debug)
            {
              debug_printf ("aarch64: prologue analysis gave up addr=0x%s"
                            " opcode=0x%x\n",
                            core_addr_to_string_nz (start), insn);
            }
          break;
        }
    }

  if (cache == NULL)
    {
      do_cleanups (back_to);
      return start;
    }

  if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Frame pointer is fp.  Frame size is constant.  */
      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = -regs[AARCH64_FP_REGNUM].k;
    }
  else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Try the stack pointer.  */
      cache->framesize = -regs[AARCH64_SP_REGNUM].k;
      cache->framereg = AARCH64_SP_REGNUM;
    }
  else
    {
      /* We're just out of luck.  We don't know where the frame is.  */
      cache->framereg = -1;
      cache->framesize = 0;
    }

  for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
    {
      CORE_ADDR offset;

      if (pv_area_find_reg (stack, gdbarch, i, &offset))
        cache->saved_regs[i].addr = offset;
    }

  do_cleanups (back_to);
  return start;
}

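/* Illustrative walk-through (hypothetical frame, not from the
   original sources): for the common prologue

     stp x29, x30, [sp, #-32]!    ; 0xa9be7bfd
     mov x29, sp                  ; 0x910003fd, alias of add x29, sp, #0

   the loop above records stores of x29 and x30 at entry-SP - 32 and
   entry-SP - 24, updates sp to entry-SP - 32, and then copies that
   value into x29.  The code after the loop therefore selects
   AARCH64_FP_REGNUM as FRAMEREG with FRAMESIZE 32, and the two saved
   register offsets are later rebased against PREV_SP by the caller.  */
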
/* Implement the "skip_prologue" gdbarch method.  */

static CORE_ADDR
aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  unsigned long inst;
  CORE_ADDR skip_pc;
  CORE_ADDR func_addr, limit_pc;
  struct symtab_and_line sal;

  /* See if we can determine the end of the prologue via the symbol
     table.  If so, then return either PC, or the PC after the
     prologue, whichever is greater.  */
  if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
    {
      CORE_ADDR post_prologue_pc
        = skip_prologue_using_sal (gdbarch, func_addr);

      if (post_prologue_pc != 0)
        return max (pc, post_prologue_pc);
    }

  /* Can't determine prologue from the symbol table, need to examine
     instructions.  */

  /* Find an upper limit on the function prologue using the debug
     information.  If the debug information could not be used to
     provide that bound, then use an arbitrary large number as the
     upper bound.  */
  limit_pc = skip_prologue_using_sal (gdbarch, pc);
  if (limit_pc == 0)
    limit_pc = pc + 128;	/* Magic.  */

  /* Try disassembling prologue.  */
  return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
}

/* Scan the function prologue for THIS_FRAME and populate the prologue
   cache CACHE.  */

static void
aarch64_scan_prologue (struct frame_info *this_frame,
                       struct aarch64_prologue_cache *cache)
{
  CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
  CORE_ADDR prologue_start;
  CORE_ADDR prologue_end;
  CORE_ADDR prev_pc = get_frame_pc (this_frame);
  struct gdbarch *gdbarch = get_frame_arch (this_frame);

  cache->prev_pc = prev_pc;

  /* Assume we do not find a frame.  */
  cache->framereg = -1;
  cache->framesize = 0;

  if (find_pc_partial_function (block_addr, NULL, &prologue_start,
                                &prologue_end))
    {
      struct symtab_and_line sal = find_pc_line (prologue_start, 0);

      if (sal.line == 0)
        {
          /* No line info so use the current PC.  */
          prologue_end = prev_pc;
        }
      else if (sal.end < prologue_end)
        {
          /* The next line begins after the function end.  */
          prologue_end = sal.end;
        }

      prologue_end = min (prologue_end, prev_pc);
      aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
    }
  else
    {
      CORE_ADDR frame_loc;
      LONGEST saved_fp;
      LONGEST saved_lr;
      enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

      frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
      if (frame_loc == 0)
        return;

      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = 16;
      cache->saved_regs[29].addr = 0;
      cache->saved_regs[30].addr = 8;
    }
}

/* Fill in *CACHE with information about the prologue of *THIS_FRAME.  This
   function may throw an exception if the inferior's registers or memory is
   not available.  */

static void
aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
                               struct aarch64_prologue_cache *cache)
{
  CORE_ADDR unwound_fp;
  int reg;

  aarch64_scan_prologue (this_frame, cache);

  if (cache->framereg == -1)
    return;

  unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
  if (unwound_fp == 0)
    return;

  cache->prev_sp = unwound_fp + cache->framesize;

  /* Calculate actual addresses of saved registers using offsets
     determined by aarch64_analyze_prologue.  */
  for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
    if (trad_frame_addr_p (cache->saved_regs, reg))
      cache->saved_regs[reg].addr += cache->prev_sp;

  cache->func = get_frame_func (this_frame);

  cache->available_p = 1;
}

/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  TRY
    {
      aarch64_make_prologue_cache_1 (this_frame, cache);
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
        throw_exception (ex);
    }
  END_CATCH

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
                                           void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  /* Halt the backtrace at "_start".  */
  if (cache->prev_pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
    return UNWIND_OUTERMOST;

  /* We've hit a wall, stop.  */
  if (cache->prev_sp == 0)
    return UNWIND_OUTERMOST;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a normal frame is the current function's starting
   PC and the caller's SP when we were called.  */

static void
aarch64_prologue_this_id (struct frame_info *this_frame,
                          void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    *this_id = frame_id_build_unavailable_stack (cache->func);
  else
    *this_id = frame_id_build (cache->prev_sp, cache->func);
}

/* Implement the "prev_register" frame_unwind method.  */

static struct value *
aarch64_prologue_prev_register (struct frame_info *this_frame,
                                void **this_cache, int prev_regnum)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  /* If we are asked to unwind the PC, then we need to return the LR
     instead.  The prologue may save PC, but it will point into this
     frame's prologue, not the next frame's resume location.  */
  if (prev_regnum == AARCH64_PC_REGNUM)
    {
      CORE_ADDR lr;

      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      return frame_unwind_got_constant (this_frame, prev_regnum, lr);
    }

  /* SP is generally not saved to the stack, but this frame is
     identified by the next frame's stack pointer at the time of the
     call.  The value was already reconstructed into PREV_SP.  */
  /*
         +----------+  ^
         | saved lr |  |
      +->| saved fp |--+
      |  |          |
      |  |          | <- Previous SP
      |  +----------+
      |  | saved lr |
      +--| saved fp |<- FP
         |          |
         |          |<- SP
         +----------+  */
  if (prev_regnum == AARCH64_SP_REGNUM)
    return frame_unwind_got_constant (this_frame, prev_regnum,
                                      cache->prev_sp);

  return trad_frame_get_prev_register (this_frame, cache->saved_regs,
                                       prev_regnum);
}

/* AArch64 prologue unwinder.  */
struct frame_unwind aarch64_prologue_unwind =
{
  NORMAL_FRAME,
  aarch64_prologue_frame_unwind_stop_reason,
  aarch64_prologue_this_id,
  aarch64_prologue_prev_register,
  NULL,
  default_frame_sniffer
};

/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  TRY
    {
      cache->prev_sp = get_frame_register_unsigned (this_frame,
                                                    AARCH64_SP_REGNUM);
      cache->prev_pc = get_frame_pc (this_frame);
      cache->available_p = 1;
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
        throw_exception (ex);
    }
  END_CATCH

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame,
                                       void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a stub frame is the current SP and LR.  */

static void
aarch64_stub_this_id (struct frame_info *this_frame,
                      void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (cache->available_p)
    *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
  else
    *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
}

/* Implement the "sniffer" frame_unwind method.  */

static int
aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
                             struct frame_info *this_frame,
                             void **this_prologue_cache)
{
  CORE_ADDR addr_in_block;
  gdb_byte dummy[4];

  addr_in_block = get_frame_address_in_block (this_frame);
  if (in_plt_section (addr_in_block)
      /* We also use the stub unwinder if the target memory is unreadable
         to avoid having the prologue unwinder trying to read it.  */
      || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
    return 1;

  return 0;
}

/* AArch64 stub unwinder.  */
struct frame_unwind aarch64_stub_unwind =
{
  NORMAL_FRAME,
  aarch64_stub_frame_unwind_stop_reason,
  aarch64_stub_this_id,
  aarch64_prologue_prev_register,
  NULL,
  aarch64_stub_unwind_sniffer
};

/* Return the frame base address of *THIS_FRAME.  */

static CORE_ADDR
aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  return cache->prev_sp - cache->framesize;
}

/* AArch64 default frame base information.  */
struct frame_base aarch64_normal_base =
{
  &aarch64_prologue_unwind,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base
};

/* Assuming THIS_FRAME is a dummy, return the frame ID of that
   dummy frame.  The frame ID's base needs to match the TOS value
   saved by save_dummy_frame_tos () and returned from
   aarch64_push_dummy_call, and the PC needs to match the dummy
   frame's breakpoint.  */

static struct frame_id
aarch64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  return frame_id_build (get_frame_register_unsigned (this_frame,
                                                      AARCH64_SP_REGNUM),
                         get_frame_pc (this_frame));
}

/* Implement the "unwind_pc" gdbarch method.  */

static CORE_ADDR
aarch64_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  CORE_ADDR pc
    = frame_unwind_register_unsigned (this_frame, AARCH64_PC_REGNUM);

  return pc;
}

/* Implement the "unwind_sp" gdbarch method.  */

static CORE_ADDR
aarch64_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  return frame_unwind_register_unsigned (this_frame, AARCH64_SP_REGNUM);
}

/* Return the value of the REGNUM register in the previous frame of
   *THIS_FRAME.  */

static struct value *
aarch64_dwarf2_prev_register (struct frame_info *this_frame,
                              void **this_cache, int regnum)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  CORE_ADDR lr;

  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      return frame_unwind_got_constant (this_frame, regnum, lr);

    default:
      internal_error (__FILE__, __LINE__,
                      _("Unexpected register %d"), regnum);
    }
}

/* Implement the "init_reg" dwarf2_frame_ops method.  */

static void
aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
                               struct dwarf2_frame_state_reg *reg,
                               struct frame_info *this_frame)
{
  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      reg->how = DWARF2_FRAME_REG_FN;
      reg->loc.fn = aarch64_dwarf2_prev_register;
      break;
    case AARCH64_SP_REGNUM:
      reg->how = DWARF2_FRAME_REG_CFA;
      break;
    }
}

/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

typedef struct
{
  /* Value to pass on stack.  */
  const gdb_byte *data;

  /* Size in bytes of value to pass on stack.  */
  int len;
} stack_item_t;

DEF_VEC_O (stack_item_t);

/* Return the alignment (in bytes) of the given type.  */

static int
aarch64_type_align (struct type *t)
{
  int n;
  int align;
  int falign;

  t = check_typedef (t);
  switch (TYPE_CODE (t))
    {
    default:
      /* Should never happen.  */
      internal_error (__FILE__, __LINE__, _("unknown type alignment"));
      return 4;

    case TYPE_CODE_PTR:
    case TYPE_CODE_ENUM:
    case TYPE_CODE_INT:
    case TYPE_CODE_FLT:
    case TYPE_CODE_SET:
    case TYPE_CODE_RANGE:
    case TYPE_CODE_BITSTRING:
    case TYPE_CODE_REF:
    case TYPE_CODE_CHAR:
    case TYPE_CODE_BOOL:
      return TYPE_LENGTH (t);

    case TYPE_CODE_ARRAY:
    case TYPE_CODE_COMPLEX:
      return aarch64_type_align (TYPE_TARGET_TYPE (t));

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      align = 1;
      for (n = 0; n < TYPE_NFIELDS (t); n++)
        {
          falign = aarch64_type_align (TYPE_FIELD_TYPE (t, n));
          if (falign > align)
            align = falign;
        }
      return align;
    }
}

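/* Illustrative example (not from the original sources): for
   struct { char c; double d; } the loop above returns the largest
   field alignment, max (1, 8) = 8, while a complex double recurses
   to its component double and likewise yields 8.  */
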
/* Return 1 if *TY is a homogeneous floating-point aggregate as
   defined in the AAPCS64 ABI document; otherwise return 0.  */

static int
is_hfa (struct type *ty)
{
  switch (TYPE_CODE (ty))
    {
    case TYPE_CODE_ARRAY:
      {
        struct type *target_ty = TYPE_TARGET_TYPE (ty);
        if (TYPE_CODE (target_ty) == TYPE_CODE_FLT && TYPE_LENGTH (ty) <= 4)
          return 1;
        break;
      }

    case TYPE_CODE_UNION:
    case TYPE_CODE_STRUCT:
      {
        if (TYPE_NFIELDS (ty) > 0 && TYPE_NFIELDS (ty) <= 4)
          {
            struct type *member0_type;

            member0_type = check_typedef (TYPE_FIELD_TYPE (ty, 0));
            if (TYPE_CODE (member0_type) == TYPE_CODE_FLT)
              {
                int i;

                for (i = 0; i < TYPE_NFIELDS (ty); i++)
                  {
                    struct type *member1_type;

                    member1_type = check_typedef (TYPE_FIELD_TYPE (ty, i));
                    if (TYPE_CODE (member0_type) != TYPE_CODE (member1_type)
                        || (TYPE_LENGTH (member0_type)
                            != TYPE_LENGTH (member1_type)))
                      return 0;
                  }
                return 1;
              }
          }
        return 0;
      }

    default:
      break;
    }

  return 0;
}

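/* Illustrative examples (not from the original sources):
   struct { float x, y, z; } is an HFA (at most four members, all
   floating point and all identical), so it travels in v0-v2;
   struct { float x; double y; } is not an HFA (mixed member types),
   and a struct of five floats is not (too many members).  */
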
/* AArch64 function call information structure.  */
struct aarch64_call_info
{
  /* The current argument number.  */
  unsigned argnum;

  /* The next general purpose register number, equivalent to NGRN as
     described in the AArch64 Procedure Call Standard.  */
  unsigned ngrn;

  /* The next SIMD and floating point register number, equivalent to
     NSRN as described in the AArch64 Procedure Call Standard.  */
  unsigned nsrn;

  /* The next stacked argument address, equivalent to NSAA as
     described in the AArch64 Procedure Call Standard.  */
  unsigned nsaa;

  /* Stack item vector.  */
  VEC(stack_item_t) *si;
};

/* Pass a value in a sequence of consecutive X registers.  The caller
   is responsible for ensuring sufficient registers are available.  */

static void
pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
           struct aarch64_call_info *info, struct type *type,
           const bfd_byte *buf)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int len = TYPE_LENGTH (type);
  enum type_code typecode = TYPE_CODE (type);
  int regnum = AARCH64_X0_REGNUM + info->ngrn;

  info->argnum++;

  while (len > 0)
    {
      int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
      CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
                                                   byte_order);

      /* Adjust sub-word struct/union args when big-endian.  */
      if (byte_order == BFD_ENDIAN_BIG
          && partial_len < X_REGISTER_SIZE
          && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
        regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);

      if (aarch64_debug)
        {
          debug_printf ("arg %d in %s = 0x%s\n", info->argnum,
                        gdbarch_register_name (gdbarch, regnum),
                        phex (regval, X_REGISTER_SIZE));
        }
      regcache_cooked_write_unsigned (regcache, regnum, regval);
      len -= partial_len;
      buf += partial_len;
      regnum++;
    }
}

/* Attempt to marshall a value in a V register.  Return 1 if
   successful, or 0 if insufficient registers are available.  This
   function, unlike the equivalent pass_in_x() function, does not
   handle arguments spread across multiple registers.  */

static int
pass_in_v (struct gdbarch *gdbarch,
           struct regcache *regcache,
           struct aarch64_call_info *info,
           const bfd_byte *buf)
{
  if (info->nsrn < 8)
    {
      enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
      int regnum = AARCH64_V0_REGNUM + info->nsrn;

      info->argnum++;
      info->nsrn++;

      regcache_cooked_write (regcache, regnum, buf);
      if (aarch64_debug)
        {
          debug_printf ("arg %d in %s\n", info->argnum,
                        gdbarch_register_name (gdbarch, regnum));
        }
      return 1;
    }
  info->nsrn = 8;
  return 0;
}

/* Marshall an argument onto the stack.  */

static void
pass_on_stack (struct aarch64_call_info *info, struct type *type,
               const bfd_byte *buf)
{
  int len = TYPE_LENGTH (type);
  int align;
  stack_item_t item;

  info->argnum++;

  align = aarch64_type_align (type);

  /* PCS C.17 The stack should be aligned to the larger of 8 bytes or
     the natural alignment of the argument's type.  */
  align = align_up (align, 8);

  /* The AArch64 PCS requires at most doubleword alignment.  */
  if (align > 16)
    align = 16;

  if (aarch64_debug)
    {
      debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
                    info->nsaa);
    }

  item.len = len;
  item.data = buf;
  VEC_safe_push (stack_item_t, info->si, &item);

  info->nsaa += len;
  if (info->nsaa & (align - 1))
    {
      /* Push stack alignment padding.  */
      int pad = align - (info->nsaa & (align - 1));

      item.len = pad;
      item.data = buf;

      VEC_safe_push (stack_item_t, info->si, &item);
      info->nsaa += pad;
    }
}

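/* Illustrative example (not from the original sources): pushing a
   12-byte struct of ints gives align = align_up (4, 8) = 8; NSAA
   advances to 12, and since 12 & 7 == 4 a 4-byte padding item
   follows, leaving NSAA at 16 for the next stacked argument.  */
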
/* Marshall an argument into a sequence of one or more consecutive X
   registers or, if insufficient X registers are available, onto the
   stack.  */

static void
pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
                    struct aarch64_call_info *info, struct type *type,
                    const bfd_byte *buf)
{
  int len = TYPE_LENGTH (type);
  int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;

  /* PCS C.13 - Pass in registers if we have enough spare registers.  */
  if (info->ngrn + nregs <= 8)
    {
      pass_in_x (gdbarch, regcache, info, type, buf);
      info->ngrn += nregs;
    }
  else
    {
      info->ngrn = 8;
      pass_on_stack (info, type, buf);
    }
}

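/* Illustrative example (not from the original sources): a 24-byte
   struct needs nregs = (24 + 7) / 8 = 3; with ngrn = 6 the test
   6 + 3 <= 8 fails, so the whole value goes onto the stack and NGRN
   is pinned at 8 so that no later argument uses the remaining X
   registers.  */
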
/* Pass a value in a V register, or on the stack if insufficient V
   registers are available.  */

static void
pass_in_v_or_stack (struct gdbarch *gdbarch,
                    struct regcache *regcache,
                    struct aarch64_call_info *info,
                    struct type *type,
                    const bfd_byte *buf)
{
  if (!pass_in_v (gdbarch, regcache, info, buf))
    pass_on_stack (info, type, buf);
}

/* Implement the "push_dummy_call" gdbarch method.  */

static CORE_ADDR
aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
                         struct regcache *regcache, CORE_ADDR bp_addr,
                         int nargs,
                         struct value **args, CORE_ADDR sp, int struct_return,
                         CORE_ADDR struct_addr)
{
  int nstack = 0;
  int argnum;
  int x_argreg;
  int v_argreg;
  struct aarch64_call_info info;
  struct type *func_type;
  struct type *return_type;
  int lang_struct_return;

  memset (&info, 0, sizeof (info));

  /* We need to know what the type of the called function is in order
     to determine the number of named/anonymous arguments for the
     actual argument placement, and the return type in order to handle
     the return value correctly.

     The generic code above us views the decision of return in memory
     or return in registers as a two-stage process.  The language
     handler is consulted first and may decide to return in memory (e.g.
     a class with a copy constructor returned by value); this will cause
     the generic code to allocate space AND insert an initial leading
     argument.

     If the language code does not decide to pass in memory then the
     target code is consulted.

     If the language code decides to pass in memory we want to move
     the pointer inserted as the initial argument from the argument
     list and into X8, the conventional AArch64 struct return pointer
     register.

     This is slightly awkward; ideally the flag "lang_struct_return"
     would be passed to the target's implementation of push_dummy_call.
     Rather than change the target interface, we call the language code
     directly ourselves.  */

  func_type = check_typedef (value_type (function));

  /* Dereference function pointer types.  */
  if (TYPE_CODE (func_type) == TYPE_CODE_PTR)
    func_type = TYPE_TARGET_TYPE (func_type);

  gdb_assert (TYPE_CODE (func_type) == TYPE_CODE_FUNC
              || TYPE_CODE (func_type) == TYPE_CODE_METHOD);

  /* If language_pass_by_reference () returned true, we will have been
     given an additional initial argument, a hidden pointer to the
     return slot in memory.  */
  return_type = TYPE_TARGET_TYPE (func_type);
  lang_struct_return = language_pass_by_reference (return_type);

  /* Set the return address.  For the AArch64, the return breakpoint
     is always at BP_ADDR.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);

  /* If we were given an initial argument for the return slot because
     lang_struct_return was true, lose it.  */
  if (lang_struct_return)
    {
      args++;
      nargs--;
    }

  /* The struct_return pointer occupies X8.  */
  if (struct_return || lang_struct_return)
    {
      if (aarch64_debug)
        {
          debug_printf ("struct return in %s = 0x%s\n",
                        gdbarch_register_name (gdbarch,
                                               AARCH64_STRUCT_RETURN_REGNUM),
                        paddress (gdbarch, struct_addr));
        }
      regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
                                      struct_addr);
    }

  for (argnum = 0; argnum < nargs; argnum++)
    {
      struct value *arg = args[argnum];
      struct type *arg_type;
      int len;

      arg_type = check_typedef (value_type (arg));
      len = TYPE_LENGTH (arg_type);

      switch (TYPE_CODE (arg_type))
        {
        case TYPE_CODE_INT:
        case TYPE_CODE_BOOL:
        case TYPE_CODE_CHAR:
        case TYPE_CODE_RANGE:
        case TYPE_CODE_ENUM:
          if (len < 4)
            {
              /* Promote to 32 bit integer.  */
              if (TYPE_UNSIGNED (arg_type))
                arg_type = builtin_type (gdbarch)->builtin_uint32;
              else
                arg_type = builtin_type (gdbarch)->builtin_int32;
              arg = value_cast (arg_type, arg);
            }
          pass_in_x_or_stack (gdbarch, regcache, &info, arg_type,
                              value_contents (arg));
          break;

        case TYPE_CODE_COMPLEX:
          if (info.nsrn <= 6)
            {
              const bfd_byte *buf = value_contents (arg);
              struct type *target_type =
                check_typedef (TYPE_TARGET_TYPE (arg_type));

              pass_in_v (gdbarch, regcache, &info, buf);
              pass_in_v (gdbarch, regcache, &info,
                         buf + TYPE_LENGTH (target_type));
            }
          else
            {
              info.nsrn = 8;
              pass_on_stack (&info, arg_type, value_contents (arg));
            }
          break;

        case TYPE_CODE_FLT:
          pass_in_v_or_stack (gdbarch, regcache, &info, arg_type,
                              value_contents (arg));
          break;

        case TYPE_CODE_STRUCT:
        case TYPE_CODE_ARRAY:
        case TYPE_CODE_UNION:
          if (is_hfa (arg_type))
            {
              int elements = TYPE_NFIELDS (arg_type);

              /* Homogeneous Aggregates */
              if (info.nsrn + elements < 8)
                {
                  int i;

                  for (i = 0; i < elements; i++)
                    {
                      /* We know that we have sufficient registers
                         available therefore this will never fall back
                         to the stack.  */
                      struct value *field =
                        value_primitive_field (arg, 0, i, arg_type);
                      struct type *field_type =
                        check_typedef (value_type (field));

                      pass_in_v_or_stack (gdbarch, regcache, &info, field_type,
                                          value_contents_writeable (field));
                    }
                }
              else
                {
                  info.nsrn = 8;
                  pass_on_stack (&info, arg_type, value_contents (arg));
                }
            }
          else if (len > 16)
            {
              /* PCS B.7 Aggregates larger than 16 bytes are passed by
                 invisible reference.  */

              /* Allocate aligned storage.  */
              sp = align_down (sp - len, 16);

              /* Write the real data into the stack.  */
              write_memory (sp, value_contents (arg), len);

              /* Construct the indirection.  */
              arg_type = lookup_pointer_type (arg_type);
              arg = value_from_pointer (arg_type, sp);
              pass_in_x_or_stack (gdbarch, regcache, &info, arg_type,
                                  value_contents (arg));
            }
          else
            /* PCS C.15 / C.18 multiple values pass.  */
            pass_in_x_or_stack (gdbarch, regcache, &info, arg_type,
                                value_contents (arg));
          break;

        default:
          pass_in_x_or_stack (gdbarch, regcache, &info, arg_type,
                              value_contents (arg));
          break;
        }
    }

  /* Make sure the stack retains 16-byte alignment.  */
  if (info.nsaa & 15)
    sp -= 16 - (info.nsaa & 15);

  while (!VEC_empty (stack_item_t, info.si))
    {
      stack_item_t *si = VEC_last (stack_item_t, info.si);

      sp -= si->len;
      write_memory (sp, si->data, si->len);
      VEC_pop (stack_item_t, info.si);
    }

  VEC_free (stack_item_t, info.si);

  /* Finally, update the SP register.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);

  return sp;
}

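/* Illustrative example (hypothetical call, not from the original
   sources): for func (1.5, 42, big) where BIG is a 24-byte struct,
   the code above places 1.5 in v0 (NSRN 0 -> 1) and 42 in x0
   (NGRN 0 -> 1), copies BIG to freshly aligned stack storage and
   passes its address in x1 per PCS B.7 (NGRN 1 -> 2), then re-aligns
   SP and writes it back.  */
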
1668 /* Implement the "frame_align" gdbarch method. */
1669
1670 static CORE_ADDR
1671 aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
1672 {
1673 /* Align the stack to sixteen bytes. */
1674 return sp & ~(CORE_ADDR) 15;
1675 }
1676
1677 /* Return the type for an AdvSISD Q register. */
1678
1679 static struct type *
1680 aarch64_vnq_type (struct gdbarch *gdbarch)
1681 {
1682 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1683
1684 if (tdep->vnq_type == NULL)
1685 {
1686 struct type *t;
1687 struct type *elem;
1688
1689 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
1690 TYPE_CODE_UNION);
1691
1692 elem = builtin_type (gdbarch)->builtin_uint128;
1693 append_composite_type_field (t, "u", elem);
1694
1695 elem = builtin_type (gdbarch)->builtin_int128;
1696 append_composite_type_field (t, "s", elem);
1697
1698 tdep->vnq_type = t;
1699 }
1700
1701 return tdep->vnq_type;
1702 }
1703
1704 /* Return the type for an AdvSISD D register. */
1705
1706 static struct type *
1707 aarch64_vnd_type (struct gdbarch *gdbarch)
1708 {
1709 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1710
1711 if (tdep->vnd_type == NULL)
1712 {
1713 struct type *t;
1714 struct type *elem;
1715
1716 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
1717 TYPE_CODE_UNION);
1718
1719 elem = builtin_type (gdbarch)->builtin_double;
1720 append_composite_type_field (t, "f", elem);
1721
1722 elem = builtin_type (gdbarch)->builtin_uint64;
1723 append_composite_type_field (t, "u", elem);
1724
1725 elem = builtin_type (gdbarch)->builtin_int64;
1726 append_composite_type_field (t, "s", elem);
1727
1728 tdep->vnd_type = t;
1729 }
1730
1731 return tdep->vnd_type;
1732 }
1733
1734 /* Return the type for an AdvSISD S register. */
1735
1736 static struct type *
1737 aarch64_vns_type (struct gdbarch *gdbarch)
1738 {
1739 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1740
1741 if (tdep->vns_type == NULL)
1742 {
1743 struct type *t;
1744 struct type *elem;
1745
1746 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
1747 TYPE_CODE_UNION);
1748
1749 elem = builtin_type (gdbarch)->builtin_float;
1750 append_composite_type_field (t, "f", elem);
1751
1752 elem = builtin_type (gdbarch)->builtin_uint32;
1753 append_composite_type_field (t, "u", elem);
1754
1755 elem = builtin_type (gdbarch)->builtin_int32;
1756 append_composite_type_field (t, "s", elem);
1757
1758 tdep->vns_type = t;
1759 }
1760
1761 return tdep->vns_type;
1762 }
1763
1764 /* Return the type for an AdvSISD H register. */
1765
1766 static struct type *
1767 aarch64_vnh_type (struct gdbarch *gdbarch)
1768 {
1769 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1770
1771 if (tdep->vnh_type == NULL)
1772 {
1773 struct type *t;
1774 struct type *elem;
1775
1776 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
1777 TYPE_CODE_UNION);
1778
1779 elem = builtin_type (gdbarch)->builtin_uint16;
1780 append_composite_type_field (t, "u", elem);
1781
1782 elem = builtin_type (gdbarch)->builtin_int16;
1783 append_composite_type_field (t, "s", elem);
1784
1785 tdep->vnh_type = t;
1786 }
1787
1788 return tdep->vnh_type;
1789 }
1790
1791 /* Return the type for an AdvSISD B register. */
1792
1793 static struct type *
1794 aarch64_vnb_type (struct gdbarch *gdbarch)
1795 {
1796 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1797
1798 if (tdep->vnb_type == NULL)
1799 {
1800 struct type *t;
1801 struct type *elem;
1802
1803 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
1804 TYPE_CODE_UNION);
1805
1806 elem = builtin_type (gdbarch)->builtin_uint8;
1807 append_composite_type_field (t, "u", elem);
1808
1809 elem = builtin_type (gdbarch)->builtin_int8;
1810 append_composite_type_field (t, "s", elem);
1811
1812 tdep->vnb_type = t;
1813 }
1814
1815 return tdep->vnb_type;
1816 }
1817
1818 /* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
1819
1820 static int
1821 aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
1822 {
1823 if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
1824 return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;
1825
1826 if (reg == AARCH64_DWARF_SP)
1827 return AARCH64_SP_REGNUM;
1828
1829 if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
1830 return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;
1831
1832 return -1;
1833 }
1834 \f
1835
1836 /* Implement the "print_insn" gdbarch method. */
1837
1838 static int
1839 aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
1840 {
1841 info->symbols = NULL;
1842 return print_insn_aarch64 (memaddr, info);
1843 }
1844
1845 /* AArch64 BRK software debug mode instruction.
1846 Note that AArch64 code is always little-endian.
1847 1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000. */
1848 static const gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
1849
1850 /* Implement the "breakpoint_from_pc" gdbarch method. */
1851
1852 static const gdb_byte *
1853 aarch64_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr,
1854 int *lenptr)
1855 {
1856 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1857
1858 *lenptr = sizeof (aarch64_default_breakpoint);
1859 return aarch64_default_breakpoint;
1860 }
1861
1862 /* Extract from an array REGS containing the (raw) register state a
1863 function return value of type TYPE, and copy that, in virtual
1864 format, into VALBUF. */
1865
1866 static void
1867 aarch64_extract_return_value (struct type *type, struct regcache *regs,
1868 gdb_byte *valbuf)
1869 {
1870 struct gdbarch *gdbarch = get_regcache_arch (regs);
1871 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1872
1873 if (TYPE_CODE (type) == TYPE_CODE_FLT)
1874 {
1875 bfd_byte buf[V_REGISTER_SIZE];
1876 int len = TYPE_LENGTH (type);
1877
1878 regcache_cooked_read (regs, AARCH64_V0_REGNUM, buf);
1879 memcpy (valbuf, buf, len);
1880 }
1881 else if (TYPE_CODE (type) == TYPE_CODE_INT
1882 || TYPE_CODE (type) == TYPE_CODE_CHAR
1883 || TYPE_CODE (type) == TYPE_CODE_BOOL
1884 || TYPE_CODE (type) == TYPE_CODE_PTR
1885 || TYPE_CODE (type) == TYPE_CODE_REF
1886 || TYPE_CODE (type) == TYPE_CODE_ENUM)
1887 {
1888 /* If the the type is a plain integer, then the access is
1889 straight-forward. Otherwise we have to play around a bit
1890 more. */
1891 int len = TYPE_LENGTH (type);
1892 int regno = AARCH64_X0_REGNUM;
1893 ULONGEST tmp;
1894
1895 while (len > 0)
1896 {
1897 /* By using store_unsigned_integer we avoid having to do
1898 anything special for small big-endian values. */
1899 regcache_cooked_read_unsigned (regs, regno++, &tmp);
1900 store_unsigned_integer (valbuf,
1901 (len > X_REGISTER_SIZE
1902 ? X_REGISTER_SIZE : len), byte_order, tmp);
1903 len -= X_REGISTER_SIZE;
1904 valbuf += X_REGISTER_SIZE;
1905 }
1906 }
1907 else if (TYPE_CODE (type) == TYPE_CODE_COMPLEX)
1908 {
1909 int regno = AARCH64_V0_REGNUM;
1910 bfd_byte buf[V_REGISTER_SIZE];
1911 struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
1912 int len = TYPE_LENGTH (target_type);
1913
1914 regcache_cooked_read (regs, regno, buf);
1915 memcpy (valbuf, buf, len);
1916 valbuf += len;
1917 regcache_cooked_read (regs, regno + 1, buf);
1918 memcpy (valbuf, buf, len);
1919 valbuf += len;
1920 }
1921 else if (is_hfa (type))
1922 {
1923 int elements = TYPE_NFIELDS (type);
1924 struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
1925 int len = TYPE_LENGTH (member_type);
1926 int i;
1927
1928 for (i = 0; i < elements; i++)
1929 {
1930 int regno = AARCH64_V0_REGNUM + i;
1931 bfd_byte buf[X_REGISTER_SIZE];
1932
1933 if (aarch64_debug)
1934 {
1935 debug_printf ("read HFA return value element %d from %s\n",
1936 i + 1,
1937 gdbarch_register_name (gdbarch, regno));
1938 }
1939 regcache_cooked_read (regs, regno, buf);
1940
1941 memcpy (valbuf, buf, len);
1942 valbuf += len;
1943 }
1944 }
1945 else
1946 {
1947 /* For a structure or union the behaviour is as if the value had
1948 been stored to word-aligned memory and then loaded into
1949 registers with 64-bit load instruction(s). */
1950 int len = TYPE_LENGTH (type);
1951 int regno = AARCH64_X0_REGNUM;
1952 bfd_byte buf[X_REGISTER_SIZE];
1953
1954 while (len > 0)
1955 {
1956 regcache_cooked_read (regs, regno++, buf);
1957 memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
1958 len -= X_REGISTER_SIZE;
1959 valbuf += X_REGISTER_SIZE;
1960 }
1961 }
1962 }
1963
1964
1965 /* Will a function return an aggregate type in memory or in a
1966 register? Return 0 if an aggregate type can be returned in a
1967 register, 1 if it must be returned in memory. */
1968
1969 static int
1970 aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
1971 {
1974
1975 type = check_typedef (type);
1976
1977 /* In the AArch64 ABI, "integer" like aggregate types are returned
1978 in registers. For an aggregate type to be integer like, its size
1979 must be less than or equal to 16 bytes (2 * X_REGISTER_SIZE). */
1980
1981 if (is_hfa (type))
1982 {
1983 /* PCS B.5 If the argument is a Named HFA, then the argument is
1984 used unmodified. */
1985 return 0;
1986 }
1987
1988 if (TYPE_LENGTH (type) > 16)
1989 {
1990 /* PCS B.6 Aggregates larger than 16 bytes are passed by
1991 invisible reference. */
1992
1993 return 1;
1994 }
1995
1996 return 0;
1997 }
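
/* For instance (a sketch): under the rules above
     struct q { double a, b, c, d; }
   is a four-member HFA and stays in registers (V0-V3) even though it
   is 32 bytes, while
     struct big { long a, b, c; }
   is 24 bytes, is not an HFA, and so is returned in memory through
   the AAPCS64 indirect result register (X8). */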
1998
1999 /* Write into appropriate registers a function return value of type
2000 TYPE, given in virtual format. */
2001
2002 static void
2003 aarch64_store_return_value (struct type *type, struct regcache *regs,
2004 const gdb_byte *valbuf)
2005 {
2006 struct gdbarch *gdbarch = get_regcache_arch (regs);
2007 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2008
2009 if (TYPE_CODE (type) == TYPE_CODE_FLT)
2010 {
2011 bfd_byte buf[V_REGISTER_SIZE];
2012 int len = TYPE_LENGTH (type);
2013
2014 memcpy (buf, valbuf, len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
2015 regcache_cooked_write (regs, AARCH64_V0_REGNUM, buf);
2016 }
2017 else if (TYPE_CODE (type) == TYPE_CODE_INT
2018 || TYPE_CODE (type) == TYPE_CODE_CHAR
2019 || TYPE_CODE (type) == TYPE_CODE_BOOL
2020 || TYPE_CODE (type) == TYPE_CODE_PTR
2021 || TYPE_CODE (type) == TYPE_CODE_REF
2022 || TYPE_CODE (type) == TYPE_CODE_ENUM)
2023 {
2024 if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
2025 {
2026 /* Values of one word or less are zero/sign-extended and
2027 returned in X0. */
2028 bfd_byte tmpbuf[X_REGISTER_SIZE];
2029 LONGEST val = unpack_long (type, valbuf);
2030
2031 store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
2032 regcache_cooked_write (regs, AARCH64_X0_REGNUM, tmpbuf);
2033 }
2034 else
2035 {
2036 /* Integral values greater than one word are stored in
2037 consecutive registers starting with X0. This will always
2038 be a multiple of the register size. */
2039 int len = TYPE_LENGTH (type);
2040 int regno = AARCH64_X0_REGNUM;
2041
2042 while (len > 0)
2043 {
2044 regcache_cooked_write (regs, regno++, valbuf);
2045 len -= X_REGISTER_SIZE;
2046 valbuf += X_REGISTER_SIZE;
2047 }
2048 }
2049 }
2050 else if (is_hfa (type))
2051 {
2052 int elements = TYPE_NFIELDS (type);
2053 struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
2054 int len = TYPE_LENGTH (member_type);
2055 int i;
2056
2057 for (i = 0; i < elements; i++)
2058 {
2059 int regno = AARCH64_V0_REGNUM + i;
2060 bfd_byte tmpbuf[MAX_REGISTER_SIZE];
2061
2062 if (aarch64_debug)
2063 {
2064 debug_printf ("write HFA return value element %d to %s\n",
2065 i + 1,
2066 gdbarch_register_name (gdbarch, regno));
2067 }
2068
2069 memcpy (tmpbuf, valbuf, len);
2070 regcache_cooked_write (regs, regno, tmpbuf);
2071 valbuf += len;
2072 }
2073 }
2074 else
2075 {
2076 /* For a structure or union the behaviour is as if the value had
2077 been stored to word-aligned memory and then loaded into
2078 registers with 64-bit load instruction(s). */
2079 int len = TYPE_LENGTH (type);
2080 int regno = AARCH64_X0_REGNUM;
2081 bfd_byte tmpbuf[X_REGISTER_SIZE];
2082
2083 while (len > 0)
2084 {
2085 memcpy (tmpbuf, valbuf,
2086 len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2087 regcache_cooked_write (regs, regno++, tmpbuf);
2088 len -= X_REGISTER_SIZE;
2089 valbuf += X_REGISTER_SIZE;
2090 }
2091 }
2092 }
2093
2094 /* Implement the "return_value" gdbarch method. */
2095
2096 static enum return_value_convention
2097 aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
2098 struct type *valtype, struct regcache *regcache,
2099 gdb_byte *readbuf, const gdb_byte *writebuf)
2100 {
2103 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
2104 || TYPE_CODE (valtype) == TYPE_CODE_UNION
2105 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
2106 {
2107 if (aarch64_return_in_memory (gdbarch, valtype))
2108 {
2109 if (aarch64_debug)
2110 debug_printf ("return value in memory\n");
2111 return RETURN_VALUE_STRUCT_CONVENTION;
2112 }
2113 }
2114
2115 if (writebuf)
2116 aarch64_store_return_value (valtype, regcache, writebuf);
2117
2118 if (readbuf)
2119 aarch64_extract_return_value (valtype, regcache, readbuf);
2120
2121 if (aarch64_debug)
2122 debug_printf ("return value in registers\n");
2123
2124 return RETURN_VALUE_REGISTER_CONVENTION;
2125 }
2126
2127 /* Implement the "get_longjmp_target" gdbarch method. */
2128
2129 static int
2130 aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2131 {
2132 CORE_ADDR jb_addr;
2133 gdb_byte buf[X_REGISTER_SIZE];
2134 struct gdbarch *gdbarch = get_frame_arch (frame);
2135 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2136 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2137
2138 jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
2139
2140 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
2141 X_REGISTER_SIZE))
2142 return 0;
2143
2144 *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
2145 return 1;
2146 }
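
/* For example (a sketch): on a target whose jmp_buf stores the saved
   PC as the Nth 8-byte element (tdep->jb_pc == N, tdep->jb_elt_size
   == 8), the code above reads the saved PC from jb_addr + N * 8,
   where jb_addr is the jmp_buf pointer passed in X0. The slot index
   is OS-ABI specific and is filled in by the OS-ABI handlers. */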
2147
2148 /* Implement the "gen_return_address" gdbarch method. */
2149
2150 static void
2151 aarch64_gen_return_address (struct gdbarch *gdbarch,
2152 struct agent_expr *ax, struct axs_value *value,
2153 CORE_ADDR scope)
2154 {
2155 value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
2156 value->kind = axs_lvalue_register;
2157 value->u.reg = AARCH64_LR_REGNUM;
2158 }
2159 \f
2160
2161 /* Return the pseudo register name corresponding to register regnum. */
2162
2163 static const char *
2164 aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
2165 {
2166 static const char *const q_name[] =
2167 {
2168 "q0", "q1", "q2", "q3",
2169 "q4", "q5", "q6", "q7",
2170 "q8", "q9", "q10", "q11",
2171 "q12", "q13", "q14", "q15",
2172 "q16", "q17", "q18", "q19",
2173 "q20", "q21", "q22", "q23",
2174 "q24", "q25", "q26", "q27",
2175 "q28", "q29", "q30", "q31",
2176 };
2177
2178 static const char *const d_name[] =
2179 {
2180 "d0", "d1", "d2", "d3",
2181 "d4", "d5", "d6", "d7",
2182 "d8", "d9", "d10", "d11",
2183 "d12", "d13", "d14", "d15",
2184 "d16", "d17", "d18", "d19",
2185 "d20", "d21", "d22", "d23",
2186 "d24", "d25", "d26", "d27",
2187 "d28", "d29", "d30", "d31",
2188 };
2189
2190 static const char *const s_name[] =
2191 {
2192 "s0", "s1", "s2", "s3",
2193 "s4", "s5", "s6", "s7",
2194 "s8", "s9", "s10", "s11",
2195 "s12", "s13", "s14", "s15",
2196 "s16", "s17", "s18", "s19",
2197 "s20", "s21", "s22", "s23",
2198 "s24", "s25", "s26", "s27",
2199 "s28", "s29", "s30", "s31",
2200 };
2201
2202 static const char *const h_name[] =
2203 {
2204 "h0", "h1", "h2", "h3",
2205 "h4", "h5", "h6", "h7",
2206 "h8", "h9", "h10", "h11",
2207 "h12", "h13", "h14", "h15",
2208 "h16", "h17", "h18", "h19",
2209 "h20", "h21", "h22", "h23",
2210 "h24", "h25", "h26", "h27",
2211 "h28", "h29", "h30", "h31",
2212 };
2213
2214 static const char *const b_name[] =
2215 {
2216 "b0", "b1", "b2", "b3",
2217 "b4", "b5", "b6", "b7",
2218 "b8", "b9", "b10", "b11",
2219 "b12", "b13", "b14", "b15",
2220 "b16", "b17", "b18", "b19",
2221 "b20", "b21", "b22", "b23",
2222 "b24", "b25", "b26", "b27",
2223 "b28", "b29", "b30", "b31",
2224 };
2225
2226 regnum -= gdbarch_num_regs (gdbarch);
2227
2228 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2229 return q_name[regnum - AARCH64_Q0_REGNUM];
2230
2231 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2232 return d_name[regnum - AARCH64_D0_REGNUM];
2233
2234 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2235 return s_name[regnum - AARCH64_S0_REGNUM];
2236
2237 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2238 return h_name[regnum - AARCH64_H0_REGNUM];
2239
2240 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2241 return b_name[regnum - AARCH64_B0_REGNUM];
2242
2243 internal_error (__FILE__, __LINE__,
2244 _("aarch64_pseudo_register_name: bad register number %d"),
2245 regnum);
2246 }
2247
2248 /* Implement the "pseudo_register_type" tdesc_arch_data method. */
2249
2250 static struct type *
2251 aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
2252 {
2253 regnum -= gdbarch_num_regs (gdbarch);
2254
2255 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2256 return aarch64_vnq_type (gdbarch);
2257
2258 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2259 return aarch64_vnd_type (gdbarch);
2260
2261 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2262 return aarch64_vns_type (gdbarch);
2263
2264 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2265 return aarch64_vnh_type (gdbarch);
2266
2267 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2268 return aarch64_vnb_type (gdbarch);
2269
2270 internal_error (__FILE__, __LINE__,
2271 _("aarch64_pseudo_register_type: bad register number %d"),
2272 regnum);
2273 }
2274
2275 /* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2276
2277 static int
2278 aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
2279 struct reggroup *group)
2280 {
2281 regnum -= gdbarch_num_regs (gdbarch);
2282
2283 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2284 return group == all_reggroup || group == vector_reggroup;
2285 else if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2286 return (group == all_reggroup || group == vector_reggroup
2287 || group == float_reggroup);
2288 else if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2289 return (group == all_reggroup || group == vector_reggroup
2290 || group == float_reggroup);
2291 else if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2292 return group == all_reggroup || group == vector_reggroup;
2293 else if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2294 return group == all_reggroup || group == vector_reggroup;
2295
2296 return group == all_reggroup;
2297 }
2298
2299 /* Implement the "pseudo_register_read_value" gdbarch method. */
2300
2301 static struct value *
2302 aarch64_pseudo_read_value (struct gdbarch *gdbarch,
2303 struct regcache *regcache,
2304 int regnum)
2305 {
2306 gdb_byte reg_buf[MAX_REGISTER_SIZE];
2307 struct value *result_value;
2308 gdb_byte *buf;
2309
2310 result_value = allocate_value (register_type (gdbarch, regnum));
2311 VALUE_LVAL (result_value) = lval_register;
2312 VALUE_REGNUM (result_value) = regnum;
2313 buf = value_contents_raw (result_value);
2314
2315 regnum -= gdbarch_num_regs (gdbarch);
2316
2317 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2318 {
2319 enum register_status status;
2320 unsigned v_regnum;
2321
2322 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2323 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2324 if (status != REG_VALID)
2325 mark_value_bytes_unavailable (result_value, 0,
2326 TYPE_LENGTH (value_type (result_value)));
2327 else
2328 memcpy (buf, reg_buf, Q_REGISTER_SIZE);
2329 return result_value;
2330 }
2331
2332 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2333 {
2334 enum register_status status;
2335 unsigned v_regnum;
2336
2337 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2338 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2339 if (status != REG_VALID)
2340 mark_value_bytes_unavailable (result_value, 0,
2341 TYPE_LENGTH (value_type (result_value)));
2342 else
2343 memcpy (buf, reg_buf, D_REGISTER_SIZE);
2344 return result_value;
2345 }
2346
2347 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2348 {
2349 enum register_status status;
2350 unsigned v_regnum;
2351
2352 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2353 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2354 if (status != REG_VALID)
2355 mark_value_bytes_unavailable (result_value, 0,
2356 TYPE_LENGTH (value_type (result_value)));
2357 else
2358 memcpy (buf, reg_buf, S_REGISTER_SIZE);
2359 return result_value;
2360 }
2361
2362 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2363 {
2364 enum register_status status;
2365 unsigned v_regnum;
2366
2367 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2368 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2369 if (status != REG_VALID)
2370 mark_value_bytes_unavailable (result_value, 0,
2371 TYPE_LENGTH (value_type (result_value)));
2372 else
2373 memcpy (buf, reg_buf, H_REGISTER_SIZE);
2374 return result_value;
2375 }
2376
2377 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2378 {
2379 enum register_status status;
2380 unsigned v_regnum;
2381
2382 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2383 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2384 if (status != REG_VALID)
2385 mark_value_bytes_unavailable (result_value, 0,
2386 TYPE_LENGTH (value_type (result_value)));
2387 else
2388 memcpy (buf, reg_buf, B_REGISTER_SIZE);
2389 return result_value;
2390 }
2391
2392 gdb_assert_not_reached ("regnum out of bounds");
2393 }
2394
2395 /* Implement the "pseudo_register_write" gdbarch method. */
2396
2397 static void
2398 aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2399 int regnum, const gdb_byte *buf)
2400 {
2401 gdb_byte reg_buf[MAX_REGISTER_SIZE];
2402
2403 /* Ensure the register buffer is zero. We want gdb writes of the
2404 various 'scalar' pseudo registers to behave like architectural
2405 writes: register width bytes are written, the remainder is set
2406 to zero. */
2407 memset (reg_buf, 0, sizeof (reg_buf));
2408
2409 regnum -= gdbarch_num_regs (gdbarch);
2410
2411 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2412 {
2413 /* pseudo Q registers */
2414 unsigned v_regnum;
2415
2416 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2417 memcpy (reg_buf, buf, Q_REGISTER_SIZE);
2418 regcache_raw_write (regcache, v_regnum, reg_buf);
2419 return;
2420 }
2421
2422 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2423 {
2424 /* pseudo D registers */
2425 unsigned v_regnum;
2426
2427 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2428 memcpy (reg_buf, buf, D_REGISTER_SIZE);
2429 regcache_raw_write (regcache, v_regnum, reg_buf);
2430 return;
2431 }
2432
2433 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2434 {
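/* pseudo S registers */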
2435 unsigned v_regnum;
2436
2437 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2438 memcpy (reg_buf, buf, S_REGISTER_SIZE);
2439 regcache_raw_write (regcache, v_regnum, reg_buf);
2440 return;
2441 }
2442
2443 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2444 {
2445 /* pseudo H registers */
2446 unsigned v_regnum;
2447
2448 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2449 memcpy (reg_buf, buf, H_REGISTER_SIZE);
2450 regcache_raw_write (regcache, v_regnum, reg_buf);
2451 return;
2452 }
2453
2454 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2455 {
2456 /* pseudo B registers */
2457 unsigned v_regnum;
2458
2459 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2460 memcpy (reg_buf, buf, B_REGISTER_SIZE);
2461 regcache_raw_write (regcache, v_regnum, reg_buf);
2462 return;
2463 }
2464
2465 gdb_assert_not_reached ("regnum out of bounds");
2466 }
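
/* Example of the zero-extension semantics above (an illustrative
   sketch): with V5 holding arbitrary data, the command

     (gdb) set $s5 = 1.0f

   writes 0x3f800000 into the low four bytes of V5 and clears the
   remaining twelve, exactly as an architectural write to S5 would. */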
2467
2468 /* Callback function for user_reg_add. */
2469
2470 static struct value *
2471 value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
2472 {
2473 const int *reg_p = (const int *) baton;
2474
2475 return value_of_register (*reg_p, frame);
2476 }
2477 \f
2478
2479 /* Implement the "software_single_step" gdbarch method, needed to
2480 single step through atomic sequences on AArch64. */
2481
2482 static int
2483 aarch64_software_single_step (struct frame_info *frame)
2484 {
2485 struct gdbarch *gdbarch = get_frame_arch (frame);
2486 struct address_space *aspace = get_frame_address_space (frame);
2487 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2488 const int insn_size = 4;
2489 const int atomic_sequence_length = 16; /* Instruction sequence length. */
2490 CORE_ADDR pc = get_frame_pc (frame);
2491 CORE_ADDR breaks[2] = { -1, -1 };
2492 CORE_ADDR loc = pc;
2493 CORE_ADDR closing_insn = 0;
2494 uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
2495 byte_order_for_code);
2496 int index;
2497 int insn_count;
2498 int bc_insn_count = 0; /* Conditional branch instruction count. */
2499 int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed). */
2500 aarch64_inst inst;
2501
2502 if (aarch64_decode_insn (insn, &inst, 1) != 0)
2503 return 0;
2504
2505 /* Look for a Load Exclusive instruction which begins the sequence. */
2506 if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
2507 return 0;
2508
2509 for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
2510 {
2511 loc += insn_size;
2512 insn = read_memory_unsigned_integer (loc, insn_size,
2513 byte_order_for_code);
2514
2515 if (aarch64_decode_insn (insn, &inst, 1) != 0)
2516 return 0;
2517 /* Check if the instruction is a conditional branch. */
2518 if (inst.opcode->iclass == condbranch)
2519 {
2520 gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);
2521
2522 if (bc_insn_count >= 1)
2523 return 0;
2524
2525 /* It is, so we'll try to set a breakpoint at the destination. */
2526 breaks[1] = loc + inst.operands[0].imm.value;
2527
2528 bc_insn_count++;
2529 last_breakpoint++;
2530 }
2531
2532 /* Look for the Store Exclusive which closes the atomic sequence. */
2533 if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
2534 {
2535 closing_insn = loc;
2536 break;
2537 }
2538 }
2539
2540 /* We didn't find a closing Store Exclusive instruction, fall back. */
2541 if (!closing_insn)
2542 return 0;
2543
2544 /* Insert breakpoint after the end of the atomic sequence. */
2545 breaks[0] = loc + insn_size;
2546
2547 /* Check for duplicated breakpoints, and also check that the second
2548 breakpoint is not within the atomic sequence. */
2549 if (last_breakpoint
2550 && (breaks[1] == breaks[0]
2551 || (breaks[1] >= pc && breaks[1] <= closing_insn)))
2552 last_breakpoint = 0;
2553
2554 /* Insert the breakpoint at the end of the sequence, and one at the
2555 destination of the conditional branch, if it exists. */
2556 for (index = 0; index <= last_breakpoint; index++)
2557 insert_single_step_breakpoint (gdbarch, aspace, breaks[index]);
2558
2559 return 1;
2560 }
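
/* A sketch of a typical sequence this function handles:

     0x400500: ldaxr w2, [x0] <- sequence start (load exclusive)
     0x400504: cmp w2, w1
     0x400508: b.ne 0x400514 <- breaks[1] = 0x400514
     0x40050c: stlxr w3, w4, [x0] <- closing store exclusive
     0x400510: cbnz w3, 0x400500 <- breaks[0]: insn after the store
     0x400514: ...

   Stepping each instruction separately would clear the exclusive
   monitor, so the whole sequence is run to one of the breakpoints. */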
2561
2562 struct displaced_step_closure
2563 {
2564 /* True when a conditional instruction, such as B.COND or TBZ,
2565 is being displaced stepped. */
2566 int cond;
2567
2568 /* PC adjustment offset after displaced stepping. */
2569 int32_t pc_adjust;
2570 };
2571
2572 /* Data when visiting instructions for displaced stepping. */
2573
2574 struct aarch64_displaced_step_data
2575 {
2576 struct aarch64_insn_data base;
2577
2578 /* The address at which the instruction will be executed. */
2579 CORE_ADDR new_addr;
2580 /* Buffer of instructions to be copied to NEW_ADDR to execute. */
2581 uint32_t insn_buf[DISPLACED_MODIFIED_INSNS];
2582 /* Number of instructions in INSN_BUF. */
2583 unsigned insn_count;
2584 /* Registers when doing displaced stepping. */
2585 struct regcache *regs;
2586
2587 struct displaced_step_closure *dsc;
2588 };
2589
2590 /* Implementation of aarch64_insn_visitor method "b". */
2591
2592 static void
2593 aarch64_displaced_step_b (const int is_bl, const int32_t offset,
2594 struct aarch64_insn_data *data)
2595 {
2596 struct aarch64_displaced_step_data *dsd
2597 = (struct aarch64_displaced_step_data *) data;
2598 int32_t new_offset = data->insn_addr - dsd->new_addr + offset;
2599
2600 if (can_encode_int32 (new_offset, 28))
2601 {
2602 /* Emit B rather than BL, because executing BL on a new address
2603 will get the wrong address into LR. In order to avoid this,
2604 we emit B, and update LR if the instruction is BL. */
2605 emit_b (dsd->insn_buf, 0, new_offset);
2606 dsd->insn_count++;
2607 }
2608 else
2609 {
2610 /* Write NOP. */
2611 emit_nop (dsd->insn_buf);
2612 dsd->insn_count++;
2613 dsd->dsc->pc_adjust = offset;
2614 }
2615
2616 if (is_bl)
2617 {
2618 /* Update LR. */
2619 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
2620 data->insn_addr + 4);
2621 }
2622 }
2623
2624 /* Implementation of aarch64_insn_visitor method "b_cond". */
2625
2626 static void
2627 aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
2628 struct aarch64_insn_data *data)
2629 {
2630 struct aarch64_displaced_step_data *dsd
2631 = (struct aarch64_displaced_step_data *) data;
2633
2634 /* GDB has to fix up the PC after displaced stepping this
2635 instruction differently, depending on whether the condition is
2636 true or false. Instead of checking COND against the condition
2637 flags here, we emit the following instructions, and GDB can tell
2638 which case occurred from the resulting PC value.
2639
2640 B.COND TAKEN ; If cond is true, then jump to TAKEN.
2641 INSN1 ;
2642 TAKEN:
2643 INSN2
2644 */
2645
2646 emit_bcond (dsd->insn_buf, cond, 8);
2647 dsd->dsc->cond = 1;
2648 dsd->dsc->pc_adjust = offset;
2649 dsd->insn_count = 1;
2650 }
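
/* Worked example (a sketch): displaced stepping "b.cond <taken>"
   copied from FROM to scratch address TO emits

     TO + 0: b.cond TO + 8

   If the condition held, the PC stops at TO + 8 and
   aarch64_displaced_step_fixup sets PC = FROM + pc_adjust, the
   original branch offset; otherwise the PC stops at TO + 4 and the
   fixup rewrites pc_adjust to 4, i.e. the fall-through address. */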
2651
2652 /* Build an aarch64_register descriptor for register number NUM.
2653 IS64 is non-zero for a 64-bit (X) register and zero for a 32-bit
2654 (W) register. */
2655
2656 static struct aarch64_register
2657 aarch64_register (unsigned num, int is64)
2658 {
2659 return (struct aarch64_register) { num, is64 };
2660 }
2661
2662 /* Implementation of aarch64_insn_visitor method "cb". */
2663
2664 static void
2665 aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
2666 const unsigned rn, int is64,
2667 struct aarch64_insn_data *data)
2668 {
2669 struct aarch64_displaced_step_data *dsd
2670 = (struct aarch64_displaced_step_data *) data;
2672
2673 /* The offset may be out of range for a compare and branch
2674 instruction executed out of line. We use the following instead:
2675
2676 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
2677 INSN1 ;
2678 TAKEN:
2679 INSN2
2680 */
2681 emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
2682 dsd->insn_count = 1;
2683 dsd->dsc->cond = 1;
2684 dsd->dsc->pc_adjust = offset;
2685 }
2686
2687 /* Implementation of aarch64_insn_visitor method "tb". */
2688
2689 static void
2690 aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
2691 const unsigned rt, unsigned bit,
2692 struct aarch64_insn_data *data)
2693 {
2694 struct aarch64_displaced_step_data *dsd
2695 = (struct aarch64_displaced_step_data *) data;
2697
2698 /* The offset may be out of range for a test bit and branch
2699 instruction executed out of line. We use the following instead:
2700
2701 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
2702 INSN1 ;
2703 TAKEN:
2704 INSN2
2705
2706 */
2707 emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
2708 dsd->insn_count = 1;
2709 dsd->dsc->cond = 1;
2710 dsd->dsc->pc_adjust = offset;
2711 }
2712
2713 /* Implementation of aarch64_insn_visitor method "adr". */
2714
2715 static void
2716 aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
2717 const int is_adrp, struct aarch64_insn_data *data)
2718 {
2719 struct aarch64_displaced_step_data *dsd
2720 = (struct aarch64_displaced_step_data *) data;
2721 /* We know exactly the address the ADR{P,} instruction will compute.
2722 We can just write it to the destination register. */
2723 CORE_ADDR address = data->insn_addr + offset;
2724
2725 if (is_adrp)
2726 {
2727 /* Clear the lower 12 bits of the offset to get the 4K page. */
2728 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2729 address & ~0xfff);
2730 }
2731 else
2732 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2733 address);
2734
2735 dsd->dsc->pc_adjust = 4;
2736 emit_nop (dsd->insn_buf);
2737 dsd->insn_count = 1;
2738 }
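
/* For instance (a sketch): "adrp x3, sym" at FROM = 0x400abc with a
   decoded offset of 0x5000 computes (0x400abc + 0x5000) & ~0xfff
   = 0x405000, writes that page address straight into X3, and leaves
   only a NOP to be executed in the scratch pad. */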
2739
2740 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
2741
2742 static void
2743 aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
2744 const unsigned rt, const int is64,
2745 struct aarch64_insn_data *data)
2746 {
2747 struct aarch64_displaced_step_data *dsd
2748 = (struct aarch64_displaced_step_data *) data;
2749 CORE_ADDR address = data->insn_addr + offset;
2750 struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };
2751
2752 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
2753 address);
2754
2755 if (is_sw)
2756 dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
2757 aarch64_register (rt, 1), zero);
2758 else
2759 dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
2760 aarch64_register (rt, 1), zero);
2761
2762 dsd->dsc->pc_adjust = 4;
2763 }
2764
2765 /* Implementation of aarch64_insn_visitor method "others". */
2766
2767 static void
2768 aarch64_displaced_step_others (const uint32_t insn,
2769 struct aarch64_insn_data *data)
2770 {
2771 struct aarch64_displaced_step_data *dsd
2772 = (struct aarch64_displaced_step_data *) data;
2773
2774 aarch64_emit_insn (dsd->insn_buf, insn);
2775 dsd->insn_count = 1;
2776
2777 if ((insn & 0xfffffc1f) == 0xd65f0000)
2778 {
2779 /* RET */
2780 dsd->dsc->pc_adjust = 0;
2781 }
2782 else
2783 dsd->dsc->pc_adjust = 4;
2784 }
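
/* The mask test above matches RET with any Rn: the common "ret"
   (RET X30) encodes as 0xd65f03c0, and 0xd65f03c0 & 0xfffffc1f
   == 0xd65f0000. A RET lands the PC wherever the target register
   points, so no further PC adjustment is wanted. */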
2785
2786 static const struct aarch64_insn_visitor visitor =
2787 {
2788 aarch64_displaced_step_b,
2789 aarch64_displaced_step_b_cond,
2790 aarch64_displaced_step_cb,
2791 aarch64_displaced_step_tb,
2792 aarch64_displaced_step_adr,
2793 aarch64_displaced_step_ldr_literal,
2794 aarch64_displaced_step_others,
2795 };
2796
2797 /* Implement the "displaced_step_copy_insn" gdbarch method. */
2798
2799 struct displaced_step_closure *
2800 aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
2801 CORE_ADDR from, CORE_ADDR to,
2802 struct regcache *regs)
2803 {
2804 struct displaced_step_closure *dsc = NULL;
2805 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2806 uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
2807 struct aarch64_displaced_step_data dsd;
2808
2809 /* Look for a Load Exclusive instruction which begins the sequence. */
2810 if (decode_masked_match (insn, 0x3fc00000, 0x08400000))
2811 {
2812 /* We can't displaced step atomic sequences. */
2813 return NULL;
2814 }
2815
2816 dsc = XCNEW (struct displaced_step_closure);
2817 dsd.base.insn_addr = from;
2818 dsd.new_addr = to;
2819 dsd.regs = regs;
2820 dsd.dsc = dsc;
2821 dsd.insn_count = 0;
2822 aarch64_relocate_instruction (insn, &visitor,
2823 (struct aarch64_insn_data *) &dsd);
2824 gdb_assert (dsd.insn_count <= DISPLACED_MODIFIED_INSNS);
2825
2826 if (dsd.insn_count != 0)
2827 {
2828 int i;
2829
2830 /* The instruction can be relocated to the scratch pad. Copy
2831 the relocated instruction(s) there. */
2832 for (i = 0; i < dsd.insn_count; i++)
2833 {
2834 if (debug_displaced)
2835 {
2836 debug_printf ("displaced: writing insn ");
2837 debug_printf ("%.8x", dsd.insn_buf[i]);
2838 debug_printf (" at %s\n", paddress (gdbarch, to + i * 4));
2839 }
2840 write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
2841 (ULONGEST) dsd.insn_buf[i]);
2842 }
2843 }
2844 else
2845 {
2846 xfree (dsc);
2847 dsc = NULL;
2848 }
2849
2850 return dsc;
2851 }
2852
2853 /* Implement the "displaced_step_fixup" gdbarch method. */
2854
2855 void
2856 aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
2857 struct displaced_step_closure *dsc,
2858 CORE_ADDR from, CORE_ADDR to,
2859 struct regcache *regs)
2860 {
2861 if (dsc->cond)
2862 {
2863 ULONGEST pc;
2864
2865 regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc);
2866 if (pc - to == 8)
2867 {
2868 /* Condition is true. */
2869 }
2870 else if (pc - to == 4)
2871 {
2872 /* Condition is false. */
2873 dsc->pc_adjust = 4;
2874 }
2875 else
2876 gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
2877 }
2878
2879 if (dsc->pc_adjust != 0)
2880 {
2881 if (debug_displaced)
2882 {
2883 debug_printf ("displaced: fixup: set PC to %s:%d\n",
2884 paddress (gdbarch, from), dsc->pc_adjust);
2885 }
2886 regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
2887 from + dsc->pc_adjust);
2888 }
2889 }
2890
2891 /* Implement the "displaced_step_hw_singlestep" gdbarch method. */
2892
2893 int
2894 aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch,
2895 struct displaced_step_closure *closure)
2896 {
2897 return 1;
2898 }
2899
2900 /* Initialize the current architecture based on INFO. If possible,
2901 re-use an architecture from ARCHES, which is a list of
2902 architectures already created during this debugging session.
2903
2904 Called e.g. at program startup, when reading a core file, and when
2905 reading a binary file. */
2906
2907 static struct gdbarch *
2908 aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
2909 {
2910 struct gdbarch_tdep *tdep;
2911 struct gdbarch *gdbarch;
2912 struct gdbarch_list *best_arch;
2913 struct tdesc_arch_data *tdesc_data = NULL;
2914 const struct target_desc *tdesc = info.target_desc;
2915 int i;
2917 int valid_p = 1;
2918 const struct tdesc_feature *feature;
2919 int num_regs = 0;
2920 int num_pseudo_regs = 0;
2921
2922 /* Ensure we always have a target descriptor. */
2923 if (!tdesc_has_registers (tdesc))
2924 tdesc = tdesc_aarch64;
2925
2926 gdb_assert (tdesc);
2927
2928 feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");
2929
2930 if (feature == NULL)
2931 return NULL;
2932
2933 tdesc_data = tdesc_data_alloc ();
2934
2935 /* Validate the descriptor provides the mandatory core R registers
2936 and allocate their numbers. */
2937 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
2938 valid_p &=
2939 tdesc_numbered_register (feature, tdesc_data, AARCH64_X0_REGNUM + i,
2940 aarch64_r_register_names[i]);
2941
2942 num_regs = AARCH64_X0_REGNUM + i;
2943
2944 /* Look for the V registers. */
2945 feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
2946 if (feature)
2947 {
2948 /* Validate the descriptor provides the mandatory V registers
2949 and allocate their numbers. */
2950 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
2951 valid_p &=
2952 tdesc_numbered_register (feature, tdesc_data, AARCH64_V0_REGNUM + i,
2953 aarch64_v_register_names[i]);
2954
2955 num_regs = AARCH64_V0_REGNUM + i;
2956
2957 num_pseudo_regs += 32; /* Add the Qn scalar register pseudos. */
2958 num_pseudo_regs += 32; /* Add the Dn scalar register pseudos. */
2959 num_pseudo_regs += 32; /* Add the Sn scalar register pseudos. */
2960 num_pseudo_regs += 32; /* Add the Hn scalar register pseudos. */
2961 num_pseudo_regs += 32; /* Add the Bn scalar register pseudos. */
2962 }
2963
2964 if (!valid_p)
2965 {
2966 tdesc_data_cleanup (tdesc_data);
2967 return NULL;
2968 }
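
/* A minimal matching target description looks like this (a sketch;
   a real description also lists x1-x30, sp, pc and cpsr):

     <target>
       <feature name="org.gnu.gdb.aarch64.core">
         <reg name="x0" bitsize="64"/>
         ...
       </feature>
     </target>

   Remote stubs may supply such XML; when none is provided, the
   builtin tdesc_aarch64 above is used instead. */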
2969
2970 /* AArch64 code is always little-endian. */
2971 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
2972
2973 /* If there is already a candidate, use it. */
2974 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
2975 best_arch != NULL;
2976 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
2977 {
2978 /* Found a match. */
2979 break;
2980 }
2981
2982 if (best_arch != NULL)
2983 {
2984 if (tdesc_data != NULL)
2985 tdesc_data_cleanup (tdesc_data);
2986 return best_arch->gdbarch;
2987 }
2988
2989 tdep = XCNEW (struct gdbarch_tdep);
2990 gdbarch = gdbarch_alloc (&info, tdep);
2991
2992 /* This should be low enough for everything. */
2993 tdep->lowest_pc = 0x20;
2994 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
2995 tdep->jb_elt_size = 8;
2996
2997 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
2998 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
2999
3000 /* Frame handling. */
3001 set_gdbarch_dummy_id (gdbarch, aarch64_dummy_id);
3002 set_gdbarch_unwind_pc (gdbarch, aarch64_unwind_pc);
3003 set_gdbarch_unwind_sp (gdbarch, aarch64_unwind_sp);
3004
3005 /* Advance PC across function entry code. */
3006 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
3007
3008 /* The stack grows downward. */
3009 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
3010
3011 /* Breakpoint manipulation. */
3012 set_gdbarch_breakpoint_from_pc (gdbarch, aarch64_breakpoint_from_pc);
3013 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
3014 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
3015
3016 /* Information about registers, etc. */
3017 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
3018 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
3019 set_gdbarch_num_regs (gdbarch, num_regs);
3020
3021 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
3022 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
3023 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
3024 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
3025 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
3026 set_tdesc_pseudo_register_reggroup_p (gdbarch,
3027 aarch64_pseudo_register_reggroup_p);
3028
3029 /* ABI */
3030 set_gdbarch_short_bit (gdbarch, 16);
3031 set_gdbarch_int_bit (gdbarch, 32);
3032 set_gdbarch_float_bit (gdbarch, 32);
3033 set_gdbarch_double_bit (gdbarch, 64);
3034 set_gdbarch_long_double_bit (gdbarch, 128);
3035 set_gdbarch_long_bit (gdbarch, 64);
3036 set_gdbarch_long_long_bit (gdbarch, 64);
3037 set_gdbarch_ptr_bit (gdbarch, 64);
3038 set_gdbarch_char_signed (gdbarch, 0);
3039 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
3040 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
3041 set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);
3042
3043 /* Internal <-> external register number maps. */
3044 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
3045
3046 /* Returning results. */
3047 set_gdbarch_return_value (gdbarch, aarch64_return_value);
3048
3049 /* Disassembly. */
3050 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
3051
3052 /* Virtual tables. */
3053 set_gdbarch_vbit_in_delta (gdbarch, 1);
3054
3055 /* Hook in the ABI-specific overrides, if they have been registered. */
3056 info.target_desc = tdesc;
3057 info.tdep_info = (void *) tdesc_data;
3058 gdbarch_init_osabi (info, gdbarch);
3059
3060 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
3061
3062 /* Add some default predicates. */
3063 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
3064 dwarf2_append_unwinders (gdbarch);
3065 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
3066
3067 frame_base_set_default (gdbarch, &aarch64_normal_base);
3068
3069 /* Now we have tuned the configuration, set a few final things,
3070 based on what the OS ABI has told us. */
3071
3072 if (tdep->jb_pc >= 0)
3073 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
3074
3075 set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);
3076
3077 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
3078
3079 /* Add standard register aliases. */
3080 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
3081 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
3082 value_of_aarch64_user_reg,
3083 &aarch64_register_aliases[i].regnum);
3084
3085 return gdbarch;
3086 }
3087
3088 static void
3089 aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
3090 {
3091 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3092
3093 if (tdep == NULL)
3094 return;
3095
3096 fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
3097 paddress (gdbarch, tdep->lowest_pc));
3098 }
3099
3100 /* Suppress warning from -Wmissing-prototypes. */
3101 extern initialize_file_ftype _initialize_aarch64_tdep;
3102
3103 void
3104 _initialize_aarch64_tdep (void)
3105 {
3106 gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
3107 aarch64_dump_tdep);
3108
3109 initialize_tdesc_aarch64 ();
3110
3111 /* Debug this file's internals. */
3112 add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
3113 Set AArch64 debugging."), _("\
3114 Show AArch64 debugging."), _("\
3115 When on, AArch64 specific debugging is enabled."),
3116 NULL,
3117 show_aarch64_debug,
3118 &setdebuglist, &showdebuglist);
3119 }
3120
3121 /* AArch64 process record-replay related structures, defines etc. */
3122
3123 #define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
3124 do \
3125 { \
3126 unsigned int reg_len = LENGTH; \
3127 if (reg_len) \
3128 { \
3129 REGS = XNEWVEC (uint32_t, reg_len); \
3130 memcpy (&REGS[0], &RECORD_BUF[0], sizeof (uint32_t) * LENGTH); \
3131 } \
3132 } \
3133 while (0)
3134
3135 #define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
3136 do \
3137 { \
3138 unsigned int mem_len = LENGTH; \
3139 if (mem_len) \
3140 { \
3141 MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
3142 memcpy (MEMS, &RECORD_BUF[0], \
3143 sizeof (struct aarch64_mem_r) * LENGTH); \
3144 } \
3145 } \
3146 while (0)
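
/* Typical use of the macros above (a sketch): a record handler that
   has decided an instruction will overwrite X1 and CPSR does

     uint32_t record_buf[2];

     record_buf[0] = 1;
     record_buf[1] = AARCH64_CPSR_REGNUM;
     aarch64_insn_r->reg_rec_count = 2;
     REG_ALLOC (aarch64_insn_r->aarch64_regs,
                aarch64_insn_r->reg_rec_count, record_buf);

   leaving a heap-allocated copy of the register list for the record
   target to consume and free. */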
3147
3148 /* AArch64 record/replay structures and enumerations. */
3149
3150 struct aarch64_mem_r
3151 {
3152 uint64_t len; /* Record length. */
3153 uint64_t addr; /* Memory address. */
3154 };
3155
3156 enum aarch64_record_result
3157 {
3158 AARCH64_RECORD_SUCCESS,
3159 AARCH64_RECORD_FAILURE,
3160 AARCH64_RECORD_UNSUPPORTED,
3161 AARCH64_RECORD_UNKNOWN
3162 };
3163
3164 typedef struct insn_decode_record_t
3165 {
3166 struct gdbarch *gdbarch;
3167 struct regcache *regcache;
3168 CORE_ADDR this_addr; /* Address of insn to be recorded. */
3169 uint32_t aarch64_insn; /* Insn to be recorded. */
3170 uint32_t mem_rec_count; /* Count of memory records. */
3171 uint32_t reg_rec_count; /* Count of register records. */
3172 uint32_t *aarch64_regs; /* Registers to be recorded. */
3173 struct aarch64_mem_r *aarch64_mems; /* Memory locations to be recorded. */
3174 } insn_decode_record;
3175
3176 /* Record handler for data processing - register instructions. */
3177
3178 static unsigned int
3179 aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
3180 {
3181 uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
3182 uint32_t record_buf[4];
3183
3184 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3185 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3186 insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);
3187
3188 if (!bit (aarch64_insn_r->aarch64_insn, 28))
3189 {
3190 uint8_t setflags;
3191
3192 /* Logical (shifted register). */
3193 if (insn_bits24_27 == 0x0a)
3194 setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
3195 /* Add/subtract. */
3196 else if (insn_bits24_27 == 0x0b)
3197 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3198 else
3199 return AARCH64_RECORD_UNKNOWN;
3200
3201 record_buf[0] = reg_rd;
3202 aarch64_insn_r->reg_rec_count = 1;
3203 if (setflags)
3204 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3205 }
3206 else
3207 {
3208 if (insn_bits24_27 == 0x0b)
3209 {
3210 /* Data-processing (3 source). */
3211 record_buf[0] = reg_rd;
3212 aarch64_insn_r->reg_rec_count = 1;
3213 }
3214 else if (insn_bits24_27 == 0x0a)
3215 {
3216 if (insn_bits21_23 == 0x00)
3217 {
3218 /* Add/subtract (with carry). */
3219 record_buf[0] = reg_rd;
3220 aarch64_insn_r->reg_rec_count = 1;
3221 if (bit (aarch64_insn_r->aarch64_insn, 29))
3222 {
3223 record_buf[1] = AARCH64_CPSR_REGNUM;
3224 aarch64_insn_r->reg_rec_count = 2;
3225 }
3226 }
3227 else if (insn_bits21_23 == 0x02)
3228 {
3229 /* Conditional compare (register) and conditional compare
3230 (immediate) instructions. */
3231 record_buf[0] = AARCH64_CPSR_REGNUM;
3232 aarch64_insn_r->reg_rec_count = 1;
3233 }
3234 else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
3235 {
3236 /* Conditional select. */
3237 /* Data-processing (2 source). */
3238 /* Data-processing (1 source). */
3239 record_buf[0] = reg_rd;
3240 aarch64_insn_r->reg_rec_count = 1;
3241 }
3242 else
3243 return AARCH64_RECORD_UNKNOWN;
3244 }
3245 }
3246
3247 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3248 record_buf);
3249 return AARCH64_RECORD_SUCCESS;
3250 }
3251
3252 /* Record handler for data processing - immediate instructions. */
3253
3254 static unsigned int
3255 aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
3256 {
3257 uint8_t reg_rd, insn_bit28, insn_bit23, insn_bits24_27, setflags;
3258 uint32_t record_buf[4];
3259
3260 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3261 insn_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
3262 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3263 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3264
3265 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
3266 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
3267 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
3268 {
3269 record_buf[0] = reg_rd;
3270 aarch64_insn_r->reg_rec_count = 1;
3271 }
3272 else if (insn_bits24_27 == 0x01)
3273 {
3274 /* Add/Subtract (immediate). */
3275 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3276 record_buf[0] = reg_rd;
3277 aarch64_insn_r->reg_rec_count = 1;
3278 if (setflags)
3279 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3280 }
3281 else if (insn_bits24_27 == 0x02 && !insn_bit23)
3282 {
3283 /* Logical (immediate). */
3284 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
3285 record_buf[0] = reg_rd;
3286 aarch64_insn_r->reg_rec_count = 1;
3287 if (setflags)
3288 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3289 }
3290 else
3291 return AARCH64_RECORD_UNKNOWN;
3292
3293 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3294 record_buf);
3295 return AARCH64_RECORD_SUCCESS;
3296 }
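
/* For example (a sketch): "adds x0, x1, #4" has insn_bits24_27 ==
   0x01 (add/subtract immediate) and the S bit (bit 29) set, so the
   handler above records X0 and CPSR as the state the instruction
   will overwrite. */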
3297
3298 /* Record handler for branch, exception generation and system instructions. */
3299
3300 static unsigned int
3301 aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
3302 {
3303 struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
3304 uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
3305 uint32_t record_buf[4];
3306
3307 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3308 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3309 insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3310
3311 if (insn_bits28_31 == 0x0d)
3312 {
3313 /* Exception generation instructions. */
3314 if (insn_bits24_27 == 0x04)
3315 {
3316 if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
3317 && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
3318 && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
3319 {
3320 ULONGEST svc_number;
3321
3322 regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
3323 &svc_number);
3324 return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
3325 svc_number);
3326 }
3327 else
3328 return AARCH64_RECORD_UNSUPPORTED;
3329 }
3330 /* System instructions. */
3331 else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
3332 {
3333 uint32_t reg_rt, reg_crn;
3334
3335 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3336 reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3337
3338 /* Record rt in case of sysl and mrs instructions. */
3339 if (bit (aarch64_insn_r->aarch64_insn, 21))
3340 {
3341 record_buf[0] = reg_rt;
3342 aarch64_insn_r->reg_rec_count = 1;
3343 }
3344 /* Record cpsr for hint and msr(immediate) instructions. */
3345 else if (reg_crn == 0x02 || reg_crn == 0x04)
3346 {
3347 record_buf[0] = AARCH64_CPSR_REGNUM;
3348 aarch64_insn_r->reg_rec_count = 1;
3349 }
3350 }
3351 /* Unconditional branch (register). */
3352 else if ((insn_bits24_27 & 0x0e) == 0x06)
3353 {
3354 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3355 if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
3356 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3357 }
3358 else
3359 return AARCH64_RECORD_UNKNOWN;
3360 }
3361 /* Unconditional branch (immediate). */
3362 else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
3363 {
3364 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3365 if (bit (aarch64_insn_r->aarch64_insn, 31))
3366 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3367 }
3368 else
3369 /* Compare & branch (immediate), Test & branch (immediate) and
3370 Conditional branch (immediate). */
3371 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3372
3373 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3374 record_buf);
3375 return AARCH64_RECORD_SUCCESS;
3376 }
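
/* For example (a sketch): for "svc #0" the handler above reads the
   syscall number from register 8 (X8, per the Linux AArch64
   convention) and defers to the OS-ABI specific
   tdep->aarch64_syscall_record hook, which knows which registers and
   memory the particular syscall clobbers. */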
3377
3378 /* Record handler for advanced SIMD load and store instructions. */
3379
3380 static unsigned int
3381 aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
3382 {
3383 CORE_ADDR address;
3384 uint64_t addr_offset = 0;
3385 uint32_t record_buf[24];
3386 uint64_t record_buf_mem[24];
3387 uint32_t reg_rn, reg_rt;
3388 uint32_t reg_index = 0, mem_index = 0;
3389 uint8_t opcode_bits, size_bits;
3390
3391 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3392 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3393 size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3394 opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3395 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);
3396
3397 if (record_debug)
3398 debug_printf ("Process record: Advanced SIMD load/store\n");
3399
3400 /* Load/store single structure. */
3401 if (bit (aarch64_insn_r->aarch64_insn, 24))
3402 {
3403 uint8_t sindex, scale, selem, esize, replicate = 0;
3404 scale = opcode_bits >> 2;
3405 selem = ((opcode_bits & 0x02)
3406 | bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
3407 switch (scale)
3408 {
3409 case 1:
3410 if (size_bits & 0x01)
3411 return AARCH64_RECORD_UNKNOWN;
3412 break;
3413 case 2:
3414 if ((size_bits >> 1) & 0x01)
3415 return AARCH64_RECORD_UNKNOWN;
3416 if (size_bits & 0x01)
3417 {
3418 if (!((opcode_bits >> 1) & 0x01))
3419 scale = 3;
3420 else
3421 return AARCH64_RECORD_UNKNOWN;
3422 }
3423 break;
3424 case 3:
3425 if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
3426 {
3427 scale = size_bits;
3428 replicate = 1;
3429 break;
3430 }
3431 else
3432 return AARCH64_RECORD_UNKNOWN;
3433 default:
3434 break;
3435 }
3436 esize = 8 << scale;
3437 if (replicate)
3438 for (sindex = 0; sindex < selem; sindex++)
3439 {
3440 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3441 reg_rt = (reg_rt + 1) % 32;
3442 }
3443 else
3444 {
3445 for (sindex = 0; sindex < selem; sindex++)
3446 if (bit (aarch64_insn_r->aarch64_insn, 22))
3447 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3448 else
3449 {
3450 record_buf_mem[mem_index++] = esize / 8;
3451 record_buf_mem[mem_index++] = address + addr_offset;
3452 }
3453 addr_offset = addr_offset + (esize / 8);
3454 reg_rt = (reg_rt + 1) % 32;
3455 }
3456 }
3457 /* Load/store multiple structure. */
3458 else
3459 {
3460 uint8_t selem, esize, rpt, elements;
3461 uint8_t eindex, rindex;
3462
3463 esize = 8 << size_bits;
3464 if (bit (aarch64_insn_r->aarch64_insn, 30))
3465 elements = 128 / esize;
3466 else
3467 elements = 64 / esize;
3468
3469 switch (opcode_bits)
3470 {
3471 /*LD/ST4 (4 Registers). */
3472 case 0:
3473 rpt = 1;
3474 selem = 4;
3475 break;
3476 /*LD/ST1 (4 Registers). */
3477 case 2:
3478 rpt = 4;
3479 selem = 1;
3480 break;
3481 /*LD/ST3 (3 Registers). */
3482 case 4:
3483 rpt = 1;
3484 selem = 3;
3485 break;
3486 /*LD/ST1 (3 Registers). */
3487 case 6:
3488 rpt = 3;
3489 selem = 1;
3490 break;
3491 /*LD/ST1 (1 Register). */
3492 case 7:
3493 rpt = 1;
3494 selem = 1;
3495 break;
3496 /*LD/ST2 (2 Registers). */
3497 case 8:
3498 rpt = 1;
3499 selem = 2;
3500 break;
3501 /*LD/ST1 (2 Registers). */
3502 case 10:
3503 rpt = 2;
3504 selem = 1;
3505 break;
3506 default:
3507 return AARCH64_RECORD_UNSUPPORTED;
3508 break;
3509 }
3510 for (rindex = 0; rindex < rpt; rindex++)
3511 for (eindex = 0; eindex < elements; eindex++)
3512 {
3513 uint8_t reg_tt, sindex;
3514 reg_tt = (reg_rt + rindex) % 32;
3515 for (sindex = 0; sindex < selem; sindex++)
3516 {
3517 if (bit (aarch64_insn_r->aarch64_insn, 22))
3518 record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
3519 else
3520 {
3521 record_buf_mem[mem_index++] = esize / 8;
3522 record_buf_mem[mem_index++] = address + addr_offset;
3523 }
3524 addr_offset = addr_offset + (esize / 8);
3525 reg_tt = (reg_tt + 1) % 32;
3526 }
3527 }
3528 }
3529
3530 if (bit (aarch64_insn_r->aarch64_insn, 23))
3531 record_buf[reg_index++] = reg_rn;
3532
3533 aarch64_insn_r->reg_rec_count = reg_index;
3534 aarch64_insn_r->mem_rec_count = mem_index / 2;
3535 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3536 record_buf_mem);
3537 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3538 record_buf);
3539 return AARCH64_RECORD_SUCCESS;
3540 }
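
/* For example (a sketch): "ld1r {v2.8b}, [x0]" is a single structure,
   replicate form (bit 24 set, opcode_bits == 0xc), giving replicate
   = 1 and selem = 1, so the handler records just V2, the only
   register the load overwrites. */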
3541
3542 /* Record handler for load and store instructions. */
3543
3544 static unsigned int
3545 aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
3546 {
3547 uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
3548 uint8_t insn_bit23, insn_bit21;
3549 uint8_t opc, size_bits, ld_flag, vector_flag;
3550 uint32_t reg_rn, reg_rt, reg_rt2;
3551 uint64_t datasize, offset;
3552 uint32_t record_buf[8];
3553 uint64_t record_buf_mem[8];
3554 CORE_ADDR address;
3555
3556 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3557 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3558 insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
3559 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3560 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3561 ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
3562 vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
3563 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3564 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3565 reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
3566 size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);
3567
3568 /* Load/store exclusive. */
3569 if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
3570 {
3571 if (record_debug)
3572 debug_printf ("Process record: load/store exclusive\n");
3573
3574 if (ld_flag)
3575 {
3576 record_buf[0] = reg_rt;
3577 aarch64_insn_r->reg_rec_count = 1;
3578 if (insn_bit21)
3579 {
3580 record_buf[1] = reg_rt2;
3581 aarch64_insn_r->reg_rec_count = 2;
3582 }
3583 }
3584 else
3585 {
3586 if (insn_bit21)
3587 datasize = (8 << size_bits) * 2;
3588 else
3589 datasize = (8 << size_bits);
3590 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3591 &address);
3592 record_buf_mem[0] = datasize / 8;
3593 record_buf_mem[1] = address;
3594 aarch64_insn_r->mem_rec_count = 1;
3595 if (!insn_bit23)
3596 {
3597 /* Save register rs. */
3598 record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
3599 aarch64_insn_r->reg_rec_count = 1;
3600 }
3601 }
3602 }
3603 /* Load register (literal) instructions decoding. */
3604 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
3605 {
3606 if (record_debug)
3607 debug_printf ("Process record: load register (literal)\n");
3608 if (vector_flag)
3609 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3610 else
3611 record_buf[0] = reg_rt;
3612 aarch64_insn_r->reg_rec_count = 1;
3613 }
3614 /* All types of load/store pair instructions decoding. */
3615 else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
3616 {
3617 if (record_debug)
3618 debug_printf ("Process record: load/store pair\n");
3619
3620 if (ld_flag)
3621 {
3622 if (vector_flag)
3623 {
3624 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3625 record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
3626 }
3627 else
3628 {
3629 record_buf[0] = reg_rt;
3630 record_buf[1] = reg_rt2;
3631 }
3632 aarch64_insn_r->reg_rec_count = 2;
3633 }
3634 else
3635 {
3636 uint16_t imm7_off;
3637 imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
3638 if (!vector_flag)
3639 size_bits = size_bits >> 1;
3640 datasize = 8 << (2 + size_bits);
3641 offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
3642 offset = offset << (2 + size_bits);
3643 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3644 &address);
3645 if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
3646 {
3647 if (imm7_off & 0x40)
3648 address = address - offset;
3649 else
3650 address = address + offset;
3651 }
3652
3653 record_buf_mem[0] = datasize / 8;
3654 record_buf_mem[1] = address;
3655 record_buf_mem[2] = datasize / 8;
3656 record_buf_mem[3] = address + (datasize / 8);
3657 aarch64_insn_r->mem_rec_count = 2;
3658 }
3659 if (bit (aarch64_insn_r->aarch64_insn, 23))
3660 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3661 }
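/* A sketch of the pair decoding above: for "stp x19, x20, [sp, #-32]!"
   imm7_off is 0x7c, so offset = ((~0x7c & 0x7f) + 1) << 3 = 32 and,
   this being a pre-indexed form, the store address becomes SP - 32.
   Two 8-byte memory records are taken (at SP - 32 and SP - 24) and
   SP itself is recorded for the write-back. */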
3662 /* Load/store register (unsigned immediate) instructions. */
3663 else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
3664 {
3665 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3666 if (!(opc >> 1))
3667 if (opc & 0x01)
3668 ld_flag = 0x01;
3669 else
3670 ld_flag = 0x0;
3671 else
3672 if (size_bits != 0x03)
3673 ld_flag = 0x01;
3674 else
3675 return AARCH64_RECORD_UNKNOWN;
3676
3677 if (record_debug)
3678 {
3679 debug_printf ("Process record: load/store (unsigned immediate):"
3680 " size %x V %d opc %x\n", size_bits, vector_flag,
3681 opc);
3682 }
3683
3684 if (!ld_flag)
3685 {
3686 offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
3687 datasize = 8 << size_bits;
3688 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3689 &address);
3690 offset = offset << size_bits;
3691 address = address + offset;
3692
3693 record_buf_mem[0] = datasize >> 3;
3694 record_buf_mem[1] = address;
3695 aarch64_insn_r->mem_rec_count = 1;
3696 }
3697 else
3698 {
3699 if (vector_flag)
3700 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3701 else
3702 record_buf[0] = reg_rt;
3703 aarch64_insn_r->reg_rec_count = 1;
3704 }
3705 }
3706 /* Load/store register (register offset) instructions. */
3707 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3708 && insn_bits10_11 == 0x02 && insn_bit21)
3709 {
3710 if (record_debug)
3711 debug_printf ("Process record: load/store (register offset)\n");
3712 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3713 if (!(opc >> 1))
3714 if (opc & 0x01)
3715 ld_flag = 0x01;
3716 else
3717 ld_flag = 0x0;
3718 else
3719 if (size_bits != 0x03)
3720 ld_flag = 0x01;
3721 else
3722 return AARCH64_RECORD_UNKNOWN;
3723
3724 if (!ld_flag)
3725 {
3726 uint64_t reg_rm_val;
3727 regcache_raw_read_unsigned (aarch64_insn_r->regcache,
3728 bits (aarch64_insn_r->aarch64_insn, 16, 20), &reg_rm_val);
3729 if (bit (aarch64_insn_r->aarch64_insn, 12))
3730 offset = reg_rm_val << size_bits;
3731 else
3732 offset = reg_rm_val;
3733 datasize = 8 << size_bits;
3734 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3735 &address);
3736 address = address + offset;
3737 record_buf_mem[0] = datasize >> 3;
3738 record_buf_mem[1] = address;
3739 aarch64_insn_r->mem_rec_count = 1;
3740 }
3741 else
3742 {
3743 if (vector_flag)
3744 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3745 else
3746 record_buf[0] = reg_rt;
3747 aarch64_insn_r->reg_rec_count = 1;
3748 }
3749 }
3750 /* Load/store register (immediate and unprivileged) instructions. */
3751 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3752 && !insn_bit21)
3753 {
3754 if (record_debug)
3755 {
3756 debug_printf ("Process record: load/store "
3757 "(immediate and unprivileged)\n");
3758 }
3759 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3760 if (!(opc >> 1))
3761 if (opc & 0x01)
3762 ld_flag = 0x01;
3763 else
3764 ld_flag = 0x0;
3765 else
3766 if (size_bits != 0x03)
3767 ld_flag = 0x01;
3768 else
3769 return AARCH64_RECORD_UNKNOWN;

      if (!ld_flag)
        {
          uint16_t imm9_off;

          /* imm9 is a two's-complement 9-bit field; convert it to a
             magnitude, remembering the sign via bit 8.  */
          imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
          offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
          datasize = 8 << size_bits;
          regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
                                      &address);
          /* For the post-indexed variant (bits 10-11 == 0b01) the
             write happens at the unmodified base address.  */
          if (insn_bits10_11 != 0x01)
            {
              if (imm9_off & 0x0100)
                address = address - offset;
              else
                address = address + offset;
            }
          record_buf_mem[0] = datasize >> 3;
          record_buf_mem[1] = address;
          aarch64_insn_r->mem_rec_count = 1;
        }
      else
        {
          /* Load: record the destination register.  */
          if (vector_flag)
            record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
          else
            record_buf[0] = reg_rt;
          aarch64_insn_r->reg_rec_count = 1;
        }
      /* The pre- and post-indexed variants also write back to Rn.  */
      if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
        record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
    }
  /* Advanced SIMD load/store instructions.  */
  else
    return aarch64_record_asimd_load_store (aarch64_insn_r);

  MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
             record_buf_mem);
  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
             record_buf);
  return AARCH64_RECORD_SUCCESS;
}

/* Record handler for data processing SIMD and floating point
   instructions.  */

static unsigned int
aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
{
  uint8_t insn_bit21, opcode, rmode, reg_rd;
  uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
  uint8_t insn_bits11_14;
  uint32_t record_buf[2];

  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
  insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
  insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
  insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
  opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
  rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
  reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
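
  /* Every case below modifies at most one destination register, so a
     single RECORD_BUF entry suffices; the decode only has to choose
     between the X, V and CPSR register files.  */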

  if (record_debug)
    debug_printf ("Process record: data processing SIMD/FP: ");

  if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
    {
      /* Floating point - fixed point conversion instructions.  */
      if (!insn_bit21)
        {
          if (record_debug)
            debug_printf ("FP - fixed point conversion");

          if ((opcode >> 1) == 0x0 && rmode == 0x03)
            record_buf[0] = reg_rd;
          else
            record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
        }
      /* Floating point - conditional compare instructions.  */
      else if (insn_bits10_11 == 0x01)
        {
          if (record_debug)
            debug_printf ("FP - conditional compare");

          record_buf[0] = AARCH64_CPSR_REGNUM;
        }
      /* Floating point - data processing (2-source) and
         conditional select instructions.  */
      else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
        {
          if (record_debug)
            debug_printf ("FP - DP (2-source)");

          record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
        }
      else if (insn_bits10_11 == 0x00)
        {
          /* Floating point - immediate instructions.  */
          if ((insn_bits12_15 & 0x01) == 0x01
              || (insn_bits12_15 & 0x07) == 0x04)
            {
              if (record_debug)
                debug_printf ("FP - immediate");
              record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
            }
          /* Floating point - compare instructions.  */
          else if ((insn_bits12_15 & 0x03) == 0x02)
            {
              if (record_debug)
                debug_printf ("FP - compare");
              record_buf[0] = AARCH64_CPSR_REGNUM;
            }
          /* Floating point - integer conversion instructions.  */
          else if (insn_bits12_15 == 0x00)
            {
              /* Convert float to integer instruction.  */
              if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
                {
                  if (record_debug)
                    debug_printf ("float to int conversion");

                  record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
                }
              /* Convert integer to float instruction.  */
              else if ((opcode >> 1) == 0x01 && !rmode)
                {
                  if (record_debug)
                    debug_printf ("int to float conversion");

                  record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
                }
              /* Move float to integer instruction.  */
              else if ((opcode >> 1) == 0x03)
                {
                  if (record_debug)
                    debug_printf ("move float to int");

                  if (!(opcode & 0x01))
                    record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
                  else
                    record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
                }
              else
                return AARCH64_RECORD_UNKNOWN;
            }
          else
            return AARCH64_RECORD_UNKNOWN;
        }
      else
        return AARCH64_RECORD_UNKNOWN;
    }
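  /* Advanced SIMD copy group.  The imm4 field (bits 11-14) values 0x05
     and 0x07 are believed to be the SMOV/UMOV moves to a general
     register; everything else in the group writes a V register.  */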
  else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
    {
      if (record_debug)
        debug_printf ("SIMD copy");

      /* Advanced SIMD copy instructions.  */
      if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
          && !bit (aarch64_insn_r->aarch64_insn, 15)
          && bit (aarch64_insn_r->aarch64_insn, 10))
        {
          if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
            record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
          else
            record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
        }
      else
        record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
    }
  /* All remaining floating point or advanced SIMD instructions.  */
  else
    {
      if (record_debug)
        debug_printf ("all remaining");

      record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
    }

  if (record_debug)
    debug_printf ("\n");

  /* Exactly one destination register was selected above.  */
  aarch64_insn_r->reg_rec_count++;
  gdb_assert (aarch64_insn_r->reg_rec_count == 1);
  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
             record_buf);
  return AARCH64_RECORD_SUCCESS;
}

/* Decodes the instruction type and invokes its record handler.  */

static unsigned int
aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
{
  uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;

  ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
  ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
  ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
  ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);

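  /* Dispatch on the top-level A64 encoding groups.  The selector is
     op0 = bits 28:25; the tests below should correspond to (per the
     A64 reference):

       100x - data processing (immediate)
       101x - branch, exception generation and system
       x1x0 - loads and stores
       x101 - data processing (register)
       x111 - data processing (SIMD and FP)  */
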
  /* Data processing - immediate instructions.  */
  if (!ins_bit26 && !ins_bit27 && ins_bit28)
    return aarch64_record_data_proc_imm (aarch64_insn_r);

  /* Branch, exception generation and system instructions.  */
  if (ins_bit26 && !ins_bit27 && ins_bit28)
    return aarch64_record_branch_except_sys (aarch64_insn_r);

  /* Load and store instructions.  */
  if (!ins_bit25 && ins_bit27)
    return aarch64_record_load_store (aarch64_insn_r);

  /* Data processing - register instructions.  */
  if (ins_bit25 && !ins_bit26 && ins_bit27)
    return aarch64_record_data_proc_reg (aarch64_insn_r);

  /* Data processing - SIMD and floating point instructions.  */
  if (ins_bit25 && ins_bit26 && ins_bit27)
    return aarch64_record_data_proc_simd_fp (aarch64_insn_r);

  return AARCH64_RECORD_UNSUPPORTED;
}

/* Cleans up local record registers and memory allocations.  */

static void
deallocate_reg_mem (insn_decode_record *record)
{
  xfree (record->aarch64_regs);
  xfree (record->aarch64_mems);
}

/* Parse the current instruction and record the values of the registers
   and memory that will be changed by it to record_arch_list.  Return -1
   if something goes wrong.  */

int
aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
                        CORE_ADDR insn_addr)
{
  uint32_t rec_no = 0;
  uint8_t insn_size = 4;
  uint32_t ret = 0;
  gdb_byte buf[insn_size];
  insn_decode_record aarch64_record;

  memset (&buf[0], 0, insn_size);
  memset (&aarch64_record, 0, sizeof (insn_decode_record));
  target_read_memory (insn_addr, &buf[0], insn_size);
  aarch64_record.aarch64_insn
    = (uint32_t) extract_unsigned_integer (&buf[0],
                                           insn_size,
                                           gdbarch_byte_order (gdbarch));
  aarch64_record.regcache = regcache;
  aarch64_record.this_addr = insn_addr;
  aarch64_record.gdbarch = gdbarch;

  ret = aarch64_record_decode_insn_handler (&aarch64_record);
  if (ret == AARCH64_RECORD_UNSUPPORTED)
    {
      printf_unfiltered (_("Process record does not support instruction "
                           "0x%0x at address %s.\n"),
                         aarch64_record.aarch64_insn,
                         paddress (gdbarch, insn_addr));
      ret = -1;
    }

  if (0 == ret)
    {
      /* Record registers.  */
      record_full_arch_list_add_reg (aarch64_record.regcache,
                                     AARCH64_PC_REGNUM);
      /* Always record register CPSR.  */
      record_full_arch_list_add_reg (aarch64_record.regcache,
                                     AARCH64_CPSR_REGNUM);
      if (aarch64_record.aarch64_regs)
        for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
          if (record_full_arch_list_add_reg (aarch64_record.regcache,
                                             aarch64_record.aarch64_regs[rec_no]))
            ret = -1;

      /* Record memories.  */
      if (aarch64_record.aarch64_mems)
        for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
          if (record_full_arch_list_add_mem
              ((CORE_ADDR) aarch64_record.aarch64_mems[rec_no].addr,
               aarch64_record.aarch64_mems[rec_no].len))
            ret = -1;

      if (record_full_arch_list_add_end ())
        ret = -1;
    }

  deallocate_reg_mem (&aarch64_record);
  return ret;
}
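
/* A sketch of how this entry point is wired up: the gdbarch init code
   (elsewhere in this file) is expected to register it as the
   process_record hook, e.g.:

     set_gdbarch_process_record (gdbarch, aarch64_process_record);

   after which "record full" calls it for every instruction executed in
   the inferior.  */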