2ac1a49ee8f8a31ef744e9d85165386ae5b3d139
[deliverable/binutils-gdb.git] / gdb / aarch64-tdep.c
1 /* Common target dependent code for GDB on AArch64 systems.
2
3 Copyright (C) 2009-2015 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21 #include "defs.h"
22
23 #include "frame.h"
24 #include "inferior.h"
25 #include "gdbcmd.h"
26 #include "gdbcore.h"
27 #include "dis-asm.h"
28 #include "regcache.h"
29 #include "reggroups.h"
30 #include "doublest.h"
31 #include "value.h"
32 #include "arch-utils.h"
33 #include "osabi.h"
34 #include "frame-unwind.h"
35 #include "frame-base.h"
36 #include "trad-frame.h"
37 #include "objfiles.h"
38 #include "dwarf2-frame.h"
39 #include "gdbtypes.h"
40 #include "prologue-value.h"
41 #include "target-descriptions.h"
42 #include "user-regs.h"
43 #include "language.h"
44 #include "infcall.h"
45 #include "ax.h"
46 #include "ax-gdb.h"
47
48 #include "aarch64-tdep.h"
49
50 #include "elf-bfd.h"
51 #include "elf/aarch64.h"
52
53 #include "vec.h"
54
55 #include "record.h"
56 #include "record-full.h"
57
58 #include "features/aarch64.c"
59
/* Pseudo register base numbers.  Each bank below is 32 registers long
   and exposes the vector registers at a narrower width (q/d/s/h/b are
   the standard AArch64 128/64/32/16/8-bit scalar views).  */
#define AARCH64_Q0_REGNUM 0				/* q0..q31 (128-bit).  */
#define AARCH64_D0_REGNUM (AARCH64_Q0_REGNUM + 32)	/* d0..d31 (64-bit).  */
#define AARCH64_S0_REGNUM (AARCH64_D0_REGNUM + 32)	/* s0..s31 (32-bit).  */
#define AARCH64_H0_REGNUM (AARCH64_S0_REGNUM + 32)	/* h0..h31 (16-bit).  */
#define AARCH64_B0_REGNUM (AARCH64_H0_REGNUM + 32)	/* b0..b31 (8-bit).  */
66
/* The standard register names, and all the valid aliases for them.
   Each entry maps an alias string onto the RAW register number it
   refers to.  The w<n> names are the conventional 32-bit views of the
   corresponding x<n> registers; ip0/ip1 are the AArch64 PCS names for
   the intra-procedure-call scratch registers x16/x17.  */
static const struct
{
  const char *const name;	/* Alias as typed by the user.  */
  int regnum;			/* Raw register number it denotes.  */
} aarch64_register_aliases[] =
{
  /* 64-bit register names.  */
  {"fp", AARCH64_FP_REGNUM},
  {"lr", AARCH64_LR_REGNUM},
  {"sp", AARCH64_SP_REGNUM},

  /* 32-bit register names.  */
  {"w0", AARCH64_X0_REGNUM + 0},
  {"w1", AARCH64_X0_REGNUM + 1},
  {"w2", AARCH64_X0_REGNUM + 2},
  {"w3", AARCH64_X0_REGNUM + 3},
  {"w4", AARCH64_X0_REGNUM + 4},
  {"w5", AARCH64_X0_REGNUM + 5},
  {"w6", AARCH64_X0_REGNUM + 6},
  {"w7", AARCH64_X0_REGNUM + 7},
  {"w8", AARCH64_X0_REGNUM + 8},
  {"w9", AARCH64_X0_REGNUM + 9},
  {"w10", AARCH64_X0_REGNUM + 10},
  {"w11", AARCH64_X0_REGNUM + 11},
  {"w12", AARCH64_X0_REGNUM + 12},
  {"w13", AARCH64_X0_REGNUM + 13},
  {"w14", AARCH64_X0_REGNUM + 14},
  {"w15", AARCH64_X0_REGNUM + 15},
  {"w16", AARCH64_X0_REGNUM + 16},
  {"w17", AARCH64_X0_REGNUM + 17},
  {"w18", AARCH64_X0_REGNUM + 18},
  {"w19", AARCH64_X0_REGNUM + 19},
  {"w20", AARCH64_X0_REGNUM + 20},
  {"w21", AARCH64_X0_REGNUM + 21},
  {"w22", AARCH64_X0_REGNUM + 22},
  {"w23", AARCH64_X0_REGNUM + 23},
  {"w24", AARCH64_X0_REGNUM + 24},
  {"w25", AARCH64_X0_REGNUM + 25},
  {"w26", AARCH64_X0_REGNUM + 26},
  {"w27", AARCH64_X0_REGNUM + 27},
  {"w28", AARCH64_X0_REGNUM + 28},
  {"w29", AARCH64_X0_REGNUM + 29},
  {"w30", AARCH64_X0_REGNUM + 30},

  /* specials -- deliberately the same raw registers as w16/w17.  */
  {"ip0", AARCH64_X0_REGNUM + 16},
  {"ip1", AARCH64_X0_REGNUM + 17}
};
116
/* The required core 'R' registers: the 31 general-purpose X registers
   followed by sp, pc and cpsr.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM!  */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  "pc", "cpsr"
};
132
/* The FP/SIMD 'V' registers: the 32 vector registers followed by the
   floating-point status and control registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM!  */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  "fpsr",
  "fpcr"
};
149
/* AArch64 prologue cache structure.  Filled in by the prologue
   analyzer (aarch64_analyze_prologue / aarch64_scan_prologue) and
   consumed by the frame unwinders below.  */
struct aarch64_prologue_cache
{
  /* The program counter at the start of the function.  It is used to
     identify this frame as a prologue frame.  */
  CORE_ADDR func;

  /* The program counter at the time this frame was created; i.e. where
     this function was called from.  It is used to identify this frame as a
     stub frame.  */
  CORE_ADDR prev_pc;

  /* The stack pointer at the time this frame was created; i.e. the
     caller's stack pointer when this function was called.  It is used
     to identify this frame.  */
  CORE_ADDR prev_sp;

  /* Is the target available to read from?  Zero until the cache has
     been successfully populated without a NOT_AVAILABLE_ERROR.  */
  int available_p;

  /* The frame base for this frame is just prev_sp - frame size.
     FRAMESIZE is the distance from the frame pointer to the
     initial stack pointer.  */
  int framesize;

  /* The register used to hold the frame pointer for this frame, or
     -1 when no frame could be identified.  */
  int framereg;

  /* Saved register offsets.  */
  struct trad_frame_saved_reg *saved_regs;
};
181
/* Toggle this file's internal debugging dump.  Non-zero enables the
   "decode:" traces emitted by the instruction decoders below.
   NOTE(review): presumably wired up to a "set/show debug" command —
   the registration is outside this chunk.  */
static int aarch64_debug;

/* Show-hook for the debug flag: print the current setting VALUE to
   FILE.  C is the owning command; FROM_TTY is unused here.  */

static void
show_aarch64_debug (struct ui_file *file, int from_tty,
		    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
}
191
/* Extract a signed value from a bit field within an instruction
   encoding.

   INSN is the instruction opcode.

   WIDTH specifies the width of the bit field to extract (in bits);
   it must satisfy 1 <= WIDTH <= 32.

   OFFSET specifies the least significant bit of the field where bits
   are numbered zero counting from least to most significant.

   Return the field sign-extended to 32 bits.  */

static int32_t
extract_signed_bitfield (uint32_t insn, unsigned width, unsigned offset)
{
  /* Isolate the field.  Done in 64 bits so that WIDTH == 32 needs no
     special case, and so the sign extension below cannot overflow.
     (The previous implementation left-shifted a signed 32-bit value,
     which is undefined behavior once a 1 bit reaches the sign bit.)  */
  int64_t field = (int64_t) ((insn >> offset)
			     & (((uint64_t) 1 << width) - 1));
  int64_t sign_bit = (int64_t) 1 << (width - 1);

  /* If the top bit of the field is set, subtract 2^WIDTH to get the
     negative value it encodes.  */
  if (field & sign_bit)
    field -= sign_bit << 1;

  return (int32_t) field;
}
210
/* Determine whether the bits of INSN selected by MASK equal PATTERN.

   INSN is the instruction opcode.
   MASK selects the bits of the opcode to test.
   PATTERN is the value those bits must hold.

   Return non-zero on a match, zero otherwise.  */

static int
decode_masked_match (uint32_t insn, uint32_t mask, uint32_t pattern)
{
  uint32_t selected_bits = insn & mask;

  return selected_bits == pattern;
}
224
225 /* Decode an opcode if it represents an immediate ADD or SUB instruction.
226
227 ADDR specifies the address of the opcode.
228 INSN specifies the opcode to test.
229 RD receives the 'rd' field from the decoded instruction.
230 RN receives the 'rn' field from the decoded instruction.
231
232 Return 1 if the opcodes matches and is decoded, otherwise 0. */
233 static int
234 decode_add_sub_imm (CORE_ADDR addr, uint32_t insn, unsigned *rd, unsigned *rn,
235 int32_t *imm)
236 {
237 if ((insn & 0x9f000000) == 0x91000000)
238 {
239 unsigned shift;
240 unsigned op_is_sub;
241
242 *rd = (insn >> 0) & 0x1f;
243 *rn = (insn >> 5) & 0x1f;
244 *imm = (insn >> 10) & 0xfff;
245 shift = (insn >> 22) & 0x3;
246 op_is_sub = (insn >> 30) & 0x1;
247
248 switch (shift)
249 {
250 case 0:
251 break;
252 case 1:
253 *imm <<= 12;
254 break;
255 default:
256 /* UNDEFINED */
257 return 0;
258 }
259
260 if (op_is_sub)
261 *imm = -*imm;
262
263 if (aarch64_debug)
264 fprintf_unfiltered (gdb_stdlog,
265 "decode: 0x%s 0x%x add x%u, x%u, #%d\n",
266 core_addr_to_string_nz (addr), insn, *rd, *rn,
267 *imm);
268 return 1;
269 }
270 return 0;
271 }
272
273 /* Decode an opcode if it represents an ADRP instruction.
274
275 ADDR specifies the address of the opcode.
276 INSN specifies the opcode to test.
277 RD receives the 'rd' field from the decoded instruction.
278
279 Return 1 if the opcodes matches and is decoded, otherwise 0. */
280
281 static int
282 decode_adrp (CORE_ADDR addr, uint32_t insn, unsigned *rd)
283 {
284 if (decode_masked_match (insn, 0x9f000000, 0x90000000))
285 {
286 *rd = (insn >> 0) & 0x1f;
287
288 if (aarch64_debug)
289 fprintf_unfiltered (gdb_stdlog,
290 "decode: 0x%s 0x%x adrp x%u, #?\n",
291 core_addr_to_string_nz (addr), insn, *rd);
292 return 1;
293 }
294 return 0;
295 }
296
297 /* Decode an opcode if it represents an branch immediate or branch
298 and link immediate instruction.
299
300 ADDR specifies the address of the opcode.
301 INSN specifies the opcode to test.
302 IS_BL receives the 'op' bit from the decoded instruction.
303 OFFSET receives the immediate offset from the decoded instruction.
304
305 Return 1 if the opcodes matches and is decoded, otherwise 0. */
306
307 static int
308 decode_b (CORE_ADDR addr, uint32_t insn, int *is_bl, int32_t *offset)
309 {
310 /* b 0001 01ii iiii iiii iiii iiii iiii iiii */
311 /* bl 1001 01ii iiii iiii iiii iiii iiii iiii */
312 if (decode_masked_match (insn, 0x7c000000, 0x14000000))
313 {
314 *is_bl = (insn >> 31) & 0x1;
315 *offset = extract_signed_bitfield (insn, 26, 0) << 2;
316
317 if (aarch64_debug)
318 fprintf_unfiltered (gdb_stdlog,
319 "decode: 0x%s 0x%x %s 0x%s\n",
320 core_addr_to_string_nz (addr), insn,
321 *is_bl ? "bl" : "b",
322 core_addr_to_string_nz (addr + *offset));
323
324 return 1;
325 }
326 return 0;
327 }
328
329 /* Decode an opcode if it represents a conditional branch instruction.
330
331 ADDR specifies the address of the opcode.
332 INSN specifies the opcode to test.
333 COND receives the branch condition field from the decoded
334 instruction.
335 OFFSET receives the immediate offset from the decoded instruction.
336
337 Return 1 if the opcodes matches and is decoded, otherwise 0. */
338
339 static int
340 decode_bcond (CORE_ADDR addr, uint32_t insn, unsigned *cond, int32_t *offset)
341 {
342 /* b.cond 0101 0100 iiii iiii iiii iiii iii0 cccc */
343 if (decode_masked_match (insn, 0xff000010, 0x54000000))
344 {
345 *cond = (insn >> 0) & 0xf;
346 *offset = extract_signed_bitfield (insn, 19, 5) << 2;
347
348 if (aarch64_debug)
349 fprintf_unfiltered (gdb_stdlog,
350 "decode: 0x%s 0x%x b<%u> 0x%s\n",
351 core_addr_to_string_nz (addr), insn, *cond,
352 core_addr_to_string_nz (addr + *offset));
353 return 1;
354 }
355 return 0;
356 }
357
358 /* Decode an opcode if it represents a branch via register instruction.
359
360 ADDR specifies the address of the opcode.
361 INSN specifies the opcode to test.
362 IS_BLR receives the 'op' bit from the decoded instruction.
363 RN receives the 'rn' field from the decoded instruction.
364
365 Return 1 if the opcodes matches and is decoded, otherwise 0. */
366
367 static int
368 decode_br (CORE_ADDR addr, uint32_t insn, int *is_blr, unsigned *rn)
369 {
370 /* 8 4 0 6 2 8 4 0 */
371 /* blr 110101100011111100000000000rrrrr */
372 /* br 110101100001111100000000000rrrrr */
373 if (decode_masked_match (insn, 0xffdffc1f, 0xd61f0000))
374 {
375 *is_blr = (insn >> 21) & 1;
376 *rn = (insn >> 5) & 0x1f;
377
378 if (aarch64_debug)
379 fprintf_unfiltered (gdb_stdlog,
380 "decode: 0x%s 0x%x %s 0x%x\n",
381 core_addr_to_string_nz (addr), insn,
382 *is_blr ? "blr" : "br", *rn);
383
384 return 1;
385 }
386 return 0;
387 }
388
389 /* Decode an opcode if it represents a CBZ or CBNZ instruction.
390
391 ADDR specifies the address of the opcode.
392 INSN specifies the opcode to test.
393 IS64 receives the 'sf' field from the decoded instruction.
394 IS_CBNZ receives the 'op' field from the decoded instruction.
395 RN receives the 'rn' field from the decoded instruction.
396 OFFSET receives the 'imm19' field from the decoded instruction.
397
398 Return 1 if the opcodes matches and is decoded, otherwise 0. */
399
400 static int
401 decode_cb (CORE_ADDR addr, uint32_t insn, int *is64, int *is_cbnz,
402 unsigned *rn, int32_t *offset)
403 {
404 /* cbz T011 010o iiii iiii iiii iiii iiir rrrr */
405 /* cbnz T011 010o iiii iiii iiii iiii iiir rrrr */
406 if (decode_masked_match (insn, 0x7e000000, 0x34000000))
407 {
408 *rn = (insn >> 0) & 0x1f;
409 *is64 = (insn >> 31) & 0x1;
410 *is_cbnz = (insn >> 24) & 0x1;
411 *offset = extract_signed_bitfield (insn, 19, 5) << 2;
412
413 if (aarch64_debug)
414 fprintf_unfiltered (gdb_stdlog,
415 "decode: 0x%s 0x%x %s 0x%s\n",
416 core_addr_to_string_nz (addr), insn,
417 *is_cbnz ? "cbnz" : "cbz",
418 core_addr_to_string_nz (addr + *offset));
419 return 1;
420 }
421 return 0;
422 }
423
424 /* Decode an opcode if it represents a ERET instruction.
425
426 ADDR specifies the address of the opcode.
427 INSN specifies the opcode to test.
428
429 Return 1 if the opcodes matches and is decoded, otherwise 0. */
430
431 static int
432 decode_eret (CORE_ADDR addr, uint32_t insn)
433 {
434 /* eret 1101 0110 1001 1111 0000 0011 1110 0000 */
435 if (insn == 0xd69f03e0)
436 {
437 if (aarch64_debug)
438 fprintf_unfiltered (gdb_stdlog, "decode: 0x%s 0x%x eret\n",
439 core_addr_to_string_nz (addr), insn);
440 return 1;
441 }
442 return 0;
443 }
444
445 /* Decode an opcode if it represents a MOVZ instruction.
446
447 ADDR specifies the address of the opcode.
448 INSN specifies the opcode to test.
449 RD receives the 'rd' field from the decoded instruction.
450
451 Return 1 if the opcodes matches and is decoded, otherwise 0. */
452
453 static int
454 decode_movz (CORE_ADDR addr, uint32_t insn, unsigned *rd)
455 {
456 if (decode_masked_match (insn, 0xff800000, 0x52800000))
457 {
458 *rd = (insn >> 0) & 0x1f;
459
460 if (aarch64_debug)
461 fprintf_unfiltered (gdb_stdlog,
462 "decode: 0x%s 0x%x movz x%u, #?\n",
463 core_addr_to_string_nz (addr), insn, *rd);
464 return 1;
465 }
466 return 0;
467 }
468
469 /* Decode an opcode if it represents a ORR (shifted register)
470 instruction.
471
472 ADDR specifies the address of the opcode.
473 INSN specifies the opcode to test.
474 RD receives the 'rd' field from the decoded instruction.
475 RN receives the 'rn' field from the decoded instruction.
476 RM receives the 'rm' field from the decoded instruction.
477 IMM receives the 'imm6' field from the decoded instruction.
478
479 Return 1 if the opcodes matches and is decoded, otherwise 0. */
480
481 static int
482 decode_orr_shifted_register_x (CORE_ADDR addr,
483 uint32_t insn, unsigned *rd, unsigned *rn,
484 unsigned *rm, int32_t *imm)
485 {
486 if (decode_masked_match (insn, 0xff200000, 0xaa000000))
487 {
488 *rd = (insn >> 0) & 0x1f;
489 *rn = (insn >> 5) & 0x1f;
490 *rm = (insn >> 16) & 0x1f;
491 *imm = (insn >> 10) & 0x3f;
492
493 if (aarch64_debug)
494 fprintf_unfiltered (gdb_stdlog,
495 "decode: 0x%s 0x%x orr x%u, x%u, x%u, #%u\n",
496 core_addr_to_string_nz (addr), insn, *rd,
497 *rn, *rm, *imm);
498 return 1;
499 }
500 return 0;
501 }
502
503 /* Decode an opcode if it represents a RET instruction.
504
505 ADDR specifies the address of the opcode.
506 INSN specifies the opcode to test.
507 RN receives the 'rn' field from the decoded instruction.
508
509 Return 1 if the opcodes matches and is decoded, otherwise 0. */
510
511 static int
512 decode_ret (CORE_ADDR addr, uint32_t insn, unsigned *rn)
513 {
514 if (decode_masked_match (insn, 0xfffffc1f, 0xd65f0000))
515 {
516 *rn = (insn >> 5) & 0x1f;
517 if (aarch64_debug)
518 fprintf_unfiltered (gdb_stdlog,
519 "decode: 0x%s 0x%x ret x%u\n",
520 core_addr_to_string_nz (addr), insn, *rn);
521 return 1;
522 }
523 return 0;
524 }
525
526 /* Decode an opcode if it represents the following instruction:
527 STP rt, rt2, [rn, #imm]
528
529 ADDR specifies the address of the opcode.
530 INSN specifies the opcode to test.
531 RT1 receives the 'rt' field from the decoded instruction.
532 RT2 receives the 'rt2' field from the decoded instruction.
533 RN receives the 'rn' field from the decoded instruction.
534 IMM receives the 'imm' field from the decoded instruction.
535
536 Return 1 if the opcodes matches and is decoded, otherwise 0. */
537
538 static int
539 decode_stp_offset (CORE_ADDR addr,
540 uint32_t insn,
541 unsigned *rt1, unsigned *rt2, unsigned *rn, int32_t *imm)
542 {
543 if (decode_masked_match (insn, 0xffc00000, 0xa9000000))
544 {
545 *rt1 = (insn >> 0) & 0x1f;
546 *rn = (insn >> 5) & 0x1f;
547 *rt2 = (insn >> 10) & 0x1f;
548 *imm = extract_signed_bitfield (insn, 7, 15);
549 *imm <<= 3;
550
551 if (aarch64_debug)
552 fprintf_unfiltered (gdb_stdlog,
553 "decode: 0x%s 0x%x stp x%u, x%u, [x%u + #%d]\n",
554 core_addr_to_string_nz (addr), insn,
555 *rt1, *rt2, *rn, *imm);
556 return 1;
557 }
558 return 0;
559 }
560
561 /* Decode an opcode if it represents the following instruction:
562 STP rt, rt2, [rn, #imm]!
563
564 ADDR specifies the address of the opcode.
565 INSN specifies the opcode to test.
566 RT1 receives the 'rt' field from the decoded instruction.
567 RT2 receives the 'rt2' field from the decoded instruction.
568 RN receives the 'rn' field from the decoded instruction.
569 IMM receives the 'imm' field from the decoded instruction.
570
571 Return 1 if the opcodes matches and is decoded, otherwise 0. */
572
573 static int
574 decode_stp_offset_wb (CORE_ADDR addr,
575 uint32_t insn,
576 unsigned *rt1, unsigned *rt2, unsigned *rn,
577 int32_t *imm)
578 {
579 if (decode_masked_match (insn, 0xffc00000, 0xa9800000))
580 {
581 *rt1 = (insn >> 0) & 0x1f;
582 *rn = (insn >> 5) & 0x1f;
583 *rt2 = (insn >> 10) & 0x1f;
584 *imm = extract_signed_bitfield (insn, 7, 15);
585 *imm <<= 3;
586
587 if (aarch64_debug)
588 fprintf_unfiltered (gdb_stdlog,
589 "decode: 0x%s 0x%x stp x%u, x%u, [x%u + #%d]!\n",
590 core_addr_to_string_nz (addr), insn,
591 *rt1, *rt2, *rn, *imm);
592 return 1;
593 }
594 return 0;
595 }
596
597 /* Decode an opcode if it represents the following instruction:
598 STUR rt, [rn, #imm]
599
600 ADDR specifies the address of the opcode.
601 INSN specifies the opcode to test.
602 IS64 receives size field from the decoded instruction.
603 RT receives the 'rt' field from the decoded instruction.
604 RN receives the 'rn' field from the decoded instruction.
605 IMM receives the 'imm' field from the decoded instruction.
606
607 Return 1 if the opcodes matches and is decoded, otherwise 0. */
608
609 static int
610 decode_stur (CORE_ADDR addr, uint32_t insn, int *is64, unsigned *rt,
611 unsigned *rn, int32_t *imm)
612 {
613 if (decode_masked_match (insn, 0xbfe00c00, 0xb8000000))
614 {
615 *is64 = (insn >> 30) & 1;
616 *rt = (insn >> 0) & 0x1f;
617 *rn = (insn >> 5) & 0x1f;
618 *imm = extract_signed_bitfield (insn, 9, 12);
619
620 if (aarch64_debug)
621 fprintf_unfiltered (gdb_stdlog,
622 "decode: 0x%s 0x%x stur %c%u, [x%u + #%d]\n",
623 core_addr_to_string_nz (addr), insn,
624 *is64 ? 'x' : 'w', *rt, *rn, *imm);
625 return 1;
626 }
627 return 0;
628 }
629
630 /* Decode an opcode if it represents a TBZ or TBNZ instruction.
631
632 ADDR specifies the address of the opcode.
633 INSN specifies the opcode to test.
634 IS_TBNZ receives the 'op' field from the decoded instruction.
635 BIT receives the bit position field from the decoded instruction.
636 RT receives 'rt' field from the decoded instruction.
637 IMM receives 'imm' field from the decoded instruction.
638
639 Return 1 if the opcodes matches and is decoded, otherwise 0. */
640
641 static int
642 decode_tb (CORE_ADDR addr, uint32_t insn, int *is_tbnz, unsigned *bit,
643 unsigned *rt, int32_t *imm)
644 {
645 /* tbz b011 0110 bbbb biii iiii iiii iiir rrrr */
646 /* tbnz B011 0111 bbbb biii iiii iiii iiir rrrr */
647 if (decode_masked_match (insn, 0x7e000000, 0x36000000))
648 {
649 *rt = (insn >> 0) & 0x1f;
650 *is_tbnz = (insn >> 24) & 0x1;
651 *bit = ((insn >> (31 - 4)) & 0x20) | ((insn >> 19) & 0x1f);
652 *imm = extract_signed_bitfield (insn, 14, 5) << 2;
653
654 if (aarch64_debug)
655 fprintf_unfiltered (gdb_stdlog,
656 "decode: 0x%s 0x%x %s x%u, #%u, 0x%s\n",
657 core_addr_to_string_nz (addr), insn,
658 *is_tbnz ? "tbnz" : "tbz", *rt, *bit,
659 core_addr_to_string_nz (addr + *imm));
660 return 1;
661 }
662 return 0;
663 }
664
665 /* Analyze a prologue, looking for a recognizable stack frame
666 and frame pointer. Scan until we encounter a store that could
667 clobber the stack frame unexpectedly, or an unknown instruction. */
668
669 static CORE_ADDR
670 aarch64_analyze_prologue (struct gdbarch *gdbarch,
671 CORE_ADDR start, CORE_ADDR limit,
672 struct aarch64_prologue_cache *cache)
673 {
674 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
675 int i;
676 pv_t regs[AARCH64_X_REGISTER_COUNT];
677 struct pv_area *stack;
678 struct cleanup *back_to;
679
680 for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
681 regs[i] = pv_register (i, 0);
682 stack = make_pv_area (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));
683 back_to = make_cleanup_free_pv_area (stack);
684
685 for (; start < limit; start += 4)
686 {
687 uint32_t insn;
688 unsigned rd;
689 unsigned rn;
690 unsigned rm;
691 unsigned rt;
692 unsigned rt1;
693 unsigned rt2;
694 int op_is_sub;
695 int32_t imm;
696 unsigned cond;
697 int is64;
698 int is_link;
699 int is_cbnz;
700 int is_tbnz;
701 unsigned bit;
702 int32_t offset;
703
704 insn = read_memory_unsigned_integer (start, 4, byte_order_for_code);
705
706 if (decode_add_sub_imm (start, insn, &rd, &rn, &imm))
707 regs[rd] = pv_add_constant (regs[rn], imm);
708 else if (decode_adrp (start, insn, &rd))
709 regs[rd] = pv_unknown ();
710 else if (decode_b (start, insn, &is_link, &offset))
711 {
712 /* Stop analysis on branch. */
713 break;
714 }
715 else if (decode_bcond (start, insn, &cond, &offset))
716 {
717 /* Stop analysis on branch. */
718 break;
719 }
720 else if (decode_br (start, insn, &is_link, &rn))
721 {
722 /* Stop analysis on branch. */
723 break;
724 }
725 else if (decode_cb (start, insn, &is64, &is_cbnz, &rn, &offset))
726 {
727 /* Stop analysis on branch. */
728 break;
729 }
730 else if (decode_eret (start, insn))
731 {
732 /* Stop analysis on branch. */
733 break;
734 }
735 else if (decode_movz (start, insn, &rd))
736 regs[rd] = pv_unknown ();
737 else
738 if (decode_orr_shifted_register_x (start, insn, &rd, &rn, &rm, &imm))
739 {
740 if (imm == 0 && rn == 31)
741 regs[rd] = regs[rm];
742 else
743 {
744 if (aarch64_debug)
745 fprintf_unfiltered
746 (gdb_stdlog,
747 "aarch64: prologue analysis gave up addr=0x%s "
748 "opcode=0x%x (orr x register)\n",
749 core_addr_to_string_nz (start),
750 insn);
751 break;
752 }
753 }
754 else if (decode_ret (start, insn, &rn))
755 {
756 /* Stop analysis on branch. */
757 break;
758 }
759 else if (decode_stur (start, insn, &is64, &rt, &rn, &offset))
760 {
761 pv_area_store (stack, pv_add_constant (regs[rn], offset),
762 is64 ? 8 : 4, regs[rt]);
763 }
764 else if (decode_stp_offset (start, insn, &rt1, &rt2, &rn, &imm))
765 {
766 /* If recording this store would invalidate the store area
767 (perhaps because rn is not known) then we should abandon
768 further prologue analysis. */
769 if (pv_area_store_would_trash (stack,
770 pv_add_constant (regs[rn], imm)))
771 break;
772
773 if (pv_area_store_would_trash (stack,
774 pv_add_constant (regs[rn], imm + 8)))
775 break;
776
777 pv_area_store (stack, pv_add_constant (regs[rn], imm), 8,
778 regs[rt1]);
779 pv_area_store (stack, pv_add_constant (regs[rn], imm + 8), 8,
780 regs[rt2]);
781 }
782 else if (decode_stp_offset_wb (start, insn, &rt1, &rt2, &rn, &imm))
783 {
784 /* If recording this store would invalidate the store area
785 (perhaps because rn is not known) then we should abandon
786 further prologue analysis. */
787 if (pv_area_store_would_trash (stack,
788 pv_add_constant (regs[rn], imm)))
789 break;
790
791 if (pv_area_store_would_trash (stack,
792 pv_add_constant (regs[rn], imm + 8)))
793 break;
794
795 pv_area_store (stack, pv_add_constant (regs[rn], imm), 8,
796 regs[rt1]);
797 pv_area_store (stack, pv_add_constant (regs[rn], imm + 8), 8,
798 regs[rt2]);
799 regs[rn] = pv_add_constant (regs[rn], imm);
800 }
801 else if (decode_tb (start, insn, &is_tbnz, &bit, &rn, &offset))
802 {
803 /* Stop analysis on branch. */
804 break;
805 }
806 else
807 {
808 if (aarch64_debug)
809 fprintf_unfiltered (gdb_stdlog,
810 "aarch64: prologue analysis gave up addr=0x%s"
811 " opcode=0x%x\n",
812 core_addr_to_string_nz (start), insn);
813 break;
814 }
815 }
816
817 if (cache == NULL)
818 {
819 do_cleanups (back_to);
820 return start;
821 }
822
823 if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
824 {
825 /* Frame pointer is fp. Frame size is constant. */
826 cache->framereg = AARCH64_FP_REGNUM;
827 cache->framesize = -regs[AARCH64_FP_REGNUM].k;
828 }
829 else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
830 {
831 /* Try the stack pointer. */
832 cache->framesize = -regs[AARCH64_SP_REGNUM].k;
833 cache->framereg = AARCH64_SP_REGNUM;
834 }
835 else
836 {
837 /* We're just out of luck. We don't know where the frame is. */
838 cache->framereg = -1;
839 cache->framesize = 0;
840 }
841
842 for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
843 {
844 CORE_ADDR offset;
845
846 if (pv_area_find_reg (stack, gdbarch, i, &offset))
847 cache->saved_regs[i].addr = offset;
848 }
849
850 do_cleanups (back_to);
851 return start;
852 }
853
854 /* Implement the "skip_prologue" gdbarch method. */
855
856 static CORE_ADDR
857 aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
858 {
859 unsigned long inst;
860 CORE_ADDR skip_pc;
861 CORE_ADDR func_addr, limit_pc;
862 struct symtab_and_line sal;
863
864 /* See if we can determine the end of the prologue via the symbol
865 table. If so, then return either PC, or the PC after the
866 prologue, whichever is greater. */
867 if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
868 {
869 CORE_ADDR post_prologue_pc
870 = skip_prologue_using_sal (gdbarch, func_addr);
871
872 if (post_prologue_pc != 0)
873 return max (pc, post_prologue_pc);
874 }
875
876 /* Can't determine prologue from the symbol table, need to examine
877 instructions. */
878
879 /* Find an upper limit on the function prologue using the debug
880 information. If the debug information could not be used to
881 provide that bound, then use an arbitrary large number as the
882 upper bound. */
883 limit_pc = skip_prologue_using_sal (gdbarch, pc);
884 if (limit_pc == 0)
885 limit_pc = pc + 128; /* Magic. */
886
887 /* Try disassembling prologue. */
888 return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
889 }
890
891 /* Scan the function prologue for THIS_FRAME and populate the prologue
892 cache CACHE. */
893
894 static void
895 aarch64_scan_prologue (struct frame_info *this_frame,
896 struct aarch64_prologue_cache *cache)
897 {
898 CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
899 CORE_ADDR prologue_start;
900 CORE_ADDR prologue_end;
901 CORE_ADDR prev_pc = get_frame_pc (this_frame);
902 struct gdbarch *gdbarch = get_frame_arch (this_frame);
903
904 cache->prev_pc = prev_pc;
905
906 /* Assume we do not find a frame. */
907 cache->framereg = -1;
908 cache->framesize = 0;
909
910 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
911 &prologue_end))
912 {
913 struct symtab_and_line sal = find_pc_line (prologue_start, 0);
914
915 if (sal.line == 0)
916 {
917 /* No line info so use the current PC. */
918 prologue_end = prev_pc;
919 }
920 else if (sal.end < prologue_end)
921 {
922 /* The next line begins after the function end. */
923 prologue_end = sal.end;
924 }
925
926 prologue_end = min (prologue_end, prev_pc);
927 aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
928 }
929 else
930 {
931 CORE_ADDR frame_loc;
932 LONGEST saved_fp;
933 LONGEST saved_lr;
934 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
935
936 frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
937 if (frame_loc == 0)
938 return;
939
940 cache->framereg = AARCH64_FP_REGNUM;
941 cache->framesize = 16;
942 cache->saved_regs[29].addr = 0;
943 cache->saved_regs[30].addr = 8;
944 }
945 }
946
/* Fill in *CACHE with information about the prologue of *THIS_FRAME.  This
   function may throw an exception if the inferior's registers or memory is
   not available.  On any early return, CACHE->available_p stays 0.  */

static void
aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
			       struct aarch64_prologue_cache *cache)
{
  CORE_ADDR unwound_fp;
  int reg;

  aarch64_scan_prologue (this_frame, cache);

  /* framereg == -1 means the scan found no identifiable frame.  */
  if (cache->framereg == -1)
    return;

  unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
  if (unwound_fp == 0)
    return;

  /* The caller's SP is the frame register plus the frame size.  */
  cache->prev_sp = unwound_fp + cache->framesize;

  /* Calculate actual addresses of saved registers using offsets
     determined by aarch64_analyze_prologue.  */
  for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
    if (trad_frame_addr_p (cache->saved_regs, reg))
      cache->saved_regs[reg].addr += cache->prev_sp;

  cache->func = get_frame_func (this_frame);

  cache->available_p = 1;
}
979
/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this is if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  /* Reuse a previously-built cache if there is one.  */
  if (*this_cache != NULL)
    return *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  /* Populate the cache; swallow NOT_AVAILABLE_ERROR so that a
     partially-available target still yields a (not-available) cache,
     but re-throw any other error.  */
  TRY
    {
      aarch64_make_prologue_cache_1 (this_frame, cache);
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
	throw_exception (ex);
    }
  END_CATCH

  return cache;
}
1010
1011 /* Implement the "stop_reason" frame_unwind method. */
1012
1013 static enum unwind_stop_reason
1014 aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
1015 void **this_cache)
1016 {
1017 struct aarch64_prologue_cache *cache
1018 = aarch64_make_prologue_cache (this_frame, this_cache);
1019
1020 if (!cache->available_p)
1021 return UNWIND_UNAVAILABLE;
1022
1023 /* Halt the backtrace at "_start". */
1024 if (cache->prev_pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
1025 return UNWIND_OUTERMOST;
1026
1027 /* We've hit a wall, stop. */
1028 if (cache->prev_sp == 0)
1029 return UNWIND_OUTERMOST;
1030
1031 return UNWIND_NO_REASON;
1032 }
1033
1034 /* Our frame ID for a normal frame is the current function's starting
1035 PC and the caller's SP when we were called. */
1036
1037 static void
1038 aarch64_prologue_this_id (struct frame_info *this_frame,
1039 void **this_cache, struct frame_id *this_id)
1040 {
1041 struct aarch64_prologue_cache *cache
1042 = aarch64_make_prologue_cache (this_frame, this_cache);
1043
1044 if (!cache->available_p)
1045 *this_id = frame_id_build_unavailable_stack (cache->func);
1046 else
1047 *this_id = frame_id_build (cache->prev_sp, cache->func);
1048 }
1049
1050 /* Implement the "prev_register" frame_unwind method. */
1051
1052 static struct value *
1053 aarch64_prologue_prev_register (struct frame_info *this_frame,
1054 void **this_cache, int prev_regnum)
1055 {
1056 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1057 struct aarch64_prologue_cache *cache
1058 = aarch64_make_prologue_cache (this_frame, this_cache);
1059
1060 /* If we are asked to unwind the PC, then we need to return the LR
1061 instead. The prologue may save PC, but it will point into this
1062 frame's prologue, not the next frame's resume location. */
1063 if (prev_regnum == AARCH64_PC_REGNUM)
1064 {
1065 CORE_ADDR lr;
1066
1067 lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
1068 return frame_unwind_got_constant (this_frame, prev_regnum, lr);
1069 }
1070
1071 /* SP is generally not saved to the stack, but this frame is
1072 identified by the next frame's stack pointer at the time of the
1073 call. The value was already reconstructed into PREV_SP. */
1074 /*
1075 +----------+ ^
1076 | saved lr | |
1077 +->| saved fp |--+
1078 | | |
1079 | | | <- Previous SP
1080 | +----------+
1081 | | saved lr |
1082 +--| saved fp |<- FP
1083 | |
1084 | |<- SP
1085 +----------+ */
1086 if (prev_regnum == AARCH64_SP_REGNUM)
1087 return frame_unwind_got_constant (this_frame, prev_regnum,
1088 cache->prev_sp);
1089
1090 return trad_frame_get_prev_register (this_frame, cache->saved_regs,
1091 prev_regnum);
1092 }
1093
1094 /* AArch64 prologue unwinder. */
struct frame_unwind aarch64_prologue_unwind =
{
  NORMAL_FRAME,				/* type  */
  aarch64_prologue_frame_unwind_stop_reason,
  aarch64_prologue_this_id,
  aarch64_prologue_prev_register,
  NULL,					/* unwind_data  */
  default_frame_sniffer			/* sniffer  */
};
1104
/* Allocate and fill in *THIS_CACHE with information about a stub frame
   based at *THIS_FRAME.  Do not do this if *THIS_CACHE was already
   allocated.  Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */
1109
static struct aarch64_prologue_cache *
aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  /* Reuse a previously built cache if there is one.  */
  if (*this_cache != NULL)
    return *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  TRY
    {
      /* Record the current SP and PC as the values to report for the
	 previous frame.  Reading them can fail when the target state
	 is unavailable (e.g. a trimmed core file), in which case
	 AVAILABLE_P is left clear.  */
      cache->prev_sp = get_frame_register_unsigned (this_frame,
						    AARCH64_SP_REGNUM);
      cache->prev_pc = get_frame_pc (this_frame);
      cache->available_p = 1;
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      /* Re-throw anything other than an "unavailable" error.  */
      if (ex.error != NOT_AVAILABLE_ERROR)
	throw_exception (ex);
    }
  END_CATCH

  return cache;
}
1138
1139 /* Implement the "stop_reason" frame_unwind method. */
1140
1141 static enum unwind_stop_reason
1142 aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame,
1143 void **this_cache)
1144 {
1145 struct aarch64_prologue_cache *cache
1146 = aarch64_make_stub_cache (this_frame, this_cache);
1147
1148 if (!cache->available_p)
1149 return UNWIND_UNAVAILABLE;
1150
1151 return UNWIND_NO_REASON;
1152 }
1153
1154 /* Our frame ID for a stub frame is the current SP and LR. */
1155
1156 static void
1157 aarch64_stub_this_id (struct frame_info *this_frame,
1158 void **this_cache, struct frame_id *this_id)
1159 {
1160 struct aarch64_prologue_cache *cache
1161 = aarch64_make_stub_cache (this_frame, this_cache);
1162
1163 if (cache->available_p)
1164 *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
1165 else
1166 *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
1167 }
1168
1169 /* Implement the "sniffer" frame_unwind method. */
1170
1171 static int
1172 aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
1173 struct frame_info *this_frame,
1174 void **this_prologue_cache)
1175 {
1176 CORE_ADDR addr_in_block;
1177 gdb_byte dummy[4];
1178
1179 addr_in_block = get_frame_address_in_block (this_frame);
1180 if (in_plt_section (addr_in_block)
1181 /* We also use the stub winder if the target memory is unreadable
1182 to avoid having the prologue unwinder trying to read it. */
1183 || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
1184 return 1;
1185
1186 return 0;
1187 }
1188
1189 /* AArch64 stub unwinder. */
struct frame_unwind aarch64_stub_unwind =
{
  NORMAL_FRAME,				/* type  */
  aarch64_stub_frame_unwind_stop_reason,
  aarch64_stub_this_id,
  aarch64_prologue_prev_register,	/* shared with the prologue unwinder  */
  NULL,					/* unwind_data  */
  aarch64_stub_unwind_sniffer
};
1199
1200 /* Return the frame base address of *THIS_FRAME. */
1201
1202 static CORE_ADDR
1203 aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
1204 {
1205 struct aarch64_prologue_cache *cache
1206 = aarch64_make_prologue_cache (this_frame, this_cache);
1207
1208 return cache->prev_sp - cache->framesize;
1209 }
1210
1211 /* AArch64 default frame base information. */
struct frame_base aarch64_normal_base =
{
  &aarch64_prologue_unwind,
  aarch64_normal_frame_base,	/* this_base  */
  aarch64_normal_frame_base,	/* this_locals  */
  aarch64_normal_frame_base	/* this_args  */
};
1219
1220 /* Assuming THIS_FRAME is a dummy, return the frame ID of that
1221 dummy frame. The frame ID's base needs to match the TOS value
1222 saved by save_dummy_frame_tos () and returned from
1223 aarch64_push_dummy_call, and the PC needs to match the dummy
1224 frame's breakpoint. */
1225
1226 static struct frame_id
1227 aarch64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
1228 {
1229 return frame_id_build (get_frame_register_unsigned (this_frame,
1230 AARCH64_SP_REGNUM),
1231 get_frame_pc (this_frame));
1232 }
1233
1234 /* Implement the "unwind_pc" gdbarch method. */
1235
1236 static CORE_ADDR
1237 aarch64_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
1238 {
1239 CORE_ADDR pc
1240 = frame_unwind_register_unsigned (this_frame, AARCH64_PC_REGNUM);
1241
1242 return pc;
1243 }
1244
1245 /* Implement the "unwind_sp" gdbarch method. */
1246
1247 static CORE_ADDR
1248 aarch64_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
1249 {
1250 return frame_unwind_register_unsigned (this_frame, AARCH64_SP_REGNUM);
1251 }
1252
1253 /* Return the value of the REGNUM register in the previous frame of
1254 *THIS_FRAME. */
1255
1256 static struct value *
1257 aarch64_dwarf2_prev_register (struct frame_info *this_frame,
1258 void **this_cache, int regnum)
1259 {
1260 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1261 CORE_ADDR lr;
1262
1263 switch (regnum)
1264 {
1265 case AARCH64_PC_REGNUM:
1266 lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
1267 return frame_unwind_got_constant (this_frame, regnum, lr);
1268
1269 default:
1270 internal_error (__FILE__, __LINE__,
1271 _("Unexpected register %d"), regnum);
1272 }
1273 }
1274
1275 /* Implement the "init_reg" dwarf2_frame_ops method. */
1276
1277 static void
1278 aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
1279 struct dwarf2_frame_state_reg *reg,
1280 struct frame_info *this_frame)
1281 {
1282 switch (regnum)
1283 {
1284 case AARCH64_PC_REGNUM:
1285 reg->how = DWARF2_FRAME_REG_FN;
1286 reg->loc.fn = aarch64_dwarf2_prev_register;
1287 break;
1288 case AARCH64_SP_REGNUM:
1289 reg->how = DWARF2_FRAME_REG_CFA;
1290 break;
1291 }
1292 }
1293
1294 /* When arguments must be pushed onto the stack, they go on in reverse
1295 order. The code below implements a FILO (stack) to do this. */
1296
typedef struct
{
  /* Value to pass on stack.  Non-owning pointer into the caller's
     buffer; the bytes are copied out by aarch64_push_dummy_call before
     the buffer goes away.  */
  const void *data;

  /* Size in bytes of value to pass on stack.  */
  int len;
} stack_item_t;

DEF_VEC_O (stack_item_t);
1307
1308 /* Return the alignment (in bytes) of the given type. */
1309
static int
aarch64_type_align (struct type *t)
{
  int n;
  int align;
  int falign;

  t = check_typedef (t);
  switch (TYPE_CODE (t))
    {
    default:
      /* Should never happen.  */
      internal_error (__FILE__, __LINE__, _("unknown type alignment"));
      return 4;

      /* Scalar types are naturally aligned: alignment equals size.  */
    case TYPE_CODE_PTR:
    case TYPE_CODE_ENUM:
    case TYPE_CODE_INT:
    case TYPE_CODE_FLT:
    case TYPE_CODE_SET:
    case TYPE_CODE_RANGE:
    case TYPE_CODE_BITSTRING:
    case TYPE_CODE_REF:
    case TYPE_CODE_CHAR:
    case TYPE_CODE_BOOL:
      return TYPE_LENGTH (t);

      /* Arrays and complex types align as their element type.  */
    case TYPE_CODE_ARRAY:
    case TYPE_CODE_COMPLEX:
      return aarch64_type_align (TYPE_TARGET_TYPE (t));

      /* Aggregates align to their most strictly aligned member.  */
    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      align = 1;
      for (n = 0; n < TYPE_NFIELDS (t); n++)
	{
	  falign = aarch64_type_align (TYPE_FIELD_TYPE (t, n));
	  if (falign > align)
	    align = falign;
	}
      return align;
    }
}
1353
1354 /* Return 1 if *TY is a homogeneous floating-point aggregate as
1355 defined in the AAPCS64 ABI document; otherwise return 0. */
1356
1357 static int
1358 is_hfa (struct type *ty)
1359 {
1360 switch (TYPE_CODE (ty))
1361 {
1362 case TYPE_CODE_ARRAY:
1363 {
1364 struct type *target_ty = TYPE_TARGET_TYPE (ty);
1365 if (TYPE_CODE (target_ty) == TYPE_CODE_FLT && TYPE_LENGTH (ty) <= 4)
1366 return 1;
1367 break;
1368 }
1369
1370 case TYPE_CODE_UNION:
1371 case TYPE_CODE_STRUCT:
1372 {
1373 if (TYPE_NFIELDS (ty) > 0 && TYPE_NFIELDS (ty) <= 4)
1374 {
1375 struct type *member0_type;
1376
1377 member0_type = check_typedef (TYPE_FIELD_TYPE (ty, 0));
1378 if (TYPE_CODE (member0_type) == TYPE_CODE_FLT)
1379 {
1380 int i;
1381
1382 for (i = 0; i < TYPE_NFIELDS (ty); i++)
1383 {
1384 struct type *member1_type;
1385
1386 member1_type = check_typedef (TYPE_FIELD_TYPE (ty, i));
1387 if (TYPE_CODE (member0_type) != TYPE_CODE (member1_type)
1388 || (TYPE_LENGTH (member0_type)
1389 != TYPE_LENGTH (member1_type)))
1390 return 0;
1391 }
1392 return 1;
1393 }
1394 }
1395 return 0;
1396 }
1397
1398 default:
1399 break;
1400 }
1401
1402 return 0;
1403 }
1404
1405 /* AArch64 function call information structure. */
struct aarch64_call_info
{
  /* The current argument number.  */
  unsigned argnum;

  /* The next general purpose register number, equivalent to NGRN as
     described in the AArch64 Procedure Call Standard.  */
  unsigned ngrn;

  /* The next SIMD and floating point register number, equivalent to
     NSRN as described in the AArch64 Procedure Call Standard.  */
  unsigned nsrn;

  /* The next stacked argument address, equivalent to NSAA as
     described in the AArch64 Procedure Call Standard.  Held as a byte
     offset from the start of the stacked-argument area.  */
  unsigned nsaa;

  /* Stack item vector.  Items are pushed in argument order and later
     written out below SP in reverse (see aarch64_push_dummy_call).  */
  VEC(stack_item_t) *si;
};
1426
/* Pass a value in a sequence of consecutive X registers.  The caller
   is responsible for ensuring sufficient registers are available.  */
1429
static void
pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
	   struct aarch64_call_info *info, struct type *type,
	   const bfd_byte *buf)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int len = TYPE_LENGTH (type);
  enum type_code typecode = TYPE_CODE (type);
  int regnum = AARCH64_X0_REGNUM + info->ngrn;

  info->argnum++;

  /* Copy the value into X registers one 8-byte chunk at a time.  Note
     that INFO->ngrn is not advanced here; the caller (see
     pass_in_x_or_stack) accounts for the registers consumed.  */
  while (len > 0)
    {
      int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
      CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
						   byte_order);


      /* Adjust sub-word struct/union args when big-endian.  */
      if (byte_order == BFD_ENDIAN_BIG
	  && partial_len < X_REGISTER_SIZE
	  && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
	regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);

      if (aarch64_debug)
	fprintf_unfiltered (gdb_stdlog, "arg %d in %s = 0x%s\n",
			    info->argnum,
			    gdbarch_register_name (gdbarch, regnum),
			    phex (regval, X_REGISTER_SIZE));
      regcache_cooked_write_unsigned (regcache, regnum, regval);
      len -= partial_len;
      buf += partial_len;
      regnum++;
    }
}
1466
1467 /* Attempt to marshall a value in a V register. Return 1 if
1468 successful, or 0 if insufficient registers are available. This
1469 function, unlike the equivalent pass_in_x() function does not
1470 handle arguments spread across multiple registers. */
1471
1472 static int
1473 pass_in_v (struct gdbarch *gdbarch,
1474 struct regcache *regcache,
1475 struct aarch64_call_info *info,
1476 const bfd_byte *buf)
1477 {
1478 if (info->nsrn < 8)
1479 {
1480 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1481 int regnum = AARCH64_V0_REGNUM + info->nsrn;
1482
1483 info->argnum++;
1484 info->nsrn++;
1485
1486 regcache_cooked_write (regcache, regnum, buf);
1487 if (aarch64_debug)
1488 fprintf_unfiltered (gdb_stdlog, "arg %d in %s\n",
1489 info->argnum,
1490 gdbarch_register_name (gdbarch, regnum));
1491 return 1;
1492 }
1493 info->nsrn = 8;
1494 return 0;
1495 }
1496
1497 /* Marshall an argument onto the stack. */
1498
1499 static void
1500 pass_on_stack (struct aarch64_call_info *info, struct type *type,
1501 const bfd_byte *buf)
1502 {
1503 int len = TYPE_LENGTH (type);
1504 int align;
1505 stack_item_t item;
1506
1507 info->argnum++;
1508
1509 align = aarch64_type_align (type);
1510
1511 /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
1512 Natural alignment of the argument's type. */
1513 align = align_up (align, 8);
1514
1515 /* The AArch64 PCS requires at most doubleword alignment. */
1516 if (align > 16)
1517 align = 16;
1518
1519 if (aarch64_debug)
1520 fprintf_unfiltered (gdb_stdlog, "arg %d len=%d @ sp + %d\n",
1521 info->argnum, len, info->nsaa);
1522
1523 item.len = len;
1524 item.data = buf;
1525 VEC_safe_push (stack_item_t, info->si, &item);
1526
1527 info->nsaa += len;
1528 if (info->nsaa & (align - 1))
1529 {
1530 /* Push stack alignment padding. */
1531 int pad = align - (info->nsaa & (align - 1));
1532
1533 item.len = pad;
1534 item.data = buf;
1535
1536 VEC_safe_push (stack_item_t, info->si, &item);
1537 info->nsaa += pad;
1538 }
1539 }
1540
1541 /* Marshall an argument into a sequence of one or more consecutive X
1542 registers or, if insufficient X registers are available then onto
1543 the stack. */
1544
1545 static void
1546 pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
1547 struct aarch64_call_info *info, struct type *type,
1548 const bfd_byte *buf)
1549 {
1550 int len = TYPE_LENGTH (type);
1551 int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;
1552
1553 /* PCS C.13 - Pass in registers if we have enough spare */
1554 if (info->ngrn + nregs <= 8)
1555 {
1556 pass_in_x (gdbarch, regcache, info, type, buf);
1557 info->ngrn += nregs;
1558 }
1559 else
1560 {
1561 info->ngrn = 8;
1562 pass_on_stack (info, type, buf);
1563 }
1564 }
1565
1566 /* Pass a value in a V register, or on the stack if insufficient are
1567 available. */
1568
1569 static void
1570 pass_in_v_or_stack (struct gdbarch *gdbarch,
1571 struct regcache *regcache,
1572 struct aarch64_call_info *info,
1573 struct type *type,
1574 const bfd_byte *buf)
1575 {
1576 if (!pass_in_v (gdbarch, regcache, info, buf))
1577 pass_on_stack (info, type, buf);
1578 }
1579
1580 /* Implement the "push_dummy_call" gdbarch method. */
1581
1582 static CORE_ADDR
1583 aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
1584 struct regcache *regcache, CORE_ADDR bp_addr,
1585 int nargs,
1586 struct value **args, CORE_ADDR sp, int struct_return,
1587 CORE_ADDR struct_addr)
1588 {
1589 int nstack = 0;
1590 int argnum;
1591 int x_argreg;
1592 int v_argreg;
1593 struct aarch64_call_info info;
1594 struct type *func_type;
1595 struct type *return_type;
1596 int lang_struct_return;
1597
1598 memset (&info, 0, sizeof (info));
1599
1600 /* We need to know what the type of the called function is in order
1601 to determine the number of named/anonymous arguments for the
1602 actual argument placement, and the return type in order to handle
1603 return value correctly.
1604
1605 The generic code above us views the decision of return in memory
1606 or return in registers as a two stage processes. The language
1607 handler is consulted first and may decide to return in memory (eg
1608 class with copy constructor returned by value), this will cause
1609 the generic code to allocate space AND insert an initial leading
1610 argument.
1611
1612 If the language code does not decide to pass in memory then the
1613 target code is consulted.
1614
1615 If the language code decides to pass in memory we want to move
1616 the pointer inserted as the initial argument from the argument
1617 list and into X8, the conventional AArch64 struct return pointer
1618 register.
1619
1620 This is slightly awkward, ideally the flag "lang_struct_return"
1621 would be passed to the targets implementation of push_dummy_call.
1622 Rather that change the target interface we call the language code
1623 directly ourselves. */
1624
1625 func_type = check_typedef (value_type (function));
1626
1627 /* Dereference function pointer types. */
1628 if (TYPE_CODE (func_type) == TYPE_CODE_PTR)
1629 func_type = TYPE_TARGET_TYPE (func_type);
1630
1631 gdb_assert (TYPE_CODE (func_type) == TYPE_CODE_FUNC
1632 || TYPE_CODE (func_type) == TYPE_CODE_METHOD);
1633
1634 /* If language_pass_by_reference () returned true we will have been
1635 given an additional initial argument, a hidden pointer to the
1636 return slot in memory. */
1637 return_type = TYPE_TARGET_TYPE (func_type);
1638 lang_struct_return = language_pass_by_reference (return_type);
1639
1640 /* Set the return address. For the AArch64, the return breakpoint
1641 is always at BP_ADDR. */
1642 regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);
1643
1644 /* If we were given an initial argument for the return slot because
1645 lang_struct_return was true, lose it. */
1646 if (lang_struct_return)
1647 {
1648 args++;
1649 nargs--;
1650 }
1651
1652 /* The struct_return pointer occupies X8. */
1653 if (struct_return || lang_struct_return)
1654 {
1655 if (aarch64_debug)
1656 fprintf_unfiltered (gdb_stdlog, "struct return in %s = 0x%s\n",
1657 gdbarch_register_name
1658 (gdbarch,
1659 AARCH64_STRUCT_RETURN_REGNUM),
1660 paddress (gdbarch, struct_addr));
1661 regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
1662 struct_addr);
1663 }
1664
1665 for (argnum = 0; argnum < nargs; argnum++)
1666 {
1667 struct value *arg = args[argnum];
1668 struct type *arg_type;
1669 int len;
1670
1671 arg_type = check_typedef (value_type (arg));
1672 len = TYPE_LENGTH (arg_type);
1673
1674 switch (TYPE_CODE (arg_type))
1675 {
1676 case TYPE_CODE_INT:
1677 case TYPE_CODE_BOOL:
1678 case TYPE_CODE_CHAR:
1679 case TYPE_CODE_RANGE:
1680 case TYPE_CODE_ENUM:
1681 if (len < 4)
1682 {
1683 /* Promote to 32 bit integer. */
1684 if (TYPE_UNSIGNED (arg_type))
1685 arg_type = builtin_type (gdbarch)->builtin_uint32;
1686 else
1687 arg_type = builtin_type (gdbarch)->builtin_int32;
1688 arg = value_cast (arg_type, arg);
1689 }
1690 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type,
1691 value_contents (arg));
1692 break;
1693
1694 case TYPE_CODE_COMPLEX:
1695 if (info.nsrn <= 6)
1696 {
1697 const bfd_byte *buf = value_contents (arg);
1698 struct type *target_type =
1699 check_typedef (TYPE_TARGET_TYPE (arg_type));
1700
1701 pass_in_v (gdbarch, regcache, &info, buf);
1702 pass_in_v (gdbarch, regcache, &info,
1703 buf + TYPE_LENGTH (target_type));
1704 }
1705 else
1706 {
1707 info.nsrn = 8;
1708 pass_on_stack (&info, arg_type, value_contents (arg));
1709 }
1710 break;
1711 case TYPE_CODE_FLT:
1712 pass_in_v_or_stack (gdbarch, regcache, &info, arg_type,
1713 value_contents (arg));
1714 break;
1715
1716 case TYPE_CODE_STRUCT:
1717 case TYPE_CODE_ARRAY:
1718 case TYPE_CODE_UNION:
1719 if (is_hfa (arg_type))
1720 {
1721 int elements = TYPE_NFIELDS (arg_type);
1722
1723 /* Homogeneous Aggregates */
1724 if (info.nsrn + elements < 8)
1725 {
1726 int i;
1727
1728 for (i = 0; i < elements; i++)
1729 {
1730 /* We know that we have sufficient registers
1731 available therefore this will never fallback
1732 to the stack. */
1733 struct value *field =
1734 value_primitive_field (arg, 0, i, arg_type);
1735 struct type *field_type =
1736 check_typedef (value_type (field));
1737
1738 pass_in_v_or_stack (gdbarch, regcache, &info, field_type,
1739 value_contents_writeable (field));
1740 }
1741 }
1742 else
1743 {
1744 info.nsrn = 8;
1745 pass_on_stack (&info, arg_type, value_contents (arg));
1746 }
1747 }
1748 else if (len > 16)
1749 {
1750 /* PCS B.7 Aggregates larger than 16 bytes are passed by
1751 invisible reference. */
1752
1753 /* Allocate aligned storage. */
1754 sp = align_down (sp - len, 16);
1755
1756 /* Write the real data into the stack. */
1757 write_memory (sp, value_contents (arg), len);
1758
1759 /* Construct the indirection. */
1760 arg_type = lookup_pointer_type (arg_type);
1761 arg = value_from_pointer (arg_type, sp);
1762 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type,
1763 value_contents (arg));
1764 }
1765 else
1766 /* PCS C.15 / C.18 multiple values pass. */
1767 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type,
1768 value_contents (arg));
1769 break;
1770
1771 default:
1772 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type,
1773 value_contents (arg));
1774 break;
1775 }
1776 }
1777
1778 /* Make sure stack retains 16 byte alignment. */
1779 if (info.nsaa & 15)
1780 sp -= 16 - (info.nsaa & 15);
1781
1782 while (!VEC_empty (stack_item_t, info.si))
1783 {
1784 stack_item_t *si = VEC_last (stack_item_t, info.si);
1785
1786 sp -= si->len;
1787 write_memory (sp, si->data, si->len);
1788 VEC_pop (stack_item_t, info.si);
1789 }
1790
1791 VEC_free (stack_item_t, info.si);
1792
1793 /* Finally, update the SP register. */
1794 regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);
1795
1796 return sp;
1797 }
1798
1799 /* Implement the "frame_align" gdbarch method. */
1800
1801 static CORE_ADDR
1802 aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
1803 {
1804 /* Align the stack to sixteen bytes. */
1805 return sp & ~(CORE_ADDR) 15;
1806 }
1807
/* Return the type for an AdvSIMD Q register.  */
1809
1810 static struct type *
1811 aarch64_vnq_type (struct gdbarch *gdbarch)
1812 {
1813 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1814
1815 if (tdep->vnq_type == NULL)
1816 {
1817 struct type *t;
1818 struct type *elem;
1819
1820 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
1821 TYPE_CODE_UNION);
1822
1823 elem = builtin_type (gdbarch)->builtin_uint128;
1824 append_composite_type_field (t, "u", elem);
1825
1826 elem = builtin_type (gdbarch)->builtin_int128;
1827 append_composite_type_field (t, "s", elem);
1828
1829 tdep->vnq_type = t;
1830 }
1831
1832 return tdep->vnq_type;
1833 }
1834
/* Return the type for an AdvSIMD D register.  */
1836
1837 static struct type *
1838 aarch64_vnd_type (struct gdbarch *gdbarch)
1839 {
1840 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1841
1842 if (tdep->vnd_type == NULL)
1843 {
1844 struct type *t;
1845 struct type *elem;
1846
1847 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
1848 TYPE_CODE_UNION);
1849
1850 elem = builtin_type (gdbarch)->builtin_double;
1851 append_composite_type_field (t, "f", elem);
1852
1853 elem = builtin_type (gdbarch)->builtin_uint64;
1854 append_composite_type_field (t, "u", elem);
1855
1856 elem = builtin_type (gdbarch)->builtin_int64;
1857 append_composite_type_field (t, "s", elem);
1858
1859 tdep->vnd_type = t;
1860 }
1861
1862 return tdep->vnd_type;
1863 }
1864
/* Return the type for an AdvSIMD S register.  */
1866
1867 static struct type *
1868 aarch64_vns_type (struct gdbarch *gdbarch)
1869 {
1870 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1871
1872 if (tdep->vns_type == NULL)
1873 {
1874 struct type *t;
1875 struct type *elem;
1876
1877 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
1878 TYPE_CODE_UNION);
1879
1880 elem = builtin_type (gdbarch)->builtin_float;
1881 append_composite_type_field (t, "f", elem);
1882
1883 elem = builtin_type (gdbarch)->builtin_uint32;
1884 append_composite_type_field (t, "u", elem);
1885
1886 elem = builtin_type (gdbarch)->builtin_int32;
1887 append_composite_type_field (t, "s", elem);
1888
1889 tdep->vns_type = t;
1890 }
1891
1892 return tdep->vns_type;
1893 }
1894
/* Return the type for an AdvSIMD H register.  */
1896
1897 static struct type *
1898 aarch64_vnh_type (struct gdbarch *gdbarch)
1899 {
1900 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1901
1902 if (tdep->vnh_type == NULL)
1903 {
1904 struct type *t;
1905 struct type *elem;
1906
1907 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
1908 TYPE_CODE_UNION);
1909
1910 elem = builtin_type (gdbarch)->builtin_uint16;
1911 append_composite_type_field (t, "u", elem);
1912
1913 elem = builtin_type (gdbarch)->builtin_int16;
1914 append_composite_type_field (t, "s", elem);
1915
1916 tdep->vnh_type = t;
1917 }
1918
1919 return tdep->vnh_type;
1920 }
1921
/* Return the type for an AdvSIMD B register.  */
1923
1924 static struct type *
1925 aarch64_vnb_type (struct gdbarch *gdbarch)
1926 {
1927 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1928
1929 if (tdep->vnb_type == NULL)
1930 {
1931 struct type *t;
1932 struct type *elem;
1933
1934 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
1935 TYPE_CODE_UNION);
1936
1937 elem = builtin_type (gdbarch)->builtin_uint8;
1938 append_composite_type_field (t, "u", elem);
1939
1940 elem = builtin_type (gdbarch)->builtin_int8;
1941 append_composite_type_field (t, "s", elem);
1942
1943 tdep->vnb_type = t;
1944 }
1945
1946 return tdep->vnb_type;
1947 }
1948
1949 /* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
1950
1951 static int
1952 aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
1953 {
1954 if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
1955 return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;
1956
1957 if (reg == AARCH64_DWARF_SP)
1958 return AARCH64_SP_REGNUM;
1959
1960 if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
1961 return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;
1962
1963 return -1;
1964 }
1965 \f
1966
1967 /* Implement the "print_insn" gdbarch method. */
1968
static int
aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
{
  /* NOTE(review): the symbol table pointer is cleared before
     delegating to the opcodes disassembler -- presumably so it does
     not consult symbol information; confirm against other tdep
     files.  */
  info->symbols = NULL;
  return print_insn_aarch64 (memaddr, info);
}
1975
1976 /* AArch64 BRK software debug mode instruction.
1977 Note that AArch64 code is always little-endian.
1978 1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000. */
1979 static const gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
1980
1981 /* Implement the "breakpoint_from_pc" gdbarch method. */
1982
1983 static const gdb_byte *
1984 aarch64_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr,
1985 int *lenptr)
1986 {
1987 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1988
1989 *lenptr = sizeof (aarch64_default_breakpoint);
1990 return aarch64_default_breakpoint;
1991 }
1992
1993 /* Extract from an array REGS containing the (raw) register state a
1994 function return value of type TYPE, and copy that, in virtual
1995 format, into VALBUF. */
1996
static void
aarch64_extract_return_value (struct type *type, struct regcache *regs,
			      gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = get_regcache_arch (regs);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  if (TYPE_CODE (type) == TYPE_CODE_FLT)
    {
      /* Floating-point results come back in V0.  Assumes LEN fits in
	 one V register -- TODO confirm for oversized FP types.  */
      bfd_byte buf[V_REGISTER_SIZE];
      int len = TYPE_LENGTH (type);

      regcache_cooked_read (regs, AARCH64_V0_REGNUM, buf);
      memcpy (valbuf, buf, len);
    }
  else if (TYPE_CODE (type) == TYPE_CODE_INT
	   || TYPE_CODE (type) == TYPE_CODE_CHAR
	   || TYPE_CODE (type) == TYPE_CODE_BOOL
	   || TYPE_CODE (type) == TYPE_CODE_PTR
	   || TYPE_CODE (type) == TYPE_CODE_REF
	   || TYPE_CODE (type) == TYPE_CODE_ENUM)
    {
      /* If the type is a plain integer, then the access is
	 straight-forward.  Otherwise we have to play around a bit
	 more.  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      ULONGEST tmp;

      while (len > 0)
	{
	  /* By using store_unsigned_integer we avoid having to do
	     anything special for small big-endian values.  */
	  regcache_cooked_read_unsigned (regs, regno++, &tmp);
	  store_unsigned_integer (valbuf,
				  (len > X_REGISTER_SIZE
				   ? X_REGISTER_SIZE : len), byte_order, tmp);
	  len -= X_REGISTER_SIZE;
	  valbuf += X_REGISTER_SIZE;
	}
    }
  else if (TYPE_CODE (type) == TYPE_CODE_COMPLEX)
    {
      /* A complex value occupies two consecutive V registers, one
	 component per register.  */
      int regno = AARCH64_V0_REGNUM;
      bfd_byte buf[V_REGISTER_SIZE];
      struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
      int len = TYPE_LENGTH (target_type);

      regcache_cooked_read (regs, regno, buf);
      memcpy (valbuf, buf, len);
      valbuf += len;
      regcache_cooked_read (regs, regno + 1, buf);
      memcpy (valbuf, buf, len);
      valbuf += len;
    }
  else if (is_hfa (type))
    {
      /* A homogeneous floating-point aggregate comes back with one
	 member per V register, starting at V0.  */
      int elements = TYPE_NFIELDS (type);
      struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
      int len = TYPE_LENGTH (member_type);
      int i;

      for (i = 0; i < elements; i++)
	{
	  int regno = AARCH64_V0_REGNUM + i;
	  bfd_byte buf[X_REGISTER_SIZE];

	  if (aarch64_debug)
	    fprintf_unfiltered (gdb_stdlog,
				"read HFA return value element %d from %s\n",
				i + 1,
				gdbarch_register_name (gdbarch, regno));
	  regcache_cooked_read (regs, regno, buf);

	  memcpy (valbuf, buf, len);
	  valbuf += len;
	}
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
	 been stored to word-aligned memory and then loaded into
	 registers with 64-bit load instruction(s).  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      bfd_byte buf[X_REGISTER_SIZE];

      while (len > 0)
	{
	  regcache_cooked_read (regs, regno++, buf);
	  memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
	  len -= X_REGISTER_SIZE;
	  valbuf += X_REGISTER_SIZE;
	}
    }
}
2093
2094
2095 /* Will a function return an aggregate type in memory or in a
2096 register? Return 0 if an aggregate type can be returned in a
2097 register, 1 if it must be returned in memory. */
2098
static int
aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
{
  type = check_typedef (type);

  /* In the AArch64 ABI, "integer" like aggregate types are returned
     in registers.  For an aggregate type to be integer like, its size
     must be less than or equal to 4 * X_REGISTER_SIZE.  */

  if (is_hfa (type))
    {
      /* PCS B.5 If the argument is a Named HFA, then the argument is
	 used unmodified.  */
      return 0;
    }

  if (TYPE_LENGTH (type) > 16)
    {
      /* PCS B.6 Aggregates larger than 16 bytes are passed by
	 invisible reference.  */

      return 1;
    }

  return 0;
}
2128
2129 /* Write into appropriate registers a function return value of type
2130 TYPE, given in virtual format. */
2131
static void
aarch64_store_return_value (struct type *type, struct regcache *regs,
			    const gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = get_regcache_arch (regs);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  if (TYPE_CODE (type) == TYPE_CODE_FLT)
    {
      /* Floating-point results go in V0.  */
      bfd_byte buf[V_REGISTER_SIZE];
      int len = TYPE_LENGTH (type);

      memcpy (buf, valbuf, len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
      regcache_cooked_write (regs, AARCH64_V0_REGNUM, buf);
    }
  else if (TYPE_CODE (type) == TYPE_CODE_INT
	   || TYPE_CODE (type) == TYPE_CODE_CHAR
	   || TYPE_CODE (type) == TYPE_CODE_BOOL
	   || TYPE_CODE (type) == TYPE_CODE_PTR
	   || TYPE_CODE (type) == TYPE_CODE_REF
	   || TYPE_CODE (type) == TYPE_CODE_ENUM)
    {
      if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
	{
	  /* Values of one word or less are zero/sign-extended and
	     returned in X0.  */
	  bfd_byte tmpbuf[X_REGISTER_SIZE];
	  LONGEST val = unpack_long (type, valbuf);

	  store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
	  regcache_cooked_write (regs, AARCH64_X0_REGNUM, tmpbuf);
	}
      else
	{
	  /* Integral values greater than one word are stored in
	     consecutive registers starting with X0.  This will always
	     be a multiple of the register size.  */
	  int len = TYPE_LENGTH (type);
	  int regno = AARCH64_X0_REGNUM;

	  while (len > 0)
	    {
	      regcache_cooked_write (regs, regno++, valbuf);
	      len -= X_REGISTER_SIZE;
	      valbuf += X_REGISTER_SIZE;
	    }
	}
    }
  else if (is_hfa (type))
    {
      /* A homogeneous floating-point aggregate is stored one member
	 per V register, starting at V0.  */
      int elements = TYPE_NFIELDS (type);
      struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
      int len = TYPE_LENGTH (member_type);
      int i;

      for (i = 0; i < elements; i++)
	{
	  int regno = AARCH64_V0_REGNUM + i;
	  bfd_byte tmpbuf[MAX_REGISTER_SIZE];

	  if (aarch64_debug)
	    fprintf_unfiltered (gdb_stdlog,
				"write HFA return value element %d to %s\n",
				i + 1,
				gdbarch_register_name (gdbarch, regno));

	  memcpy (tmpbuf, valbuf, len);
	  regcache_cooked_write (regs, regno, tmpbuf);
	  valbuf += len;
	}
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
	 been stored to word-aligned memory and then loaded into
	 registers with 64-bit load instruction(s).  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      bfd_byte tmpbuf[X_REGISTER_SIZE];

      while (len > 0)
	{
	  memcpy (tmpbuf, valbuf,
		  len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
	  regcache_cooked_write (regs, regno++, tmpbuf);
	  len -= X_REGISTER_SIZE;
	  valbuf += X_REGISTER_SIZE;
	}
    }
}
2222
2223 /* Implement the "return_value" gdbarch method. */
2224
2225 static enum return_value_convention
2226 aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
2227 struct type *valtype, struct regcache *regcache,
2228 gdb_byte *readbuf, const gdb_byte *writebuf)
2229 {
2230 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2231
2232 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
2233 || TYPE_CODE (valtype) == TYPE_CODE_UNION
2234 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
2235 {
2236 if (aarch64_return_in_memory (gdbarch, valtype))
2237 {
2238 if (aarch64_debug)
2239 fprintf_unfiltered (gdb_stdlog, "return value in memory\n");
2240 return RETURN_VALUE_STRUCT_CONVENTION;
2241 }
2242 }
2243
2244 if (writebuf)
2245 aarch64_store_return_value (valtype, regcache, writebuf);
2246
2247 if (readbuf)
2248 aarch64_extract_return_value (valtype, regcache, readbuf);
2249
2250 if (aarch64_debug)
2251 fprintf_unfiltered (gdb_stdlog, "return value in registers\n");
2252
2253 return RETURN_VALUE_REGISTER_CONVENTION;
2254 }
2255
2256 /* Implement the "get_longjmp_target" gdbarch method. */
2257
2258 static int
2259 aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2260 {
2261 CORE_ADDR jb_addr;
2262 gdb_byte buf[X_REGISTER_SIZE];
2263 struct gdbarch *gdbarch = get_frame_arch (frame);
2264 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2265 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2266
2267 jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
2268
2269 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
2270 X_REGISTER_SIZE))
2271 return 0;
2272
2273 *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
2274 return 1;
2275 }
2276
2277 /* Implement the "gen_return_address" gdbarch method. */
2278
2279 static void
2280 aarch64_gen_return_address (struct gdbarch *gdbarch,
2281 struct agent_expr *ax, struct axs_value *value,
2282 CORE_ADDR scope)
2283 {
2284 value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
2285 value->kind = axs_lvalue_register;
2286 value->u.reg = AARCH64_LR_REGNUM;
2287 }
2288 \f
2289
2290 /* Return the pseudo register name corresponding to register regnum. */
2291
2292 static const char *
2293 aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
2294 {
2295 static const char *const q_name[] =
2296 {
2297 "q0", "q1", "q2", "q3",
2298 "q4", "q5", "q6", "q7",
2299 "q8", "q9", "q10", "q11",
2300 "q12", "q13", "q14", "q15",
2301 "q16", "q17", "q18", "q19",
2302 "q20", "q21", "q22", "q23",
2303 "q24", "q25", "q26", "q27",
2304 "q28", "q29", "q30", "q31",
2305 };
2306
2307 static const char *const d_name[] =
2308 {
2309 "d0", "d1", "d2", "d3",
2310 "d4", "d5", "d6", "d7",
2311 "d8", "d9", "d10", "d11",
2312 "d12", "d13", "d14", "d15",
2313 "d16", "d17", "d18", "d19",
2314 "d20", "d21", "d22", "d23",
2315 "d24", "d25", "d26", "d27",
2316 "d28", "d29", "d30", "d31",
2317 };
2318
2319 static const char *const s_name[] =
2320 {
2321 "s0", "s1", "s2", "s3",
2322 "s4", "s5", "s6", "s7",
2323 "s8", "s9", "s10", "s11",
2324 "s12", "s13", "s14", "s15",
2325 "s16", "s17", "s18", "s19",
2326 "s20", "s21", "s22", "s23",
2327 "s24", "s25", "s26", "s27",
2328 "s28", "s29", "s30", "s31",
2329 };
2330
2331 static const char *const h_name[] =
2332 {
2333 "h0", "h1", "h2", "h3",
2334 "h4", "h5", "h6", "h7",
2335 "h8", "h9", "h10", "h11",
2336 "h12", "h13", "h14", "h15",
2337 "h16", "h17", "h18", "h19",
2338 "h20", "h21", "h22", "h23",
2339 "h24", "h25", "h26", "h27",
2340 "h28", "h29", "h30", "h31",
2341 };
2342
2343 static const char *const b_name[] =
2344 {
2345 "b0", "b1", "b2", "b3",
2346 "b4", "b5", "b6", "b7",
2347 "b8", "b9", "b10", "b11",
2348 "b12", "b13", "b14", "b15",
2349 "b16", "b17", "b18", "b19",
2350 "b20", "b21", "b22", "b23",
2351 "b24", "b25", "b26", "b27",
2352 "b28", "b29", "b30", "b31",
2353 };
2354
2355 regnum -= gdbarch_num_regs (gdbarch);
2356
2357 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2358 return q_name[regnum - AARCH64_Q0_REGNUM];
2359
2360 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2361 return d_name[regnum - AARCH64_D0_REGNUM];
2362
2363 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2364 return s_name[regnum - AARCH64_S0_REGNUM];
2365
2366 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2367 return h_name[regnum - AARCH64_H0_REGNUM];
2368
2369 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2370 return b_name[regnum - AARCH64_B0_REGNUM];
2371
2372 internal_error (__FILE__, __LINE__,
2373 _("aarch64_pseudo_register_name: bad register number %d"),
2374 regnum);
2375 }
2376
2377 /* Implement the "pseudo_register_type" tdesc_arch_data method. */
2378
2379 static struct type *
2380 aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
2381 {
2382 regnum -= gdbarch_num_regs (gdbarch);
2383
2384 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2385 return aarch64_vnq_type (gdbarch);
2386
2387 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2388 return aarch64_vnd_type (gdbarch);
2389
2390 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2391 return aarch64_vns_type (gdbarch);
2392
2393 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2394 return aarch64_vnh_type (gdbarch);
2395
2396 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2397 return aarch64_vnb_type (gdbarch);
2398
2399 internal_error (__FILE__, __LINE__,
2400 _("aarch64_pseudo_register_type: bad register number %d"),
2401 regnum);
2402 }
2403
2404 /* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2405
2406 static int
2407 aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
2408 struct reggroup *group)
2409 {
2410 regnum -= gdbarch_num_regs (gdbarch);
2411
2412 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2413 return group == all_reggroup || group == vector_reggroup;
2414 else if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2415 return (group == all_reggroup || group == vector_reggroup
2416 || group == float_reggroup);
2417 else if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2418 return (group == all_reggroup || group == vector_reggroup
2419 || group == float_reggroup);
2420 else if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2421 return group == all_reggroup || group == vector_reggroup;
2422 else if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2423 return group == all_reggroup || group == vector_reggroup;
2424
2425 return group == all_reggroup;
2426 }
2427
2428 /* Implement the "pseudo_register_read_value" gdbarch method. */
2429
2430 static struct value *
2431 aarch64_pseudo_read_value (struct gdbarch *gdbarch,
2432 struct regcache *regcache,
2433 int regnum)
2434 {
2435 gdb_byte reg_buf[MAX_REGISTER_SIZE];
2436 struct value *result_value;
2437 gdb_byte *buf;
2438
2439 result_value = allocate_value (register_type (gdbarch, regnum));
2440 VALUE_LVAL (result_value) = lval_register;
2441 VALUE_REGNUM (result_value) = regnum;
2442 buf = value_contents_raw (result_value);
2443
2444 regnum -= gdbarch_num_regs (gdbarch);
2445
2446 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2447 {
2448 enum register_status status;
2449 unsigned v_regnum;
2450
2451 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2452 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2453 if (status != REG_VALID)
2454 mark_value_bytes_unavailable (result_value, 0,
2455 TYPE_LENGTH (value_type (result_value)));
2456 else
2457 memcpy (buf, reg_buf, Q_REGISTER_SIZE);
2458 return result_value;
2459 }
2460
2461 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2462 {
2463 enum register_status status;
2464 unsigned v_regnum;
2465
2466 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2467 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2468 if (status != REG_VALID)
2469 mark_value_bytes_unavailable (result_value, 0,
2470 TYPE_LENGTH (value_type (result_value)));
2471 else
2472 memcpy (buf, reg_buf, D_REGISTER_SIZE);
2473 return result_value;
2474 }
2475
2476 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2477 {
2478 enum register_status status;
2479 unsigned v_regnum;
2480
2481 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2482 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2483 if (status != REG_VALID)
2484 mark_value_bytes_unavailable (result_value, 0,
2485 TYPE_LENGTH (value_type (result_value)));
2486 else
2487 memcpy (buf, reg_buf, S_REGISTER_SIZE);
2488 return result_value;
2489 }
2490
2491 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2492 {
2493 enum register_status status;
2494 unsigned v_regnum;
2495
2496 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2497 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2498 if (status != REG_VALID)
2499 mark_value_bytes_unavailable (result_value, 0,
2500 TYPE_LENGTH (value_type (result_value)));
2501 else
2502 memcpy (buf, reg_buf, H_REGISTER_SIZE);
2503 return result_value;
2504 }
2505
2506 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2507 {
2508 enum register_status status;
2509 unsigned v_regnum;
2510
2511 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2512 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2513 if (status != REG_VALID)
2514 mark_value_bytes_unavailable (result_value, 0,
2515 TYPE_LENGTH (value_type (result_value)));
2516 else
2517 memcpy (buf, reg_buf, B_REGISTER_SIZE);
2518 return result_value;
2519 }
2520
2521 gdb_assert_not_reached ("regnum out of bound");
2522 }
2523
2524 /* Implement the "pseudo_register_write" gdbarch method. */
2525
2526 static void
2527 aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2528 int regnum, const gdb_byte *buf)
2529 {
2530 gdb_byte reg_buf[MAX_REGISTER_SIZE];
2531
2532 /* Ensure the register buffer is zero, we want gdb writes of the
2533 various 'scalar' pseudo registers to behavior like architectural
2534 writes, register width bytes are written the remainder are set to
2535 zero. */
2536 memset (reg_buf, 0, sizeof (reg_buf));
2537
2538 regnum -= gdbarch_num_regs (gdbarch);
2539
2540 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2541 {
2542 /* pseudo Q registers */
2543 unsigned v_regnum;
2544
2545 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2546 memcpy (reg_buf, buf, Q_REGISTER_SIZE);
2547 regcache_raw_write (regcache, v_regnum, reg_buf);
2548 return;
2549 }
2550
2551 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2552 {
2553 /* pseudo D registers */
2554 unsigned v_regnum;
2555
2556 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2557 memcpy (reg_buf, buf, D_REGISTER_SIZE);
2558 regcache_raw_write (regcache, v_regnum, reg_buf);
2559 return;
2560 }
2561
2562 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2563 {
2564 unsigned v_regnum;
2565
2566 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2567 memcpy (reg_buf, buf, S_REGISTER_SIZE);
2568 regcache_raw_write (regcache, v_regnum, reg_buf);
2569 return;
2570 }
2571
2572 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2573 {
2574 /* pseudo H registers */
2575 unsigned v_regnum;
2576
2577 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2578 memcpy (reg_buf, buf, H_REGISTER_SIZE);
2579 regcache_raw_write (regcache, v_regnum, reg_buf);
2580 return;
2581 }
2582
2583 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2584 {
2585 /* pseudo B registers */
2586 unsigned v_regnum;
2587
2588 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2589 memcpy (reg_buf, buf, B_REGISTER_SIZE);
2590 regcache_raw_write (regcache, v_regnum, reg_buf);
2591 return;
2592 }
2593
2594 gdb_assert_not_reached ("regnum out of bound");
2595 }
2596
/* Callback function for user_reg_add.  BATON points at the register
   number of the alias being resolved.  */

static struct value *
value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
{
  return value_of_register (*(const int *) baton, frame);
}
2606 \f
2607
/* Implement the "software_single_step" gdbarch method, needed to
   single step through atomic sequences on AArch64.

   A load-exclusive/store-exclusive sequence must not be interrupted
   by a breakpoint, or the store will always fail.  Detect such a
   sequence at the current PC and, if found, place breakpoints after
   the closing store-exclusive (and at the target of a conditional
   branch inside the sequence, if any).  Returns non-zero when
   breakpoints were placed, zero to fall back to normal stepping.  */

static int
aarch64_software_single_step (struct frame_info *frame)
{
  struct gdbarch *gdbarch = get_frame_arch (frame);
  struct address_space *aspace = get_frame_address_space (frame);
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  const int insn_size = 4;
  const int atomic_sequence_length = 16; /* Instruction sequence length.  */
  CORE_ADDR pc = get_frame_pc (frame);
  CORE_ADDR breaks[2] = { -1, -1 };
  CORE_ADDR loc = pc;
  CORE_ADDR closing_insn = 0;
  uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
						byte_order_for_code);
  int index;
  int insn_count;
  int bc_insn_count = 0; /* Conditional branch instruction count.  */
  int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed).  */

  /* Look for a Load Exclusive instruction which begins the sequence.  */
  if (!decode_masked_match (insn, 0x3fc00000, 0x08400000))
    return 0;

  /* Scan forward a bounded number of instructions for the matching
     Store Exclusive.  */
  for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
    {
      int32_t offset;
      unsigned cond;

      loc += insn_size;
      insn = read_memory_unsigned_integer (loc, insn_size,
					   byte_order_for_code);

      /* Check if the instruction is a conditional branch.  */
      if (decode_bcond (loc, insn, &cond, &offset))
	{
	  /* More than one conditional branch in the sequence is not
	     handled; bail out.  */
	  if (bc_insn_count >= 1)
	    return 0;

	  /* It is, so we'll try to set a breakpoint at the destination.  */
	  breaks[1] = loc + offset;

	  bc_insn_count++;
	  last_breakpoint++;
	}

      /* Look for the Store Exclusive which closes the atomic sequence.  */
      if (decode_masked_match (insn, 0x3fc00000, 0x08000000))
	{
	  closing_insn = loc;
	  break;
	}
    }

  /* We didn't find a closing Store Exclusive instruction, fall back.  */
  if (!closing_insn)
    return 0;

  /* Insert breakpoint after the end of the atomic sequence.  */
  breaks[0] = loc + insn_size;

  /* Check for duplicated breakpoints, and also check that the second
     breakpoint is not within the atomic sequence.  */
  if (last_breakpoint
      && (breaks[1] == breaks[0]
	  || (breaks[1] >= pc && breaks[1] <= closing_insn)))
    last_breakpoint = 0;

  /* Insert the breakpoint at the end of the sequence, and one at the
     destination of the conditional branch, if it exists.  */
  for (index = 0; index <= last_breakpoint; index++)
    insert_single_step_breakpoint (gdbarch, aspace, breaks[index]);

  return 1;
}
2685
2686 /* Initialize the current architecture based on INFO. If possible,
2687 re-use an architecture from ARCHES, which is a list of
2688 architectures already created during this debugging session.
2689
2690 Called e.g. at program startup, when reading a core file, and when
2691 reading a binary file. */
2692
2693 static struct gdbarch *
2694 aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
2695 {
2696 struct gdbarch_tdep *tdep;
2697 struct gdbarch *gdbarch;
2698 struct gdbarch_list *best_arch;
2699 struct tdesc_arch_data *tdesc_data = NULL;
2700 const struct target_desc *tdesc = info.target_desc;
2701 int i;
2702 int have_fpa_registers = 1;
2703 int valid_p = 1;
2704 const struct tdesc_feature *feature;
2705 int num_regs = 0;
2706 int num_pseudo_regs = 0;
2707
2708 /* Ensure we always have a target descriptor. */
2709 if (!tdesc_has_registers (tdesc))
2710 tdesc = tdesc_aarch64;
2711
2712 gdb_assert (tdesc);
2713
2714 feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");
2715
2716 if (feature == NULL)
2717 return NULL;
2718
2719 tdesc_data = tdesc_data_alloc ();
2720
2721 /* Validate the descriptor provides the mandatory core R registers
2722 and allocate their numbers. */
2723 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
2724 valid_p &=
2725 tdesc_numbered_register (feature, tdesc_data, AARCH64_X0_REGNUM + i,
2726 aarch64_r_register_names[i]);
2727
2728 num_regs = AARCH64_X0_REGNUM + i;
2729
2730 /* Look for the V registers. */
2731 feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
2732 if (feature)
2733 {
2734 /* Validate the descriptor provides the mandatory V registers
2735 and allocate their numbers. */
2736 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
2737 valid_p &=
2738 tdesc_numbered_register (feature, tdesc_data, AARCH64_V0_REGNUM + i,
2739 aarch64_v_register_names[i]);
2740
2741 num_regs = AARCH64_V0_REGNUM + i;
2742
2743 num_pseudo_regs += 32; /* add the Qn scalar register pseudos */
2744 num_pseudo_regs += 32; /* add the Dn scalar register pseudos */
2745 num_pseudo_regs += 32; /* add the Sn scalar register pseudos */
2746 num_pseudo_regs += 32; /* add the Hn scalar register pseudos */
2747 num_pseudo_regs += 32; /* add the Bn scalar register pseudos */
2748 }
2749
2750 if (!valid_p)
2751 {
2752 tdesc_data_cleanup (tdesc_data);
2753 return NULL;
2754 }
2755
2756 /* AArch64 code is always little-endian. */
2757 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
2758
2759 /* If there is already a candidate, use it. */
2760 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
2761 best_arch != NULL;
2762 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
2763 {
2764 /* Found a match. */
2765 break;
2766 }
2767
2768 if (best_arch != NULL)
2769 {
2770 if (tdesc_data != NULL)
2771 tdesc_data_cleanup (tdesc_data);
2772 return best_arch->gdbarch;
2773 }
2774
2775 tdep = XCNEW (struct gdbarch_tdep);
2776 gdbarch = gdbarch_alloc (&info, tdep);
2777
2778 /* This should be low enough for everything. */
2779 tdep->lowest_pc = 0x20;
2780 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
2781 tdep->jb_elt_size = 8;
2782
2783 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
2784 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
2785
2786 /* Frame handling. */
2787 set_gdbarch_dummy_id (gdbarch, aarch64_dummy_id);
2788 set_gdbarch_unwind_pc (gdbarch, aarch64_unwind_pc);
2789 set_gdbarch_unwind_sp (gdbarch, aarch64_unwind_sp);
2790
2791 /* Advance PC across function entry code. */
2792 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
2793
2794 /* The stack grows downward. */
2795 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
2796
2797 /* Breakpoint manipulation. */
2798 set_gdbarch_breakpoint_from_pc (gdbarch, aarch64_breakpoint_from_pc);
2799 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
2800 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
2801
2802 /* Information about registers, etc. */
2803 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
2804 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
2805 set_gdbarch_num_regs (gdbarch, num_regs);
2806
2807 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
2808 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
2809 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
2810 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
2811 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
2812 set_tdesc_pseudo_register_reggroup_p (gdbarch,
2813 aarch64_pseudo_register_reggroup_p);
2814
2815 /* ABI */
2816 set_gdbarch_short_bit (gdbarch, 16);
2817 set_gdbarch_int_bit (gdbarch, 32);
2818 set_gdbarch_float_bit (gdbarch, 32);
2819 set_gdbarch_double_bit (gdbarch, 64);
2820 set_gdbarch_long_double_bit (gdbarch, 128);
2821 set_gdbarch_long_bit (gdbarch, 64);
2822 set_gdbarch_long_long_bit (gdbarch, 64);
2823 set_gdbarch_ptr_bit (gdbarch, 64);
2824 set_gdbarch_char_signed (gdbarch, 0);
2825 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
2826 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
2827 set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);
2828
2829 /* Internal <-> external register number maps. */
2830 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
2831
2832 /* Returning results. */
2833 set_gdbarch_return_value (gdbarch, aarch64_return_value);
2834
2835 /* Disassembly. */
2836 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
2837
2838 /* Virtual tables. */
2839 set_gdbarch_vbit_in_delta (gdbarch, 1);
2840
2841 /* Hook in the ABI-specific overrides, if they have been registered. */
2842 info.target_desc = tdesc;
2843 info.tdep_info = (void *) tdesc_data;
2844 gdbarch_init_osabi (info, gdbarch);
2845
2846 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
2847
2848 /* Add some default predicates. */
2849 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
2850 dwarf2_append_unwinders (gdbarch);
2851 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
2852
2853 frame_base_set_default (gdbarch, &aarch64_normal_base);
2854
2855 /* Now we have tuned the configuration, set a few final things,
2856 based on what the OS ABI has told us. */
2857
2858 if (tdep->jb_pc >= 0)
2859 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
2860
2861 set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);
2862
2863 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
2864
2865 /* Add standard register aliases. */
2866 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
2867 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
2868 value_of_aarch64_user_reg,
2869 &aarch64_register_aliases[i].regnum);
2870
2871 return gdbarch;
2872 }
2873
2874 static void
2875 aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
2876 {
2877 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2878
2879 if (tdep == NULL)
2880 return;
2881
2882 fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
2883 paddress (gdbarch, tdep->lowest_pc));
2884 }
2885
/* Suppress warning from -Wmissing-prototypes.  */
extern initialize_file_ftype _initialize_aarch64_tdep;

/* Module initializer: register the AArch64 architecture with the
   gdbarch framework, install the built-in target description, and add
   the "set/show debug aarch64" maintenance commands.  */

void
_initialize_aarch64_tdep (void)
{
  gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
		    aarch64_dump_tdep);

  initialize_tdesc_aarch64 ();

  /* Debug this file's internals.  */
  add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
Set AArch64 debugging."), _("\
Show AArch64 debugging."), _("\
When on, AArch64 specific debugging is enabled."),
			    NULL,
			    show_aarch64_debug,
			    &setdebuglist, &showdebuglist);
}
2906
/* AArch64 process record-replay related structures, defines etc.  */

/* Mask covering the low (X + 1) bits.  NOTE(review): the `1L' operand
   means X must stay below the bit width of long or the shift is
   undefined; all uses below extract fields from 32-bit opcodes.  */
#define submask(x) ((1L << ((x) + 1)) - 1)
/* Bit ST of OBJ.  */
#define bit(obj,st) (((obj) >> (st)) & 1)
/* Bits ST through FN (inclusive) of OBJ, shifted down to bit 0.  */
#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
2912
/* Allocate REGS (an array of LENGTH register numbers) and copy the
   entries from RECORD_BUF into it.  Does nothing when LENGTH is
   zero.  The caller owns (and must free) the allocation.  */
#define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
        do  \
          { \
            unsigned int reg_len = LENGTH; \
            if (reg_len) \
              { \
                REGS = XNEWVEC (uint32_t, reg_len); \
                memcpy(&REGS[0], &RECORD_BUF[0], sizeof(uint32_t)*LENGTH); \
              } \
          } \
        while (0)

/* Allocate MEMS (an array of LENGTH aarch64_mem_r records) and copy
   the entries from RECORD_BUF into it.  Does nothing when LENGTH is
   zero.  NOTE(review): the copy targets &MEMS->len, i.e. the first
   member of the first record — equivalent to copying to MEMS itself.
   The caller owns (and must free) the allocation.  */
#define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
        do  \
          { \
            unsigned int mem_len = LENGTH; \
            if (mem_len) \
            { \
              MEMS =  XNEWVEC (struct aarch64_mem_r, mem_len);  \
              memcpy(&MEMS->len, &RECORD_BUF[0], \
                     sizeof(struct aarch64_mem_r) * LENGTH); \
            } \
          } \
          while (0)
2937
/* AArch64 record/replay structures and enumerations.  */

/* One memory region touched by a recorded instruction.  */

struct aarch64_mem_r
{
  uint64_t len;    /* Record length.  */
  uint64_t addr;   /* Memory address.  */
};

/* Result codes returned by the per-instruction record handlers.  */

enum aarch64_record_result
{
  AARCH64_RECORD_SUCCESS,
  AARCH64_RECORD_FAILURE,
  AARCH64_RECORD_UNSUPPORTED,
  AARCH64_RECORD_UNKNOWN
};

/* Decode state threaded through the record handlers: the instruction
   being recorded plus the register/memory side effects collected so
   far.  */

typedef struct insn_decode_record_t
{
  struct gdbarch *gdbarch;
  struct regcache *regcache;
  CORE_ADDR this_addr;                 /* Address of insn to be recorded.  */
  uint32_t aarch64_insn;               /* Insn to be recorded.  */
  uint32_t mem_rec_count;              /* Count of memory records.  */
  uint32_t reg_rec_count;              /* Count of register records.  */
  uint32_t *aarch64_regs;              /* Registers to be recorded.  */
  struct aarch64_mem_r *aarch64_mems;  /* Memory locations to be recorded.  */
} insn_decode_record;
2965
/* Record handler for data processing - register instructions.

   Decodes the instruction held in AARCH64_INSN_R and records the
   destination register (and CPSR, for flag-setting variants) so the
   write can be undone on replay.  Returns AARCH64_RECORD_SUCCESS or
   AARCH64_RECORD_UNKNOWN for unrecognized encodings.  */

static unsigned int
aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
{
  uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
  uint32_t record_buf[4];

  reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);

  /* Bit 28 splits the two major data-processing (register) groups.  */
  if (!bit (aarch64_insn_r->aarch64_insn, 28))
    {
      uint8_t setflags;

      /* Logical (shifted register).  */
      if (insn_bits24_27 == 0x0a)
	setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
      /* Add/subtract.  */
      else if (insn_bits24_27 == 0x0b)
	setflags = bit (aarch64_insn_r->aarch64_insn, 29);
      else
	return AARCH64_RECORD_UNKNOWN;

      record_buf[0] = reg_rd;
      aarch64_insn_r->reg_rec_count = 1;
      if (setflags)
	record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
    }
  else
    {
      if (insn_bits24_27 == 0x0b)
	{
	  /* Data-processing (3 source).  */
	  record_buf[0] = reg_rd;
	  aarch64_insn_r->reg_rec_count = 1;
	}
      else if (insn_bits24_27 == 0x0a)
	{
	  if (insn_bits21_23 == 0x00)
	    {
	      /* Add/subtract (with carry).  */
	      record_buf[0] = reg_rd;
	      aarch64_insn_r->reg_rec_count = 1;
	      if (bit (aarch64_insn_r->aarch64_insn, 29))
		{
		  record_buf[1] = AARCH64_CPSR_REGNUM;
		  aarch64_insn_r->reg_rec_count = 2;
		}
	    }
	  else if (insn_bits21_23 == 0x02)
	    {
	      /* Conditional compare (register) and conditional compare
		 (immediate) instructions.  */
	      record_buf[0] = AARCH64_CPSR_REGNUM;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	  else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
	    {
	      /* Conditional select.  */
	      /* Data-processing (2 source).  */
	      /* Data-processing (1 source).  */
	      record_buf[0] = reg_rd;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	  else
	    return AARCH64_RECORD_UNKNOWN;
	}
    }

  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}
3041
3042 /* Record handler for data processing - immediate instructions. */
3043
3044 static unsigned int
3045 aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
3046 {
3047 uint8_t reg_rd, insn_bit28, insn_bit23, insn_bits24_27, setflags;
3048 uint32_t record_buf[4];
3049
3050 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3051 insn_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
3052 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3053 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3054
3055 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
3056 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
3057 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
3058 {
3059 record_buf[0] = reg_rd;
3060 aarch64_insn_r->reg_rec_count = 1;
3061 }
3062 else if (insn_bits24_27 == 0x01)
3063 {
3064 /* Add/Subtract (immediate). */
3065 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3066 record_buf[0] = reg_rd;
3067 aarch64_insn_r->reg_rec_count = 1;
3068 if (setflags)
3069 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3070 }
3071 else if (insn_bits24_27 == 0x02 && !insn_bit23)
3072 {
3073 /* Logical (immediate). */
3074 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
3075 record_buf[0] = reg_rd;
3076 aarch64_insn_r->reg_rec_count = 1;
3077 if (setflags)
3078 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3079 }
3080 else
3081 return AARCH64_RECORD_UNKNOWN;
3082
3083 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3084 record_buf);
3085 return AARCH64_RECORD_SUCCESS;
3086 }
3087
/* Record handler for branch, exception generation and system
   instructions.

   Records the registers modified by the instruction in AARCH64_INSN_R
   (PC and possibly LR for branches, CPSR/Rt for system instructions);
   SVC instructions are delegated to the OS ABI's syscall-record
   hook.  */

static unsigned int
aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
  uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
  uint32_t record_buf[4];

  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
  insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);

  if (insn_bits28_31 == 0x0d)
    {
      /* Exception generation instructions. */
      if (insn_bits24_27 == 0x04)
	{
	  /* SVC: hand off to the OS-specific syscall recorder.  */
	  if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
	      && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
	      && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
	    {
	      ULONGEST svc_number;

	      /* Raw register 8 — presumably x8, the Linux AArch64
		 syscall-number register; confirm against the ABI.  */
	      regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
					  &svc_number);
	      return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
						   svc_number);
	    }
	  else
	    return AARCH64_RECORD_UNSUPPORTED;
	}
      /* System instructions. */
      else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
	{
	  uint32_t reg_rt, reg_crn;

	  reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
	  reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);

	  /* Record rt in case of sysl and mrs instructions.  */
	  if (bit (aarch64_insn_r->aarch64_insn, 21))
	    {
	      record_buf[0] = reg_rt;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	  /* Record cpsr for hint and msr(immediate) instructions.  */
	  else if (reg_crn == 0x02 || reg_crn == 0x04)
	    {
	      record_buf[0] = AARCH64_CPSR_REGNUM;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	}
      /* Unconditional branch (register).  */
      else if((insn_bits24_27 & 0x0e) == 0x06)
	{
	  record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
	  /* BLR also clobbers the link register.  */
	  if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
	    record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
	}
      else
	return AARCH64_RECORD_UNKNOWN;
    }
  /* Unconditional branch (immediate).  */
  else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
    {
      record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
      /* BL (bit 31 set) also clobbers the link register.  */
      if (bit (aarch64_insn_r->aarch64_insn, 31))
	record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
    }
  else
    /* Compare & branch (immediate), Test & branch (immediate) and
       Conditional branch (immediate).  */
    record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;

  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}
3167
3168 /* Record handler for advanced SIMD load and store instructions. */
3169
3170 static unsigned int
3171 aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
3172 {
3173 CORE_ADDR address;
3174 uint64_t addr_offset = 0;
3175 uint32_t record_buf[24];
3176 uint64_t record_buf_mem[24];
3177 uint32_t reg_rn, reg_rt;
3178 uint32_t reg_index = 0, mem_index = 0;
3179 uint8_t opcode_bits, size_bits;
3180
3181 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3182 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3183 size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3184 opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3185 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);
3186
3187 if (record_debug)
3188 {
3189 fprintf_unfiltered (gdb_stdlog,
3190 "Process record: Advanced SIMD load/store\n");
3191 }
3192
3193 /* Load/store single structure. */
3194 if (bit (aarch64_insn_r->aarch64_insn, 24))
3195 {
3196 uint8_t sindex, scale, selem, esize, replicate = 0;
3197 scale = opcode_bits >> 2;
3198 selem = ((opcode_bits & 0x02) |
3199 bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
3200 switch (scale)
3201 {
3202 case 1:
3203 if (size_bits & 0x01)
3204 return AARCH64_RECORD_UNKNOWN;
3205 break;
3206 case 2:
3207 if ((size_bits >> 1) & 0x01)
3208 return AARCH64_RECORD_UNKNOWN;
3209 if (size_bits & 0x01)
3210 {
3211 if (!((opcode_bits >> 1) & 0x01))
3212 scale = 3;
3213 else
3214 return AARCH64_RECORD_UNKNOWN;
3215 }
3216 break;
3217 case 3:
3218 if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
3219 {
3220 scale = size_bits;
3221 replicate = 1;
3222 break;
3223 }
3224 else
3225 return AARCH64_RECORD_UNKNOWN;
3226 default:
3227 break;
3228 }
3229 esize = 8 << scale;
3230 if (replicate)
3231 for (sindex = 0; sindex < selem; sindex++)
3232 {
3233 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3234 reg_rt = (reg_rt + 1) % 32;
3235 }
3236 else
3237 {
3238 for (sindex = 0; sindex < selem; sindex++)
3239 if (bit (aarch64_insn_r->aarch64_insn, 22))
3240 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3241 else
3242 {
3243 record_buf_mem[mem_index++] = esize / 8;
3244 record_buf_mem[mem_index++] = address + addr_offset;
3245 }
3246 addr_offset = addr_offset + (esize / 8);
3247 reg_rt = (reg_rt + 1) % 32;
3248 }
3249 }
3250 /* Load/store multiple structure. */
3251 else
3252 {
3253 uint8_t selem, esize, rpt, elements;
3254 uint8_t eindex, rindex;
3255
3256 esize = 8 << size_bits;
3257 if (bit (aarch64_insn_r->aarch64_insn, 30))
3258 elements = 128 / esize;
3259 else
3260 elements = 64 / esize;
3261
3262 switch (opcode_bits)
3263 {
3264 /*LD/ST4 (4 Registers). */
3265 case 0:
3266 rpt = 1;
3267 selem = 4;
3268 break;
3269 /*LD/ST1 (4 Registers). */
3270 case 2:
3271 rpt = 4;
3272 selem = 1;
3273 break;
3274 /*LD/ST3 (3 Registers). */
3275 case 4:
3276 rpt = 1;
3277 selem = 3;
3278 break;
3279 /*LD/ST1 (3 Registers). */
3280 case 6:
3281 rpt = 3;
3282 selem = 1;
3283 break;
3284 /*LD/ST1 (1 Register). */
3285 case 7:
3286 rpt = 1;
3287 selem = 1;
3288 break;
3289 /*LD/ST2 (2 Registers). */
3290 case 8:
3291 rpt = 1;
3292 selem = 2;
3293 break;
3294 /*LD/ST1 (2 Registers). */
3295 case 10:
3296 rpt = 2;
3297 selem = 1;
3298 break;
3299 default:
3300 return AARCH64_RECORD_UNSUPPORTED;
3301 break;
3302 }
3303 for (rindex = 0; rindex < rpt; rindex++)
3304 for (eindex = 0; eindex < elements; eindex++)
3305 {
3306 uint8_t reg_tt, sindex;
3307 reg_tt = (reg_rt + rindex) % 32;
3308 for (sindex = 0; sindex < selem; sindex++)
3309 {
3310 if (bit (aarch64_insn_r->aarch64_insn, 22))
3311 record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
3312 else
3313 {
3314 record_buf_mem[mem_index++] = esize / 8;
3315 record_buf_mem[mem_index++] = address + addr_offset;
3316 }
3317 addr_offset = addr_offset + (esize / 8);
3318 reg_tt = (reg_tt + 1) % 32;
3319 }
3320 }
3321 }
3322
3323 if (bit (aarch64_insn_r->aarch64_insn, 23))
3324 record_buf[reg_index++] = reg_rn;
3325
3326 aarch64_insn_r->reg_rec_count = reg_index;
3327 aarch64_insn_r->mem_rec_count = mem_index / 2;
3328 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3329 record_buf_mem);
3330 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3331 record_buf);
3332 return AARCH64_RECORD_SUCCESS;
3333 }
3334
/* Record handler for load and store instructions.

   Decodes the load/store instruction class (exclusive, literal, pair,
   register offset, immediate, unprivileged) and records either the
   destination register(s) (loads) or the affected memory range(s)
   (stores), plus the base register for writeback addressing modes.
   Advanced SIMD load/store encodings are delegated to
   aarch64_record_asimd_load_store.  Returns an AARCH64_RECORD_* code.  */

static unsigned int
aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
{
  uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
  uint8_t insn_bit23, insn_bit21;
  uint8_t opc, size_bits, ld_flag, vector_flag;
  uint32_t reg_rn, reg_rt, reg_rt2;
  uint64_t datasize, offset;
  uint32_t record_buf[8];
  uint64_t record_buf_mem[8];
  CORE_ADDR address;

  /* Common fields shared by most load/store encodings.  NOTE(review):
     ld_flag (bit 22) and size_bits (bits 30-31) are re-derived below for
     the encoding classes where these bits mean something else.  */
  insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
  insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
  insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
  ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
  vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
  reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
  reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
  size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);

  /* Load/store exclusive.  */
  if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
    {
      if (record_debug)
	{
	  fprintf_unfiltered (gdb_stdlog,
			      "Process record: load/store exclusive\n");
	}

      if (ld_flag)
	{
	  /* Load: Rt is written; Rt2 as well for the pair variant
	     (bit 21).  */
	  record_buf[0] = reg_rt;
	  aarch64_insn_r->reg_rec_count = 1;
	  if (insn_bit21)
	    {
	      record_buf[1] = reg_rt2;
	      aarch64_insn_r->reg_rec_count = 2;
	    }
	}
      else
	{
	  /* Store: the pair variant (bit 21) writes twice the data size.  */
	  if (insn_bit21)
	    datasize = (8 << size_bits) * 2;
	  else
	    datasize = (8 << size_bits);
	  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
				      &address);
	  record_buf_mem[0] = datasize / 8;
	  record_buf_mem[1] = address;
	  aarch64_insn_r->mem_rec_count = 1;
	  if (!insn_bit23)
	    {
	      /* Save register rs.  */
	      record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	}
    }
  /* Load register (literal) instructions decoding.  */
  else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
    {
      if (record_debug)
	{
	  fprintf_unfiltered (gdb_stdlog,
			      "Process record: load register (literal)\n");
	}
      /* PC-relative loads only write the destination register.  */
      if (vector_flag)
	record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
      else
	record_buf[0] = reg_rt;
      aarch64_insn_r->reg_rec_count = 1;
    }
  /* All types of load/store pair instructions decoding.  */
  else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
    {
      if (record_debug)
	{
	  fprintf_unfiltered (gdb_stdlog,
			      "Process record: load/store pair\n");
	}

      if (ld_flag)
	{
	  record_buf[0] = reg_rt;
	  record_buf[1] = reg_rt2;
	  if (vector_flag)
	    {
	      record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
	      record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
	    }
	  else
	    {
	      record_buf[0] = reg_rt;
	      record_buf[1] = reg_rt2;
	    }
	  aarch64_insn_r->reg_rec_count = 2;
	}
      else
	{
	  /* Store pair: compute the effective address from the signed
	     7-bit immediate scaled by the access size.  */
	  uint16_t imm7_off;
	  imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
	  /* For GPR pairs only the high opc bit selects 32/64-bit.  */
	  if (!vector_flag)
	    size_bits = size_bits >> 1;
	  datasize = 8 << (2 + size_bits);
	  /* Sign-extend imm7 by hand (two's complement).  */
	  offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
	  offset = offset << (2 + size_bits);
	  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
				      &address);
	  /* Apply the offset except for post-indexed addressing, where
	     the store goes to the unmodified base address.  */
	  if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
	    {
	      if (imm7_off & 0x40)
		address = address - offset;
	      else
		address = address + offset;
	    }

	  /* Two memory records, one per register of the pair.  */
	  record_buf_mem[0] = datasize / 8;
	  record_buf_mem[1] = address;
	  record_buf_mem[2] = datasize / 8;
	  record_buf_mem[3] = address + (datasize / 8);
	  aarch64_insn_r->mem_rec_count = 2;
	}
      /* Pre/post-indexed forms also write back the base register.  */
      if (bit (aarch64_insn_r->aarch64_insn, 23))
	record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
    }
  /* Load/store register (unsigned immediate) instructions.  */
  else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
    {
      /* opc: low bit distinguishes load from store; opc >= 2 encodes
	 sign-extending loads (loads unless size == 3, which is PRFM or
	 an unallocated encoding -- treated as unknown here).  */
      opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
      if (!(opc >> 1))
	if (opc & 0x01)
	  ld_flag = 0x01;
	else
	  ld_flag = 0x0;
      else
	if (size_bits != 0x03)
	  ld_flag = 0x01;
	else
	  return AARCH64_RECORD_UNKNOWN;

      if (record_debug)
	{
	  fprintf_unfiltered (gdb_stdlog,
			      "Process record: load/store (unsigned immediate):"
			      " size %x V %d opc %x\n", size_bits, vector_flag,
			      opc);
	}

      if (!ld_flag)
	{
	  /* Store: address = [Rn] + (imm12 scaled by access size).  */
	  offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
	  datasize = 8 << size_bits;
	  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
				      &address);
	  offset = offset << size_bits;
	  address = address + offset;

	  record_buf_mem[0] = datasize >> 3;
	  record_buf_mem[1] = address;
	  aarch64_insn_r->mem_rec_count = 1;
	}
      else
	{
	  if (vector_flag)
	    record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
	  else
	    record_buf[0] = reg_rt;
	  aarch64_insn_r->reg_rec_count = 1;
	}
    }
  /* Load/store register (register offset) instructions.  */
  else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
	   && insn_bits10_11 == 0x02 && insn_bit21)
    {
      if (record_debug)
	{
	  fprintf_unfiltered (gdb_stdlog,
			      "Process record: load/store (register offset)\n");
	}
      /* Same opc decoding as the unsigned-immediate class above.  */
      opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
      if (!(opc >> 1))
	if (opc & 0x01)
	  ld_flag = 0x01;
	else
	  ld_flag = 0x0;
      else
	if (size_bits != 0x03)
	  ld_flag = 0x01;
	else
	  return AARCH64_RECORD_UNKNOWN;

      if (!ld_flag)
	{
	  /* Store: address = [Rn] + [Rm], optionally scaled (bit 12).
	     NOTE(review): the Rm value is used directly; extend-type
	     variants (UXTW/SXTW etc.) are not modelled here.  */
	  uint64_t reg_rm_val;
	  regcache_raw_read_unsigned (aarch64_insn_r->regcache,
		     bits (aarch64_insn_r->aarch64_insn, 16, 20), &reg_rm_val);
	  if (bit (aarch64_insn_r->aarch64_insn, 12))
	    offset = reg_rm_val << size_bits;
	  else
	    offset = reg_rm_val;
	  datasize = 8 << size_bits;
	  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
				      &address);
	  address = address + offset;
	  record_buf_mem[0] = datasize >> 3;
	  record_buf_mem[1] = address;
	  aarch64_insn_r->mem_rec_count = 1;
	}
      else
	{
	  if (vector_flag)
	    record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
	  else
	    record_buf[0] = reg_rt;
	  aarch64_insn_r->reg_rec_count = 1;
	}
    }
  /* Load/store register (immediate and unprivileged) instructions.  */
  else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
	   && !insn_bit21)
    {
      if (record_debug)
	{
	  fprintf_unfiltered (gdb_stdlog,
			      "Process record: load/store (immediate and unprivileged)\n");
	}
      /* Same opc decoding as the unsigned-immediate class above.  */
      opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
      if (!(opc >> 1))
	if (opc & 0x01)
	  ld_flag = 0x01;
	else
	  ld_flag = 0x0;
      else
	if (size_bits != 0x03)
	  ld_flag = 0x01;
	else
	  return AARCH64_RECORD_UNKNOWN;

      if (!ld_flag)
	{
	  /* Store: sign-extend the 9-bit immediate by hand.  */
	  uint16_t imm9_off;
	  imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
	  offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
	  datasize = 8 << size_bits;
	  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
				      &address);
	  /* Apply the offset except for post-indexed addressing
	     (bits 10-11 == 01), which stores to the unmodified base.  */
	  if (insn_bits10_11 != 0x01)
	    {
	      if (imm9_off & 0x0100)
		address = address - offset;
	      else
		address = address + offset;
	    }
	  record_buf_mem[0] = datasize >> 3;
	  record_buf_mem[1] = address;
	  aarch64_insn_r->mem_rec_count = 1;
	}
      else
	{
	  if (vector_flag)
	    record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
	  else
	    record_buf[0] = reg_rt;
	  aarch64_insn_r->reg_rec_count = 1;
	}
      /* Pre-indexed (11) and post-indexed (01) forms write back Rn.  */
      if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
	record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
    }
  /* Advanced SIMD load/store instructions.  */
  else
    return aarch64_record_asimd_load_store (aarch64_insn_r);

  MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
	     record_buf_mem);
  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}
3617
3618 /* Record handler for data processing SIMD and floating point instructions. */
3619
3620 static unsigned int
3621 aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
3622 {
3623 uint8_t insn_bit21, opcode, rmode, reg_rd;
3624 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
3625 uint8_t insn_bits11_14;
3626 uint32_t record_buf[2];
3627
3628 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3629 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3630 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3631 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3632 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
3633 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
3634 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
3635 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3636 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3637
3638 if (record_debug)
3639 {
3640 fprintf_unfiltered (gdb_stdlog,
3641 "Process record: data processing SIMD/FP: ");
3642 }
3643
3644 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
3645 {
3646 /* Floating point - fixed point conversion instructions. */
3647 if (!insn_bit21)
3648 {
3649 if (record_debug)
3650 fprintf_unfiltered (gdb_stdlog, "FP - fixed point conversion");
3651
3652 if ((opcode >> 1) == 0x0 && rmode == 0x03)
3653 record_buf[0] = reg_rd;
3654 else
3655 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3656 }
3657 /* Floating point - conditional compare instructions. */
3658 else if (insn_bits10_11 == 0x01)
3659 {
3660 if (record_debug)
3661 fprintf_unfiltered (gdb_stdlog, "FP - conditional compare");
3662
3663 record_buf[0] = AARCH64_CPSR_REGNUM;
3664 }
3665 /* Floating point - data processing (2-source) and
3666 conditional select instructions. */
3667 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
3668 {
3669 if (record_debug)
3670 fprintf_unfiltered (gdb_stdlog, "FP - DP (2-source)");
3671
3672 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3673 }
3674 else if (insn_bits10_11 == 0x00)
3675 {
3676 /* Floating point - immediate instructions. */
3677 if ((insn_bits12_15 & 0x01) == 0x01
3678 || (insn_bits12_15 & 0x07) == 0x04)
3679 {
3680 if (record_debug)
3681 fprintf_unfiltered (gdb_stdlog, "FP - immediate");
3682 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3683 }
3684 /* Floating point - compare instructions. */
3685 else if ((insn_bits12_15 & 0x03) == 0x02)
3686 {
3687 if (record_debug)
3688 fprintf_unfiltered (gdb_stdlog, "FP - immediate");
3689 record_buf[0] = AARCH64_CPSR_REGNUM;
3690 }
3691 /* Floating point - integer conversions instructions. */
3692 else if (insn_bits12_15 == 0x00)
3693 {
3694 /* Convert float to integer instruction. */
3695 if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
3696 {
3697 if (record_debug)
3698 fprintf_unfiltered (gdb_stdlog, "float to int conversion");
3699
3700 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3701 }
3702 /* Convert integer to float instruction. */
3703 else if ((opcode >> 1) == 0x01 && !rmode)
3704 {
3705 if (record_debug)
3706 fprintf_unfiltered (gdb_stdlog, "int to float conversion");
3707
3708 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3709 }
3710 /* Move float to integer instruction. */
3711 else if ((opcode >> 1) == 0x03)
3712 {
3713 if (record_debug)
3714 fprintf_unfiltered (gdb_stdlog, "move float to int");
3715
3716 if (!(opcode & 0x01))
3717 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3718 else
3719 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3720 }
3721 else
3722 return AARCH64_RECORD_UNKNOWN;
3723 }
3724 else
3725 return AARCH64_RECORD_UNKNOWN;
3726 }
3727 else
3728 return AARCH64_RECORD_UNKNOWN;
3729 }
3730 else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
3731 {
3732 if (record_debug)
3733 fprintf_unfiltered (gdb_stdlog, "SIMD copy");
3734
3735 /* Advanced SIMD copy instructions. */
3736 if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
3737 && !bit (aarch64_insn_r->aarch64_insn, 15)
3738 && bit (aarch64_insn_r->aarch64_insn, 10))
3739 {
3740 if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
3741 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3742 else
3743 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3744 }
3745 else
3746 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3747 }
3748 /* All remaining floating point or advanced SIMD instructions. */
3749 else
3750 {
3751 if (record_debug)
3752 fprintf_unfiltered (gdb_stdlog, "all remain");
3753
3754 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3755 }
3756
3757 if (record_debug)
3758 fprintf_unfiltered (gdb_stdlog, "\n");
3759
3760 aarch64_insn_r->reg_rec_count++;
3761 gdb_assert (aarch64_insn_r->reg_rec_count == 1);
3762 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3763 record_buf);
3764 return AARCH64_RECORD_SUCCESS;
3765 }
3766
3767 /* Decodes insns type and invokes its record handler. */
3768
3769 static unsigned int
3770 aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
3771 {
3772 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
3773
3774 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
3775 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
3776 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
3777 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
3778
3779 /* Data processing - immediate instructions. */
3780 if (!ins_bit26 && !ins_bit27 && ins_bit28)
3781 return aarch64_record_data_proc_imm (aarch64_insn_r);
3782
3783 /* Branch, exception generation and system instructions. */
3784 if (ins_bit26 && !ins_bit27 && ins_bit28)
3785 return aarch64_record_branch_except_sys (aarch64_insn_r);
3786
3787 /* Load and store instructions. */
3788 if (!ins_bit25 && ins_bit27)
3789 return aarch64_record_load_store (aarch64_insn_r);
3790
3791 /* Data processing - register instructions. */
3792 if (ins_bit25 && !ins_bit26 && ins_bit27)
3793 return aarch64_record_data_proc_reg (aarch64_insn_r);
3794
3795 /* Data processing - SIMD and floating point instructions. */
3796 if (ins_bit25 && ins_bit26 && ins_bit27)
3797 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
3798
3799 return AARCH64_RECORD_UNSUPPORTED;
3800 }
3801
3802 /* Cleans up local record registers and memory allocations. */
3803
3804 static void
3805 deallocate_reg_mem (insn_decode_record *record)
3806 {
3807 xfree (record->aarch64_regs);
3808 xfree (record->aarch64_mems);
3809 }
3810
3811 /* Parse the current instruction and record the values of the registers and
3812 memory that will be changed in current instruction to record_arch_list
3813 return -1 if something is wrong. */
3814
3815 int
3816 aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
3817 CORE_ADDR insn_addr)
3818 {
3819 uint32_t rec_no = 0;
3820 uint8_t insn_size = 4;
3821 uint32_t ret = 0;
3822 ULONGEST t_bit = 0, insn_id = 0;
3823 gdb_byte buf[insn_size];
3824 insn_decode_record aarch64_record;
3825
3826 memset (&buf[0], 0, insn_size);
3827 memset (&aarch64_record, 0, sizeof (insn_decode_record));
3828 target_read_memory (insn_addr, &buf[0], insn_size);
3829 aarch64_record.aarch64_insn
3830 = (uint32_t) extract_unsigned_integer (&buf[0],
3831 insn_size,
3832 gdbarch_byte_order (gdbarch));
3833 aarch64_record.regcache = regcache;
3834 aarch64_record.this_addr = insn_addr;
3835 aarch64_record.gdbarch = gdbarch;
3836
3837 ret = aarch64_record_decode_insn_handler (&aarch64_record);
3838 if (ret == AARCH64_RECORD_UNSUPPORTED)
3839 {
3840 printf_unfiltered (_("Process record does not support instruction "
3841 "0x%0x at address %s.\n"),
3842 aarch64_record.aarch64_insn,
3843 paddress (gdbarch, insn_addr));
3844 ret = -1;
3845 }
3846
3847 if (0 == ret)
3848 {
3849 /* Record registers. */
3850 record_full_arch_list_add_reg (aarch64_record.regcache,
3851 AARCH64_PC_REGNUM);
3852 /* Always record register CPSR. */
3853 record_full_arch_list_add_reg (aarch64_record.regcache,
3854 AARCH64_CPSR_REGNUM);
3855 if (aarch64_record.aarch64_regs)
3856 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
3857 if (record_full_arch_list_add_reg (aarch64_record.regcache,
3858 aarch64_record.aarch64_regs[rec_no]))
3859 ret = -1;
3860
3861 /* Record memories. */
3862 if (aarch64_record.aarch64_mems)
3863 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
3864 if (record_full_arch_list_add_mem
3865 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
3866 aarch64_record.aarch64_mems[rec_no].len))
3867 ret = -1;
3868
3869 if (record_full_arch_list_add_end ())
3870 ret = -1;
3871 }
3872
3873 deallocate_reg_mem (&aarch64_record);
3874 return ret;
3875 }
This page took 0.231538 seconds and 4 git commands to generate.