C++: dlsym casts in gdb/linux-thread-db.c and gdb/gdbserver/thread-db.c
[deliverable/binutils-gdb.git] / gdb / aarch64-tdep.c
1 /* Common target dependent code for GDB on AArch64 systems.
2
3 Copyright (C) 2009-2015 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21 #include "defs.h"
22
23 #include "frame.h"
24 #include "inferior.h"
25 #include "gdbcmd.h"
26 #include "gdbcore.h"
27 #include "dis-asm.h"
28 #include "regcache.h"
29 #include "reggroups.h"
30 #include "doublest.h"
31 #include "value.h"
32 #include "arch-utils.h"
33 #include "osabi.h"
34 #include "frame-unwind.h"
35 #include "frame-base.h"
36 #include "trad-frame.h"
37 #include "objfiles.h"
38 #include "dwarf2-frame.h"
39 #include "gdbtypes.h"
40 #include "prologue-value.h"
41 #include "target-descriptions.h"
42 #include "user-regs.h"
43 #include "language.h"
44 #include "infcall.h"
45 #include "ax.h"
46 #include "ax-gdb.h"
47
48 #include "aarch64-tdep.h"
49
50 #include "elf-bfd.h"
51 #include "elf/aarch64.h"
52
53 #include "vec.h"
54
55 #include "record.h"
56 #include "record-full.h"
57
58 #include "features/aarch64.c"
59
/* Pseudo register base numbers.  Each of the five banks below holds
   32 consecutive pseudo registers, as shown by the "+ 32" stride.  */
#define AARCH64_Q0_REGNUM 0
#define AARCH64_D0_REGNUM (AARCH64_Q0_REGNUM + 32)
#define AARCH64_S0_REGNUM (AARCH64_D0_REGNUM + 32)
#define AARCH64_H0_REGNUM (AARCH64_S0_REGNUM + 32)
#define AARCH64_B0_REGNUM (AARCH64_H0_REGNUM + 32)
66
/* The standard register names, and all the valid aliases for them.
   Each entry maps a user-visible alias NAME onto the GDB register
   number REGNUM it stands for.  */
static const struct
{
  const char *const name;	/* Alias as typed by the user.  */
  int regnum;			/* Register number it resolves to.  */
} aarch64_register_aliases[] =
{
  /* 64-bit register names.  */
  {"fp", AARCH64_FP_REGNUM},
  {"lr", AARCH64_LR_REGNUM},
  {"sp", AARCH64_SP_REGNUM},

  /* 32-bit register names.  */
  {"w0", AARCH64_X0_REGNUM + 0},
  {"w1", AARCH64_X0_REGNUM + 1},
  {"w2", AARCH64_X0_REGNUM + 2},
  {"w3", AARCH64_X0_REGNUM + 3},
  {"w4", AARCH64_X0_REGNUM + 4},
  {"w5", AARCH64_X0_REGNUM + 5},
  {"w6", AARCH64_X0_REGNUM + 6},
  {"w7", AARCH64_X0_REGNUM + 7},
  {"w8", AARCH64_X0_REGNUM + 8},
  {"w9", AARCH64_X0_REGNUM + 9},
  {"w10", AARCH64_X0_REGNUM + 10},
  {"w11", AARCH64_X0_REGNUM + 11},
  {"w12", AARCH64_X0_REGNUM + 12},
  {"w13", AARCH64_X0_REGNUM + 13},
  {"w14", AARCH64_X0_REGNUM + 14},
  {"w15", AARCH64_X0_REGNUM + 15},
  {"w16", AARCH64_X0_REGNUM + 16},
  {"w17", AARCH64_X0_REGNUM + 17},
  {"w18", AARCH64_X0_REGNUM + 18},
  {"w19", AARCH64_X0_REGNUM + 19},
  {"w20", AARCH64_X0_REGNUM + 20},
  {"w21", AARCH64_X0_REGNUM + 21},
  {"w22", AARCH64_X0_REGNUM + 22},
  {"w23", AARCH64_X0_REGNUM + 23},
  {"w24", AARCH64_X0_REGNUM + 24},
  {"w25", AARCH64_X0_REGNUM + 25},
  {"w26", AARCH64_X0_REGNUM + 26},
  {"w27", AARCH64_X0_REGNUM + 27},
  {"w28", AARCH64_X0_REGNUM + 28},
  {"w29", AARCH64_X0_REGNUM + 29},
  {"w30", AARCH64_X0_REGNUM + 30},

  /* Specials: ip0/ip1 alias x16/x17.  */
  {"ip0", AARCH64_X0_REGNUM + 16},
  {"ip1", AARCH64_X0_REGNUM + 17}
};
116
/* The required core 'R' registers.  The table ends with the two
   non-'x' core registers, "pc" and "cpsr".  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM!  */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  "pc", "cpsr"
};
132
/* The FP/SIMD 'V' registers.  The table ends with the two FP/SIMD
   status registers, "fpsr" and "fpcr".  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM!  */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  "fpsr",
  "fpcr"
};
149
/* AArch64 prologue cache structure.  Filled in by the prologue
   analyzer and consumed by the frame unwinders below.  */
struct aarch64_prologue_cache
{
  /* The program counter at the start of the function.  It is used to
     identify this frame as a prologue frame.  */
  CORE_ADDR func;

  /* The program counter at the time this frame was created; i.e. where
     this function was called from.  It is used to identify this frame as a
     stub frame.  */
  CORE_ADDR prev_pc;

  /* The stack pointer at the time this frame was created; i.e. the
     caller's stack pointer when this function was called.  It is used
     to identify this frame.  */
  CORE_ADDR prev_sp;

  /* Is the target available to read from?  */
  int available_p;

  /* The frame base for this frame is just prev_sp - frame size.
     FRAMESIZE is the distance from the frame pointer to the
     initial stack pointer.  */
  int framesize;

  /* The register used to hold the frame pointer for this frame.
     -1 means no frame could be identified.  */
  int framereg;

  /* Saved register offsets.  Initially these are offsets relative to
     the frame; they are turned into absolute addresses once PREV_SP
     is known.  */
  struct trad_frame_saved_reg *saved_regs;
};
181
/* Toggle this file's internal debugging dump.  */
static int aarch64_debug;

/* "show" callback for the AARCH64_DEBUG setting: report whether the
   AArch64-specific debugging output is enabled.  VALUE is the printable
   form of the current setting.  */

static void
show_aarch64_debug (struct ui_file *file, int from_tty,
		    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
}
191
/* Extract a signed value from a bit field within an instruction
   encoding.

   INSN is the instruction opcode.

   WIDTH specifies the width of the bit field to extract (in bits);
   it must satisfy 1 <= WIDTH <= 31.

   OFFSET specifies the least significant bit of the field where bits
   are numbered zero counting from least to most significant.  */

static int32_t
extract_signed_bitfield (uint32_t insn, unsigned width, unsigned offset)
{
  /* The previous implementation left-shifted a signed 32-bit value to
     discard the bits above the field; shifting bits into or past the
     sign bit of a signed integer is undefined behaviour in C.  Mask
     out the field and sign-extend it with well-defined 64-bit
     arithmetic instead.  */
  uint32_t mask = ((uint32_t) 1 << width) - 1;
  uint32_t field = (insn >> offset) & mask;

  if (field & ((uint32_t) 1 << (width - 1)))
    /* Sign bit set: subtract 2^WIDTH to sign-extend.  */
    return (int32_t) ((int64_t) field - ((int64_t) 1 << width));

  return (int32_t) field;
}
210
/* Test whether the bits of INSN selected by MASK equal PATTERN.

   INSN is the instruction opcode.

   MASK selects the opcode bits that are compared against PATTERN.

   Return non-zero when the masked opcode matches PATTERN.  */

static int
decode_masked_match (uint32_t insn, uint32_t mask, uint32_t pattern)
{
  uint32_t selected_bits = insn & mask;

  return selected_bits == pattern;
}
224
/* Decode an opcode if it represents an immediate ADD or SUB instruction.

   ADDR specifies the address of the opcode.
   INSN specifies the opcode to test.
   RD receives the 'rd' field from the decoded instruction.
   RN receives the 'rn' field from the decoded instruction.
   IMM receives the signed immediate, already shifted and negated as
   required by the 'shift' and 'op' fields.

   Return 1 if the opcodes matches and is decoded, otherwise 0.  */
static int
decode_add_sub_imm (CORE_ADDR addr, uint32_t insn, unsigned *rd, unsigned *rn,
		    int32_t *imm)
{
  if ((insn & 0x9f000000) == 0x91000000)
    {
      unsigned shift;
      unsigned op_is_sub;

      *rd = (insn >> 0) & 0x1f;
      *rn = (insn >> 5) & 0x1f;
      *imm = (insn >> 10) & 0xfff;
      shift = (insn >> 22) & 0x3;
      op_is_sub = (insn >> 30) & 0x1;

      /* The 'shift' field scales the 12-bit immediate.  */
      switch (shift)
	{
	case 0:
	  break;
	case 1:
	  /* Immediate is left-shifted by 12 bits.  */
	  *imm <<= 12;
	  break;
	default:
	  /* UNDEFINED */
	  return 0;
	}

      /* A set 'op' bit means SUB; represent it as a negative ADD.  */
      if (op_is_sub)
	*imm = -*imm;

      if (aarch64_debug)
	fprintf_unfiltered (gdb_stdlog,
			    "decode: 0x%s 0x%x add x%u, x%u, #%d\n",
			    core_addr_to_string_nz (addr), insn, *rd, *rn,
			    *imm);
      return 1;
    }
  return 0;
}
272
273 /* Decode an opcode if it represents an ADRP instruction.
274
275 ADDR specifies the address of the opcode.
276 INSN specifies the opcode to test.
277 RD receives the 'rd' field from the decoded instruction.
278
279 Return 1 if the opcodes matches and is decoded, otherwise 0. */
280
281 static int
282 decode_adrp (CORE_ADDR addr, uint32_t insn, unsigned *rd)
283 {
284 if (decode_masked_match (insn, 0x9f000000, 0x90000000))
285 {
286 *rd = (insn >> 0) & 0x1f;
287
288 if (aarch64_debug)
289 fprintf_unfiltered (gdb_stdlog,
290 "decode: 0x%s 0x%x adrp x%u, #?\n",
291 core_addr_to_string_nz (addr), insn, *rd);
292 return 1;
293 }
294 return 0;
295 }
296
/* Decode an opcode if it represents a branch immediate or branch
   and link immediate instruction.

   ADDR specifies the address of the opcode.
   INSN specifies the opcode to test.
   IS_BL receives the 'op' bit from the decoded instruction.
   OFFSET receives the immediate offset from the decoded instruction,
   in bytes.

   Return 1 if the opcodes matches and is decoded, otherwise 0.  */

static int
decode_b (CORE_ADDR addr, uint32_t insn, int *is_bl, int32_t *offset)
{
  /* b  0001 01ii iiii iiii iiii iiii iiii iiii */
  /* bl 1001 01ii iiii iiii iiii iiii iiii iiii */
  if (decode_masked_match (insn, 0x7c000000, 0x14000000))
    {
      *is_bl = (insn >> 31) & 0x1;
      /* imm26 is scaled by 4 to give a byte offset.  NOTE(review):
	 left-shifting a negative int32_t is technically undefined;
	 relies on two's-complement behaviour.  */
      *offset = extract_signed_bitfield (insn, 26, 0) << 2;

      if (aarch64_debug)
	fprintf_unfiltered (gdb_stdlog,
			    "decode: 0x%s 0x%x %s 0x%s\n",
			    core_addr_to_string_nz (addr), insn,
			    *is_bl ? "bl" : "b",
			    core_addr_to_string_nz (addr + *offset));

      return 1;
    }
  return 0;
}
328
/* Decode an opcode if it represents a conditional branch instruction.

   ADDR specifies the address of the opcode.
   INSN specifies the opcode to test.
   COND receives the branch condition field from the decoded
   instruction.
   OFFSET receives the immediate offset from the decoded instruction,
   in bytes.

   Return 1 if the opcodes matches and is decoded, otherwise 0.  */

static int
decode_bcond (CORE_ADDR addr, uint32_t insn, unsigned *cond, int32_t *offset)
{
  if (decode_masked_match (insn, 0xfe000000, 0x54000000))
    {
      *cond = (insn >> 0) & 0xf;
      /* imm19 at bit 5, scaled by 4 to a byte offset.  */
      *offset = extract_signed_bitfield (insn, 19, 5) << 2;

      if (aarch64_debug)
	fprintf_unfiltered (gdb_stdlog,
			    "decode: 0x%s 0x%x b<%u> 0x%s\n",
			    core_addr_to_string_nz (addr), insn, *cond,
			    core_addr_to_string_nz (addr + *offset));
      return 1;
    }
  return 0;
}
356
/* Decode an opcode if it represents a branch via register instruction.

   ADDR specifies the address of the opcode.
   INSN specifies the opcode to test.
   IS_BLR receives the 'op' bit from the decoded instruction.
   RN receives the 'rn' field from the decoded instruction.

   Return 1 if the opcodes matches and is decoded, otherwise 0.  */

static int
decode_br (CORE_ADDR addr, uint32_t insn, int *is_blr, unsigned *rn)
{
  /*         8   4   0   6   2   8   4   0 */
  /* blr  110101100011111100000000000rrrrr */
  /* br   110101100001111100000000000rrrrr */
  if (decode_masked_match (insn, 0xffdffc1f, 0xd61f0000))
    {
      /* Bit 21 distinguishes BLR (set) from BR (clear).  */
      *is_blr = (insn >> 21) & 1;
      *rn = (insn >> 5) & 0x1f;

      if (aarch64_debug)
	fprintf_unfiltered (gdb_stdlog,
			    "decode: 0x%s 0x%x %s 0x%x\n",
			    core_addr_to_string_nz (addr), insn,
			    *is_blr ? "blr" : "br", *rn);

      return 1;
    }
  return 0;
}
387
/* Decode an opcode if it represents a CBZ or CBNZ instruction.

   ADDR specifies the address of the opcode.
   INSN specifies the opcode to test.
   IS64 receives the 'sf' field from the decoded instruction.
   IS_CBNZ receives the 'op' field from the decoded instruction.
   RN receives the 'rn' field from the decoded instruction.
   OFFSET receives the 'imm19' field from the decoded instruction,
   scaled to a byte offset.

   Return 1 if the opcodes matches and is decoded, otherwise 0.  */

static int
decode_cb (CORE_ADDR addr, uint32_t insn, int *is64, int *is_cbnz,
	   unsigned *rn, int32_t *offset)
{
  if (decode_masked_match (insn, 0x7e000000, 0x34000000))
    {
      /* cbz  T011 010o iiii iiii iiii iiii iiir rrrr */
      /* cbnz T011 010o iiii iiii iiii iiii iiir rrrr */

      *rn = (insn >> 0) & 0x1f;
      *is64 = (insn >> 31) & 0x1;
      *is_cbnz = (insn >> 24) & 0x1;
      *offset = extract_signed_bitfield (insn, 19, 5) << 2;

      if (aarch64_debug)
	fprintf_unfiltered (gdb_stdlog,
			    "decode: 0x%s 0x%x %s 0x%s\n",
			    core_addr_to_string_nz (addr), insn,
			    *is_cbnz ? "cbnz" : "cbz",
			    core_addr_to_string_nz (addr + *offset));
      return 1;
    }
  return 0;
}
423
424 /* Decode an opcode if it represents a ERET instruction.
425
426 ADDR specifies the address of the opcode.
427 INSN specifies the opcode to test.
428
429 Return 1 if the opcodes matches and is decoded, otherwise 0. */
430
431 static int
432 decode_eret (CORE_ADDR addr, uint32_t insn)
433 {
434 /* eret 1101 0110 1001 1111 0000 0011 1110 0000 */
435 if (insn == 0xd69f03e0)
436 {
437 if (aarch64_debug)
438 fprintf_unfiltered (gdb_stdlog, "decode: 0x%s 0x%x eret\n",
439 core_addr_to_string_nz (addr), insn);
440 return 1;
441 }
442 return 0;
443 }
444
445 /* Decode an opcode if it represents a MOVZ instruction.
446
447 ADDR specifies the address of the opcode.
448 INSN specifies the opcode to test.
449 RD receives the 'rd' field from the decoded instruction.
450
451 Return 1 if the opcodes matches and is decoded, otherwise 0. */
452
453 static int
454 decode_movz (CORE_ADDR addr, uint32_t insn, unsigned *rd)
455 {
456 if (decode_masked_match (insn, 0xff800000, 0x52800000))
457 {
458 *rd = (insn >> 0) & 0x1f;
459
460 if (aarch64_debug)
461 fprintf_unfiltered (gdb_stdlog,
462 "decode: 0x%s 0x%x movz x%u, #?\n",
463 core_addr_to_string_nz (addr), insn, *rd);
464 return 1;
465 }
466 return 0;
467 }
468
469 /* Decode an opcode if it represents a ORR (shifted register)
470 instruction.
471
472 ADDR specifies the address of the opcode.
473 INSN specifies the opcode to test.
474 RD receives the 'rd' field from the decoded instruction.
475 RN receives the 'rn' field from the decoded instruction.
476 RM receives the 'rm' field from the decoded instruction.
477 IMM receives the 'imm6' field from the decoded instruction.
478
479 Return 1 if the opcodes matches and is decoded, otherwise 0. */
480
481 static int
482 decode_orr_shifted_register_x (CORE_ADDR addr,
483 uint32_t insn, unsigned *rd, unsigned *rn,
484 unsigned *rm, int32_t *imm)
485 {
486 if (decode_masked_match (insn, 0xff200000, 0xaa000000))
487 {
488 *rd = (insn >> 0) & 0x1f;
489 *rn = (insn >> 5) & 0x1f;
490 *rm = (insn >> 16) & 0x1f;
491 *imm = (insn >> 10) & 0x3f;
492
493 if (aarch64_debug)
494 fprintf_unfiltered (gdb_stdlog,
495 "decode: 0x%s 0x%x orr x%u, x%u, x%u, #%u\n",
496 core_addr_to_string_nz (addr), insn, *rd,
497 *rn, *rm, *imm);
498 return 1;
499 }
500 return 0;
501 }
502
503 /* Decode an opcode if it represents a RET instruction.
504
505 ADDR specifies the address of the opcode.
506 INSN specifies the opcode to test.
507 RN receives the 'rn' field from the decoded instruction.
508
509 Return 1 if the opcodes matches and is decoded, otherwise 0. */
510
511 static int
512 decode_ret (CORE_ADDR addr, uint32_t insn, unsigned *rn)
513 {
514 if (decode_masked_match (insn, 0xfffffc1f, 0xd65f0000))
515 {
516 *rn = (insn >> 5) & 0x1f;
517 if (aarch64_debug)
518 fprintf_unfiltered (gdb_stdlog,
519 "decode: 0x%s 0x%x ret x%u\n",
520 core_addr_to_string_nz (addr), insn, *rn);
521 return 1;
522 }
523 return 0;
524 }
525
/* Decode an opcode if it represents the following instruction:
   STP rt, rt2, [rn, #imm]

   ADDR specifies the address of the opcode.
   INSN specifies the opcode to test.
   RT1 receives the 'rt' field from the decoded instruction.
   RT2 receives the 'rt2' field from the decoded instruction.
   RN receives the 'rn' field from the decoded instruction.
   IMM receives the 'imm' field from the decoded instruction, scaled
   to a byte offset.

   Return 1 if the opcodes matches and is decoded, otherwise 0.  */

static int
decode_stp_offset (CORE_ADDR addr,
		   uint32_t insn,
		   unsigned *rt1, unsigned *rt2, unsigned *rn, int32_t *imm)
{
  if (decode_masked_match (insn, 0xffc00000, 0xa9000000))
    {
      *rt1 = (insn >> 0) & 0x1f;
      *rn = (insn >> 5) & 0x1f;
      *rt2 = (insn >> 10) & 0x1f;
      /* imm7 is in units of 8 bytes for 64-bit STP.  */
      *imm = extract_signed_bitfield (insn, 7, 15);
      *imm <<= 3;

      if (aarch64_debug)
	fprintf_unfiltered (gdb_stdlog,
			    "decode: 0x%s 0x%x stp x%u, x%u, [x%u + #%d]\n",
			    core_addr_to_string_nz (addr), insn,
			    *rt1, *rt2, *rn, *imm);
      return 1;
    }
  return 0;
}
560
/* Decode an opcode if it represents the following instruction:
   STP rt, rt2, [rn, #imm]!
   (pre-index with write-back to RN).

   ADDR specifies the address of the opcode.
   INSN specifies the opcode to test.
   RT1 receives the 'rt' field from the decoded instruction.
   RT2 receives the 'rt2' field from the decoded instruction.
   RN receives the 'rn' field from the decoded instruction.
   IMM receives the 'imm' field from the decoded instruction, scaled
   to a byte offset.

   Return 1 if the opcodes matches and is decoded, otherwise 0.  */

static int
decode_stp_offset_wb (CORE_ADDR addr,
		      uint32_t insn,
		      unsigned *rt1, unsigned *rt2, unsigned *rn,
		      int32_t *imm)
{
  if (decode_masked_match (insn, 0xffc00000, 0xa9800000))
    {
      *rt1 = (insn >> 0) & 0x1f;
      *rn = (insn >> 5) & 0x1f;
      *rt2 = (insn >> 10) & 0x1f;
      /* imm7 is in units of 8 bytes for 64-bit STP.  */
      *imm = extract_signed_bitfield (insn, 7, 15);
      *imm <<= 3;

      if (aarch64_debug)
	fprintf_unfiltered (gdb_stdlog,
			    "decode: 0x%s 0x%x stp x%u, x%u, [x%u + #%d]!\n",
			    core_addr_to_string_nz (addr), insn,
			    *rt1, *rt2, *rn, *imm);
      return 1;
    }
  return 0;
}
596
/* Decode an opcode if it represents the following instruction:
   STUR rt, [rn, #imm]

   ADDR specifies the address of the opcode.
   INSN specifies the opcode to test.
   IS64 receives size field from the decoded instruction.
   RT receives the 'rt' field from the decoded instruction.
   RN receives the 'rn' field from the decoded instruction.
   IMM receives the 'imm' field from the decoded instruction
   (unscaled, unlike STP above).

   Return 1 if the opcodes matches and is decoded, otherwise 0.  */

static int
decode_stur (CORE_ADDR addr, uint32_t insn, int *is64, unsigned *rt,
	     unsigned *rn, int32_t *imm)
{
  if (decode_masked_match (insn, 0xbfe00c00, 0xb8000000))
    {
      /* Bit 30 selects a 64-bit (x) vs 32-bit (w) source register.  */
      *is64 = (insn >> 30) & 1;
      *rt = (insn >> 0) & 0x1f;
      *rn = (insn >> 5) & 0x1f;
      *imm = extract_signed_bitfield (insn, 9, 12);

      if (aarch64_debug)
	fprintf_unfiltered (gdb_stdlog,
			    "decode: 0x%s 0x%x stur %c%u, [x%u + #%d]\n",
			    core_addr_to_string_nz (addr), insn,
			    *is64 ? 'x' : 'w', *rt, *rn, *imm);
      return 1;
    }
  return 0;
}
629
/* Decode an opcode if it represents a TBZ or TBNZ instruction.

   ADDR specifies the address of the opcode.
   INSN specifies the opcode to test.
   IS_TBNZ receives the 'op' field from the decoded instruction.
   BIT receives the bit position field from the decoded instruction.
   RT receives 'rt' field from the decoded instruction.
   IMM receives 'imm' field from the decoded instruction, scaled to a
   byte offset.

   Return 1 if the opcodes matches and is decoded, otherwise 0.  */

static int
decode_tb (CORE_ADDR addr, uint32_t insn, int *is_tbnz, unsigned *bit,
	   unsigned *rt, int32_t *imm)
{
  if (decode_masked_match (insn, 0x7e000000, 0x36000000))
    {
      /* tbz  b011 0110 bbbb biii iiii iiii iiir rrrr */
      /* tbnz B011 0111 bbbb biii iiii iiii iiir rrrr */

      *rt = (insn >> 0) & 0x1f;
      *is_tbnz = (insn >> 24) & 0x1;
      /* The tested bit number is split: bit 31 supplies bit 5 (b5),
	 bits 19-23 supply the low five bits (b40).  */
      *bit = ((insn >> (31 - 4)) & 0x20) | ((insn >> 19) & 0x1f);
      *imm = extract_signed_bitfield (insn, 14, 5) << 2;

      if (aarch64_debug)
	fprintf_unfiltered (gdb_stdlog,
			    "decode: 0x%s 0x%x %s x%u, #%u, 0x%s\n",
			    core_addr_to_string_nz (addr), insn,
			    *is_tbnz ? "tbnz" : "tbz", *rt, *bit,
			    core_addr_to_string_nz (addr + *imm));
      return 1;
    }
  return 0;
}
665
666 /* Analyze a prologue, looking for a recognizable stack frame
667 and frame pointer. Scan until we encounter a store that could
668 clobber the stack frame unexpectedly, or an unknown instruction. */
669
670 static CORE_ADDR
671 aarch64_analyze_prologue (struct gdbarch *gdbarch,
672 CORE_ADDR start, CORE_ADDR limit,
673 struct aarch64_prologue_cache *cache)
674 {
675 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
676 int i;
677 pv_t regs[AARCH64_X_REGISTER_COUNT];
678 struct pv_area *stack;
679 struct cleanup *back_to;
680
681 for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
682 regs[i] = pv_register (i, 0);
683 stack = make_pv_area (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));
684 back_to = make_cleanup_free_pv_area (stack);
685
686 for (; start < limit; start += 4)
687 {
688 uint32_t insn;
689 unsigned rd;
690 unsigned rn;
691 unsigned rm;
692 unsigned rt;
693 unsigned rt1;
694 unsigned rt2;
695 int op_is_sub;
696 int32_t imm;
697 unsigned cond;
698 int is64;
699 int is_link;
700 int is_cbnz;
701 int is_tbnz;
702 unsigned bit;
703 int32_t offset;
704
705 insn = read_memory_unsigned_integer (start, 4, byte_order_for_code);
706
707 if (decode_add_sub_imm (start, insn, &rd, &rn, &imm))
708 regs[rd] = pv_add_constant (regs[rn], imm);
709 else if (decode_adrp (start, insn, &rd))
710 regs[rd] = pv_unknown ();
711 else if (decode_b (start, insn, &is_link, &offset))
712 {
713 /* Stop analysis on branch. */
714 break;
715 }
716 else if (decode_bcond (start, insn, &cond, &offset))
717 {
718 /* Stop analysis on branch. */
719 break;
720 }
721 else if (decode_br (start, insn, &is_link, &rn))
722 {
723 /* Stop analysis on branch. */
724 break;
725 }
726 else if (decode_cb (start, insn, &is64, &is_cbnz, &rn, &offset))
727 {
728 /* Stop analysis on branch. */
729 break;
730 }
731 else if (decode_eret (start, insn))
732 {
733 /* Stop analysis on branch. */
734 break;
735 }
736 else if (decode_movz (start, insn, &rd))
737 regs[rd] = pv_unknown ();
738 else
739 if (decode_orr_shifted_register_x (start, insn, &rd, &rn, &rm, &imm))
740 {
741 if (imm == 0 && rn == 31)
742 regs[rd] = regs[rm];
743 else
744 {
745 if (aarch64_debug)
746 fprintf_unfiltered
747 (gdb_stdlog,
748 "aarch64: prologue analysis gave up addr=0x%s "
749 "opcode=0x%x (orr x register)\n",
750 core_addr_to_string_nz (start),
751 insn);
752 break;
753 }
754 }
755 else if (decode_ret (start, insn, &rn))
756 {
757 /* Stop analysis on branch. */
758 break;
759 }
760 else if (decode_stur (start, insn, &is64, &rt, &rn, &offset))
761 {
762 pv_area_store (stack, pv_add_constant (regs[rn], offset),
763 is64 ? 8 : 4, regs[rt]);
764 }
765 else if (decode_stp_offset (start, insn, &rt1, &rt2, &rn, &imm))
766 {
767 /* If recording this store would invalidate the store area
768 (perhaps because rn is not known) then we should abandon
769 further prologue analysis. */
770 if (pv_area_store_would_trash (stack,
771 pv_add_constant (regs[rn], imm)))
772 break;
773
774 if (pv_area_store_would_trash (stack,
775 pv_add_constant (regs[rn], imm + 8)))
776 break;
777
778 pv_area_store (stack, pv_add_constant (regs[rn], imm), 8,
779 regs[rt1]);
780 pv_area_store (stack, pv_add_constant (regs[rn], imm + 8), 8,
781 regs[rt2]);
782 }
783 else if (decode_stp_offset_wb (start, insn, &rt1, &rt2, &rn, &imm))
784 {
785 /* If recording this store would invalidate the store area
786 (perhaps because rn is not known) then we should abandon
787 further prologue analysis. */
788 if (pv_area_store_would_trash (stack,
789 pv_add_constant (regs[rn], imm)))
790 break;
791
792 if (pv_area_store_would_trash (stack,
793 pv_add_constant (regs[rn], imm + 8)))
794 break;
795
796 pv_area_store (stack, pv_add_constant (regs[rn], imm), 8,
797 regs[rt1]);
798 pv_area_store (stack, pv_add_constant (regs[rn], imm + 8), 8,
799 regs[rt2]);
800 regs[rn] = pv_add_constant (regs[rn], imm);
801 }
802 else if (decode_tb (start, insn, &is_tbnz, &bit, &rn, &offset))
803 {
804 /* Stop analysis on branch. */
805 break;
806 }
807 else
808 {
809 if (aarch64_debug)
810 fprintf_unfiltered (gdb_stdlog,
811 "aarch64: prologue analysis gave up addr=0x%s"
812 " opcode=0x%x\n",
813 core_addr_to_string_nz (start), insn);
814 break;
815 }
816 }
817
818 if (cache == NULL)
819 {
820 do_cleanups (back_to);
821 return start;
822 }
823
824 if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
825 {
826 /* Frame pointer is fp. Frame size is constant. */
827 cache->framereg = AARCH64_FP_REGNUM;
828 cache->framesize = -regs[AARCH64_FP_REGNUM].k;
829 }
830 else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
831 {
832 /* Try the stack pointer. */
833 cache->framesize = -regs[AARCH64_SP_REGNUM].k;
834 cache->framereg = AARCH64_SP_REGNUM;
835 }
836 else
837 {
838 /* We're just out of luck. We don't know where the frame is. */
839 cache->framereg = -1;
840 cache->framesize = 0;
841 }
842
843 for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
844 {
845 CORE_ADDR offset;
846
847 if (pv_area_find_reg (stack, gdbarch, i, &offset))
848 cache->saved_regs[i].addr = offset;
849 }
850
851 do_cleanups (back_to);
852 return start;
853 }
854
855 /* Implement the "skip_prologue" gdbarch method. */
856
857 static CORE_ADDR
858 aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
859 {
860 unsigned long inst;
861 CORE_ADDR skip_pc;
862 CORE_ADDR func_addr, limit_pc;
863 struct symtab_and_line sal;
864
865 /* See if we can determine the end of the prologue via the symbol
866 table. If so, then return either PC, or the PC after the
867 prologue, whichever is greater. */
868 if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
869 {
870 CORE_ADDR post_prologue_pc
871 = skip_prologue_using_sal (gdbarch, func_addr);
872
873 if (post_prologue_pc != 0)
874 return max (pc, post_prologue_pc);
875 }
876
877 /* Can't determine prologue from the symbol table, need to examine
878 instructions. */
879
880 /* Find an upper limit on the function prologue using the debug
881 information. If the debug information could not be used to
882 provide that bound, then use an arbitrary large number as the
883 upper bound. */
884 limit_pc = skip_prologue_using_sal (gdbarch, pc);
885 if (limit_pc == 0)
886 limit_pc = pc + 128; /* Magic. */
887
888 /* Try disassembling prologue. */
889 return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
890 }
891
892 /* Scan the function prologue for THIS_FRAME and populate the prologue
893 cache CACHE. */
894
895 static void
896 aarch64_scan_prologue (struct frame_info *this_frame,
897 struct aarch64_prologue_cache *cache)
898 {
899 CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
900 CORE_ADDR prologue_start;
901 CORE_ADDR prologue_end;
902 CORE_ADDR prev_pc = get_frame_pc (this_frame);
903 struct gdbarch *gdbarch = get_frame_arch (this_frame);
904
905 cache->prev_pc = prev_pc;
906
907 /* Assume we do not find a frame. */
908 cache->framereg = -1;
909 cache->framesize = 0;
910
911 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
912 &prologue_end))
913 {
914 struct symtab_and_line sal = find_pc_line (prologue_start, 0);
915
916 if (sal.line == 0)
917 {
918 /* No line info so use the current PC. */
919 prologue_end = prev_pc;
920 }
921 else if (sal.end < prologue_end)
922 {
923 /* The next line begins after the function end. */
924 prologue_end = sal.end;
925 }
926
927 prologue_end = min (prologue_end, prev_pc);
928 aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
929 }
930 else
931 {
932 CORE_ADDR frame_loc;
933 LONGEST saved_fp;
934 LONGEST saved_lr;
935 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
936
937 frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
938 if (frame_loc == 0)
939 return;
940
941 cache->framereg = AARCH64_FP_REGNUM;
942 cache->framesize = 16;
943 cache->saved_regs[29].addr = 0;
944 cache->saved_regs[30].addr = 8;
945 }
946 }
947
/* Fill in *CACHE with information about the prologue of *THIS_FRAME.  This
   function may throw an exception if the inferior's registers or memory is
   not available.  */

static void
aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
			       struct aarch64_prologue_cache *cache)
{
  CORE_ADDR unwound_fp;
  int reg;

  aarch64_scan_prologue (this_frame, cache);

  /* framereg == -1 means the scan could not identify a frame.  */
  if (cache->framereg == -1)
    return;

  unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
  if (unwound_fp == 0)
    return;

  cache->prev_sp = unwound_fp + cache->framesize;

  /* Calculate actual addresses of saved registers using offsets
     determined by aarch64_analyze_prologue.  */
  for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
    if (trad_frame_addr_p (cache->saved_regs, reg))
      cache->saved_regs[reg].addr += cache->prev_sp;

  cache->func = get_frame_func (this_frame);

  /* Everything was readable; mark the cache as complete.  */
  cache->available_p = 1;
}
980
/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  /* Prologue analysis reads target registers and memory; if they are
     unavailable, leave the partially-filled cache with available_p
     still 0 rather than failing outright.  */
  TRY
    {
      aarch64_make_prologue_cache_1 (this_frame, cache);
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
	throw_exception (ex);
    }
  END_CATCH

  return cache;
}
1011
/* Implement the "stop_reason" frame_unwind method.  Decide whether the
   backtrace should continue past THIS_FRAME.  */

static enum unwind_stop_reason
aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
					   void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  /* The cache could not be fully computed (registers/memory
     unreadable).  */
  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  /* Halt the backtrace at "_start".  */
  if (cache->prev_pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
    return UNWIND_OUTERMOST;

  /* We've hit a wall, stop.  */
  if (cache->prev_sp == 0)
    return UNWIND_OUTERMOST;

  return UNWIND_NO_REASON;
}
1034
/* Our frame ID for a normal frame is the current function's starting
   PC and the caller's SP when we were called.  */

static void
aarch64_prologue_this_id (struct frame_info *this_frame,
			  void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  /* Without a readable stack only the function entry is usable.  */
  if (!cache->available_p)
    *this_id = frame_id_build_unavailable_stack (cache->func);
  else
    *this_id = frame_id_build (cache->prev_sp, cache->func);
}
1050
/* Implement the "prev_register" frame_unwind method.  Return the value
   register PREV_REGNUM had in the frame that called THIS_FRAME.  */

static struct value *
aarch64_prologue_prev_register (struct frame_info *this_frame,
				void **this_cache, int prev_regnum)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  /* If we are asked to unwind the PC, then we need to return the LR
     instead.  The prologue may save PC, but it will point into this
     frame's prologue, not the next frame's resume location.  */
  if (prev_regnum == AARCH64_PC_REGNUM)
    {
      CORE_ADDR lr;

      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      return frame_unwind_got_constant (this_frame, prev_regnum, lr);
    }

  /* SP is generally not saved to the stack, but this frame is
     identified by the next frame's stack pointer at the time of the
     call.  The value was already reconstructed into PREV_SP.  */
  /*
         +----------+  ^
         | saved lr |  |
      +->| saved fp |--+
      |  |          |
      |  |          |     <- Previous SP
      |  +----------+
      |  | saved lr |
      +--| saved fp |<- FP
         |          |
         |          |<- SP
         +----------+  */
  if (prev_regnum == AARCH64_SP_REGNUM)
    return frame_unwind_got_constant (this_frame, prev_regnum,
				      cache->prev_sp);

  /* Everything else comes from the saved-register table built by the
     prologue analysis.  */
  return trad_frame_get_prev_register (this_frame, cache->saved_regs,
				       prev_regnum);
}
1094
/* AArch64 prologue unwinder, driven by the prologue analysis above.  */
struct frame_unwind aarch64_prologue_unwind =
{
  NORMAL_FRAME,
  aarch64_prologue_frame_unwind_stop_reason,
  aarch64_prologue_this_id,
  aarch64_prologue_prev_register,
  NULL,				/* unwind_data  */
  default_frame_sniffer
};
1105
/* Allocate and fill in *THIS_CACHE with information about a stub
   frame (no analysable prologue; only the current SP and PC are
   recorded).  Do not do this is if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  TRY
    {
      cache->prev_sp = get_frame_register_unsigned (this_frame,
						    AARCH64_SP_REGNUM);
      cache->prev_pc = get_frame_pc (this_frame);
      cache->available_p = 1;
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      /* Registers may be unavailable (e.g. in a trimmed core file);
	 leave available_p clear in that case and rethrow anything
	 else.  */
      if (ex.error != NOT_AVAILABLE_ERROR)
	throw_exception (ex);
    }
  END_CATCH

  return cache;
}
1139
1140 /* Implement the "stop_reason" frame_unwind method. */
1141
1142 static enum unwind_stop_reason
1143 aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame,
1144 void **this_cache)
1145 {
1146 struct aarch64_prologue_cache *cache
1147 = aarch64_make_stub_cache (this_frame, this_cache);
1148
1149 if (!cache->available_p)
1150 return UNWIND_UNAVAILABLE;
1151
1152 return UNWIND_NO_REASON;
1153 }
1154
1155 /* Our frame ID for a stub frame is the current SP and LR. */
1156
1157 static void
1158 aarch64_stub_this_id (struct frame_info *this_frame,
1159 void **this_cache, struct frame_id *this_id)
1160 {
1161 struct aarch64_prologue_cache *cache
1162 = aarch64_make_stub_cache (this_frame, this_cache);
1163
1164 if (cache->available_p)
1165 *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
1166 else
1167 *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
1168 }
1169
1170 /* Implement the "sniffer" frame_unwind method. */
1171
1172 static int
1173 aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
1174 struct frame_info *this_frame,
1175 void **this_prologue_cache)
1176 {
1177 CORE_ADDR addr_in_block;
1178 gdb_byte dummy[4];
1179
1180 addr_in_block = get_frame_address_in_block (this_frame);
1181 if (in_plt_section (addr_in_block)
1182 /* We also use the stub winder if the target memory is unreadable
1183 to avoid having the prologue unwinder trying to read it. */
1184 || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
1185 return 1;
1186
1187 return 0;
1188 }
1189
/* AArch64 stub unwinder, used for PLT entries and unreadable code.  */
struct frame_unwind aarch64_stub_unwind =
{
  NORMAL_FRAME,
  aarch64_stub_frame_unwind_stop_reason,
  aarch64_stub_this_id,
  aarch64_prologue_prev_register,
  NULL,				/* unwind_data  */
  aarch64_stub_unwind_sniffer
};
1200
1201 /* Return the frame base address of *THIS_FRAME. */
1202
1203 static CORE_ADDR
1204 aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
1205 {
1206 struct aarch64_prologue_cache *cache
1207 = aarch64_make_prologue_cache (this_frame, this_cache);
1208
1209 return cache->prev_sp - cache->framesize;
1210 }
1211
/* AArch64 default frame base information, shared by the base, locals
   and arguments addresses.  */
struct frame_base aarch64_normal_base =
{
  &aarch64_prologue_unwind,
  aarch64_normal_frame_base,	/* this_base  */
  aarch64_normal_frame_base,	/* this_locals  */
  aarch64_normal_frame_base	/* this_args  */
};
1220
1221 /* Assuming THIS_FRAME is a dummy, return the frame ID of that
1222 dummy frame. The frame ID's base needs to match the TOS value
1223 saved by save_dummy_frame_tos () and returned from
1224 aarch64_push_dummy_call, and the PC needs to match the dummy
1225 frame's breakpoint. */
1226
1227 static struct frame_id
1228 aarch64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
1229 {
1230 return frame_id_build (get_frame_register_unsigned (this_frame,
1231 AARCH64_SP_REGNUM),
1232 get_frame_pc (this_frame));
1233 }
1234
1235 /* Implement the "unwind_pc" gdbarch method. */
1236
1237 static CORE_ADDR
1238 aarch64_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
1239 {
1240 CORE_ADDR pc
1241 = frame_unwind_register_unsigned (this_frame, AARCH64_PC_REGNUM);
1242
1243 return pc;
1244 }
1245
1246 /* Implement the "unwind_sp" gdbarch method. */
1247
1248 static CORE_ADDR
1249 aarch64_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
1250 {
1251 return frame_unwind_register_unsigned (this_frame, AARCH64_SP_REGNUM);
1252 }
1253
1254 /* Return the value of the REGNUM register in the previous frame of
1255 *THIS_FRAME. */
1256
1257 static struct value *
1258 aarch64_dwarf2_prev_register (struct frame_info *this_frame,
1259 void **this_cache, int regnum)
1260 {
1261 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1262 CORE_ADDR lr;
1263
1264 switch (regnum)
1265 {
1266 case AARCH64_PC_REGNUM:
1267 lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
1268 return frame_unwind_got_constant (this_frame, regnum, lr);
1269
1270 default:
1271 internal_error (__FILE__, __LINE__,
1272 _("Unexpected register %d"), regnum);
1273 }
1274 }
1275
1276 /* Implement the "init_reg" dwarf2_frame_ops method. */
1277
1278 static void
1279 aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
1280 struct dwarf2_frame_state_reg *reg,
1281 struct frame_info *this_frame)
1282 {
1283 switch (regnum)
1284 {
1285 case AARCH64_PC_REGNUM:
1286 reg->how = DWARF2_FRAME_REG_FN;
1287 reg->loc.fn = aarch64_dwarf2_prev_register;
1288 break;
1289 case AARCH64_SP_REGNUM:
1290 reg->how = DWARF2_FRAME_REG_CFA;
1291 break;
1292 }
1293 }
1294
/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

typedef struct
{
  /* Value to pass on stack.  Borrowed pointer into the caller's value
     contents; the stack item does not own the storage.  */
  const void *data;

  /* Size in bytes of value to pass on stack.  */
  int len;
} stack_item_t;

DEF_VEC_O (stack_item_t);
1308
/* Return the alignment (in bytes) of the given type T, after
   resolving typedefs.  Scalars align to their size; arrays and
   complex types align to their element type; structs and unions
   align to their most-aligned field.  */

static int
aarch64_type_align (struct type *t)
{
  int n;
  int align;
  int falign;

  t = check_typedef (t);
  switch (TYPE_CODE (t))
    {
    default:
      /* Should never happen.  */
      internal_error (__FILE__, __LINE__, _("unknown type alignment"));
      return 4;

    case TYPE_CODE_PTR:
    case TYPE_CODE_ENUM:
    case TYPE_CODE_INT:
    case TYPE_CODE_FLT:
    case TYPE_CODE_SET:
    case TYPE_CODE_RANGE:
    case TYPE_CODE_BITSTRING:
    case TYPE_CODE_REF:
    case TYPE_CODE_CHAR:
    case TYPE_CODE_BOOL:
      return TYPE_LENGTH (t);

    case TYPE_CODE_ARRAY:
    case TYPE_CODE_COMPLEX:
      return aarch64_type_align (TYPE_TARGET_TYPE (t));

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      /* Alignment of an aggregate is that of its most-aligned
	 member.  */
      align = 1;
      for (n = 0; n < TYPE_NFIELDS (t); n++)
	{
	  falign = aarch64_type_align (TYPE_FIELD_TYPE (t, n));
	  if (falign > align)
	    align = falign;
	}
      return align;
    }
}
1354
/* Return 1 if *TY is a homogeneous floating-point aggregate as
   defined in the AAPCS64 ABI document; otherwise return 0.  */

static int
is_hfa (struct type *ty)
{
  switch (TYPE_CODE (ty))
    {
    case TYPE_CODE_ARRAY:
      {
	struct type *target_ty = TYPE_TARGET_TYPE (ty);
	/* NOTE(review): this tests the array's TOTAL size in bytes
	   against 4, while the AAPCS64 defines an HFA by member count
	   (at most 4 identical FP members).  As written only a
	   degenerate float array of <= 4 bytes qualifies — confirm
	   the intent before relying on array HFAs.  */
	if (TYPE_CODE (target_ty) == TYPE_CODE_FLT && TYPE_LENGTH (ty) <= 4)
	  return 1;
	break;
      }

    case TYPE_CODE_UNION:
    case TYPE_CODE_STRUCT:
      {
	/* An HFA has between 1 and 4 members.  */
	if (TYPE_NFIELDS (ty) > 0 && TYPE_NFIELDS (ty) <= 4)
	  {
	    struct type *member0_type;

	    member0_type = check_typedef (TYPE_FIELD_TYPE (ty, 0));
	    if (TYPE_CODE (member0_type) == TYPE_CODE_FLT)
	      {
		int i;

		/* All members must have the same floating-point type
		   and size as the first member.  */
		for (i = 0; i < TYPE_NFIELDS (ty); i++)
		  {
		    struct type *member1_type;

		    member1_type = check_typedef (TYPE_FIELD_TYPE (ty, i));
		    if (TYPE_CODE (member0_type) != TYPE_CODE (member1_type)
			|| (TYPE_LENGTH (member0_type)
			    != TYPE_LENGTH (member1_type)))
		      return 0;
		  }
		return 1;
	      }
	  }
	return 0;
      }

    default:
      break;
    }

  return 0;
}
1405
/* AArch64 function call information structure.  Tracks the register
   and stack allocation state while marshalling arguments for
   aarch64_push_dummy_call.  */
struct aarch64_call_info
{
  /* the current argument number.  */
  unsigned argnum;

  /* The next general purpose register number, equivalent to NGRN as
     described in the AArch64 Procedure Call Standard.  */
  unsigned ngrn;

  /* The next SIMD and floating point register number, equivalent to
     NSRN as described in the AArch64 Procedure Call Standard.  */
  unsigned nsrn;

  /* The next stacked argument address, equivalent to NSAA as
     described in the AArch64 Procedure Call Standard.  */
  unsigned nsaa;

  /* Stack item vector.  Items are written out in reverse (FILO)
     order by aarch64_push_dummy_call.  */
  VEC(stack_item_t) *si;
};
1427
/* Pass a value in a sequence of consecutive X registers.  The caller
   is responsible for ensuring sufficient registers are available, and
   for updating info->ngrn afterwards (this function does not touch
   it; see pass_in_x_or_stack).  */

static void
pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
	   struct aarch64_call_info *info, struct type *type,
	   const bfd_byte *buf)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int len = TYPE_LENGTH (type);
  enum type_code typecode = TYPE_CODE (type);
  int regnum = AARCH64_X0_REGNUM + info->ngrn;

  info->argnum++;

  while (len > 0)
    {
      int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
      CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
						   byte_order);


      /* Adjust sub-word struct/union args when big-endian.  */
      if (byte_order == BFD_ENDIAN_BIG
	  && partial_len < X_REGISTER_SIZE
	  && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
	regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);

      if (aarch64_debug)
	fprintf_unfiltered (gdb_stdlog, "arg %d in %s = 0x%s\n",
			    info->argnum,
			    gdbarch_register_name (gdbarch, regnum),
			    phex (regval, X_REGISTER_SIZE));
      regcache_cooked_write_unsigned (regcache, regnum, regval);
      len -= partial_len;
      buf += partial_len;
      regnum++;
    }
}
1467
1468 /* Attempt to marshall a value in a V register. Return 1 if
1469 successful, or 0 if insufficient registers are available. This
1470 function, unlike the equivalent pass_in_x() function does not
1471 handle arguments spread across multiple registers. */
1472
1473 static int
1474 pass_in_v (struct gdbarch *gdbarch,
1475 struct regcache *regcache,
1476 struct aarch64_call_info *info,
1477 const bfd_byte *buf)
1478 {
1479 if (info->nsrn < 8)
1480 {
1481 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1482 int regnum = AARCH64_V0_REGNUM + info->nsrn;
1483
1484 info->argnum++;
1485 info->nsrn++;
1486
1487 regcache_cooked_write (regcache, regnum, buf);
1488 if (aarch64_debug)
1489 fprintf_unfiltered (gdb_stdlog, "arg %d in %s\n",
1490 info->argnum,
1491 gdbarch_register_name (gdbarch, regnum));
1492 return 1;
1493 }
1494 info->nsrn = 8;
1495 return 0;
1496 }
1497
/* Marshall an argument of TYPE held in BUF onto the stack, recording
   it (and any required alignment padding) in info->si; the actual
   memory writes happen later in aarch64_push_dummy_call.  */

static void
pass_on_stack (struct aarch64_call_info *info, struct type *type,
	       const bfd_byte *buf)
{
  int len = TYPE_LENGTH (type);
  int align;
  stack_item_t item;

  info->argnum++;

  align = aarch64_type_align (type);

  /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
     Natural alignment of the argument's type.  */
  align = align_up (align, 8);

  /* The AArch64 PCS requires at most doubleword alignment.  */
  if (align > 16)
    align = 16;

  if (aarch64_debug)
    fprintf_unfiltered (gdb_stdlog, "arg %d len=%d @ sp + %d\n",
			info->argnum, len, info->nsaa);

  item.len = len;
  item.data = buf;
  VEC_safe_push (stack_item_t, info->si, &item);

  info->nsaa += len;
  if (info->nsaa & (align - 1))
    {
      /* Push stack alignment padding.  NOTE(review): the padding item
	 reuses BUF as its data source, so up to PAD bytes of the
	 value's contents are written again as filler; if PAD exceeds
	 LEN this reads past the value — confirm BUF is always large
	 enough.  */
      int pad = align - (info->nsaa & (align - 1));

      item.len = pad;
      item.data = buf;

      VEC_safe_push (stack_item_t, info->si, &item);
      info->nsaa += pad;
    }
}
1541
1542 /* Marshall an argument into a sequence of one or more consecutive X
1543 registers or, if insufficient X registers are available then onto
1544 the stack. */
1545
1546 static void
1547 pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
1548 struct aarch64_call_info *info, struct type *type,
1549 const bfd_byte *buf)
1550 {
1551 int len = TYPE_LENGTH (type);
1552 int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;
1553
1554 /* PCS C.13 - Pass in registers if we have enough spare */
1555 if (info->ngrn + nregs <= 8)
1556 {
1557 pass_in_x (gdbarch, regcache, info, type, buf);
1558 info->ngrn += nregs;
1559 }
1560 else
1561 {
1562 info->ngrn = 8;
1563 pass_on_stack (info, type, buf);
1564 }
1565 }
1566
1567 /* Pass a value in a V register, or on the stack if insufficient are
1568 available. */
1569
1570 static void
1571 pass_in_v_or_stack (struct gdbarch *gdbarch,
1572 struct regcache *regcache,
1573 struct aarch64_call_info *info,
1574 struct type *type,
1575 const bfd_byte *buf)
1576 {
1577 if (!pass_in_v (gdbarch, regcache, info, buf))
1578 pass_on_stack (info, type, buf);
1579 }
1580
1581 /* Implement the "push_dummy_call" gdbarch method. */
1582
1583 static CORE_ADDR
1584 aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
1585 struct regcache *regcache, CORE_ADDR bp_addr,
1586 int nargs,
1587 struct value **args, CORE_ADDR sp, int struct_return,
1588 CORE_ADDR struct_addr)
1589 {
1590 int nstack = 0;
1591 int argnum;
1592 int x_argreg;
1593 int v_argreg;
1594 struct aarch64_call_info info;
1595 struct type *func_type;
1596 struct type *return_type;
1597 int lang_struct_return;
1598
1599 memset (&info, 0, sizeof (info));
1600
1601 /* We need to know what the type of the called function is in order
1602 to determine the number of named/anonymous arguments for the
1603 actual argument placement, and the return type in order to handle
1604 return value correctly.
1605
1606 The generic code above us views the decision of return in memory
1607 or return in registers as a two stage processes. The language
1608 handler is consulted first and may decide to return in memory (eg
1609 class with copy constructor returned by value), this will cause
1610 the generic code to allocate space AND insert an initial leading
1611 argument.
1612
1613 If the language code does not decide to pass in memory then the
1614 target code is consulted.
1615
1616 If the language code decides to pass in memory we want to move
1617 the pointer inserted as the initial argument from the argument
1618 list and into X8, the conventional AArch64 struct return pointer
1619 register.
1620
1621 This is slightly awkward, ideally the flag "lang_struct_return"
1622 would be passed to the targets implementation of push_dummy_call.
1623 Rather that change the target interface we call the language code
1624 directly ourselves. */
1625
1626 func_type = check_typedef (value_type (function));
1627
1628 /* Dereference function pointer types. */
1629 if (TYPE_CODE (func_type) == TYPE_CODE_PTR)
1630 func_type = TYPE_TARGET_TYPE (func_type);
1631
1632 gdb_assert (TYPE_CODE (func_type) == TYPE_CODE_FUNC
1633 || TYPE_CODE (func_type) == TYPE_CODE_METHOD);
1634
1635 /* If language_pass_by_reference () returned true we will have been
1636 given an additional initial argument, a hidden pointer to the
1637 return slot in memory. */
1638 return_type = TYPE_TARGET_TYPE (func_type);
1639 lang_struct_return = language_pass_by_reference (return_type);
1640
1641 /* Set the return address. For the AArch64, the return breakpoint
1642 is always at BP_ADDR. */
1643 regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);
1644
1645 /* If we were given an initial argument for the return slot because
1646 lang_struct_return was true, lose it. */
1647 if (lang_struct_return)
1648 {
1649 args++;
1650 nargs--;
1651 }
1652
1653 /* The struct_return pointer occupies X8. */
1654 if (struct_return || lang_struct_return)
1655 {
1656 if (aarch64_debug)
1657 fprintf_unfiltered (gdb_stdlog, "struct return in %s = 0x%s\n",
1658 gdbarch_register_name
1659 (gdbarch,
1660 AARCH64_STRUCT_RETURN_REGNUM),
1661 paddress (gdbarch, struct_addr));
1662 regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
1663 struct_addr);
1664 }
1665
1666 for (argnum = 0; argnum < nargs; argnum++)
1667 {
1668 struct value *arg = args[argnum];
1669 struct type *arg_type;
1670 int len;
1671
1672 arg_type = check_typedef (value_type (arg));
1673 len = TYPE_LENGTH (arg_type);
1674
1675 switch (TYPE_CODE (arg_type))
1676 {
1677 case TYPE_CODE_INT:
1678 case TYPE_CODE_BOOL:
1679 case TYPE_CODE_CHAR:
1680 case TYPE_CODE_RANGE:
1681 case TYPE_CODE_ENUM:
1682 if (len < 4)
1683 {
1684 /* Promote to 32 bit integer. */
1685 if (TYPE_UNSIGNED (arg_type))
1686 arg_type = builtin_type (gdbarch)->builtin_uint32;
1687 else
1688 arg_type = builtin_type (gdbarch)->builtin_int32;
1689 arg = value_cast (arg_type, arg);
1690 }
1691 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type,
1692 value_contents (arg));
1693 break;
1694
1695 case TYPE_CODE_COMPLEX:
1696 if (info.nsrn <= 6)
1697 {
1698 const bfd_byte *buf = value_contents (arg);
1699 struct type *target_type =
1700 check_typedef (TYPE_TARGET_TYPE (arg_type));
1701
1702 pass_in_v (gdbarch, regcache, &info, buf);
1703 pass_in_v (gdbarch, regcache, &info,
1704 buf + TYPE_LENGTH (target_type));
1705 }
1706 else
1707 {
1708 info.nsrn = 8;
1709 pass_on_stack (&info, arg_type, value_contents (arg));
1710 }
1711 break;
1712 case TYPE_CODE_FLT:
1713 pass_in_v_or_stack (gdbarch, regcache, &info, arg_type,
1714 value_contents (arg));
1715 break;
1716
1717 case TYPE_CODE_STRUCT:
1718 case TYPE_CODE_ARRAY:
1719 case TYPE_CODE_UNION:
1720 if (is_hfa (arg_type))
1721 {
1722 int elements = TYPE_NFIELDS (arg_type);
1723
1724 /* Homogeneous Aggregates */
1725 if (info.nsrn + elements < 8)
1726 {
1727 int i;
1728
1729 for (i = 0; i < elements; i++)
1730 {
1731 /* We know that we have sufficient registers
1732 available therefore this will never fallback
1733 to the stack. */
1734 struct value *field =
1735 value_primitive_field (arg, 0, i, arg_type);
1736 struct type *field_type =
1737 check_typedef (value_type (field));
1738
1739 pass_in_v_or_stack (gdbarch, regcache, &info, field_type,
1740 value_contents_writeable (field));
1741 }
1742 }
1743 else
1744 {
1745 info.nsrn = 8;
1746 pass_on_stack (&info, arg_type, value_contents (arg));
1747 }
1748 }
1749 else if (len > 16)
1750 {
1751 /* PCS B.7 Aggregates larger than 16 bytes are passed by
1752 invisible reference. */
1753
1754 /* Allocate aligned storage. */
1755 sp = align_down (sp - len, 16);
1756
1757 /* Write the real data into the stack. */
1758 write_memory (sp, value_contents (arg), len);
1759
1760 /* Construct the indirection. */
1761 arg_type = lookup_pointer_type (arg_type);
1762 arg = value_from_pointer (arg_type, sp);
1763 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type,
1764 value_contents (arg));
1765 }
1766 else
1767 /* PCS C.15 / C.18 multiple values pass. */
1768 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type,
1769 value_contents (arg));
1770 break;
1771
1772 default:
1773 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type,
1774 value_contents (arg));
1775 break;
1776 }
1777 }
1778
1779 /* Make sure stack retains 16 byte alignment. */
1780 if (info.nsaa & 15)
1781 sp -= 16 - (info.nsaa & 15);
1782
1783 while (!VEC_empty (stack_item_t, info.si))
1784 {
1785 stack_item_t *si = VEC_last (stack_item_t, info.si);
1786
1787 sp -= si->len;
1788 write_memory (sp, si->data, si->len);
1789 VEC_pop (stack_item_t, info.si);
1790 }
1791
1792 VEC_free (stack_item_t, info.si);
1793
1794 /* Finally, update the SP register. */
1795 regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);
1796
1797 return sp;
1798 }
1799
1800 /* Implement the "frame_align" gdbarch method. */
1801
1802 static CORE_ADDR
1803 aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
1804 {
1805 /* Align the stack to sixteen bytes. */
1806 return sp & ~(CORE_ADDR) 15;
1807 }
1808
1809 /* Return the type for an AdvSISD Q register. */
1810
1811 static struct type *
1812 aarch64_vnq_type (struct gdbarch *gdbarch)
1813 {
1814 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1815
1816 if (tdep->vnq_type == NULL)
1817 {
1818 struct type *t;
1819 struct type *elem;
1820
1821 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
1822 TYPE_CODE_UNION);
1823
1824 elem = builtin_type (gdbarch)->builtin_uint128;
1825 append_composite_type_field (t, "u", elem);
1826
1827 elem = builtin_type (gdbarch)->builtin_int128;
1828 append_composite_type_field (t, "s", elem);
1829
1830 tdep->vnq_type = t;
1831 }
1832
1833 return tdep->vnq_type;
1834 }
1835
1836 /* Return the type for an AdvSISD D register. */
1837
1838 static struct type *
1839 aarch64_vnd_type (struct gdbarch *gdbarch)
1840 {
1841 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1842
1843 if (tdep->vnd_type == NULL)
1844 {
1845 struct type *t;
1846 struct type *elem;
1847
1848 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
1849 TYPE_CODE_UNION);
1850
1851 elem = builtin_type (gdbarch)->builtin_double;
1852 append_composite_type_field (t, "f", elem);
1853
1854 elem = builtin_type (gdbarch)->builtin_uint64;
1855 append_composite_type_field (t, "u", elem);
1856
1857 elem = builtin_type (gdbarch)->builtin_int64;
1858 append_composite_type_field (t, "s", elem);
1859
1860 tdep->vnd_type = t;
1861 }
1862
1863 return tdep->vnd_type;
1864 }
1865
1866 /* Return the type for an AdvSISD S register. */
1867
1868 static struct type *
1869 aarch64_vns_type (struct gdbarch *gdbarch)
1870 {
1871 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1872
1873 if (tdep->vns_type == NULL)
1874 {
1875 struct type *t;
1876 struct type *elem;
1877
1878 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
1879 TYPE_CODE_UNION);
1880
1881 elem = builtin_type (gdbarch)->builtin_float;
1882 append_composite_type_field (t, "f", elem);
1883
1884 elem = builtin_type (gdbarch)->builtin_uint32;
1885 append_composite_type_field (t, "u", elem);
1886
1887 elem = builtin_type (gdbarch)->builtin_int32;
1888 append_composite_type_field (t, "s", elem);
1889
1890 tdep->vns_type = t;
1891 }
1892
1893 return tdep->vns_type;
1894 }
1895
1896 /* Return the type for an AdvSISD H register. */
1897
1898 static struct type *
1899 aarch64_vnh_type (struct gdbarch *gdbarch)
1900 {
1901 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1902
1903 if (tdep->vnh_type == NULL)
1904 {
1905 struct type *t;
1906 struct type *elem;
1907
1908 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
1909 TYPE_CODE_UNION);
1910
1911 elem = builtin_type (gdbarch)->builtin_uint16;
1912 append_composite_type_field (t, "u", elem);
1913
1914 elem = builtin_type (gdbarch)->builtin_int16;
1915 append_composite_type_field (t, "s", elem);
1916
1917 tdep->vnh_type = t;
1918 }
1919
1920 return tdep->vnh_type;
1921 }
1922
1923 /* Return the type for an AdvSISD B register. */
1924
1925 static struct type *
1926 aarch64_vnb_type (struct gdbarch *gdbarch)
1927 {
1928 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1929
1930 if (tdep->vnb_type == NULL)
1931 {
1932 struct type *t;
1933 struct type *elem;
1934
1935 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
1936 TYPE_CODE_UNION);
1937
1938 elem = builtin_type (gdbarch)->builtin_uint8;
1939 append_composite_type_field (t, "u", elem);
1940
1941 elem = builtin_type (gdbarch)->builtin_int8;
1942 append_composite_type_field (t, "s", elem);
1943
1944 tdep->vnb_type = t;
1945 }
1946
1947 return tdep->vnb_type;
1948 }
1949
1950 /* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
1951
1952 static int
1953 aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
1954 {
1955 if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
1956 return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;
1957
1958 if (reg == AARCH64_DWARF_SP)
1959 return AARCH64_SP_REGNUM;
1960
1961 if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
1962 return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;
1963
1964 return -1;
1965 }
1966 \f
1967
/* Implement the "print_insn" gdbarch method.  */

static int
aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
{
  /* Clear any symbol the caller attached to INFO before delegating to
     the opcodes disassembler — presumably so print_insn_aarch64 does
     not consult it; confirm against opcodes.  */
  info->symbols = NULL;
  return print_insn_aarch64 (memaddr, info);
}
1976
/* AArch64 BRK software debug mode instruction.
   Note that AArch64 code is always little-endian, so the bytes below
   are stored LSB first.
   1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000.  */
static const gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
1981
1982 /* Implement the "breakpoint_from_pc" gdbarch method. */
1983
1984 static const gdb_byte *
1985 aarch64_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr,
1986 int *lenptr)
1987 {
1988 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1989
1990 *lenptr = sizeof (aarch64_default_breakpoint);
1991 return aarch64_default_breakpoint;
1992 }
1993
1994 /* Extract from an array REGS containing the (raw) register state a
1995 function return value of type TYPE, and copy that, in virtual
1996 format, into VALBUF. */
1997
1998 static void
1999 aarch64_extract_return_value (struct type *type, struct regcache *regs,
2000 gdb_byte *valbuf)
2001 {
2002 struct gdbarch *gdbarch = get_regcache_arch (regs);
2003 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2004
2005 if (TYPE_CODE (type) == TYPE_CODE_FLT)
2006 {
2007 bfd_byte buf[V_REGISTER_SIZE];
2008 int len = TYPE_LENGTH (type);
2009
2010 regcache_cooked_read (regs, AARCH64_V0_REGNUM, buf);
2011 memcpy (valbuf, buf, len);
2012 }
2013 else if (TYPE_CODE (type) == TYPE_CODE_INT
2014 || TYPE_CODE (type) == TYPE_CODE_CHAR
2015 || TYPE_CODE (type) == TYPE_CODE_BOOL
2016 || TYPE_CODE (type) == TYPE_CODE_PTR
2017 || TYPE_CODE (type) == TYPE_CODE_REF
2018 || TYPE_CODE (type) == TYPE_CODE_ENUM)
2019 {
2020 /* If the the type is a plain integer, then the access is
2021 straight-forward. Otherwise we have to play around a bit
2022 more. */
2023 int len = TYPE_LENGTH (type);
2024 int regno = AARCH64_X0_REGNUM;
2025 ULONGEST tmp;
2026
2027 while (len > 0)
2028 {
2029 /* By using store_unsigned_integer we avoid having to do
2030 anything special for small big-endian values. */
2031 regcache_cooked_read_unsigned (regs, regno++, &tmp);
2032 store_unsigned_integer (valbuf,
2033 (len > X_REGISTER_SIZE
2034 ? X_REGISTER_SIZE : len), byte_order, tmp);
2035 len -= X_REGISTER_SIZE;
2036 valbuf += X_REGISTER_SIZE;
2037 }
2038 }
2039 else if (TYPE_CODE (type) == TYPE_CODE_COMPLEX)
2040 {
2041 int regno = AARCH64_V0_REGNUM;
2042 bfd_byte buf[V_REGISTER_SIZE];
2043 struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
2044 int len = TYPE_LENGTH (target_type);
2045
2046 regcache_cooked_read (regs, regno, buf);
2047 memcpy (valbuf, buf, len);
2048 valbuf += len;
2049 regcache_cooked_read (regs, regno + 1, buf);
2050 memcpy (valbuf, buf, len);
2051 valbuf += len;
2052 }
2053 else if (is_hfa (type))
2054 {
2055 int elements = TYPE_NFIELDS (type);
2056 struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
2057 int len = TYPE_LENGTH (member_type);
2058 int i;
2059
2060 for (i = 0; i < elements; i++)
2061 {
2062 int regno = AARCH64_V0_REGNUM + i;
2063 bfd_byte buf[X_REGISTER_SIZE];
2064
2065 if (aarch64_debug)
2066 fprintf_unfiltered (gdb_stdlog,
2067 "read HFA return value element %d from %s\n",
2068 i + 1,
2069 gdbarch_register_name (gdbarch, regno));
2070 regcache_cooked_read (regs, regno, buf);
2071
2072 memcpy (valbuf, buf, len);
2073 valbuf += len;
2074 }
2075 }
2076 else
2077 {
2078 /* For a structure or union the behaviour is as if the value had
2079 been stored to word-aligned memory and then loaded into
2080 registers with 64-bit load instruction(s). */
2081 int len = TYPE_LENGTH (type);
2082 int regno = AARCH64_X0_REGNUM;
2083 bfd_byte buf[X_REGISTER_SIZE];
2084
2085 while (len > 0)
2086 {
2087 regcache_cooked_read (regs, regno++, buf);
2088 memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2089 len -= X_REGISTER_SIZE;
2090 valbuf += X_REGISTER_SIZE;
2091 }
2092 }
2093 }
2094
2095
/* Will a function return an aggregate type in memory or in a
   register?  Return 0 if an aggregate type can be returned in a
   register, 1 if it must be returned in memory.  (The unused locals
   `nRc' and `code', and a stale comment that disagreed with the
   16-byte check below, have been removed.)  */

static int
aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
{
  type = check_typedef (type);

  if (is_hfa (type))
    {
      /* PCS B.5 If the argument is a Named HFA, then the argument is
	 used unmodified.  */
      return 0;
    }

  if (TYPE_LENGTH (type) > 16)
    {
      /* PCS B.6 Aggregates larger than 16 bytes are passed by
	 invisible reference.  */

      return 1;
    }

  /* Anything else (at most 16 bytes) fits in registers.  */
  return 0;
}
2129
/* Write into appropriate registers a function return value of type
   TYPE, given in virtual format in VALBUF.  This is the inverse of
   aarch64_extract_return_value.  */

static void
aarch64_store_return_value (struct type *type, struct regcache *regs,
			    const gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = get_regcache_arch (regs);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  if (TYPE_CODE (type) == TYPE_CODE_FLT)
    {
      /* Scalar floating-point values go in V0.  */
      bfd_byte buf[V_REGISTER_SIZE];
      int len = TYPE_LENGTH (type);

      memcpy (buf, valbuf, len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
      regcache_cooked_write (regs, AARCH64_V0_REGNUM, buf);
    }
  else if (TYPE_CODE (type) == TYPE_CODE_INT
	   || TYPE_CODE (type) == TYPE_CODE_CHAR
	   || TYPE_CODE (type) == TYPE_CODE_BOOL
	   || TYPE_CODE (type) == TYPE_CODE_PTR
	   || TYPE_CODE (type) == TYPE_CODE_REF
	   || TYPE_CODE (type) == TYPE_CODE_ENUM)
    {
      if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
	{
	  /* Values of one word or less are zero/sign-extended and
	     returned in r0.  */
	  bfd_byte tmpbuf[X_REGISTER_SIZE];
	  LONGEST val = unpack_long (type, valbuf);

	  store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
	  regcache_cooked_write (regs, AARCH64_X0_REGNUM, tmpbuf);
	}
      else
	{
	  /* Integral values greater than one word are stored in
	     consecutive registers starting with r0.  This will always
	     be a multiple of the register size.  */
	  int len = TYPE_LENGTH (type);
	  int regno = AARCH64_X0_REGNUM;

	  while (len > 0)
	    {
	      regcache_cooked_write (regs, regno++, valbuf);
	      len -= X_REGISTER_SIZE;
	      valbuf += X_REGISTER_SIZE;
	    }
	}
    }
  else if (is_hfa (type))
    {
      /* One member per V register, starting at V0.  */
      int elements = TYPE_NFIELDS (type);
      struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
      int len = TYPE_LENGTH (member_type);
      int i;

      for (i = 0; i < elements; i++)
	{
	  int regno = AARCH64_V0_REGNUM + i;
	  bfd_byte tmpbuf[MAX_REGISTER_SIZE];

	  if (aarch64_debug)
	    fprintf_unfiltered (gdb_stdlog,
				"write HFA return value element %d to %s\n",
				i + 1,
				gdbarch_register_name (gdbarch, regno));

	  memcpy (tmpbuf, valbuf, len);
	  regcache_cooked_write (regs, regno, tmpbuf);
	  valbuf += len;
	}
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
	 been stored to word-aligned memory and then loaded into
	 registers with 64-bit load instruction(s).  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      bfd_byte tmpbuf[X_REGISTER_SIZE];

      while (len > 0)
	{
	  memcpy (tmpbuf, valbuf,
		  len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
	  regcache_cooked_write (regs, regno++, tmpbuf);
	  len -= X_REGISTER_SIZE;
	  valbuf += X_REGISTER_SIZE;
	}
    }
}
2223
2224 /* Implement the "return_value" gdbarch method. */
2225
2226 static enum return_value_convention
2227 aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
2228 struct type *valtype, struct regcache *regcache,
2229 gdb_byte *readbuf, const gdb_byte *writebuf)
2230 {
2231 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2232
2233 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
2234 || TYPE_CODE (valtype) == TYPE_CODE_UNION
2235 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
2236 {
2237 if (aarch64_return_in_memory (gdbarch, valtype))
2238 {
2239 if (aarch64_debug)
2240 fprintf_unfiltered (gdb_stdlog, "return value in memory\n");
2241 return RETURN_VALUE_STRUCT_CONVENTION;
2242 }
2243 }
2244
2245 if (writebuf)
2246 aarch64_store_return_value (valtype, regcache, writebuf);
2247
2248 if (readbuf)
2249 aarch64_extract_return_value (valtype, regcache, readbuf);
2250
2251 if (aarch64_debug)
2252 fprintf_unfiltered (gdb_stdlog, "return value in registers\n");
2253
2254 return RETURN_VALUE_REGISTER_CONVENTION;
2255 }
2256
2257 /* Implement the "get_longjmp_target" gdbarch method. */
2258
2259 static int
2260 aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2261 {
2262 CORE_ADDR jb_addr;
2263 gdb_byte buf[X_REGISTER_SIZE];
2264 struct gdbarch *gdbarch = get_frame_arch (frame);
2265 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2266 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2267
2268 jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
2269
2270 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
2271 X_REGISTER_SIZE))
2272 return 0;
2273
2274 *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
2275 return 1;
2276 }
2277
2278 /* Implement the "gen_return_address" gdbarch method. */
2279
2280 static void
2281 aarch64_gen_return_address (struct gdbarch *gdbarch,
2282 struct agent_expr *ax, struct axs_value *value,
2283 CORE_ADDR scope)
2284 {
2285 value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
2286 value->kind = axs_lvalue_register;
2287 value->u.reg = AARCH64_LR_REGNUM;
2288 }
2289 \f
2290
2291 /* Return the pseudo register name corresponding to register regnum. */
2292
2293 static const char *
2294 aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
2295 {
2296 static const char *const q_name[] =
2297 {
2298 "q0", "q1", "q2", "q3",
2299 "q4", "q5", "q6", "q7",
2300 "q8", "q9", "q10", "q11",
2301 "q12", "q13", "q14", "q15",
2302 "q16", "q17", "q18", "q19",
2303 "q20", "q21", "q22", "q23",
2304 "q24", "q25", "q26", "q27",
2305 "q28", "q29", "q30", "q31",
2306 };
2307
2308 static const char *const d_name[] =
2309 {
2310 "d0", "d1", "d2", "d3",
2311 "d4", "d5", "d6", "d7",
2312 "d8", "d9", "d10", "d11",
2313 "d12", "d13", "d14", "d15",
2314 "d16", "d17", "d18", "d19",
2315 "d20", "d21", "d22", "d23",
2316 "d24", "d25", "d26", "d27",
2317 "d28", "d29", "d30", "d31",
2318 };
2319
2320 static const char *const s_name[] =
2321 {
2322 "s0", "s1", "s2", "s3",
2323 "s4", "s5", "s6", "s7",
2324 "s8", "s9", "s10", "s11",
2325 "s12", "s13", "s14", "s15",
2326 "s16", "s17", "s18", "s19",
2327 "s20", "s21", "s22", "s23",
2328 "s24", "s25", "s26", "s27",
2329 "s28", "s29", "s30", "s31",
2330 };
2331
2332 static const char *const h_name[] =
2333 {
2334 "h0", "h1", "h2", "h3",
2335 "h4", "h5", "h6", "h7",
2336 "h8", "h9", "h10", "h11",
2337 "h12", "h13", "h14", "h15",
2338 "h16", "h17", "h18", "h19",
2339 "h20", "h21", "h22", "h23",
2340 "h24", "h25", "h26", "h27",
2341 "h28", "h29", "h30", "h31",
2342 };
2343
2344 static const char *const b_name[] =
2345 {
2346 "b0", "b1", "b2", "b3",
2347 "b4", "b5", "b6", "b7",
2348 "b8", "b9", "b10", "b11",
2349 "b12", "b13", "b14", "b15",
2350 "b16", "b17", "b18", "b19",
2351 "b20", "b21", "b22", "b23",
2352 "b24", "b25", "b26", "b27",
2353 "b28", "b29", "b30", "b31",
2354 };
2355
2356 regnum -= gdbarch_num_regs (gdbarch);
2357
2358 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2359 return q_name[regnum - AARCH64_Q0_REGNUM];
2360
2361 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2362 return d_name[regnum - AARCH64_D0_REGNUM];
2363
2364 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2365 return s_name[regnum - AARCH64_S0_REGNUM];
2366
2367 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2368 return h_name[regnum - AARCH64_H0_REGNUM];
2369
2370 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2371 return b_name[regnum - AARCH64_B0_REGNUM];
2372
2373 internal_error (__FILE__, __LINE__,
2374 _("aarch64_pseudo_register_name: bad register number %d"),
2375 regnum);
2376 }
2377
2378 /* Implement the "pseudo_register_type" tdesc_arch_data method. */
2379
2380 static struct type *
2381 aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
2382 {
2383 regnum -= gdbarch_num_regs (gdbarch);
2384
2385 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2386 return aarch64_vnq_type (gdbarch);
2387
2388 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2389 return aarch64_vnd_type (gdbarch);
2390
2391 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2392 return aarch64_vns_type (gdbarch);
2393
2394 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2395 return aarch64_vnh_type (gdbarch);
2396
2397 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2398 return aarch64_vnb_type (gdbarch);
2399
2400 internal_error (__FILE__, __LINE__,
2401 _("aarch64_pseudo_register_type: bad register number %d"),
2402 regnum);
2403 }
2404
2405 /* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2406
2407 static int
2408 aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
2409 struct reggroup *group)
2410 {
2411 regnum -= gdbarch_num_regs (gdbarch);
2412
2413 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2414 return group == all_reggroup || group == vector_reggroup;
2415 else if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2416 return (group == all_reggroup || group == vector_reggroup
2417 || group == float_reggroup);
2418 else if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2419 return (group == all_reggroup || group == vector_reggroup
2420 || group == float_reggroup);
2421 else if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2422 return group == all_reggroup || group == vector_reggroup;
2423 else if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2424 return group == all_reggroup || group == vector_reggroup;
2425
2426 return group == all_reggroup;
2427 }
2428
2429 /* Implement the "pseudo_register_read_value" gdbarch method. */
2430
2431 static struct value *
2432 aarch64_pseudo_read_value (struct gdbarch *gdbarch,
2433 struct regcache *regcache,
2434 int regnum)
2435 {
2436 gdb_byte reg_buf[MAX_REGISTER_SIZE];
2437 struct value *result_value;
2438 gdb_byte *buf;
2439
2440 result_value = allocate_value (register_type (gdbarch, regnum));
2441 VALUE_LVAL (result_value) = lval_register;
2442 VALUE_REGNUM (result_value) = regnum;
2443 buf = value_contents_raw (result_value);
2444
2445 regnum -= gdbarch_num_regs (gdbarch);
2446
2447 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2448 {
2449 enum register_status status;
2450 unsigned v_regnum;
2451
2452 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2453 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2454 if (status != REG_VALID)
2455 mark_value_bytes_unavailable (result_value, 0,
2456 TYPE_LENGTH (value_type (result_value)));
2457 else
2458 memcpy (buf, reg_buf, Q_REGISTER_SIZE);
2459 return result_value;
2460 }
2461
2462 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2463 {
2464 enum register_status status;
2465 unsigned v_regnum;
2466
2467 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2468 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2469 if (status != REG_VALID)
2470 mark_value_bytes_unavailable (result_value, 0,
2471 TYPE_LENGTH (value_type (result_value)));
2472 else
2473 memcpy (buf, reg_buf, D_REGISTER_SIZE);
2474 return result_value;
2475 }
2476
2477 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2478 {
2479 enum register_status status;
2480 unsigned v_regnum;
2481
2482 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2483 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2484 if (status != REG_VALID)
2485 mark_value_bytes_unavailable (result_value, 0,
2486 TYPE_LENGTH (value_type (result_value)));
2487 else
2488 memcpy (buf, reg_buf, S_REGISTER_SIZE);
2489 return result_value;
2490 }
2491
2492 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2493 {
2494 enum register_status status;
2495 unsigned v_regnum;
2496
2497 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2498 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2499 if (status != REG_VALID)
2500 mark_value_bytes_unavailable (result_value, 0,
2501 TYPE_LENGTH (value_type (result_value)));
2502 else
2503 memcpy (buf, reg_buf, H_REGISTER_SIZE);
2504 return result_value;
2505 }
2506
2507 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2508 {
2509 enum register_status status;
2510 unsigned v_regnum;
2511
2512 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2513 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2514 if (status != REG_VALID)
2515 mark_value_bytes_unavailable (result_value, 0,
2516 TYPE_LENGTH (value_type (result_value)));
2517 else
2518 memcpy (buf, reg_buf, B_REGISTER_SIZE);
2519 return result_value;
2520 }
2521
2522 gdb_assert_not_reached ("regnum out of bound");
2523 }
2524
2525 /* Implement the "pseudo_register_write" gdbarch method. */
2526
2527 static void
2528 aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2529 int regnum, const gdb_byte *buf)
2530 {
2531 gdb_byte reg_buf[MAX_REGISTER_SIZE];
2532
2533 /* Ensure the register buffer is zero, we want gdb writes of the
2534 various 'scalar' pseudo registers to behavior like architectural
2535 writes, register width bytes are written the remainder are set to
2536 zero. */
2537 memset (reg_buf, 0, sizeof (reg_buf));
2538
2539 regnum -= gdbarch_num_regs (gdbarch);
2540
2541 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2542 {
2543 /* pseudo Q registers */
2544 unsigned v_regnum;
2545
2546 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2547 memcpy (reg_buf, buf, Q_REGISTER_SIZE);
2548 regcache_raw_write (regcache, v_regnum, reg_buf);
2549 return;
2550 }
2551
2552 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2553 {
2554 /* pseudo D registers */
2555 unsigned v_regnum;
2556
2557 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2558 memcpy (reg_buf, buf, D_REGISTER_SIZE);
2559 regcache_raw_write (regcache, v_regnum, reg_buf);
2560 return;
2561 }
2562
2563 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2564 {
2565 unsigned v_regnum;
2566
2567 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2568 memcpy (reg_buf, buf, S_REGISTER_SIZE);
2569 regcache_raw_write (regcache, v_regnum, reg_buf);
2570 return;
2571 }
2572
2573 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2574 {
2575 /* pseudo H registers */
2576 unsigned v_regnum;
2577
2578 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2579 memcpy (reg_buf, buf, H_REGISTER_SIZE);
2580 regcache_raw_write (regcache, v_regnum, reg_buf);
2581 return;
2582 }
2583
2584 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2585 {
2586 /* pseudo B registers */
2587 unsigned v_regnum;
2588
2589 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2590 memcpy (reg_buf, buf, B_REGISTER_SIZE);
2591 regcache_raw_write (regcache, v_regnum, reg_buf);
2592 return;
2593 }
2594
2595 gdb_assert_not_reached ("regnum out of bound");
2596 }
2597
/* Callback function for user_reg_add.  BATON points at the register
   number of the alias (see aarch64_register_aliases).  */

static struct value *
value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
{
  const int *regnum_p = baton;
  return value_of_register (*regnum_p, frame);
}
2607 \f
2608
/* Implement the "software_single_step" gdbarch method, needed to
   single step through atomic sequences on AArch64.

   Hardware single-stepping through a LDXR/STXR loop would never make
   progress (the exclusive monitor is cleared by the trap), so when PC
   sits on a load-exclusive we instead place breakpoints after the
   closing store-exclusive and at the target of any conditional branch
   inside the sequence.  Returns 1 if breakpoints were inserted, 0 to
   fall back to normal single-stepping.  */

static int
aarch64_software_single_step (struct frame_info *frame)
{
  struct gdbarch *gdbarch = get_frame_arch (frame);
  struct address_space *aspace = get_frame_address_space (frame);
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  const int insn_size = 4;
  const int atomic_sequence_length = 16; /* Instruction sequence length.  */
  CORE_ADDR pc = get_frame_pc (frame);
  /* breaks[0]: after the sequence; breaks[1]: branch destination.  */
  CORE_ADDR breaks[2] = { -1, -1 };
  CORE_ADDR loc = pc;
  CORE_ADDR closing_insn = 0;
  uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
						byte_order_for_code);
  int index;
  int insn_count;
  int bc_insn_count = 0; /* Conditional branch instruction count.  */
  int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed).  */

  /* Look for a Load Exclusive instruction which begins the sequence.
     Mask/value select the LDXR/LDAXR encoding class — NOTE(review):
     confirm against the ARMv8 ARM A64 encoding tables.  */
  if (!decode_masked_match (insn, 0x3fc00000, 0x08400000))
    return 0;

  /* Scan forward a bounded number of instructions for the matching
     store-exclusive, collecting at most one conditional branch.  */
  for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
    {
      int32_t offset;
      unsigned cond;

      loc += insn_size;
      insn = read_memory_unsigned_integer (loc, insn_size,
					   byte_order_for_code);

      /* Check if the instruction is a conditional branch.  */
      if (decode_bcond (loc, insn, &cond, &offset))
	{
	  /* More than one conditional branch: give up and fall back.  */
	  if (bc_insn_count >= 1)
	    return 0;

	  /* It is, so we'll try to set a breakpoint at the destination.  */
	  breaks[1] = loc + offset;

	  bc_insn_count++;
	  last_breakpoint++;
	}

      /* Look for the Store Exclusive which closes the atomic sequence.  */
      if (decode_masked_match (insn, 0x3fc00000, 0x08000000))
	{
	  closing_insn = loc;
	  break;
	}
    }

  /* We didn't find a closing Store Exclusive instruction, fall back.  */
  if (!closing_insn)
    return 0;

  /* Insert breakpoint after the end of the atomic sequence.  */
  breaks[0] = loc + insn_size;

  /* Check for duplicated breakpoints, and also check that the second
     breakpoint is not within the atomic sequence.  */
  if (last_breakpoint
      && (breaks[1] == breaks[0]
	  || (breaks[1] >= pc && breaks[1] <= closing_insn)))
    last_breakpoint = 0;

  /* Insert the breakpoint at the end of the sequence, and one at the
     destination of the conditional branch, if it exists.  */
  for (index = 0; index <= last_breakpoint; index++)
    insert_single_step_breakpoint (gdbarch, aspace, breaks[index]);

  return 1;
}
2686
2687 /* Initialize the current architecture based on INFO. If possible,
2688 re-use an architecture from ARCHES, which is a list of
2689 architectures already created during this debugging session.
2690
2691 Called e.g. at program startup, when reading a core file, and when
2692 reading a binary file. */
2693
2694 static struct gdbarch *
2695 aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
2696 {
2697 struct gdbarch_tdep *tdep;
2698 struct gdbarch *gdbarch;
2699 struct gdbarch_list *best_arch;
2700 struct tdesc_arch_data *tdesc_data = NULL;
2701 const struct target_desc *tdesc = info.target_desc;
2702 int i;
2703 int have_fpa_registers = 1;
2704 int valid_p = 1;
2705 const struct tdesc_feature *feature;
2706 int num_regs = 0;
2707 int num_pseudo_regs = 0;
2708
2709 /* Ensure we always have a target descriptor. */
2710 if (!tdesc_has_registers (tdesc))
2711 tdesc = tdesc_aarch64;
2712
2713 gdb_assert (tdesc);
2714
2715 feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");
2716
2717 if (feature == NULL)
2718 return NULL;
2719
2720 tdesc_data = tdesc_data_alloc ();
2721
2722 /* Validate the descriptor provides the mandatory core R registers
2723 and allocate their numbers. */
2724 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
2725 valid_p &=
2726 tdesc_numbered_register (feature, tdesc_data, AARCH64_X0_REGNUM + i,
2727 aarch64_r_register_names[i]);
2728
2729 num_regs = AARCH64_X0_REGNUM + i;
2730
2731 /* Look for the V registers. */
2732 feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
2733 if (feature)
2734 {
2735 /* Validate the descriptor provides the mandatory V registers
2736 and allocate their numbers. */
2737 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
2738 valid_p &=
2739 tdesc_numbered_register (feature, tdesc_data, AARCH64_V0_REGNUM + i,
2740 aarch64_v_register_names[i]);
2741
2742 num_regs = AARCH64_V0_REGNUM + i;
2743
2744 num_pseudo_regs += 32; /* add the Qn scalar register pseudos */
2745 num_pseudo_regs += 32; /* add the Dn scalar register pseudos */
2746 num_pseudo_regs += 32; /* add the Sn scalar register pseudos */
2747 num_pseudo_regs += 32; /* add the Hn scalar register pseudos */
2748 num_pseudo_regs += 32; /* add the Bn scalar register pseudos */
2749 }
2750
2751 if (!valid_p)
2752 {
2753 tdesc_data_cleanup (tdesc_data);
2754 return NULL;
2755 }
2756
2757 /* AArch64 code is always little-endian. */
2758 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
2759
2760 /* If there is already a candidate, use it. */
2761 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
2762 best_arch != NULL;
2763 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
2764 {
2765 /* Found a match. */
2766 break;
2767 }
2768
2769 if (best_arch != NULL)
2770 {
2771 if (tdesc_data != NULL)
2772 tdesc_data_cleanup (tdesc_data);
2773 return best_arch->gdbarch;
2774 }
2775
2776 tdep = xcalloc (1, sizeof (struct gdbarch_tdep));
2777 gdbarch = gdbarch_alloc (&info, tdep);
2778
2779 /* This should be low enough for everything. */
2780 tdep->lowest_pc = 0x20;
2781 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
2782 tdep->jb_elt_size = 8;
2783
2784 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
2785 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
2786
2787 /* Frame handling. */
2788 set_gdbarch_dummy_id (gdbarch, aarch64_dummy_id);
2789 set_gdbarch_unwind_pc (gdbarch, aarch64_unwind_pc);
2790 set_gdbarch_unwind_sp (gdbarch, aarch64_unwind_sp);
2791
2792 /* Advance PC across function entry code. */
2793 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
2794
2795 /* The stack grows downward. */
2796 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
2797
2798 /* Breakpoint manipulation. */
2799 set_gdbarch_breakpoint_from_pc (gdbarch, aarch64_breakpoint_from_pc);
2800 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
2801 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
2802
2803 /* Information about registers, etc. */
2804 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
2805 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
2806 set_gdbarch_num_regs (gdbarch, num_regs);
2807
2808 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
2809 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
2810 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
2811 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
2812 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
2813 set_tdesc_pseudo_register_reggroup_p (gdbarch,
2814 aarch64_pseudo_register_reggroup_p);
2815
2816 /* ABI */
2817 set_gdbarch_short_bit (gdbarch, 16);
2818 set_gdbarch_int_bit (gdbarch, 32);
2819 set_gdbarch_float_bit (gdbarch, 32);
2820 set_gdbarch_double_bit (gdbarch, 64);
2821 set_gdbarch_long_double_bit (gdbarch, 128);
2822 set_gdbarch_long_bit (gdbarch, 64);
2823 set_gdbarch_long_long_bit (gdbarch, 64);
2824 set_gdbarch_ptr_bit (gdbarch, 64);
2825 set_gdbarch_char_signed (gdbarch, 0);
2826 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
2827 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
2828 set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);
2829
2830 /* Internal <-> external register number maps. */
2831 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
2832
2833 /* Returning results. */
2834 set_gdbarch_return_value (gdbarch, aarch64_return_value);
2835
2836 /* Disassembly. */
2837 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
2838
2839 /* Virtual tables. */
2840 set_gdbarch_vbit_in_delta (gdbarch, 1);
2841
2842 /* Hook in the ABI-specific overrides, if they have been registered. */
2843 info.target_desc = tdesc;
2844 info.tdep_info = (void *) tdesc_data;
2845 gdbarch_init_osabi (info, gdbarch);
2846
2847 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
2848
2849 /* Add some default predicates. */
2850 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
2851 dwarf2_append_unwinders (gdbarch);
2852 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
2853
2854 frame_base_set_default (gdbarch, &aarch64_normal_base);
2855
2856 /* Now we have tuned the configuration, set a few final things,
2857 based on what the OS ABI has told us. */
2858
2859 if (tdep->jb_pc >= 0)
2860 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
2861
2862 set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);
2863
2864 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
2865
2866 /* Add standard register aliases. */
2867 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
2868 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
2869 value_of_aarch64_user_reg,
2870 &aarch64_register_aliases[i].regnum);
2871
2872 return gdbarch;
2873 }
2874
2875 static void
2876 aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
2877 {
2878 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2879
2880 if (tdep == NULL)
2881 return;
2882
2883 fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
2884 paddress (gdbarch, tdep->lowest_pc));
2885 }
2886
/* Suppress warning from -Wmissing-prototypes.  */
extern initialize_file_ftype _initialize_aarch64_tdep;

/* Module initializer: register the AArch64 gdbarch constructor with
   the BFD architecture table, load the built-in target description,
   and install the "set/show debug aarch64" maintenance commands.  */

void
_initialize_aarch64_tdep (void)
{
  gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
		    aarch64_dump_tdep);

  initialize_tdesc_aarch64 ();

  /* Debug this file's internals.  */
  add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
Set AArch64 debugging."), _("\
Show AArch64 debugging."), _("\
When on, AArch64 specific debugging is enabled."),
			    NULL,
			    show_aarch64_debug,
			    &setdebuglist, &showdebuglist);
}
2907
/* AArch64 process record-replay related structures, defines etc.  */

/* Bit-field extraction helpers used by the record handlers below.
   submask(x): mask of bits [0, x] inclusive.  NOTE(review): only safe
   for x < 63 on LP64 (1L shift); the decoders below use small widths.
   bit(obj,st): bit ST of OBJ.
   bits(obj,st,fn): bits [ST, FN] of OBJ, right-justified.  */
#define submask(x) ((1L << ((x) + 1)) - 1)
#define bit(obj,st) (((obj) >> (st)) & 1)
#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))

/* Allocate REGS (an array of LENGTH uint32_t register numbers) and
   fill it from RECORD_BUF.  No-op when LENGTH is zero.  Caller owns
   the allocation.  */
#define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
        do  \
          { \
            unsigned int reg_len = LENGTH; \
            if (reg_len) \
              { \
                REGS = XNEWVEC (uint32_t, reg_len); \
                memcpy(&REGS[0], &RECORD_BUF[0], sizeof(uint32_t)*LENGTH); \
              } \
          } \
        while (0)

/* Allocate MEMS (an array of LENGTH struct aarch64_mem_r) and fill it
   from RECORD_BUF.  No-op when LENGTH is zero.  Caller owns the
   allocation.  NOTE(review): the copy targets &MEMS->len, which is
   the first member of *MEMS and hence the same address as MEMS.  */
#define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
        do  \
          { \
            unsigned int mem_len = LENGTH; \
            if (mem_len) \
            { \
              MEMS =  XNEWVEC (struct aarch64_mem_r, mem_len);  \
              memcpy(&MEMS->len, &RECORD_BUF[0], \
                     sizeof(struct aarch64_mem_r) * LENGTH); \
            } \
          } \
          while (0)
2938
/* AArch64 record/replay structures and enumerations.  */

/* A single recorded memory access: the record module uses this to
   undo/redo memory effects during replay.  */
struct aarch64_mem_r
{
  uint64_t len;    /* Record length.  */
  uint64_t addr;   /* Memory address.  */
};

/* Result codes returned by the per-instruction record handlers.  */
enum aarch64_record_result
{
  AARCH64_RECORD_SUCCESS,
  AARCH64_RECORD_FAILURE,
  AARCH64_RECORD_UNSUPPORTED,
  AARCH64_RECORD_UNKNOWN
};

/* Decode state for one instruction being recorded: the instruction
   itself plus the lists of registers and memory locations its
   execution will modify.  */
typedef struct insn_decode_record_t
{
  struct gdbarch *gdbarch;
  struct regcache *regcache;
  CORE_ADDR this_addr;                 /* Address of insn to be recorded.  */
  uint32_t aarch64_insn;               /* Insn to be recorded.  */
  uint32_t mem_rec_count;              /* Count of memory records.  */
  uint32_t reg_rec_count;              /* Count of register records.  */
  uint32_t *aarch64_regs;              /* Registers to be recorded.  */
  struct aarch64_mem_r *aarch64_mems;  /* Memory locations to be recorded.  */
} insn_decode_record;
2966
/* Record handler for data processing - register instructions.

   Decodes which registers the instruction will write (the destination
   Rd, plus CPSR for flag-setting variants) and queues them via
   REG_ALLOC.  Returns an aarch64_record_result code.  The bit-field
   comparisons follow the A64 top-level encoding: bit 28 splits the
   two data-processing (register) sub-groups.  */

static unsigned int
aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
{
  uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
  uint32_t record_buf[4];

  reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);

  if (!bit (aarch64_insn_r->aarch64_insn, 28))
    {
      uint8_t setflags;

      /* Logical (shifted register).  */
      if (insn_bits24_27 == 0x0a)
	setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
      /* Add/subtract.  */
      else if (insn_bits24_27 == 0x0b)
	setflags = bit (aarch64_insn_r->aarch64_insn, 29);
      else
	return AARCH64_RECORD_UNKNOWN;

      /* Destination register is always modified; CPSR too when the
	 S-bit variant is used.  */
      record_buf[0] = reg_rd;
      aarch64_insn_r->reg_rec_count = 1;
      if (setflags)
	record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
    }
  else
    {
      if (insn_bits24_27 == 0x0b)
	{
	  /* Data-processing (3 source).  */
	  record_buf[0] = reg_rd;
	  aarch64_insn_r->reg_rec_count = 1;
	}
      else if (insn_bits24_27 == 0x0a)
	{
	  if (insn_bits21_23 == 0x00)
	    {
	      /* Add/subtract (with carry).  */
	      record_buf[0] = reg_rd;
	      aarch64_insn_r->reg_rec_count = 1;
	      if (bit (aarch64_insn_r->aarch64_insn, 29))
		{
		  record_buf[1] = AARCH64_CPSR_REGNUM;
		  aarch64_insn_r->reg_rec_count = 2;
		}
	    }
	  else if (insn_bits21_23 == 0x02)
	    {
	      /* Conditional compare (register) and conditional compare
		 (immediate) instructions.  These write only the flags.  */
	      record_buf[0] = AARCH64_CPSR_REGNUM;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	  else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
	    {
	      /* CConditional select.  */
	      /* Data-processing (2 source).  */
	      /* Data-processing (1 source).  */
	      record_buf[0] = reg_rd;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	  else
	    return AARCH64_RECORD_UNKNOWN;
	}
    }

  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}
3042
/* Record handler for data processing - immediate instructions.

   Decodes which registers the instruction will write (destination Rd,
   plus CPSR for flag-setting add/subtract and logical immediates) and
   queues them via REG_ALLOC.  Returns an aarch64_record_result
   code.  */

static unsigned int
aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
{
  uint8_t reg_rd, insn_bit28, insn_bit23, insn_bits24_27, setflags;
  uint32_t record_buf[4];

  reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  insn_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
  insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);

  if (insn_bits24_27 == 0x00                     /* PC rel addressing.  */
     || insn_bits24_27 == 0x03                   /* Bitfield and Extract.  */
     || (insn_bits24_27 == 0x02 && insn_bit23))  /* Move wide (immediate).  */
    {
      /* These only write the destination register.  */
      record_buf[0] = reg_rd;
      aarch64_insn_r->reg_rec_count = 1;
    }
  else if (insn_bits24_27 == 0x01)
    {
      /* Add/Subtract (immediate).  */
      setflags = bit (aarch64_insn_r->aarch64_insn, 29);
      record_buf[0] = reg_rd;
      aarch64_insn_r->reg_rec_count = 1;
      if (setflags)
	record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
    }
  else if (insn_bits24_27 == 0x02 && !insn_bit23)
    {
      /* Logical (immediate).  Only the ANDS form (opc == 0b11) sets
	 flags.  */
      setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
      record_buf[0] = reg_rd;
      aarch64_insn_r->reg_rec_count = 1;
      if (setflags)
	record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
    }
  else
    return AARCH64_RECORD_UNKNOWN;

  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}
3088
/* Record handler for branch, exception generation and system instructions.

   Records PC (and LR for branch-and-link forms) for branches, CPSR or
   Rt for system instructions, and delegates SVC instructions to the
   OS-ABI syscall record hook.  Returns an aarch64_record_result
   code.  */

static unsigned int
aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
  uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
  uint32_t record_buf[4];

  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
  insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);

  if (insn_bits28_31 == 0x0d)
    {
      /* Exception generation instructions. */
      if (insn_bits24_27 == 0x04)
	{
	  /* Only SVC is recordable; read the syscall number from
	     register 8 (x8 — presumably per the Linux AAPCS64 syscall
	     convention; confirm) and hand off to the OS-ABI hook.  */
	  if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
	      && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
	      && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
	    {
	      ULONGEST svc_number;

	      regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
					  &svc_number);
	      return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
						   svc_number);
	    }
	  else
	    return AARCH64_RECORD_UNSUPPORTED;
	}
      /* System instructions. */
      else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
	{
	  uint32_t reg_rt, reg_crn;

	  reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
	  reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);

	  /* Record rt in case of sysl and mrs instructions.  */
	  if (bit (aarch64_insn_r->aarch64_insn, 21))
	    {
	      record_buf[0] = reg_rt;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	  /* Record cpsr for hint and msr(immediate) instructions.  */
	  else if (reg_crn == 0x02 || reg_crn == 0x04)
	    {
	      record_buf[0] = AARCH64_CPSR_REGNUM;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	}
      /* Unconditional branch (register).  */
      else if((insn_bits24_27 & 0x0e) == 0x06)
	{
	  record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
	  /* BLR also writes the link register.  */
	  if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
	    record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
	}
      else
	return AARCH64_RECORD_UNKNOWN;
    }
  /* Unconditional branch (immediate).  */
  else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
    {
      record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
      /* BL (bit 31 set) also writes the link register.  */
      if (bit (aarch64_insn_r->aarch64_insn, 31))
	record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
    }
  else
    /* Compare & branch (immediate), Test & branch (immediate) and
       Conditional branch (immediate).  */
    record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;

  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}
3168
3169 /* Record handler for advanced SIMD load and store instructions. */
3170
3171 static unsigned int
3172 aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
3173 {
3174 CORE_ADDR address;
3175 uint64_t addr_offset = 0;
3176 uint32_t record_buf[24];
3177 uint64_t record_buf_mem[24];
3178 uint32_t reg_rn, reg_rt;
3179 uint32_t reg_index = 0, mem_index = 0;
3180 uint8_t opcode_bits, size_bits;
3181
3182 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3183 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3184 size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3185 opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3186 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);
3187
3188 if (record_debug)
3189 {
3190 fprintf_unfiltered (gdb_stdlog,
3191 "Process record: Advanced SIMD load/store\n");
3192 }
3193
3194 /* Load/store single structure. */
3195 if (bit (aarch64_insn_r->aarch64_insn, 24))
3196 {
3197 uint8_t sindex, scale, selem, esize, replicate = 0;
3198 scale = opcode_bits >> 2;
3199 selem = ((opcode_bits & 0x02) |
3200 bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
3201 switch (scale)
3202 {
3203 case 1:
3204 if (size_bits & 0x01)
3205 return AARCH64_RECORD_UNKNOWN;
3206 break;
3207 case 2:
3208 if ((size_bits >> 1) & 0x01)
3209 return AARCH64_RECORD_UNKNOWN;
3210 if (size_bits & 0x01)
3211 {
3212 if (!((opcode_bits >> 1) & 0x01))
3213 scale = 3;
3214 else
3215 return AARCH64_RECORD_UNKNOWN;
3216 }
3217 break;
3218 case 3:
3219 if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
3220 {
3221 scale = size_bits;
3222 replicate = 1;
3223 break;
3224 }
3225 else
3226 return AARCH64_RECORD_UNKNOWN;
3227 default:
3228 break;
3229 }
3230 esize = 8 << scale;
3231 if (replicate)
3232 for (sindex = 0; sindex < selem; sindex++)
3233 {
3234 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3235 reg_rt = (reg_rt + 1) % 32;
3236 }
3237 else
3238 {
3239 for (sindex = 0; sindex < selem; sindex++)
3240 if (bit (aarch64_insn_r->aarch64_insn, 22))
3241 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3242 else
3243 {
3244 record_buf_mem[mem_index++] = esize / 8;
3245 record_buf_mem[mem_index++] = address + addr_offset;
3246 }
3247 addr_offset = addr_offset + (esize / 8);
3248 reg_rt = (reg_rt + 1) % 32;
3249 }
3250 }
3251 /* Load/store multiple structure. */
3252 else
3253 {
3254 uint8_t selem, esize, rpt, elements;
3255 uint8_t eindex, rindex;
3256
3257 esize = 8 << size_bits;
3258 if (bit (aarch64_insn_r->aarch64_insn, 30))
3259 elements = 128 / esize;
3260 else
3261 elements = 64 / esize;
3262
3263 switch (opcode_bits)
3264 {
3265 /*LD/ST4 (4 Registers). */
3266 case 0:
3267 rpt = 1;
3268 selem = 4;
3269 break;
3270 /*LD/ST1 (4 Registers). */
3271 case 2:
3272 rpt = 4;
3273 selem = 1;
3274 break;
3275 /*LD/ST3 (3 Registers). */
3276 case 4:
3277 rpt = 1;
3278 selem = 3;
3279 break;
3280 /*LD/ST1 (3 Registers). */
3281 case 6:
3282 rpt = 3;
3283 selem = 1;
3284 break;
3285 /*LD/ST1 (1 Register). */
3286 case 7:
3287 rpt = 1;
3288 selem = 1;
3289 break;
3290 /*LD/ST2 (2 Registers). */
3291 case 8:
3292 rpt = 1;
3293 selem = 2;
3294 break;
3295 /*LD/ST1 (2 Registers). */
3296 case 10:
3297 rpt = 2;
3298 selem = 1;
3299 break;
3300 default:
3301 return AARCH64_RECORD_UNSUPPORTED;
3302 break;
3303 }
3304 for (rindex = 0; rindex < rpt; rindex++)
3305 for (eindex = 0; eindex < elements; eindex++)
3306 {
3307 uint8_t reg_tt, sindex;
3308 reg_tt = (reg_rt + rindex) % 32;
3309 for (sindex = 0; sindex < selem; sindex++)
3310 {
3311 if (bit (aarch64_insn_r->aarch64_insn, 22))
3312 record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
3313 else
3314 {
3315 record_buf_mem[mem_index++] = esize / 8;
3316 record_buf_mem[mem_index++] = address + addr_offset;
3317 }
3318 addr_offset = addr_offset + (esize / 8);
3319 reg_tt = (reg_tt + 1) % 32;
3320 }
3321 }
3322 }
3323
3324 if (bit (aarch64_insn_r->aarch64_insn, 23))
3325 record_buf[reg_index++] = reg_rn;
3326
3327 aarch64_insn_r->reg_rec_count = reg_index;
3328 aarch64_insn_r->mem_rec_count = mem_index / 2;
3329 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3330 record_buf_mem);
3331 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3332 record_buf);
3333 return AARCH64_RECORD_SUCCESS;
3334 }
3335
/* Record handler for load and store instructions.  Classifies the
   instruction into one of the load/store encoding groups, then
   records either the destination registers (loads) or the
   (length, address) memory regions (stores) the instruction will
   modify, plus the base register when the addressing mode writes it
   back.  Advanced SIMD forms are delegated to
   aarch64_record_asimd_load_store.  */

static unsigned int
aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
{
  uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
  uint8_t insn_bit23, insn_bit21;
  uint8_t opc, size_bits, ld_flag, vector_flag;
  uint32_t reg_rn, reg_rt, reg_rt2;
  uint64_t datasize, offset;
  uint32_t record_buf[8];
  /* Memory records are (length, address) pairs: two slots each.  */
  uint64_t record_buf_mem[8];
  CORE_ADDR address;

  /* Fields shared by all the load/store encoding groups.  */
  insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
  insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
  insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
  ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
  vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
  reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
  reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
  size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);

  /* Load/store exclusive.  */
  if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
    {
      if (record_debug)
        {
          fprintf_unfiltered (gdb_stdlog,
                              "Process record: load/store exclusive\n");
        }

      if (ld_flag)
        {
          record_buf[0] = reg_rt;
          aarch64_insn_r->reg_rec_count = 1;
          /* The pair form also loads RT2.  */
          if (insn_bit21)
            {
              record_buf[1] = reg_rt2;
              aarch64_insn_r->reg_rec_count = 2;
            }
        }
      else
        {
          /* Store: the pair form writes twice the element size.  */
          if (insn_bit21)
            datasize = (8 << size_bits) * 2;
          else
            datasize = (8 << size_bits);
          regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
                                      &address);
          record_buf_mem[0] = datasize / 8;
          record_buf_mem[1] = address;
          aarch64_insn_r->mem_rec_count = 1;
          if (!insn_bit23)
            {
              /* Save register rs (the exclusive-store status
                 register, written by the store-exclusive forms).  */
              record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
              aarch64_insn_r->reg_rec_count = 1;
            }
        }
    }
  /* Load register (literal) instructions decoding.  */
  else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
    {
      if (record_debug)
        {
          fprintf_unfiltered (gdb_stdlog,
                              "Process record: load register (literal)\n");
        }
      /* Always a load; only the destination register changes.  */
      if (vector_flag)
        record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
      else
        record_buf[0] = reg_rt;
      aarch64_insn_r->reg_rec_count = 1;
    }
  /* All types of load/store pair instructions decoding.  */
  else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
    {
      if (record_debug)
        {
          fprintf_unfiltered (gdb_stdlog,
                              "Process record: load/store pair\n");
        }

      if (ld_flag)
        {
          /* Load pair: both destination registers change.  */
          if (vector_flag)
            {
              record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
              record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
            }
          else
            {
              record_buf[0] = reg_rt;
              record_buf[1] = reg_rt2;
            }
          aarch64_insn_r->reg_rec_count = 2;
        }
      else
        {
          /* Store pair: compute the two target addresses.  */
          uint16_t imm7_off;
          imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
          if (!vector_flag)
            size_bits = size_bits >> 1;
          datasize = 8 << (2 + size_bits);
          /* Sign-extend the 7-bit immediate (negative when bit 6 is
             set) and scale it by the access size.  */
          offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
          offset = offset << (2 + size_bits);
          regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
                                      &address);
          /* For this variant the store address is the unmodified
             base (post-index form -- NOTE(review): confirm against
             the ARM ARM encoding tables).  */
          if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
            {
              if (imm7_off & 0x40)
                address = address - offset;
              else
                address = address + offset;
            }

          /* Two memory records, one per register of the pair.  */
          record_buf_mem[0] = datasize / 8;
          record_buf_mem[1] = address;
          record_buf_mem[2] = datasize / 8;
          record_buf_mem[3] = address + (datasize / 8);
          aarch64_insn_r->mem_rec_count = 2;
        }
      /* Writeback forms also modify the base register RN.  */
      if (bit (aarch64_insn_r->aarch64_insn, 23))
        record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
    }
  /* Load/store register (unsigned immediate) instructions.  */
  else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
    {
      /* opc<0> selects store (0) / load (1) when opc<1> is clear;
         when opc<1> is set the encoding is a (sign-extending) load
         unless size is 0b11, which is not recorded here
         (NOTE(review): likely PRFM -- confirm).  */
      opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
      if (!(opc >> 1))
        if (opc & 0x01)
          ld_flag = 0x01;
        else
          ld_flag = 0x0;
      else
        if (size_bits != 0x03)
          ld_flag = 0x01;
        else
          return AARCH64_RECORD_UNKNOWN;

      if (record_debug)
        {
          fprintf_unfiltered (gdb_stdlog,
                              "Process record: load/store (unsigned immediate):"
                              " size %x V %d opc %x\n", size_bits, vector_flag,
                              opc);
        }

      if (!ld_flag)
        {
          /* Store: record the written memory region.  The unsigned
             immediate offset is scaled by the access size.  */
          offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
          datasize = 8 << size_bits;
          regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
                                      &address);
          offset = offset << size_bits;
          address = address + offset;

          record_buf_mem[0] = datasize >> 3;
          record_buf_mem[1] = address;
          aarch64_insn_r->mem_rec_count = 1;
        }
      else
        {
          /* Load: record the destination register.  */
          if (vector_flag)
            record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
          else
            record_buf[0] = reg_rt;
          aarch64_insn_r->reg_rec_count = 1;
        }
    }
  /* Load/store register (register offset) instructions.  */
  else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
           && insn_bits10_11 == 0x02 && insn_bit21)
    {
      if (record_debug)
        {
          fprintf_unfiltered (gdb_stdlog,
                              "Process record: load/store (register offset)\n");
        }
      /* Same opc decode as the unsigned-immediate group above.  */
      opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
      if (!(opc >> 1))
        if (opc & 0x01)
          ld_flag = 0x01;
        else
          ld_flag = 0x0;
      else
        if (size_bits != 0x03)
          ld_flag = 0x01;
        else
          return AARCH64_RECORD_UNKNOWN;

      if (!ld_flag)
        {
          /* Store: offset comes from register RM, optionally scaled
             (shift flag in bit 12).  */
          uint64_t reg_rm_val;
          regcache_raw_read_unsigned (aarch64_insn_r->regcache,
                     bits (aarch64_insn_r->aarch64_insn, 16, 20), &reg_rm_val);
          if (bit (aarch64_insn_r->aarch64_insn, 12))
            offset = reg_rm_val << size_bits;
          else
            offset = reg_rm_val;
          datasize = 8 << size_bits;
          regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
                                      &address);
          address = address + offset;
          record_buf_mem[0] = datasize >> 3;
          record_buf_mem[1] = address;
          aarch64_insn_r->mem_rec_count = 1;
        }
      else
        {
          if (vector_flag)
            record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
          else
            record_buf[0] = reg_rt;
          aarch64_insn_r->reg_rec_count = 1;
        }
    }
  /* Load/store register (immediate and unprivileged) instructions.  */
  else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
           && !insn_bit21)
    {
      if (record_debug)
        {
          fprintf_unfiltered (gdb_stdlog,
                              "Process record: load/store (immediate and unprivileged)\n");
        }
      /* Same opc decode as the unsigned-immediate group above.  */
      opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
      if (!(opc >> 1))
        if (opc & 0x01)
          ld_flag = 0x01;
        else
          ld_flag = 0x0;
      else
        if (size_bits != 0x03)
          ld_flag = 0x01;
        else
          return AARCH64_RECORD_UNKNOWN;

      if (!ld_flag)
        {
          /* Store: sign-extend the 9-bit immediate offset.  */
          uint16_t imm9_off;
          imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
          offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
          datasize = 8 << size_bits;
          regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
                                      &address);
          /* For the post-indexed variant (bits 10-11 == 01) the store
             goes to the unmodified base address.  */
          if (insn_bits10_11 != 0x01)
            {
              if (imm9_off & 0x0100)
                address = address - offset;
              else
                address = address + offset;
            }
          record_buf_mem[0] = datasize >> 3;
          record_buf_mem[1] = address;
          aarch64_insn_r->mem_rec_count = 1;
        }
      else
        {
          if (vector_flag)
            record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
          else
            record_buf[0] = reg_rt;
          aarch64_insn_r->reg_rec_count = 1;
        }
      /* Pre/post-indexed variants also write back the base register.  */
      if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
        record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
    }
  /* Advanced SIMD load/store instructions.  */
  else
    return aarch64_record_asimd_load_store (aarch64_insn_r);

  MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
             record_buf_mem);
  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
             record_buf);
  return AARCH64_RECORD_SUCCESS;
}
3618
3619 /* Record handler for data processing SIMD and floating point instructions. */
3620
3621 static unsigned int
3622 aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
3623 {
3624 uint8_t insn_bit21, opcode, rmode, reg_rd;
3625 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
3626 uint8_t insn_bits11_14;
3627 uint32_t record_buf[2];
3628
3629 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3630 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3631 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3632 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3633 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
3634 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
3635 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
3636 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3637 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3638
3639 if (record_debug)
3640 {
3641 fprintf_unfiltered (gdb_stdlog,
3642 "Process record: data processing SIMD/FP: ");
3643 }
3644
3645 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
3646 {
3647 /* Floating point - fixed point conversion instructions. */
3648 if (!insn_bit21)
3649 {
3650 if (record_debug)
3651 fprintf_unfiltered (gdb_stdlog, "FP - fixed point conversion");
3652
3653 if ((opcode >> 1) == 0x0 && rmode == 0x03)
3654 record_buf[0] = reg_rd;
3655 else
3656 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3657 }
3658 /* Floating point - conditional compare instructions. */
3659 else if (insn_bits10_11 == 0x01)
3660 {
3661 if (record_debug)
3662 fprintf_unfiltered (gdb_stdlog, "FP - conditional compare");
3663
3664 record_buf[0] = AARCH64_CPSR_REGNUM;
3665 }
3666 /* Floating point - data processing (2-source) and
3667 conditional select instructions. */
3668 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
3669 {
3670 if (record_debug)
3671 fprintf_unfiltered (gdb_stdlog, "FP - DP (2-source)");
3672
3673 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3674 }
3675 else if (insn_bits10_11 == 0x00)
3676 {
3677 /* Floating point - immediate instructions. */
3678 if ((insn_bits12_15 & 0x01) == 0x01
3679 || (insn_bits12_15 & 0x07) == 0x04)
3680 {
3681 if (record_debug)
3682 fprintf_unfiltered (gdb_stdlog, "FP - immediate");
3683 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3684 }
3685 /* Floating point - compare instructions. */
3686 else if ((insn_bits12_15 & 0x03) == 0x02)
3687 {
3688 if (record_debug)
3689 fprintf_unfiltered (gdb_stdlog, "FP - immediate");
3690 record_buf[0] = AARCH64_CPSR_REGNUM;
3691 }
3692 /* Floating point - integer conversions instructions. */
3693 else if (insn_bits12_15 == 0x00)
3694 {
3695 /* Convert float to integer instruction. */
3696 if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
3697 {
3698 if (record_debug)
3699 fprintf_unfiltered (gdb_stdlog, "float to int conversion");
3700
3701 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3702 }
3703 /* Convert integer to float instruction. */
3704 else if ((opcode >> 1) == 0x01 && !rmode)
3705 {
3706 if (record_debug)
3707 fprintf_unfiltered (gdb_stdlog, "int to float conversion");
3708
3709 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3710 }
3711 /* Move float to integer instruction. */
3712 else if ((opcode >> 1) == 0x03)
3713 {
3714 if (record_debug)
3715 fprintf_unfiltered (gdb_stdlog, "move float to int");
3716
3717 if (!(opcode & 0x01))
3718 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3719 else
3720 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3721 }
3722 else
3723 return AARCH64_RECORD_UNKNOWN;
3724 }
3725 else
3726 return AARCH64_RECORD_UNKNOWN;
3727 }
3728 else
3729 return AARCH64_RECORD_UNKNOWN;
3730 }
3731 else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
3732 {
3733 if (record_debug)
3734 fprintf_unfiltered (gdb_stdlog, "SIMD copy");
3735
3736 /* Advanced SIMD copy instructions. */
3737 if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
3738 && !bit (aarch64_insn_r->aarch64_insn, 15)
3739 && bit (aarch64_insn_r->aarch64_insn, 10))
3740 {
3741 if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
3742 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3743 else
3744 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3745 }
3746 else
3747 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3748 }
3749 /* All remaining floating point or advanced SIMD instructions. */
3750 else
3751 {
3752 if (record_debug)
3753 fprintf_unfiltered (gdb_stdlog, "all remain");
3754
3755 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3756 }
3757
3758 if (record_debug)
3759 fprintf_unfiltered (gdb_stdlog, "\n");
3760
3761 aarch64_insn_r->reg_rec_count++;
3762 gdb_assert (aarch64_insn_r->reg_rec_count == 1);
3763 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3764 record_buf);
3765 return AARCH64_RECORD_SUCCESS;
3766 }
3767
3768 /* Decodes insns type and invokes its record handler. */
3769
3770 static unsigned int
3771 aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
3772 {
3773 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
3774
3775 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
3776 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
3777 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
3778 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
3779
3780 /* Data processing - immediate instructions. */
3781 if (!ins_bit26 && !ins_bit27 && ins_bit28)
3782 return aarch64_record_data_proc_imm (aarch64_insn_r);
3783
3784 /* Branch, exception generation and system instructions. */
3785 if (ins_bit26 && !ins_bit27 && ins_bit28)
3786 return aarch64_record_branch_except_sys (aarch64_insn_r);
3787
3788 /* Load and store instructions. */
3789 if (!ins_bit25 && ins_bit27)
3790 return aarch64_record_load_store (aarch64_insn_r);
3791
3792 /* Data processing - register instructions. */
3793 if (ins_bit25 && !ins_bit26 && ins_bit27)
3794 return aarch64_record_data_proc_reg (aarch64_insn_r);
3795
3796 /* Data processing - SIMD and floating point instructions. */
3797 if (ins_bit25 && ins_bit26 && ins_bit27)
3798 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
3799
3800 return AARCH64_RECORD_UNSUPPORTED;
3801 }
3802
3803 /* Cleans up local record registers and memory allocations. */
3804
3805 static void
3806 deallocate_reg_mem (insn_decode_record *record)
3807 {
3808 xfree (record->aarch64_regs);
3809 xfree (record->aarch64_mems);
3810 }
3811
3812 /* Parse the current instruction and record the values of the registers and
3813 memory that will be changed in current instruction to record_arch_list
3814 return -1 if something is wrong. */
3815
3816 int
3817 aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
3818 CORE_ADDR insn_addr)
3819 {
3820 uint32_t rec_no = 0;
3821 uint8_t insn_size = 4;
3822 uint32_t ret = 0;
3823 ULONGEST t_bit = 0, insn_id = 0;
3824 gdb_byte buf[insn_size];
3825 insn_decode_record aarch64_record;
3826
3827 memset (&buf[0], 0, insn_size);
3828 memset (&aarch64_record, 0, sizeof (insn_decode_record));
3829 target_read_memory (insn_addr, &buf[0], insn_size);
3830 aarch64_record.aarch64_insn
3831 = (uint32_t) extract_unsigned_integer (&buf[0],
3832 insn_size,
3833 gdbarch_byte_order (gdbarch));
3834 aarch64_record.regcache = regcache;
3835 aarch64_record.this_addr = insn_addr;
3836 aarch64_record.gdbarch = gdbarch;
3837
3838 ret = aarch64_record_decode_insn_handler (&aarch64_record);
3839 if (ret == AARCH64_RECORD_UNSUPPORTED)
3840 {
3841 printf_unfiltered (_("Process record does not support instruction "
3842 "0x%0x at address %s.\n"),
3843 aarch64_record.aarch64_insn,
3844 paddress (gdbarch, insn_addr));
3845 ret = -1;
3846 }
3847
3848 if (0 == ret)
3849 {
3850 /* Record registers. */
3851 record_full_arch_list_add_reg (aarch64_record.regcache,
3852 AARCH64_PC_REGNUM);
3853 /* Always record register CPSR. */
3854 record_full_arch_list_add_reg (aarch64_record.regcache,
3855 AARCH64_CPSR_REGNUM);
3856 if (aarch64_record.aarch64_regs)
3857 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
3858 if (record_full_arch_list_add_reg (aarch64_record.regcache,
3859 aarch64_record.aarch64_regs[rec_no]))
3860 ret = -1;
3861
3862 /* Record memories. */
3863 if (aarch64_record.aarch64_mems)
3864 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
3865 if (record_full_arch_list_add_mem
3866 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
3867 aarch64_record.aarch64_mems[rec_no].len))
3868 ret = -1;
3869
3870 if (record_full_arch_list_add_end ())
3871 ret = -1;
3872 }
3873
3874 deallocate_reg_mem (&aarch64_record);
3875 return ret;
3876 }
This page took 0.108433 seconds and 4 git commands to generate.