[AArch64] Use int64_t for address offset
gdb/aarch64-tdep.c
/* Common target dependent code for GDB on AArch64 systems.

   Copyright (C) 2009-2016 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"

#include "frame.h"
#include "inferior.h"
#include "gdbcmd.h"
#include "gdbcore.h"
#include "dis-asm.h"
#include "regcache.h"
#include "reggroups.h"
#include "doublest.h"
#include "value.h"
#include "arch-utils.h"
#include "osabi.h"
#include "frame-unwind.h"
#include "frame-base.h"
#include "trad-frame.h"
#include "objfiles.h"
#include "dwarf2-frame.h"
#include "gdbtypes.h"
#include "prologue-value.h"
#include "target-descriptions.h"
#include "user-regs.h"
#include "language.h"
#include "infcall.h"
#include "ax.h"
#include "ax-gdb.h"

#include "aarch64-tdep.h"

#include "elf-bfd.h"
#include "elf/aarch64.h"

#include "vec.h"

#include "record.h"
#include "record-full.h"

#include "features/aarch64.c"

#include "arch/aarch64-insn.h"

#include "opcode/aarch64.h"

#define submask(x) ((1L << ((x) + 1)) - 1)
#define bit(obj,st) (((obj) >> (st)) & 1)
#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
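
/* For example, bits (insn, 5, 9) extracts the five-bit field occupying
   bits 9..5 of INSN: (insn >> 5) & submask (4) == (insn >> 5) & 0x1f.  */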

/* Pseudo register base numbers.  */
#define AARCH64_Q0_REGNUM 0
#define AARCH64_D0_REGNUM (AARCH64_Q0_REGNUM + 32)
#define AARCH64_S0_REGNUM (AARCH64_D0_REGNUM + 32)
#define AARCH64_H0_REGNUM (AARCH64_S0_REGNUM + 32)
#define AARCH64_B0_REGNUM (AARCH64_H0_REGNUM + 32)
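
/* These bases are relative to the end of the raw register set: the
   pseudo-register functions below first subtract gdbarch_num_regs
   (gdbarch) from REGNUM, so e.g. q0's user-visible register number is
   gdbarch_num_regs (gdbarch) + AARCH64_Q0_REGNUM.  */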

/* The standard register names, and all the valid aliases for them.  */
static const struct
{
  const char *const name;
  int regnum;
} aarch64_register_aliases[] =
{
  /* 64-bit register names.  */
  {"fp", AARCH64_FP_REGNUM},
  {"lr", AARCH64_LR_REGNUM},
  {"sp", AARCH64_SP_REGNUM},

  /* 32-bit register names.  */
  {"w0", AARCH64_X0_REGNUM + 0},
  {"w1", AARCH64_X0_REGNUM + 1},
  {"w2", AARCH64_X0_REGNUM + 2},
  {"w3", AARCH64_X0_REGNUM + 3},
  {"w4", AARCH64_X0_REGNUM + 4},
  {"w5", AARCH64_X0_REGNUM + 5},
  {"w6", AARCH64_X0_REGNUM + 6},
  {"w7", AARCH64_X0_REGNUM + 7},
  {"w8", AARCH64_X0_REGNUM + 8},
  {"w9", AARCH64_X0_REGNUM + 9},
  {"w10", AARCH64_X0_REGNUM + 10},
  {"w11", AARCH64_X0_REGNUM + 11},
  {"w12", AARCH64_X0_REGNUM + 12},
  {"w13", AARCH64_X0_REGNUM + 13},
  {"w14", AARCH64_X0_REGNUM + 14},
  {"w15", AARCH64_X0_REGNUM + 15},
  {"w16", AARCH64_X0_REGNUM + 16},
  {"w17", AARCH64_X0_REGNUM + 17},
  {"w18", AARCH64_X0_REGNUM + 18},
  {"w19", AARCH64_X0_REGNUM + 19},
  {"w20", AARCH64_X0_REGNUM + 20},
  {"w21", AARCH64_X0_REGNUM + 21},
  {"w22", AARCH64_X0_REGNUM + 22},
  {"w23", AARCH64_X0_REGNUM + 23},
  {"w24", AARCH64_X0_REGNUM + 24},
  {"w25", AARCH64_X0_REGNUM + 25},
  {"w26", AARCH64_X0_REGNUM + 26},
  {"w27", AARCH64_X0_REGNUM + 27},
  {"w28", AARCH64_X0_REGNUM + 28},
  {"w29", AARCH64_X0_REGNUM + 29},
  {"w30", AARCH64_X0_REGNUM + 30},

  /* specials */
  {"ip0", AARCH64_X0_REGNUM + 16},
  {"ip1", AARCH64_X0_REGNUM + 17}
};

/* The required core 'R' registers.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM!  */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  "pc", "cpsr"
};

/* The FP/SIMD 'V' registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM!  */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  "fpsr",
  "fpcr"
};

/* AArch64 prologue cache structure.  */
struct aarch64_prologue_cache
{
  /* The program counter at the start of the function.  It is used to
     identify this frame as a prologue frame.  */
  CORE_ADDR func;

  /* The program counter at the time this frame was created; i.e. where
     this function was called from.  It is used to identify this frame as a
     stub frame.  */
  CORE_ADDR prev_pc;

  /* The stack pointer at the time this frame was created; i.e. the
     caller's stack pointer when this function was called.  It is used
     to identify this frame.  */
  CORE_ADDR prev_sp;

  /* Is the target available to read from?  */
  int available_p;

  /* The frame base for this frame is just prev_sp - frame size.
     FRAMESIZE is the distance from the frame pointer to the
     initial stack pointer.  */
  int framesize;

  /* The register used to hold the frame pointer for this frame.  */
  int framereg;

  /* Saved register offsets.  */
  struct trad_frame_saved_reg *saved_regs;
};

static void
show_aarch64_debug (struct ui_file *file, int from_tty,
                    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
}

/* Analyze a prologue, looking for a recognizable stack frame
   and frame pointer.  Scan until we encounter a store that could
   clobber the stack frame unexpectedly, or an unknown instruction.  */

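/* For example, a typical frame-setup sequence such as

     stp x29, x30, [sp, #-32]!
     mov x29, sp

   is tracked symbolically below: the STP records where x29/x30 were
   saved relative to the entry SP, and the MOV (an alias of ADD
   Xd, SP, #0) establishes x29 as the frame register.  */
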
static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
                          CORE_ADDR start, CORE_ADDR limit,
                          struct aarch64_prologue_cache *cache)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  int i;
  pv_t regs[AARCH64_X_REGISTER_COUNT];
  struct pv_area *stack;
  struct cleanup *back_to;

  for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
    regs[i] = pv_register (i, 0);
  stack = make_pv_area (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));
  back_to = make_cleanup_free_pv_area (stack);

  for (; start < limit; start += 4)
    {
      uint32_t insn;
      aarch64_inst inst;

      insn = read_memory_unsigned_integer (start, 4, byte_order_for_code);

      if (aarch64_decode_insn (insn, &inst, 1) != 0)
        break;

      if (inst.opcode->iclass == addsub_imm
          && (inst.opcode->op == OP_ADD
              || strcmp ("sub", inst.opcode->name) == 0))
        {
          unsigned rd = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].reg.regno;

          gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM);

          if (inst.opcode->op == OP_ADD)
            {
              regs[rd] = pv_add_constant (regs[rn],
                                          inst.operands[2].imm.value);
            }
          else
            {
              regs[rd] = pv_add_constant (regs[rn],
                                          -inst.operands[2].imm.value);
            }
        }
      else if (inst.opcode->iclass == pcreladdr
               && inst.operands[1].type == AARCH64_OPND_ADDR_ADRP)
        {
          gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);

          regs[inst.operands[0].reg.regno] = pv_unknown ();
        }
      else if (inst.opcode->iclass == branch_imm)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == condbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == branch_reg)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == compbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->op == OP_MOVZ)
        {
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
          regs[inst.operands[0].reg.regno] = pv_unknown ();
        }
      else if (inst.opcode->iclass == log_shift
               && strcmp (inst.opcode->name, "orr") == 0)
        {
          unsigned rd = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].reg.regno;
          unsigned rm = inst.operands[2].reg.regno;

          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT);

          if (inst.operands[2].shifter.amount == 0
              && rn == AARCH64_SP_REGNUM)
            regs[rd] = regs[rm];
          else
            {
              if (aarch64_debug)
                {
                  debug_printf ("aarch64: prologue analysis gave up "
                                "addr=%s opcode=0x%x (orr x register)\n",
                                core_addr_to_string_nz (start), insn);
                }
              break;
            }
        }
      else if (inst.opcode->op == OP_STUR)
        {
          unsigned rt = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].addr.base_regno;
          int is64
            = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);

          gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
          gdb_assert (!inst.operands[1].addr.offset.is_reg);

          pv_area_store (stack, pv_add_constant (regs[rn],
                                                 inst.operands[1].addr.offset.imm),
                         is64 ? 8 : 4, regs[rt]);
        }
      else if ((inst.opcode->iclass == ldstpair_off
                || inst.opcode->iclass == ldstpair_indexed)
               && inst.operands[2].addr.preind
               && strcmp ("stp", inst.opcode->name) == 0)
        {
          unsigned rt1 = inst.operands[0].reg.regno;
          unsigned rt2 = inst.operands[1].reg.regno;
          unsigned rn = inst.operands[2].addr.base_regno;
          int64_t imm = inst.operands[2].addr.offset.imm;

          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7);
          gdb_assert (!inst.operands[2].addr.offset.is_reg);

          /* If recording this store would invalidate the store area
             (perhaps because rn is not known) then we should abandon
             further prologue analysis.  */
          if (pv_area_store_would_trash (stack,
                                         pv_add_constant (regs[rn], imm)))
            break;

          if (pv_area_store_would_trash (stack,
                                         pv_add_constant (regs[rn], imm + 8)))
            break;

          pv_area_store (stack, pv_add_constant (regs[rn], imm), 8,
                         regs[rt1]);
          pv_area_store (stack, pv_add_constant (regs[rn], imm + 8), 8,
                         regs[rt2]);

          if (inst.operands[2].addr.writeback)
            regs[rn] = pv_add_constant (regs[rn], imm);

        }
      else if (inst.opcode->iclass == testbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else
        {
          if (aarch64_debug)
            {
              debug_printf ("aarch64: prologue analysis gave up addr=%s"
                            " opcode=0x%x\n",
                            core_addr_to_string_nz (start), insn);
            }
          break;
        }
    }

  if (cache == NULL)
    {
      do_cleanups (back_to);
      return start;
    }

  if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Frame pointer is fp.  Frame size is constant.  */
      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = -regs[AARCH64_FP_REGNUM].k;
    }
  else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Try the stack pointer.  */
      cache->framesize = -regs[AARCH64_SP_REGNUM].k;
      cache->framereg = AARCH64_SP_REGNUM;
    }
  else
    {
      /* We're just out of luck.  We don't know where the frame is.  */
      cache->framereg = -1;
      cache->framesize = 0;
    }

  for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
    {
      CORE_ADDR offset;

      if (pv_area_find_reg (stack, gdbarch, i, &offset))
        cache->saved_regs[i].addr = offset;
    }

  do_cleanups (back_to);
  return start;
}

/* Implement the "skip_prologue" gdbarch method.  */

static CORE_ADDR
aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  CORE_ADDR func_addr, limit_pc;

  /* See if we can determine the end of the prologue via the symbol
     table.  If so, then return either PC, or the PC after the
     prologue, whichever is greater.  */
  if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
    {
      CORE_ADDR post_prologue_pc
        = skip_prologue_using_sal (gdbarch, func_addr);

      if (post_prologue_pc != 0)
        return max (pc, post_prologue_pc);
    }

  /* Can't determine prologue from the symbol table, need to examine
     instructions.  */

  /* Find an upper limit on the function prologue using the debug
     information.  If the debug information could not be used to
     provide that bound, then use an arbitrary large number as the
     upper bound.  */
  limit_pc = skip_prologue_using_sal (gdbarch, pc);
  if (limit_pc == 0)
    limit_pc = pc + 128;  /* Magic.  */

  /* Try disassembling prologue.  */
  return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
}

/* Scan the function prologue for THIS_FRAME and populate the prologue
   cache CACHE.  */

static void
aarch64_scan_prologue (struct frame_info *this_frame,
                       struct aarch64_prologue_cache *cache)
{
  CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
  CORE_ADDR prologue_start;
  CORE_ADDR prologue_end;
  CORE_ADDR prev_pc = get_frame_pc (this_frame);
  struct gdbarch *gdbarch = get_frame_arch (this_frame);

  cache->prev_pc = prev_pc;

  /* Assume we do not find a frame.  */
  cache->framereg = -1;
  cache->framesize = 0;

  if (find_pc_partial_function (block_addr, NULL, &prologue_start,
                                &prologue_end))
    {
      struct symtab_and_line sal = find_pc_line (prologue_start, 0);

      if (sal.line == 0)
        {
          /* No line info so use the current PC.  */
          prologue_end = prev_pc;
        }
      else if (sal.end < prologue_end)
        {
          /* The next line begins after the function end.  */
          prologue_end = sal.end;
        }

      prologue_end = min (prologue_end, prev_pc);
      aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
    }
  else
    {
      CORE_ADDR frame_loc;

      frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
      if (frame_loc == 0)
        return;

      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = 16;
      cache->saved_regs[29].addr = 0;
      cache->saved_regs[30].addr = 8;
    }
}

/* Fill in *CACHE with information about the prologue of *THIS_FRAME.  This
   function may throw an exception if the inferior's registers or memory is
   not available.  */

static void
aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
                               struct aarch64_prologue_cache *cache)
{
  CORE_ADDR unwound_fp;
  int reg;

  aarch64_scan_prologue (this_frame, cache);

  if (cache->framereg == -1)
    return;

  unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
  if (unwound_fp == 0)
    return;

  cache->prev_sp = unwound_fp + cache->framesize;

  /* Calculate actual addresses of saved registers using offsets
     determined by aarch64_analyze_prologue.  */
  for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
    if (trad_frame_addr_p (cache->saved_regs, reg))
      cache->saved_regs[reg].addr += cache->prev_sp;

  cache->func = get_frame_func (this_frame);

  cache->available_p = 1;
}

/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  TRY
    {
      aarch64_make_prologue_cache_1 (this_frame, cache);
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
        throw_exception (ex);
    }
  END_CATCH

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
                                           void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  /* Halt the backtrace at "_start".  */
  if (cache->prev_pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
    return UNWIND_OUTERMOST;

  /* We've hit a wall, stop.  */
  if (cache->prev_sp == 0)
    return UNWIND_OUTERMOST;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a normal frame is the current function's starting
   PC and the caller's SP when we were called.  */

static void
aarch64_prologue_this_id (struct frame_info *this_frame,
                          void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    *this_id = frame_id_build_unavailable_stack (cache->func);
  else
    *this_id = frame_id_build (cache->prev_sp, cache->func);
}

/* Implement the "prev_register" frame_unwind method.  */

static struct value *
aarch64_prologue_prev_register (struct frame_info *this_frame,
                                void **this_cache, int prev_regnum)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  /* If we are asked to unwind the PC, then we need to return the LR
     instead.  The prologue may save PC, but it will point into this
     frame's prologue, not the next frame's resume location.  */
  if (prev_regnum == AARCH64_PC_REGNUM)
    {
      CORE_ADDR lr;

      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      return frame_unwind_got_constant (this_frame, prev_regnum, lr);
    }

  /* SP is generally not saved to the stack, but this frame is
     identified by the next frame's stack pointer at the time of the
     call.  The value was already reconstructed into PREV_SP.  */
  /*
         +----------+  ^
         | saved lr |  |
      +->| saved fp |--+
      |  |          |
      |  |          |  <- Previous SP
      |  +----------+
      |  | saved lr |
      +--| saved fp |<- FP
         |          |
         |          |<- SP
         +----------+  */
  if (prev_regnum == AARCH64_SP_REGNUM)
    return frame_unwind_got_constant (this_frame, prev_regnum,
                                      cache->prev_sp);

  return trad_frame_get_prev_register (this_frame, cache->saved_regs,
                                       prev_regnum);
}

/* AArch64 prologue unwinder.  */
struct frame_unwind aarch64_prologue_unwind =
{
  NORMAL_FRAME,
  aarch64_prologue_frame_unwind_stop_reason,
  aarch64_prologue_this_id,
  aarch64_prologue_prev_register,
  NULL,
  default_frame_sniffer
};

/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  TRY
    {
      cache->prev_sp = get_frame_register_unsigned (this_frame,
                                                    AARCH64_SP_REGNUM);
      cache->prev_pc = get_frame_pc (this_frame);
      cache->available_p = 1;
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
        throw_exception (ex);
    }
  END_CATCH

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame,
                                       void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a stub frame is the current SP and LR.  */

static void
aarch64_stub_this_id (struct frame_info *this_frame,
                      void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (cache->available_p)
    *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
  else
    *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
}

/* Implement the "sniffer" frame_unwind method.  */

static int
aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
                             struct frame_info *this_frame,
                             void **this_prologue_cache)
{
  CORE_ADDR addr_in_block;
  gdb_byte dummy[4];

  addr_in_block = get_frame_address_in_block (this_frame);
  if (in_plt_section (addr_in_block)
      /* We also use the stub unwinder if the target memory is unreadable
         to avoid having the prologue unwinder trying to read it.  */
      || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
    return 1;

  return 0;
}

/* AArch64 stub unwinder.  */
struct frame_unwind aarch64_stub_unwind =
{
  NORMAL_FRAME,
  aarch64_stub_frame_unwind_stop_reason,
  aarch64_stub_this_id,
  aarch64_prologue_prev_register,
  NULL,
  aarch64_stub_unwind_sniffer
};

/* Return the frame base address of *THIS_FRAME.  */

static CORE_ADDR
aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  return cache->prev_sp - cache->framesize;
}

/* AArch64 default frame base information.  */
struct frame_base aarch64_normal_base =
{
  &aarch64_prologue_unwind,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base
};

/* Assuming THIS_FRAME is a dummy, return the frame ID of that
   dummy frame.  The frame ID's base needs to match the TOS value
   saved by save_dummy_frame_tos () and returned from
   aarch64_push_dummy_call, and the PC needs to match the dummy
   frame's breakpoint.  */

static struct frame_id
aarch64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  return frame_id_build (get_frame_register_unsigned (this_frame,
                                                      AARCH64_SP_REGNUM),
                         get_frame_pc (this_frame));
}

/* Implement the "unwind_pc" gdbarch method.  */

static CORE_ADDR
aarch64_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  CORE_ADDR pc
    = frame_unwind_register_unsigned (this_frame, AARCH64_PC_REGNUM);

  return pc;
}

/* Implement the "unwind_sp" gdbarch method.  */

static CORE_ADDR
aarch64_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  return frame_unwind_register_unsigned (this_frame, AARCH64_SP_REGNUM);
}

/* Return the value of the REGNUM register in the previous frame of
   *THIS_FRAME.  */

static struct value *
aarch64_dwarf2_prev_register (struct frame_info *this_frame,
                              void **this_cache, int regnum)
{
  CORE_ADDR lr;

  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      return frame_unwind_got_constant (this_frame, regnum, lr);

    default:
      internal_error (__FILE__, __LINE__,
                      _("Unexpected register %d"), regnum);
    }
}

/* Implement the "init_reg" dwarf2_frame_ops method.  */

static void
aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
                               struct dwarf2_frame_state_reg *reg,
                               struct frame_info *this_frame)
{
  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      reg->how = DWARF2_FRAME_REG_FN;
      reg->loc.fn = aarch64_dwarf2_prev_register;
      break;
    case AARCH64_SP_REGNUM:
      reg->how = DWARF2_FRAME_REG_CFA;
      break;
    }
}

/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

typedef struct
{
  /* Value to pass on stack.  It can be NULL if this item is for stack
     padding.  */
  const gdb_byte *data;

  /* Size in bytes of value to pass on stack.  */
  int len;
} stack_item_t;

DEF_VEC_O (stack_item_t);

/* Return the alignment (in bytes) of the given type.  */

static int
aarch64_type_align (struct type *t)
{
  int n;
  int align;
  int falign;

  t = check_typedef (t);
  switch (TYPE_CODE (t))
    {
    default:
      /* Should never happen.  */
      internal_error (__FILE__, __LINE__, _("unknown type alignment"));
      return 4;

    case TYPE_CODE_PTR:
    case TYPE_CODE_ENUM:
    case TYPE_CODE_INT:
    case TYPE_CODE_FLT:
    case TYPE_CODE_SET:
    case TYPE_CODE_RANGE:
    case TYPE_CODE_BITSTRING:
    case TYPE_CODE_REF:
    case TYPE_CODE_CHAR:
    case TYPE_CODE_BOOL:
      return TYPE_LENGTH (t);

    case TYPE_CODE_ARRAY:
      if (TYPE_VECTOR (t))
        {
          /* Use the natural alignment for vector types (the same as for
             scalar types), but the maximum alignment is 128 bits.  */
          if (TYPE_LENGTH (t) > 16)
            return 16;
          else
            return TYPE_LENGTH (t);
        }
      else
        return aarch64_type_align (TYPE_TARGET_TYPE (t));
    case TYPE_CODE_COMPLEX:
      return aarch64_type_align (TYPE_TARGET_TYPE (t));

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      align = 1;
      for (n = 0; n < TYPE_NFIELDS (t); n++)
        {
          falign = aarch64_type_align (TYPE_FIELD_TYPE (t, n));
          if (falign > align)
            align = falign;
        }
      return align;
    }
}

/* Return 1 if *TY is a homogeneous floating-point aggregate or
   homogeneous short-vector aggregate as defined in the AAPCS64 ABI
   document; otherwise return 0.  */

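/* For instance, "struct { float x, y, z; }" qualifies (at most four
   members, all of the same floating-point type), whereas
   "struct { float x; double y; }" does not, because the member types
   differ.  */
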
static int
is_hfa_or_hva (struct type *ty)
{
  switch (TYPE_CODE (ty))
    {
    case TYPE_CODE_ARRAY:
      {
        struct type *target_ty = TYPE_TARGET_TYPE (ty);

        if (TYPE_VECTOR (ty))
          return 0;

        if (TYPE_LENGTH (ty) <= 4 /* HFA or HVA has at most 4 members.  */
            && (TYPE_CODE (target_ty) == TYPE_CODE_FLT /* HFA */
                || (TYPE_CODE (target_ty) == TYPE_CODE_ARRAY /* HVA */
                    && TYPE_VECTOR (target_ty))))
          return 1;
        break;
      }

    case TYPE_CODE_UNION:
    case TYPE_CODE_STRUCT:
      {
        /* HFA or HVA has at most four members.  */
        if (TYPE_NFIELDS (ty) > 0 && TYPE_NFIELDS (ty) <= 4)
          {
            struct type *member0_type;

            member0_type = check_typedef (TYPE_FIELD_TYPE (ty, 0));
            if (TYPE_CODE (member0_type) == TYPE_CODE_FLT
                || (TYPE_CODE (member0_type) == TYPE_CODE_ARRAY
                    && TYPE_VECTOR (member0_type)))
              {
                int i;

                for (i = 0; i < TYPE_NFIELDS (ty); i++)
                  {
                    struct type *member1_type;

                    member1_type = check_typedef (TYPE_FIELD_TYPE (ty, i));
                    if (TYPE_CODE (member0_type) != TYPE_CODE (member1_type)
                        || (TYPE_LENGTH (member0_type)
                            != TYPE_LENGTH (member1_type)))
                      return 0;
                  }
                return 1;
              }
          }
        return 0;
      }

    default:
      break;
    }

  return 0;
}

/* AArch64 function call information structure.  */
struct aarch64_call_info
{
  /* the current argument number.  */
  unsigned argnum;

  /* The next general purpose register number, equivalent to NGRN as
     described in the AArch64 Procedure Call Standard.  */
  unsigned ngrn;

  /* The next SIMD and floating point register number, equivalent to
     NSRN as described in the AArch64 Procedure Call Standard.  */
  unsigned nsrn;

  /* The next stacked argument address, equivalent to NSAA as
     described in the AArch64 Procedure Call Standard.  */
  unsigned nsaa;

  /* Stack item vector.  */
  VEC(stack_item_t) *si;
};

/* Pass a value in a sequence of consecutive X registers.  The caller
   is responsible for ensuring sufficient registers are available.  */

static void
pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
           struct aarch64_call_info *info, struct type *type,
           struct value *arg)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int len = TYPE_LENGTH (type);
  enum type_code typecode = TYPE_CODE (type);
  int regnum = AARCH64_X0_REGNUM + info->ngrn;
  const bfd_byte *buf = value_contents (arg);

  info->argnum++;

  while (len > 0)
    {
      int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
      CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
                                                   byte_order);


      /* Adjust sub-word struct/union args when big-endian.  */
      if (byte_order == BFD_ENDIAN_BIG
          && partial_len < X_REGISTER_SIZE
          && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
        regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);

      if (aarch64_debug)
        {
          debug_printf ("arg %d in %s = 0x%s\n", info->argnum,
                        gdbarch_register_name (gdbarch, regnum),
                        phex (regval, X_REGISTER_SIZE));
        }
      regcache_cooked_write_unsigned (regcache, regnum, regval);
      len -= partial_len;
      buf += partial_len;
      regnum++;
    }
}

/* Attempt to marshall a value in a V register.  Return 1 if
   successful, or 0 if insufficient registers are available.  This
   function, unlike the equivalent pass_in_x() function does not
   handle arguments spread across multiple registers.  */

static int
pass_in_v (struct gdbarch *gdbarch,
           struct regcache *regcache,
           struct aarch64_call_info *info,
           int len, const bfd_byte *buf)
{
  if (info->nsrn < 8)
    {
      int regnum = AARCH64_V0_REGNUM + info->nsrn;
      gdb_byte reg[V_REGISTER_SIZE];

      info->argnum++;
      info->nsrn++;

      memset (reg, 0, sizeof (reg));
      /* PCS C.1, the argument is allocated to the least significant
         bits of the V register.  */
      memcpy (reg, buf, len);
      regcache_cooked_write (regcache, regnum, reg);

      if (aarch64_debug)
        {
          debug_printf ("arg %d in %s\n", info->argnum,
                        gdbarch_register_name (gdbarch, regnum));
        }
      return 1;
    }
  info->nsrn = 8;
  return 0;
}

/* Marshall an argument onto the stack.  */

static void
pass_on_stack (struct aarch64_call_info *info, struct type *type,
               struct value *arg)
{
  const bfd_byte *buf = value_contents (arg);
  int len = TYPE_LENGTH (type);
  int align;
  stack_item_t item;

  info->argnum++;

  align = aarch64_type_align (type);

  /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
     natural alignment of the argument's type.  */
  align = align_up (align, 8);

  /* The AArch64 PCS requires at most doubleword alignment.  */
  if (align > 16)
    align = 16;

  if (aarch64_debug)
    {
      debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
                    info->nsaa);
    }

  item.len = len;
  item.data = buf;
  VEC_safe_push (stack_item_t, info->si, &item);

  info->nsaa += len;
  if (info->nsaa & (align - 1))
    {
      /* Push stack alignment padding.  */
      int pad = align - (info->nsaa & (align - 1));

      item.len = pad;
      item.data = NULL;

      VEC_safe_push (stack_item_t, info->si, &item);
      info->nsaa += pad;
    }
}

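/* For example, a 12-byte struct of ints has natural alignment 4,
   rounded up to 8 by the rule above: pushing it appends a 12-byte
   data item plus a 4-byte padding item (data == NULL), advancing
   the NSAA from 0 to 16.  */
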
/* Marshall an argument into a sequence of one or more consecutive X
   registers or, if insufficient X registers are available then onto
   the stack.  */

static void
pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
                    struct aarch64_call_info *info, struct type *type,
                    struct value *arg)
{
  int len = TYPE_LENGTH (type);
  int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;

  /* PCS C.13 - Pass in registers if we have enough spare.  */
  if (info->ngrn + nregs <= 8)
    {
      pass_in_x (gdbarch, regcache, info, type, arg);
      info->ngrn += nregs;
    }
  else
    {
      info->ngrn = 8;
      pass_on_stack (info, type, arg);
    }
}

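/* For example, with NGRN == 6 a 24-byte struct needs three registers
   (nregs == 3) and no longer fits in x6-x7, so it is passed on the
   stack and NGRN is set to 8, ensuring no subsequent argument is
   placed in an X register either.  */
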
/* Pass a value in a V register, or on the stack if insufficient are
   available.  */

static void
pass_in_v_or_stack (struct gdbarch *gdbarch,
                    struct regcache *regcache,
                    struct aarch64_call_info *info,
                    struct type *type,
                    struct value *arg)
{
  if (!pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (type),
                  value_contents (arg)))
    pass_on_stack (info, type, arg);
}

/* Implement the "push_dummy_call" gdbarch method.  */

static CORE_ADDR
aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
                         struct regcache *regcache, CORE_ADDR bp_addr,
                         int nargs,
                         struct value **args, CORE_ADDR sp, int struct_return,
                         CORE_ADDR struct_addr)
{
  int argnum;
  struct aarch64_call_info info;
  struct type *func_type;
  struct type *return_type;
  int lang_struct_return;

  memset (&info, 0, sizeof (info));

  /* We need to know what the type of the called function is in order
     to determine the number of named/anonymous arguments for the
     actual argument placement, and the return type in order to handle
     the return value correctly.

     The generic code above us views the decision of return in memory
     or return in registers as a two-stage process.  The language
     handler is consulted first and may decide to return in memory (e.g.
     class with copy constructor returned by value), this will cause
     the generic code to allocate space AND insert an initial leading
     argument.

     If the language code does not decide to pass in memory then the
     target code is consulted.

     If the language code decides to pass in memory we want to move
     the pointer inserted as the initial argument from the argument
     list and into X8, the conventional AArch64 struct return pointer
     register.

     This is slightly awkward; ideally the flag "lang_struct_return"
     would be passed to the target's implementation of push_dummy_call.
     Rather than change the target interface we call the language code
     directly ourselves.  */

  func_type = check_typedef (value_type (function));

  /* Dereference function pointer types.  */
  if (TYPE_CODE (func_type) == TYPE_CODE_PTR)
    func_type = TYPE_TARGET_TYPE (func_type);

  gdb_assert (TYPE_CODE (func_type) == TYPE_CODE_FUNC
              || TYPE_CODE (func_type) == TYPE_CODE_METHOD);

  /* If language_pass_by_reference () returned true we will have been
     given an additional initial argument, a hidden pointer to the
     return slot in memory.  */
  return_type = TYPE_TARGET_TYPE (func_type);
  lang_struct_return = language_pass_by_reference (return_type);

  /* Set the return address.  For the AArch64, the return breakpoint
     is always at BP_ADDR.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);

  /* If we were given an initial argument for the return slot because
     lang_struct_return was true, lose it.  */
  if (lang_struct_return)
    {
      args++;
      nargs--;
    }

  /* The struct_return pointer occupies X8.  */
  if (struct_return || lang_struct_return)
    {
      if (aarch64_debug)
        {
          debug_printf ("struct return in %s = 0x%s\n",
                        gdbarch_register_name (gdbarch,
                                               AARCH64_STRUCT_RETURN_REGNUM),
                        paddress (gdbarch, struct_addr));
        }
      regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
                                      struct_addr);
    }

  for (argnum = 0; argnum < nargs; argnum++)
    {
      struct value *arg = args[argnum];
      struct type *arg_type;
      int len;

      arg_type = check_typedef (value_type (arg));
      len = TYPE_LENGTH (arg_type);

      switch (TYPE_CODE (arg_type))
        {
        case TYPE_CODE_INT:
        case TYPE_CODE_BOOL:
        case TYPE_CODE_CHAR:
        case TYPE_CODE_RANGE:
        case TYPE_CODE_ENUM:
          if (len < 4)
            {
              /* Promote to 32 bit integer.  */
              if (TYPE_UNSIGNED (arg_type))
                arg_type = builtin_type (gdbarch)->builtin_uint32;
              else
                arg_type = builtin_type (gdbarch)->builtin_int32;
              arg = value_cast (arg_type, arg);
            }
          pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;

        case TYPE_CODE_COMPLEX:
          if (info.nsrn <= 6)
            {
              const bfd_byte *buf = value_contents (arg);
              struct type *target_type =
                check_typedef (TYPE_TARGET_TYPE (arg_type));

              pass_in_v (gdbarch, regcache, &info,
                         TYPE_LENGTH (target_type), buf);
              pass_in_v (gdbarch, regcache, &info,
                         TYPE_LENGTH (target_type),
                         buf + TYPE_LENGTH (target_type));
            }
          else
            {
              info.nsrn = 8;
              pass_on_stack (&info, arg_type, arg);
            }
          break;
        case TYPE_CODE_FLT:
          pass_in_v_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;

        case TYPE_CODE_STRUCT:
        case TYPE_CODE_ARRAY:
        case TYPE_CODE_UNION:
          if (is_hfa_or_hva (arg_type))
            {
              int elements = TYPE_NFIELDS (arg_type);

              /* Homogeneous Aggregates */
              if (info.nsrn + elements < 8)
                {
                  int i;

                  for (i = 0; i < elements; i++)
                    {
                      /* We know that we have sufficient registers
                         available therefore this will never fallback
                         to the stack.  */
                      struct value *field =
                        value_primitive_field (arg, 0, i, arg_type);
                      struct type *field_type =
                        check_typedef (value_type (field));

                      pass_in_v_or_stack (gdbarch, regcache, &info,
                                          field_type, field);
                    }
                }
              else
                {
                  info.nsrn = 8;
                  pass_on_stack (&info, arg_type, arg);
                }
            }
          else if (TYPE_CODE (arg_type) == TYPE_CODE_ARRAY
                   && TYPE_VECTOR (arg_type) && (len == 16 || len == 8))
            {
              /* Short vector types are passed in V registers.  */
              pass_in_v_or_stack (gdbarch, regcache, &info, arg_type, arg);
            }
          else if (len > 16)
            {
              /* PCS B.7 Aggregates larger than 16 bytes are passed by
                 invisible reference.  */

              /* Allocate aligned storage.  */
              sp = align_down (sp - len, 16);

              /* Write the real data into the stack.  */
              write_memory (sp, value_contents (arg), len);

              /* Construct the indirection.  */
              arg_type = lookup_pointer_type (arg_type);
              arg = value_from_pointer (arg_type, sp);
              pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
            }
          else
            /* PCS C.15 / C.18 - small aggregates are passed in
               consecutive X registers.  */
            pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;

        default:
          pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;
        }
    }

  /* Make sure stack retains 16 byte alignment.  */
  if (info.nsaa & 15)
    sp -= 16 - (info.nsaa & 15);

  while (!VEC_empty (stack_item_t, info.si))
    {
      stack_item_t *si = VEC_last (stack_item_t, info.si);

      sp -= si->len;
      if (si->data != NULL)
        write_memory (sp, si->data, si->len);
      VEC_pop (stack_item_t, info.si);
    }

  VEC_free (stack_item_t, info.si);

  /* Finally, update the SP register.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);

  return sp;
}

/* Implement the "frame_align" gdbarch method.  */

static CORE_ADDR
aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
{
  /* Align the stack to sixteen bytes.  */
  return sp & ~(CORE_ADDR) 15;
}

/* Return the type for an AdvSIMD Q register.  */

static struct type *
aarch64_vnq_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnq_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint128;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int128;
      append_composite_type_field (t, "s", elem);

      tdep->vnq_type = t;
    }

  return tdep->vnq_type;
}

/* Return the type for an AdvSIMD D register.  */

static struct type *
aarch64_vnd_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnd_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_double;
      append_composite_type_field (t, "f", elem);

      elem = builtin_type (gdbarch)->builtin_uint64;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int64;
      append_composite_type_field (t, "s", elem);

      tdep->vnd_type = t;
    }

  return tdep->vnd_type;
}

/* Return the type for an AdvSIMD S register.  */

static struct type *
aarch64_vns_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vns_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_float;
      append_composite_type_field (t, "f", elem);

      elem = builtin_type (gdbarch)->builtin_uint32;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int32;
      append_composite_type_field (t, "s", elem);

      tdep->vns_type = t;
    }

  return tdep->vns_type;
}

/* Return the type for an AdvSIMD H register.  */

static struct type *
aarch64_vnh_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnh_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint16;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int16;
      append_composite_type_field (t, "s", elem);

      tdep->vnh_type = t;
    }

  return tdep->vnh_type;
}

/* Return the type for an AdvSIMD B register.  */

static struct type *
aarch64_vnb_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnb_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint8;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int8;
      append_composite_type_field (t, "s", elem);

      tdep->vnb_type = t;
    }

  return tdep->vnb_type;
}

/* Implement the "dwarf2_reg_to_regnum" gdbarch method.  */

static int
aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
{
  if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
    return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;

  if (reg == AARCH64_DWARF_SP)
    return AARCH64_SP_REGNUM;

  if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
    return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;

  return -1;
}
\f

/* Implement the "print_insn" gdbarch method.  */

static int
aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
{
  info->symbols = NULL;
  return print_insn_aarch64 (memaddr, info);
}

/* AArch64 BRK software debug mode instruction.
   Note that AArch64 code is always little-endian.
   1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000.  */
static const gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};

/* Implement the "breakpoint_from_pc" gdbarch method.  */

static const gdb_byte *
aarch64_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr,
                            int *lenptr)
{
  *lenptr = sizeof (aarch64_default_breakpoint);
  return aarch64_default_breakpoint;
}

/* Extract from an array REGS containing the (raw) register state a
   function return value of type TYPE, and copy that, in virtual
   format, into VALBUF.  */

static void
aarch64_extract_return_value (struct type *type, struct regcache *regs,
                              gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = get_regcache_arch (regs);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  if (TYPE_CODE (type) == TYPE_CODE_FLT)
    {
      bfd_byte buf[V_REGISTER_SIZE];
      int len = TYPE_LENGTH (type);

      regcache_cooked_read (regs, AARCH64_V0_REGNUM, buf);
      memcpy (valbuf, buf, len);
    }
  else if (TYPE_CODE (type) == TYPE_CODE_INT
           || TYPE_CODE (type) == TYPE_CODE_CHAR
           || TYPE_CODE (type) == TYPE_CODE_BOOL
           || TYPE_CODE (type) == TYPE_CODE_PTR
           || TYPE_CODE (type) == TYPE_CODE_REF
           || TYPE_CODE (type) == TYPE_CODE_ENUM)
    {
      /* If the type is a plain integer, then the access is
         straightforward.  Otherwise we have to play around a bit
         more.  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      ULONGEST tmp;

      while (len > 0)
        {
          /* By using store_unsigned_integer we avoid having to do
             anything special for small big-endian values.  */
          regcache_cooked_read_unsigned (regs, regno++, &tmp);
          store_unsigned_integer (valbuf,
                                  (len > X_REGISTER_SIZE
                                   ? X_REGISTER_SIZE : len), byte_order, tmp);
          len -= X_REGISTER_SIZE;
          valbuf += X_REGISTER_SIZE;
        }
    }
  else if (TYPE_CODE (type) == TYPE_CODE_COMPLEX)
    {
      int regno = AARCH64_V0_REGNUM;
      bfd_byte buf[V_REGISTER_SIZE];
      struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
      int len = TYPE_LENGTH (target_type);

      regcache_cooked_read (regs, regno, buf);
      memcpy (valbuf, buf, len);
      valbuf += len;
      regcache_cooked_read (regs, regno + 1, buf);
      memcpy (valbuf, buf, len);
      valbuf += len;
    }
  else if (is_hfa_or_hva (type))
    {
      int elements = TYPE_NFIELDS (type);
      struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
      int len = TYPE_LENGTH (member_type);
      int i;

      for (i = 0; i < elements; i++)
        {
          int regno = AARCH64_V0_REGNUM + i;
          bfd_byte buf[V_REGISTER_SIZE];

          if (aarch64_debug)
            {
              debug_printf ("read HFA or HVA return value element %d from %s\n",
                            i + 1,
                            gdbarch_register_name (gdbarch, regno));
            }
          regcache_cooked_read (regs, regno, buf);

          memcpy (valbuf, buf, len);
          valbuf += len;
        }
    }
  else if (TYPE_CODE (type) == TYPE_CODE_ARRAY && TYPE_VECTOR (type)
           && (TYPE_LENGTH (type) == 16 || TYPE_LENGTH (type) == 8))
    {
      /* Short vector is returned in V register.  */
      gdb_byte buf[V_REGISTER_SIZE];

      regcache_cooked_read (regs, AARCH64_V0_REGNUM, buf);
      memcpy (valbuf, buf, TYPE_LENGTH (type));
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
         been stored to word-aligned memory and then loaded into
         registers with 64-bit load instruction(s).  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      bfd_byte buf[X_REGISTER_SIZE];

      while (len > 0)
        {
          regcache_cooked_read (regs, regno++, buf);
          memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
          len -= X_REGISTER_SIZE;
          valbuf += X_REGISTER_SIZE;
        }
    }
}


/* Will a function return an aggregate type in memory or in a
   register?  Return 0 if an aggregate type can be returned in a
   register, 1 if it must be returned in memory.  */

static int
aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
{
  type = check_typedef (type);

  if (is_hfa_or_hva (type))
    {
      /* v0-v7 are used to return values and one register is allocated
         for one member.  However, HFA or HVA has at most four members.  */
      return 0;
    }

  if (TYPE_LENGTH (type) > 16)
    {
      /* PCS B.6 Aggregates larger than 16 bytes are passed by
         invisible reference.  */

      return 1;
    }

  return 0;
}

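/* For example, "struct { double a, b; }" is an HFA and comes back in
   d0/d1, while "struct { char buf[24]; }" exceeds 16 bytes and is
   returned in memory via the X8 pointer set up by the caller.  */
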
/* Write into appropriate registers a function return value of type
   TYPE, given in virtual format.  */

static void
aarch64_store_return_value (struct type *type, struct regcache *regs,
                            const gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = get_regcache_arch (regs);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  if (TYPE_CODE (type) == TYPE_CODE_FLT)
    {
      bfd_byte buf[V_REGISTER_SIZE];
      int len = TYPE_LENGTH (type);

      memcpy (buf, valbuf, len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
      regcache_cooked_write (regs, AARCH64_V0_REGNUM, buf);
    }
  else if (TYPE_CODE (type) == TYPE_CODE_INT
           || TYPE_CODE (type) == TYPE_CODE_CHAR
           || TYPE_CODE (type) == TYPE_CODE_BOOL
           || TYPE_CODE (type) == TYPE_CODE_PTR
           || TYPE_CODE (type) == TYPE_CODE_REF
           || TYPE_CODE (type) == TYPE_CODE_ENUM)
    {
      if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
        {
          /* Values of one word or less are zero/sign-extended and
             returned in x0.  */
          bfd_byte tmpbuf[X_REGISTER_SIZE];
          LONGEST val = unpack_long (type, valbuf);

          store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
          regcache_cooked_write (regs, AARCH64_X0_REGNUM, tmpbuf);
        }
      else
        {
          /* Integral values greater than one word are stored in
             consecutive registers starting with x0.  This will always
             be a multiple of the register size.  */
          int len = TYPE_LENGTH (type);
          int regno = AARCH64_X0_REGNUM;

          while (len > 0)
            {
              regcache_cooked_write (regs, regno++, valbuf);
              len -= X_REGISTER_SIZE;
              valbuf += X_REGISTER_SIZE;
            }
        }
    }
  else if (is_hfa_or_hva (type))
    {
      int elements = TYPE_NFIELDS (type);
      struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
      int len = TYPE_LENGTH (member_type);
      int i;

      for (i = 0; i < elements; i++)
        {
          int regno = AARCH64_V0_REGNUM + i;
          bfd_byte tmpbuf[MAX_REGISTER_SIZE];

          if (aarch64_debug)
            {
              debug_printf ("write HFA or HVA return value element %d to %s\n",
                            i + 1,
                            gdbarch_register_name (gdbarch, regno));
            }

          memcpy (tmpbuf, valbuf, len);
          regcache_cooked_write (regs, regno, tmpbuf);
          valbuf += len;
        }
    }
  else if (TYPE_CODE (type) == TYPE_CODE_ARRAY && TYPE_VECTOR (type)
           && (TYPE_LENGTH (type) == 8 || TYPE_LENGTH (type) == 16))
    {
      /* Short vector.  */
      gdb_byte buf[V_REGISTER_SIZE];

      memcpy (buf, valbuf, TYPE_LENGTH (type));
      regcache_cooked_write (regs, AARCH64_V0_REGNUM, buf);
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
         been stored to word-aligned memory and then loaded into
         registers with 64-bit load instruction(s).  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      bfd_byte tmpbuf[X_REGISTER_SIZE];

      while (len > 0)
        {
          memcpy (tmpbuf, valbuf,
                  len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
          regcache_cooked_write (regs, regno++, tmpbuf);
          len -= X_REGISTER_SIZE;
          valbuf += X_REGISTER_SIZE;
        }
    }
}

/* Implement the "return_value" gdbarch method.  */

static enum return_value_convention
aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
                      struct type *valtype, struct regcache *regcache,
                      gdb_byte *readbuf, const gdb_byte *writebuf)
{

  if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
      || TYPE_CODE (valtype) == TYPE_CODE_UNION
      || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
    {
      if (aarch64_return_in_memory (gdbarch, valtype))
        {
          if (aarch64_debug)
            debug_printf ("return value in memory\n");
          return RETURN_VALUE_STRUCT_CONVENTION;
        }
    }

  if (writebuf)
    aarch64_store_return_value (valtype, regcache, writebuf);

  if (readbuf)
    aarch64_extract_return_value (valtype, regcache, readbuf);

  if (aarch64_debug)
    debug_printf ("return value in registers\n");

  return RETURN_VALUE_REGISTER_CONVENTION;
}

/* Implement the "get_longjmp_target" gdbarch method.  */

static int
aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
{
  CORE_ADDR jb_addr;
  gdb_byte buf[X_REGISTER_SIZE];
  struct gdbarch *gdbarch = get_frame_arch (frame);
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);

  if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
                          X_REGISTER_SIZE))
    return 0;

  *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
  return 1;
}

/* Implement the "gen_return_address" gdbarch method.  */

static void
aarch64_gen_return_address (struct gdbarch *gdbarch,
                            struct agent_expr *ax, struct axs_value *value,
                            CORE_ADDR scope)
{
  value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
  value->kind = axs_lvalue_register;
  value->u.reg = AARCH64_LR_REGNUM;
}
\f

/* Return the pseudo register name corresponding to register regnum.  */

static const char *
aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
{
  static const char *const q_name[] =
    {
      "q0", "q1", "q2", "q3",
      "q4", "q5", "q6", "q7",
      "q8", "q9", "q10", "q11",
      "q12", "q13", "q14", "q15",
      "q16", "q17", "q18", "q19",
      "q20", "q21", "q22", "q23",
      "q24", "q25", "q26", "q27",
      "q28", "q29", "q30", "q31",
    };

  static const char *const d_name[] =
    {
      "d0", "d1", "d2", "d3",
      "d4", "d5", "d6", "d7",
      "d8", "d9", "d10", "d11",
      "d12", "d13", "d14", "d15",
      "d16", "d17", "d18", "d19",
      "d20", "d21", "d22", "d23",
      "d24", "d25", "d26", "d27",
      "d28", "d29", "d30", "d31",
    };

  static const char *const s_name[] =
    {
      "s0", "s1", "s2", "s3",
      "s4", "s5", "s6", "s7",
      "s8", "s9", "s10", "s11",
      "s12", "s13", "s14", "s15",
      "s16", "s17", "s18", "s19",
      "s20", "s21", "s22", "s23",
      "s24", "s25", "s26", "s27",
      "s28", "s29", "s30", "s31",
    };

  static const char *const h_name[] =
    {
      "h0", "h1", "h2", "h3",
      "h4", "h5", "h6", "h7",
      "h8", "h9", "h10", "h11",
      "h12", "h13", "h14", "h15",
      "h16", "h17", "h18", "h19",
      "h20", "h21", "h22", "h23",
      "h24", "h25", "h26", "h27",
      "h28", "h29", "h30", "h31",
    };

  static const char *const b_name[] =
    {
      "b0", "b1", "b2", "b3",
      "b4", "b5", "b6", "b7",
      "b8", "b9", "b10", "b11",
      "b12", "b13", "b14", "b15",
      "b16", "b17", "b18", "b19",
      "b20", "b21", "b22", "b23",
      "b24", "b25", "b26", "b27",
      "b28", "b29", "b30", "b31",
    };

  regnum -= gdbarch_num_regs (gdbarch);

  if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
    return q_name[regnum - AARCH64_Q0_REGNUM];

  if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
    return d_name[regnum - AARCH64_D0_REGNUM];

  if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
    return s_name[regnum - AARCH64_S0_REGNUM];

  if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
    return h_name[regnum - AARCH64_H0_REGNUM];

  if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
    return b_name[regnum - AARCH64_B0_REGNUM];

  internal_error (__FILE__, __LINE__,
                  _("aarch64_pseudo_register_name: bad register number %d"),
                  regnum);
}

1975 /* Implement the "pseudo_register_type" tdesc_arch_data method. */
1976
1977 static struct type *
1978 aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
1979 {
1980 regnum -= gdbarch_num_regs (gdbarch);
1981
1982 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
1983 return aarch64_vnq_type (gdbarch);
1984
1985 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
1986 return aarch64_vnd_type (gdbarch);
1987
1988 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
1989 return aarch64_vns_type (gdbarch);
1990
1991 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
1992 return aarch64_vnh_type (gdbarch);
1993
1994 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
1995 return aarch64_vnb_type (gdbarch);
1996
1997 internal_error (__FILE__, __LINE__,
1998 _("aarch64_pseudo_register_type: bad register number %d"),
1999 regnum);
2000 }
2001
2002 /* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2003
2004 static int
2005 aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
2006 struct reggroup *group)
2007 {
2008 regnum -= gdbarch_num_regs (gdbarch);
2009
2010 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2011 return group == all_reggroup || group == vector_reggroup;
2012 else if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2013 return (group == all_reggroup || group == vector_reggroup
2014 || group == float_reggroup);
2015 else if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2016 return (group == all_reggroup || group == vector_reggroup
2017 || group == float_reggroup);
2018 else if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2019 return group == all_reggroup || group == vector_reggroup;
2020 else if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2021 return group == all_reggroup || group == vector_reggroup;
2022
2023 return group == all_reggroup;
2024 }
2025
2026 /* Implement the "pseudo_register_read_value" gdbarch method. */
2027
2028 static struct value *
2029 aarch64_pseudo_read_value (struct gdbarch *gdbarch,
2030 struct regcache *regcache,
2031 int regnum)
2032 {
2033 gdb_byte reg_buf[MAX_REGISTER_SIZE];
2034 struct value *result_value;
2035 gdb_byte *buf;
2036
2037 result_value = allocate_value (register_type (gdbarch, regnum));
2038 VALUE_LVAL (result_value) = lval_register;
2039 VALUE_REGNUM (result_value) = regnum;
2040 buf = value_contents_raw (result_value);
2041
2042 regnum -= gdbarch_num_regs (gdbarch);
2043
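/* A note on the layout assumed by the reads below: each Qn/Dn/Sn/Hn/Bn
   pseudo register reads back the leading 16/8/4/2/1 bytes of the
   corresponding raw Vn register; the remaining bytes of the raw
   register are ignored. */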
2044 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2045 {
2046 enum register_status status;
2047 unsigned v_regnum;
2048
2049 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2050 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2051 if (status != REG_VALID)
2052 mark_value_bytes_unavailable (result_value, 0,
2053 TYPE_LENGTH (value_type (result_value)));
2054 else
2055 memcpy (buf, reg_buf, Q_REGISTER_SIZE);
2056 return result_value;
2057 }
2058
2059 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2060 {
2061 enum register_status status;
2062 unsigned v_regnum;
2063
2064 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2065 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2066 if (status != REG_VALID)
2067 mark_value_bytes_unavailable (result_value, 0,
2068 TYPE_LENGTH (value_type (result_value)));
2069 else
2070 memcpy (buf, reg_buf, D_REGISTER_SIZE);
2071 return result_value;
2072 }
2073
2074 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2075 {
2076 enum register_status status;
2077 unsigned v_regnum;
2078
2079 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2080 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2081 if (status != REG_VALID)
2082 mark_value_bytes_unavailable (result_value, 0,
2083 TYPE_LENGTH (value_type (result_value)));
2084 else
2085 memcpy (buf, reg_buf, S_REGISTER_SIZE);
2086 return result_value;
2087 }
2088
2089 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2090 {
2091 enum register_status status;
2092 unsigned v_regnum;
2093
2094 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2095 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2096 if (status != REG_VALID)
2097 mark_value_bytes_unavailable (result_value, 0,
2098 TYPE_LENGTH (value_type (result_value)));
2099 else
2100 memcpy (buf, reg_buf, H_REGISTER_SIZE);
2101 return result_value;
2102 }
2103
2104 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2105 {
2106 enum register_status status;
2107 unsigned v_regnum;
2108
2109 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2110 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2111 if (status != REG_VALID)
2112 mark_value_bytes_unavailable (result_value, 0,
2113 TYPE_LENGTH (value_type (result_value)));
2114 else
2115 memcpy (buf, reg_buf, B_REGISTER_SIZE);
2116 return result_value;
2117 }
2118
2119 gdb_assert_not_reached ("regnum out of bounds");
2120 }
2121
2122 /* Implement the "pseudo_register_write" gdbarch method. */
2123
2124 static void
2125 aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2126 int regnum, const gdb_byte *buf)
2127 {
2128 gdb_byte reg_buf[MAX_REGISTER_SIZE];
2129
2130 /* Ensure the register buffer is zero. We want gdb writes of the
2131 various 'scalar' pseudo registers to behave like architectural
2132 writes: register-width bytes are written and the remainder is set
2133 to zero. */
2134 memset (reg_buf, 0, sizeof (reg_buf));
2135
2136 regnum -= gdbarch_num_regs (gdbarch);
2137
2138 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2139 {
2140 /* pseudo Q registers */
2141 unsigned v_regnum;
2142
2143 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2144 memcpy (reg_buf, buf, Q_REGISTER_SIZE);
2145 regcache_raw_write (regcache, v_regnum, reg_buf);
2146 return;
2147 }
2148
2149 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2150 {
2151 /* pseudo D registers */
2152 unsigned v_regnum;
2153
2154 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2155 memcpy (reg_buf, buf, D_REGISTER_SIZE);
2156 regcache_raw_write (regcache, v_regnum, reg_buf);
2157 return;
2158 }
2159
2160 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2161 {
2162 unsigned v_regnum;
2163
2164 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2165 memcpy (reg_buf, buf, S_REGISTER_SIZE);
2166 regcache_raw_write (regcache, v_regnum, reg_buf);
2167 return;
2168 }
2169
2170 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2171 {
2172 /* pseudo H registers */
2173 unsigned v_regnum;
2174
2175 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2176 memcpy (reg_buf, buf, H_REGISTER_SIZE);
2177 regcache_raw_write (regcache, v_regnum, reg_buf);
2178 return;
2179 }
2180
2181 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2182 {
2183 /* pseudo B registers */
2184 unsigned v_regnum;
2185
2186 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2187 memcpy (reg_buf, buf, B_REGISTER_SIZE);
2188 regcache_raw_write (regcache, v_regnum, reg_buf);
2189 return;
2190 }
2191
2192 gdb_assert_not_reached ("regnum out of bounds");
2193 }
2194
2195 /* Callback function for user_reg_add. */
2196
2197 static struct value *
2198 value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
2199 {
2200 const int *reg_p = (const int *) baton;
2201
2202 return value_of_register (*reg_p, frame);
2203 }
2204 \f
2205
2206 /* Implement the "software_single_step" gdbarch method, needed to
2207 single step through atomic sequences on AArch64. */
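/* For example, a typical atomic increment (an illustrative sequence,
   not taken from any particular program) looks like:

	loop:
	  ldaxr  w1, [x0]     ; Load Exclusive opens the sequence
	  add    w1, w1, #1
	  stlxr  w2, w1, [x0] ; Store Exclusive closes it
	  cbnz   w2, loop     ; retry if exclusivity was lost

   A breakpoint inside the sequence could clear the exclusive monitor
   and make the store fail indefinitely, so the breakpoints are placed
   past the closing Store Exclusive, and at the destination of any
   conditional branch found within the sequence. */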
2208
2209 static int
2210 aarch64_software_single_step (struct frame_info *frame)
2211 {
2212 struct gdbarch *gdbarch = get_frame_arch (frame);
2213 struct address_space *aspace = get_frame_address_space (frame);
2214 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2215 const int insn_size = 4;
2216 const int atomic_sequence_length = 16; /* Instruction sequence length. */
2217 CORE_ADDR pc = get_frame_pc (frame);
2218 CORE_ADDR breaks[2] = { -1, -1 };
2219 CORE_ADDR loc = pc;
2220 CORE_ADDR closing_insn = 0;
2221 uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
2222 byte_order_for_code);
2223 int index;
2224 int insn_count;
2225 int bc_insn_count = 0; /* Conditional branch instruction count. */
2226 int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed). */
2227 aarch64_inst inst;
2228
2229 if (aarch64_decode_insn (insn, &inst, 1) != 0)
2230 return 0;
2231
2232 /* Look for a Load Exclusive instruction which begins the sequence. */
2233 if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
2234 return 0;
2235
2236 for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
2237 {
2238 loc += insn_size;
2239 insn = read_memory_unsigned_integer (loc, insn_size,
2240 byte_order_for_code);
2241
2242 if (aarch64_decode_insn (insn, &inst, 1) != 0)
2243 return 0;
2244 /* Check if the instruction is a conditional branch. */
2245 if (inst.opcode->iclass == condbranch)
2246 {
2247 gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);
2248
2249 if (bc_insn_count >= 1)
2250 return 0;
2251
2252 /* It is, so we'll try to set a breakpoint at the destination. */
2253 breaks[1] = loc + inst.operands[0].imm.value;
2254
2255 bc_insn_count++;
2256 last_breakpoint++;
2257 }
2258
2259 /* Look for the Store Exclusive which closes the atomic sequence. */
2260 if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
2261 {
2262 closing_insn = loc;
2263 break;
2264 }
2265 }
2266
2267 /* We didn't find a closing Store Exclusive instruction; fall back. */
2268 if (!closing_insn)
2269 return 0;
2270
2271 /* Insert breakpoint after the end of the atomic sequence. */
2272 breaks[0] = loc + insn_size;
2273
2274 /* Check for duplicated breakpoints, and also check that the second
2275 breakpoint is not within the atomic sequence. */
2276 if (last_breakpoint
2277 && (breaks[1] == breaks[0]
2278 || (breaks[1] >= pc && breaks[1] <= closing_insn)))
2279 last_breakpoint = 0;
2280
2281 /* Insert the breakpoint at the end of the sequence, and one at the
2282 destination of the conditional branch, if it exists. */
2283 for (index = 0; index <= last_breakpoint; index++)
2284 insert_single_step_breakpoint (gdbarch, aspace, breaks[index]);
2285
2286 return 1;
2287 }
2288
2289 struct displaced_step_closure
2290 {
2291 /* It is true when a conditional instruction, such as B.COND or TBZ,
2292 is being displaced stepped. */
2293 int cond;
2294
2295 /* PC adjustment offset after displaced stepping. */
2296 int32_t pc_adjust;
2297 };
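/* aarch64_displaced_step_fixup below infers whether a conditional
   branch in the scratch pad was taken from how far the PC advanced
   (8 bytes if taken, 4 if not), then moves the PC to the original
   instruction address plus PC_ADJUST. */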
2298
2299 /* Data when visiting instructions for displaced stepping. */
2300
2301 struct aarch64_displaced_step_data
2302 {
2303 struct aarch64_insn_data base;
2304
2305 /* The address at which the instruction will be executed. */
2306 CORE_ADDR new_addr;
2307 /* Buffer of instructions to be copied to NEW_ADDR to execute. */
2308 uint32_t insn_buf[DISPLACED_MODIFIED_INSNS];
2309 /* Number of instructions in INSN_BUF. */
2310 unsigned insn_count;
2311 /* The register cache used while displaced stepping. */
2312 struct regcache *regs;
2313
2314 struct displaced_step_closure *dsc;
2315 };
2316
2317 /* Implementation of aarch64_insn_visitor method "b". */
2318
2319 static void
2320 aarch64_displaced_step_b (const int is_bl, const int32_t offset,
2321 struct aarch64_insn_data *data)
2322 {
2323 struct aarch64_displaced_step_data *dsd
2324 = (struct aarch64_displaced_step_data *) data;
2325 int64_t new_offset = data->insn_addr - dsd->new_addr + offset;
2326
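/* B and BL encode a signed 26-bit word offset, i.e. a 28-bit byte
   offset of roughly +/-128MB; if NEW_OFFSET does not fit, emit a NOP
   instead and let the fixup phase move the PC. */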
2327 if (can_encode_int32 (new_offset, 28))
2328 {
2329 /* Emit B rather than BL, because executing BL on a new address
2330 will get the wrong address into LR. In order to avoid this,
2331 we emit B, and update LR if the instruction is BL. */
2332 emit_b (dsd->insn_buf, 0, new_offset);
2333 dsd->insn_count++;
2334 }
2335 else
2336 {
2337 /* Write NOP. */
2338 emit_nop (dsd->insn_buf);
2339 dsd->insn_count++;
2340 dsd->dsc->pc_adjust = offset;
2341 }
2342
2343 if (is_bl)
2344 {
2345 /* Update LR. */
2346 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
2347 data->insn_addr + 4);
2348 }
2349 }
2350
2351 /* Implementation of aarch64_insn_visitor method "b_cond". */
2352
2353 static void
2354 aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
2355 struct aarch64_insn_data *data)
2356 {
2357 struct aarch64_displaced_step_data *dsd
2358 = (struct aarch64_displaced_step_data *) data;
2359
2360 /* GDB has to fix up the PC after this instruction has been
2361 displaced stepped, and the fixup differs according to whether the
2362 condition was true or false. Instead of checking COND against the
2363 condition flags, we can use the following instructions, and GDB
2364 can tell how to fix up the PC from the PC value itself.
2365
2366 B.COND TAKEN ; If cond is true, then jump to TAKEN.
2367 INSN1 ;
2368 TAKEN:
2369 INSN2
2370 */
2371
2372 emit_bcond (dsd->insn_buf, cond, 8);
2373 dsd->dsc->cond = 1;
2374 dsd->dsc->pc_adjust = offset;
2375 dsd->insn_count = 1;
2376 }
2377
2378 /* Dynamically build an aarch64_register operand. If the register
2379 is known statically, we should make it a global as above instead
2380 of using this helper function. */
2381
2382 static struct aarch64_register
2383 aarch64_register (unsigned num, int is64)
2384 {
2385 return (struct aarch64_register) { num, is64 };
2386 }
2387
2388 /* Implementation of aarch64_insn_visitor method "cb". */
2389
2390 static void
2391 aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
2392 const unsigned rn, int is64,
2393 struct aarch64_insn_data *data)
2394 {
2395 struct aarch64_displaced_step_data *dsd
2396 = (struct aarch64_displaced_step_data *) data;
2397
2398 /* The offset is out of range for a compare and branch
2399 instruction. We can use the following instructions instead:
2400
2401 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
2402 INSN1 ;
2403 TAKEN:
2404 INSN2
2405 */
2406 emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
2407 dsd->insn_count = 1;
2408 dsd->dsc->cond = 1;
2409 dsd->dsc->pc_adjust = offset;
2410 }
2411
2412 /* Implementation of aarch64_insn_visitor method "tb". */
2413
2414 static void
2415 aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
2416 const unsigned rt, unsigned bit,
2417 struct aarch64_insn_data *data)
2418 {
2419 struct aarch64_displaced_step_data *dsd
2420 = (struct aarch64_displaced_step_data *) data;
2421
2422 /* The offset is out of range for a test bit and branch
2423 instruction. We can use the following instructions instead:
2424
2425 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
2426 INSN1 ;
2427 TAKEN:
2428 INSN2
2429
2430 */
2431 emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
2432 dsd->insn_count = 1;
2433 dsd->dsc->cond = 1;
2434 dsd->dsc->pc_adjust = offset;
2435 }
2436
2437 /* Implementation of aarch64_insn_visitor method "adr". */
2438
2439 static void
2440 aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
2441 const int is_adrp, struct aarch64_insn_data *data)
2442 {
2443 struct aarch64_displaced_step_data *dsd
2444 = (struct aarch64_displaced_step_data *) data;
2445 /* We know exactly the address the ADR{P,} instruction will compute.
2446 We can just write it to the destination register. */
2447 CORE_ADDR address = data->insn_addr + offset;
2448
2449 if (is_adrp)
2450 {
2451 /* Clear the lower 12 bits of the offset to get the 4K page. */
2452 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2453 address & ~0xfff);
2454 }
2455 else
2456 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2457 address);
2458
2459 dsd->dsc->pc_adjust = 4;
2460 emit_nop (dsd->insn_buf);
2461 dsd->insn_count = 1;
2462 }
2463
2464 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
2465
2466 static void
2467 aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
2468 const unsigned rt, const int is64,
2469 struct aarch64_insn_data *data)
2470 {
2471 struct aarch64_displaced_step_data *dsd
2472 = (struct aarch64_displaced_step_data *) data;
2473 CORE_ADDR address = data->insn_addr + offset;
2474 struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };
2475
2476 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
2477 address);
2478
2479 if (is_sw)
2480 dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
2481 aarch64_register (rt, 1), zero);
2482 else
2483 dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
2484 aarch64_register (rt, 1), zero);
2485
2486 dsd->dsc->pc_adjust = 4;
2487 }
2488
2489 /* Implementation of aarch64_insn_visitor method "others". */
2490
2491 static void
2492 aarch64_displaced_step_others (const uint32_t insn,
2493 struct aarch64_insn_data *data)
2494 {
2495 struct aarch64_displaced_step_data *dsd
2496 = (struct aarch64_displaced_step_data *) data;
2497
2498 aarch64_emit_insn (dsd->insn_buf, insn);
2499 dsd->insn_count = 1;
2500
2501 if ((insn & 0xfffffc1f) == 0xd65f0000)
2502 {
2503 /* RET Rn (the mask ignores the register field in bits 5-9): the PC comes from the return register, so no adjustment is needed. */
2504 dsd->dsc->pc_adjust = 0;
2505 }
2506 else
2507 dsd->dsc->pc_adjust = 4;
2508 }
2509
2510 static const struct aarch64_insn_visitor visitor =
2511 {
2512 aarch64_displaced_step_b,
2513 aarch64_displaced_step_b_cond,
2514 aarch64_displaced_step_cb,
2515 aarch64_displaced_step_tb,
2516 aarch64_displaced_step_adr,
2517 aarch64_displaced_step_ldr_literal,
2518 aarch64_displaced_step_others,
2519 };
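/* aarch64_relocate_instruction (declared in arch/aarch64-insn.h)
   decodes the original instruction and dispatches to the matching
   callback above, which emits the relocated instruction(s) into the
   aarch64_displaced_step_data buffer. */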
2520
2521 /* Implement the "displaced_step_copy_insn" gdbarch method. */
2522
2523 struct displaced_step_closure *
2524 aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
2525 CORE_ADDR from, CORE_ADDR to,
2526 struct regcache *regs)
2527 {
2528 struct displaced_step_closure *dsc = NULL;
2529 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2530 uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
2531 struct aarch64_displaced_step_data dsd;
2532 aarch64_inst inst;
2533
2534 if (aarch64_decode_insn (insn, &inst, 1) != 0)
2535 return NULL;
2536
2537 /* Look for a Load Exclusive instruction which begins the sequence. */
2538 if (inst.opcode->iclass == ldstexcl && bit (insn, 22))
2539 {
2540 /* We can't displaced-step atomic sequences. */
2541 return NULL;
2542 }
2543
2544 dsc = XCNEW (struct displaced_step_closure);
2545 dsd.base.insn_addr = from;
2546 dsd.new_addr = to;
2547 dsd.regs = regs;
2548 dsd.dsc = dsc;
2549 dsd.insn_count = 0;
2550 aarch64_relocate_instruction (insn, &visitor,
2551 (struct aarch64_insn_data *) &dsd);
2552 gdb_assert (dsd.insn_count <= DISPLACED_MODIFIED_INSNS);
2553
2554 if (dsd.insn_count != 0)
2555 {
2556 int i;
2557
2558 /* The instruction can be relocated to the scratch pad. Copy the
2559 relocated instruction(s) there. */
2560 for (i = 0; i < dsd.insn_count; i++)
2561 {
2562 if (debug_displaced)
2563 {
2564 debug_printf ("displaced: writing insn ");
2565 debug_printf ("%.8x", dsd.insn_buf[i]);
2566 debug_printf (" at %s\n", paddress (gdbarch, to + i * 4));
2567 }
2568 write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
2569 (ULONGEST) dsd.insn_buf[i]);
2570 }
2571 }
2572 else
2573 {
2574 xfree (dsc);
2575 dsc = NULL;
2576 }
2577
2578 return dsc;
2579 }
2580
2581 /* Implement the "displaced_step_fixup" gdbarch method. */
2582
2583 void
2584 aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
2585 struct displaced_step_closure *dsc,
2586 CORE_ADDR from, CORE_ADDR to,
2587 struct regcache *regs)
2588 {
2589 if (dsc->cond)
2590 {
2591 ULONGEST pc;
2592
2593 regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc);
2594 if (pc - to == 8)
2595 {
2596 /* Condition is true. */
2597 }
2598 else if (pc - to == 4)
2599 {
2600 /* Condition is false. */
2601 dsc->pc_adjust = 4;
2602 }
2603 else
2604 gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
2605 }
2606
2607 if (dsc->pc_adjust != 0)
2608 {
2609 if (debug_displaced)
2610 {
2611 debug_printf ("displaced: fixup: set PC to %s:%d\n",
2612 paddress (gdbarch, from), dsc->pc_adjust);
2613 }
2614 regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
2615 from + dsc->pc_adjust);
2616 }
2617 }
2618
2619 /* Implement the "displaced_step_hw_singlestep" gdbarch method. */
2620
2621 int
2622 aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch,
2623 struct displaced_step_closure *closure)
2624 {
2625 return 1;
2626 }
2627
2628 /* Initialize the current architecture based on INFO. If possible,
2629 re-use an architecture from ARCHES, which is a list of
2630 architectures already created during this debugging session.
2631
2632 Called e.g. at program startup, when reading a core file, and when
2633 reading a binary file. */
2634
2635 static struct gdbarch *
2636 aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
2637 {
2638 struct gdbarch_tdep *tdep;
2639 struct gdbarch *gdbarch;
2640 struct gdbarch_list *best_arch;
2641 struct tdesc_arch_data *tdesc_data = NULL;
2642 const struct target_desc *tdesc = info.target_desc;
2643 int i;
2644 int valid_p = 1;
2645 const struct tdesc_feature *feature;
2646 int num_regs = 0;
2647 int num_pseudo_regs = 0;
2648
2649 /* Ensure we always have a target descriptor. */
2650 if (!tdesc_has_registers (tdesc))
2651 tdesc = tdesc_aarch64;
2652
2653 gdb_assert (tdesc);
2654
2655 feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");
2656
2657 if (feature == NULL)
2658 return NULL;
2659
2660 tdesc_data = tdesc_data_alloc ();
2661
2662 /* Validate the descriptor provides the mandatory core R registers
2663 and allocate their numbers. */
2664 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
2665 valid_p &=
2666 tdesc_numbered_register (feature, tdesc_data, AARCH64_X0_REGNUM + i,
2667 aarch64_r_register_names[i]);
2668
2669 num_regs = AARCH64_X0_REGNUM + i;
2670
2671 /* Look for the V registers. */
2672 feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
2673 if (feature)
2674 {
2675 /* Validate the descriptor provides the mandatory V registers
2676 and allocate their numbers. */
2677 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
2678 valid_p &=
2679 tdesc_numbered_register (feature, tdesc_data, AARCH64_V0_REGNUM + i,
2680 aarch64_v_register_names[i]);
2681
2682 num_regs = AARCH64_V0_REGNUM + i;
2683
2684 num_pseudo_regs += 32; /* Add the Qn scalar register pseudos. */
2685 num_pseudo_regs += 32; /* Add the Dn scalar register pseudos. */
2686 num_pseudo_regs += 32; /* Add the Sn scalar register pseudos. */
2687 num_pseudo_regs += 32; /* Add the Hn scalar register pseudos. */
2688 num_pseudo_regs += 32; /* Add the Bn scalar register pseudos. */
2689 }
2690
2691 if (!valid_p)
2692 {
2693 tdesc_data_cleanup (tdesc_data);
2694 return NULL;
2695 }
2696
2697 /* AArch64 code is always little-endian. */
2698 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
2699
2700 /* If there is already a candidate, use it. */
2701 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
2702 best_arch != NULL;
2703 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
2704 {
2705 /* Found a match. */
2706 break;
2707 }
2708
2709 if (best_arch != NULL)
2710 {
2711 if (tdesc_data != NULL)
2712 tdesc_data_cleanup (tdesc_data);
2713 return best_arch->gdbarch;
2714 }
2715
2716 tdep = XCNEW (struct gdbarch_tdep);
2717 gdbarch = gdbarch_alloc (&info, tdep);
2718
2719 /* This should be low enough for everything. */
2720 tdep->lowest_pc = 0x20;
2721 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
2722 tdep->jb_elt_size = 8;
2723
2724 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
2725 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
2726
2727 /* Frame handling. */
2728 set_gdbarch_dummy_id (gdbarch, aarch64_dummy_id);
2729 set_gdbarch_unwind_pc (gdbarch, aarch64_unwind_pc);
2730 set_gdbarch_unwind_sp (gdbarch, aarch64_unwind_sp);
2731
2732 /* Advance PC across function entry code. */
2733 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
2734
2735 /* The stack grows downward. */
2736 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
2737
2738 /* Breakpoint manipulation. */
2739 set_gdbarch_breakpoint_from_pc (gdbarch, aarch64_breakpoint_from_pc);
2740 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
2741 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
2742
2743 /* Information about registers, etc. */
2744 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
2745 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
2746 set_gdbarch_num_regs (gdbarch, num_regs);
2747
2748 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
2749 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
2750 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
2751 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
2752 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
2753 set_tdesc_pseudo_register_reggroup_p (gdbarch,
2754 aarch64_pseudo_register_reggroup_p);
2755
2756 /* ABI */
2757 set_gdbarch_short_bit (gdbarch, 16);
2758 set_gdbarch_int_bit (gdbarch, 32);
2759 set_gdbarch_float_bit (gdbarch, 32);
2760 set_gdbarch_double_bit (gdbarch, 64);
2761 set_gdbarch_long_double_bit (gdbarch, 128);
2762 set_gdbarch_long_bit (gdbarch, 64);
2763 set_gdbarch_long_long_bit (gdbarch, 64);
2764 set_gdbarch_ptr_bit (gdbarch, 64);
2765 set_gdbarch_char_signed (gdbarch, 0);
2766 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
2767 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
2768 set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);
2769
2770 /* Internal <-> external register number maps. */
2771 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
2772
2773 /* Returning results. */
2774 set_gdbarch_return_value (gdbarch, aarch64_return_value);
2775
2776 /* Disassembly. */
2777 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
2778
2779 /* Virtual tables. */
2780 set_gdbarch_vbit_in_delta (gdbarch, 1);
2781
2782 /* Hook in the ABI-specific overrides, if they have been registered. */
2783 info.target_desc = tdesc;
2784 info.tdep_info = (void *) tdesc_data;
2785 gdbarch_init_osabi (info, gdbarch);
2786
2787 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
2788
2789 /* Add some default predicates. */
2790 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
2791 dwarf2_append_unwinders (gdbarch);
2792 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
2793
2794 frame_base_set_default (gdbarch, &aarch64_normal_base);
2795
2796 /* Now that we have tuned the configuration, set a few final
2797 things, based on what the OS ABI has told us. */
2798
2799 if (tdep->jb_pc >= 0)
2800 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
2801
2802 set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);
2803
2804 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
2805
2806 /* Add standard register aliases. */
2807 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
2808 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
2809 value_of_aarch64_user_reg,
2810 &aarch64_register_aliases[i].regnum);
2811
2812 return gdbarch;
2813 }
2814
2815 static void
2816 aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
2817 {
2818 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2819
2820 if (tdep == NULL)
2821 return;
2822
2823 fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
2824 paddress (gdbarch, tdep->lowest_pc));
2825 }
2826
2827 /* Suppress warning from -Wmissing-prototypes. */
2828 extern initialize_file_ftype _initialize_aarch64_tdep;
2829
2830 void
2831 _initialize_aarch64_tdep (void)
2832 {
2833 gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
2834 aarch64_dump_tdep);
2835
2836 initialize_tdesc_aarch64 ();
2837
2838 /* Debug this file's internals. */
2839 add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
2840 Set AArch64 debugging."), _("\
2841 Show AArch64 debugging."), _("\
2842 When on, AArch64 specific debugging is enabled."),
2843 NULL,
2844 show_aarch64_debug,
2845 &setdebuglist, &showdebuglist);
2846 }
2847
2848 /* AArch64 process record-replay related structures, defines etc. */
2849
2850 #define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
2851 do \
2852 { \
2853 unsigned int reg_len = LENGTH; \
2854 if (reg_len) \
2855 { \
2856 REGS = XNEWVEC (uint32_t, reg_len); \
2857 memcpy (&REGS[0], &RECORD_BUF[0], sizeof (uint32_t) * LENGTH); \
2858 } \
2859 } \
2860 while (0)
2861
2862 #define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
2863 do \
2864 { \
2865 unsigned int mem_len = LENGTH; \
2866 if (mem_len) \
2867 { \
2868 MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
2869 memcpy (&MEMS->len, &RECORD_BUF[0], \
2870 sizeof (struct aarch64_mem_r) * LENGTH); \
2871 } \
2872 } \
2873 while (0)
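/* Both helpers copy the fixed-size scratch buffers filled in by the
   record handlers into heap allocations owned by the current
   insn_decode_record; deallocate_reg_mem releases them once the
   records have been handed to the record-full layer. */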
2874
2875 /* AArch64 record/replay structures and enumerations. */
2876
2877 struct aarch64_mem_r
2878 {
2879 uint64_t len; /* Record length. */
2880 uint64_t addr; /* Memory address. */
2881 };
2882
2883 enum aarch64_record_result
2884 {
2885 AARCH64_RECORD_SUCCESS,
2886 AARCH64_RECORD_FAILURE,
2887 AARCH64_RECORD_UNSUPPORTED,
2888 AARCH64_RECORD_UNKNOWN
2889 };
2890
2891 typedef struct insn_decode_record_t
2892 {
2893 struct gdbarch *gdbarch;
2894 struct regcache *regcache;
2895 CORE_ADDR this_addr; /* Address of insn to be recorded. */
2896 uint32_t aarch64_insn; /* Insn to be recorded. */
2897 uint32_t mem_rec_count; /* Count of memory records. */
2898 uint32_t reg_rec_count; /* Count of register records. */
2899 uint32_t *aarch64_regs; /* Registers to be recorded. */
2900 struct aarch64_mem_r *aarch64_mems; /* Memory locations to be recorded. */
2901 } insn_decode_record;
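/* REG_REC_COUNT and MEM_REC_COUNT give the number of valid entries
   in AARCH64_REGS and AARCH64_MEMS respectively; both arrays are
   heap-allocated by the REG_ALLOC and MEM_ALLOC helpers above. */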
2902
2903 /* Record handler for data processing - register instructions. */
2904
2905 static unsigned int
2906 aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
2907 {
2908 uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
2909 uint32_t record_buf[4];
2910
2911 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
2912 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
2913 insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);
2914
2915 if (!bit (aarch64_insn_r->aarch64_insn, 28))
2916 {
2917 uint8_t setflags;
2918
2919 /* Logical (shifted register). */
2920 if (insn_bits24_27 == 0x0a)
2921 setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
2922 /* Add/subtract. */
2923 else if (insn_bits24_27 == 0x0b)
2924 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
2925 else
2926 return AARCH64_RECORD_UNKNOWN;
2927
2928 record_buf[0] = reg_rd;
2929 aarch64_insn_r->reg_rec_count = 1;
2930 if (setflags)
2931 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
2932 }
2933 else
2934 {
2935 if (insn_bits24_27 == 0x0b)
2936 {
2937 /* Data-processing (3 source). */
2938 record_buf[0] = reg_rd;
2939 aarch64_insn_r->reg_rec_count = 1;
2940 }
2941 else if (insn_bits24_27 == 0x0a)
2942 {
2943 if (insn_bits21_23 == 0x00)
2944 {
2945 /* Add/subtract (with carry). */
2946 record_buf[0] = reg_rd;
2947 aarch64_insn_r->reg_rec_count = 1;
2948 if (bit (aarch64_insn_r->aarch64_insn, 29))
2949 {
2950 record_buf[1] = AARCH64_CPSR_REGNUM;
2951 aarch64_insn_r->reg_rec_count = 2;
2952 }
2953 }
2954 else if (insn_bits21_23 == 0x02)
2955 {
2956 /* Conditional compare (register) and conditional compare
2957 (immediate) instructions. */
2958 record_buf[0] = AARCH64_CPSR_REGNUM;
2959 aarch64_insn_r->reg_rec_count = 1;
2960 }
2961 else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
2962 {
2963 /* Conditional select. */
2964 /* Data-processing (2 source). */
2965 /* Data-processing (1 source). */
2966 record_buf[0] = reg_rd;
2967 aarch64_insn_r->reg_rec_count = 1;
2968 }
2969 else
2970 return AARCH64_RECORD_UNKNOWN;
2971 }
2972 }
2973
2974 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
2975 record_buf);
2976 return AARCH64_RECORD_SUCCESS;
2977 }
2978
2979 /* Record handler for data processing - immediate instructions. */
2980
2981 static unsigned int
2982 aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
2983 {
2984 uint8_t reg_rd, insn_bit28, insn_bit23, insn_bits24_27, setflags;
2985 uint32_t record_buf[4];
2986
2987 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
2988 insn_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
2989 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
2990 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
2991
2992 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
2993 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
2994 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
2995 {
2996 record_buf[0] = reg_rd;
2997 aarch64_insn_r->reg_rec_count = 1;
2998 }
2999 else if (insn_bits24_27 == 0x01)
3000 {
3001 /* Add/Subtract (immediate). */
3002 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3003 record_buf[0] = reg_rd;
3004 aarch64_insn_r->reg_rec_count = 1;
3005 if (setflags)
3006 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3007 }
3008 else if (insn_bits24_27 == 0x02 && !insn_bit23)
3009 {
3010 /* Logical (immediate). */
3011 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
3012 record_buf[0] = reg_rd;
3013 aarch64_insn_r->reg_rec_count = 1;
3014 if (setflags)
3015 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3016 }
3017 else
3018 return AARCH64_RECORD_UNKNOWN;
3019
3020 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3021 record_buf);
3022 return AARCH64_RECORD_SUCCESS;
3023 }
3024
3025 /* Record handler for branch, exception generation and system instructions. */
3026
3027 static unsigned int
3028 aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
3029 {
3030 struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
3031 uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
3032 uint32_t record_buf[4];
3033
3034 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3035 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3036 insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3037
3038 if (insn_bits28_31 == 0x0d)
3039 {
3040 /* Exception generation instructions. */
3041 if (insn_bits24_27 == 0x04)
3042 {
3043 if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
3044 && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
3045 && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
3046 {
3047 ULONGEST svc_number;
3048
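/* On AArch64 GNU/Linux the supervisor call number is passed in
   x8, which is raw register 8 here; the OS-specific
   aarch64_syscall_record hook then records the call's effects. */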
3049 regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
3050 &svc_number);
3051 return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
3052 svc_number);
3053 }
3054 else
3055 return AARCH64_RECORD_UNSUPPORTED;
3056 }
3057 /* System instructions. */
3058 else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
3059 {
3060 uint32_t reg_rt, reg_crn;
3061
3062 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3063 reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3064
3065 /* Record RT in case of SYSL and MRS instructions. */
3066 if (bit (aarch64_insn_r->aarch64_insn, 21))
3067 {
3068 record_buf[0] = reg_rt;
3069 aarch64_insn_r->reg_rec_count = 1;
3070 }
3071 /* Record the CPSR for HINT and MSR (immediate) instructions. */
3072 else if (reg_crn == 0x02 || reg_crn == 0x04)
3073 {
3074 record_buf[0] = AARCH64_CPSR_REGNUM;
3075 aarch64_insn_r->reg_rec_count = 1;
3076 }
3077 }
3078 /* Unconditional branch (register). */
3079 else if ((insn_bits24_27 & 0x0e) == 0x06)
3080 {
3081 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3082 if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
3083 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3084 }
3085 else
3086 return AARCH64_RECORD_UNKNOWN;
3087 }
3088 /* Unconditional branch (immediate). */
3089 else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
3090 {
3091 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3092 if (bit (aarch64_insn_r->aarch64_insn, 31))
3093 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3094 }
3095 else
3096 /* Compare & branch (immediate), Test & branch (immediate) and
3097 Conditional branch (immediate). */
3098 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3099
3100 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3101 record_buf);
3102 return AARCH64_RECORD_SUCCESS;
3103 }
3104
3105 /* Record handler for advanced SIMD load and store instructions. */
3106
3107 static unsigned int
3108 aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
3109 {
3110 CORE_ADDR address;
3111 uint64_t addr_offset = 0;
3112 uint32_t record_buf[24];
3113 uint64_t record_buf_mem[24];
3114 uint32_t reg_rn, reg_rt;
3115 uint32_t reg_index = 0, mem_index = 0;
3116 uint8_t opcode_bits, size_bits;
3117
3118 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3119 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3120 size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3121 opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3122 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);
3123
3124 if (record_debug)
3125 debug_printf ("Process record: Advanced SIMD load/store\n");
3126
3127 /* Load/store single structure. */
3128 if (bit (aarch64_insn_r->aarch64_insn, 24))
3129 {
3130 uint8_t sindex, scale, selem, esize, replicate = 0;
3131 scale = opcode_bits >> 2;
3132 selem = ((opcode_bits & 0x02) |
3133 bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
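/* SCALE (the top two opcode bits) selects the element size, and
   SELEM, formed from opcode bit 1 combined with instruction bit 21
   plus one, is the number of registers per structure element
   (1 to 4). */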
3134 switch (scale)
3135 {
3136 case 1:
3137 if (size_bits & 0x01)
3138 return AARCH64_RECORD_UNKNOWN;
3139 break;
3140 case 2:
3141 if ((size_bits >> 1) & 0x01)
3142 return AARCH64_RECORD_UNKNOWN;
3143 if (size_bits & 0x01)
3144 {
3145 if (!((opcode_bits >> 1) & 0x01))
3146 scale = 3;
3147 else
3148 return AARCH64_RECORD_UNKNOWN;
3149 }
3150 break;
3151 case 3:
3152 if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
3153 {
3154 scale = size_bits;
3155 replicate = 1;
3156 break;
3157 }
3158 else
3159 return AARCH64_RECORD_UNKNOWN;
3160 default:
3161 break;
3162 }
3163 esize = 8 << scale;
3164 if (replicate)
3165 for (sindex = 0; sindex < selem; sindex++)
3166 {
3167 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3168 reg_rt = (reg_rt + 1) % 32;
3169 }
3170 else
3171 {
3172 for (sindex = 0; sindex < selem; sindex++)
3173 {
3174 if (bit (aarch64_insn_r->aarch64_insn, 22))
3175 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3176 else
3177 {
3178 record_buf_mem[mem_index++] = esize / 8;
3179 record_buf_mem[mem_index++] = address + addr_offset;
3180 }
3181 addr_offset = addr_offset + (esize / 8);
3182 reg_rt = (reg_rt + 1) % 32;
3183 }
3184 }
3185 }
3186 /* Load/store multiple structure. */
3187 else
3188 {
3189 uint8_t selem, esize, rpt, elements;
3190 uint8_t eindex, rindex;
3191
3192 esize = 8 << size_bits;
3193 if (bit (aarch64_insn_r->aarch64_insn, 30))
3194 elements = 128 / esize;
3195 else
3196 elements = 64 / esize;
3197
3198 switch (opcode_bits)
3199 {
3200 /* LD/ST4 (4 registers). */
3201 case 0:
3202 rpt = 1;
3203 selem = 4;
3204 break;
3205 /* LD/ST1 (4 registers). */
3206 case 2:
3207 rpt = 4;
3208 selem = 1;
3209 break;
3210 /* LD/ST3 (3 registers). */
3211 case 4:
3212 rpt = 1;
3213 selem = 3;
3214 break;
3215 /* LD/ST1 (3 registers). */
3216 case 6:
3217 rpt = 3;
3218 selem = 1;
3219 break;
3220 /* LD/ST1 (1 register). */
3221 case 7:
3222 rpt = 1;
3223 selem = 1;
3224 break;
3225 /* LD/ST2 (2 registers). */
3226 case 8:
3227 rpt = 1;
3228 selem = 2;
3229 break;
3230 /* LD/ST1 (2 registers). */
3231 case 10:
3232 rpt = 2;
3233 selem = 1;
3234 break;
3235 default:
3236 return AARCH64_RECORD_UNSUPPORTED;
3237 break;
3238 }
3239 for (rindex = 0; rindex < rpt; rindex++)
3240 for (eindex = 0; eindex < elements; eindex++)
3241 {
3242 uint8_t reg_tt, sindex;
3243 reg_tt = (reg_rt + rindex) % 32;
3244 for (sindex = 0; sindex < selem; sindex++)
3245 {
3246 if (bit (aarch64_insn_r->aarch64_insn, 22))
3247 record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
3248 else
3249 {
3250 record_buf_mem[mem_index++] = esize / 8;
3251 record_buf_mem[mem_index++] = address + addr_offset;
3252 }
3253 addr_offset = addr_offset + (esize / 8);
3254 reg_tt = (reg_tt + 1) % 32;
3255 }
3256 }
3257 }
3258
3259 if (bit (aarch64_insn_r->aarch64_insn, 23))
3260 record_buf[reg_index++] = reg_rn;
3261
3262 aarch64_insn_r->reg_rec_count = reg_index;
3263 aarch64_insn_r->mem_rec_count = mem_index / 2;
3264 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3265 record_buf_mem);
3266 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3267 record_buf);
3268 return AARCH64_RECORD_SUCCESS;
3269 }
3270
3271 /* Record handler for load and store instructions. */
3272
3273 static unsigned int
3274 aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
3275 {
3276 uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
3277 uint8_t insn_bit23, insn_bit21;
3278 uint8_t opc, size_bits, ld_flag, vector_flag;
3279 uint32_t reg_rn, reg_rt, reg_rt2;
3280 uint64_t datasize, offset;
3281 uint32_t record_buf[8];
3282 uint64_t record_buf_mem[8];
3283 CORE_ADDR address;
3284
3285 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3286 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3287 insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
3288 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3289 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3290 ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
3291 vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
3292 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3293 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3294 reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
3295 size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);
3296
3297 /* Load/store exclusive. */
3298 if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
3299 {
3300 if (record_debug)
3301 debug_printf ("Process record: load/store exclusive\n");
3302
3303 if (ld_flag)
3304 {
3305 record_buf[0] = reg_rt;
3306 aarch64_insn_r->reg_rec_count = 1;
3307 if (insn_bit21)
3308 {
3309 record_buf[1] = reg_rt2;
3310 aarch64_insn_r->reg_rec_count = 2;
3311 }
3312 }
3313 else
3314 {
3315 if (insn_bit21)
3316 datasize = (8 << size_bits) * 2;
3317 else
3318 datasize = (8 << size_bits);
3319 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3320 &address);
3321 record_buf_mem[0] = datasize / 8;
3322 record_buf_mem[1] = address;
3323 aarch64_insn_r->mem_rec_count = 1;
3324 if (!insn_bit23)
3325 {
3326 /* Save register rs. */
3327 record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
3328 aarch64_insn_r->reg_rec_count = 1;
3329 }
3330 }
3331 }
3332 /* Load register (literal) instruction decoding. */
3333 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
3334 {
3335 if (record_debug)
3336 debug_printf ("Process record: load register (literal)\n");
3337 if (vector_flag)
3338 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3339 else
3340 record_buf[0] = reg_rt;
3341 aarch64_insn_r->reg_rec_count = 1;
3342 }
3343 /* Decoding for all types of load/store pair instructions. */
3344 else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
3345 {
3346 if (record_debug)
3347 debug_printf ("Process record: load/store pair\n");
3348
3349 if (ld_flag)
3350 {
3351 if (vector_flag)
3352 {
3353 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3354 record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
3355 }
3356 else
3357 {
3358 record_buf[0] = reg_rt;
3359 record_buf[1] = reg_rt2;
3360 }
3361 aarch64_insn_r->reg_rec_count = 2;
3362 }
3363 else
3364 {
3365 uint16_t imm7_off;
3366 imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
3367 if (!vector_flag)
3368 size_bits = size_bits >> 1;
3369 datasize = 8 << (2 + size_bits);
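/* The 7-bit immediate is signed: when bit 6 is set, compute the
   two's-complement magnitude here and subtract it from the base
   address below. For example, imm7 = 0x7f denotes -1, scaled by
   the access size. */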
3370 offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
3371 offset = offset << (2 + size_bits);
3372 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3373 &address);
3374 if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
3375 {
3376 if (imm7_off & 0x40)
3377 address = address - offset;
3378 else
3379 address = address + offset;
3380 }
3381
3382 record_buf_mem[0] = datasize / 8;
3383 record_buf_mem[1] = address;
3384 record_buf_mem[2] = datasize / 8;
3385 record_buf_mem[3] = address + (datasize / 8);
3386 aarch64_insn_r->mem_rec_count = 2;
3387 }
3388 if (bit (aarch64_insn_r->aarch64_insn, 23))
3389 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3390 }
3391 /* Load/store register (unsigned immediate) instructions. */
3392 else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
3393 {
3394 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3395 if (!(opc >> 1))
3396 if (opc & 0x01)
3397 ld_flag = 0x01;
3398 else
3399 ld_flag = 0x0;
3400 else
3401 if (size_bits != 0x03)
3402 ld_flag = 0x01;
3403 else
3404 return AARCH64_RECORD_UNKNOWN;
3405
3406 if (record_debug)
3407 {
3408 debug_printf ("Process record: load/store (unsigned immediate):"
3409 " size %x V %d opc %x\n", size_bits, vector_flag,
3410 opc);
3411 }
3412
3413 if (!ld_flag)
3414 {
3415 offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
3416 datasize = 8 << size_bits;
3417 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3418 &address);
3419 offset = offset << size_bits;
3420 address = address + offset;
3421
3422 record_buf_mem[0] = datasize >> 3;
3423 record_buf_mem[1] = address;
3424 aarch64_insn_r->mem_rec_count = 1;
3425 }
3426 else
3427 {
3428 if (vector_flag)
3429 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3430 else
3431 record_buf[0] = reg_rt;
3432 aarch64_insn_r->reg_rec_count = 1;
3433 }
3434 }
3435 /* Load/store register (register offset) instructions. */
3436 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3437 && insn_bits10_11 == 0x02 && insn_bit21)
3438 {
3439 if (record_debug)
3440 debug_printf ("Process record: load/store (register offset)\n");
3441 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3442 if (!(opc >> 1))
3443 if (opc & 0x01)
3444 ld_flag = 0x01;
3445 else
3446 ld_flag = 0x0;
3447 else
3448 if (size_bits != 0x03)
3449 ld_flag = 0x01;
3450 else
3451 return AARCH64_RECORD_UNKNOWN;
3452
3453 if (!ld_flag)
3454 {
3455 ULONGEST reg_rm_val;
3456
3457 regcache_raw_read_unsigned (aarch64_insn_r->regcache,
3458 bits (aarch64_insn_r->aarch64_insn, 16, 20), &reg_rm_val);
3459 if (bit (aarch64_insn_r->aarch64_insn, 12))
3460 offset = reg_rm_val << size_bits;
3461 else
3462 offset = reg_rm_val;
3463 datasize = 8 << size_bits;
3464 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3465 &address);
3466 address = address + offset;
3467 record_buf_mem[0] = datasize >> 3;
3468 record_buf_mem[1] = address;
3469 aarch64_insn_r->mem_rec_count = 1;
3470 }
3471 else
3472 {
3473 if (vector_flag)
3474 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3475 else
3476 record_buf[0] = reg_rt;
3477 aarch64_insn_r->reg_rec_count = 1;
3478 }
3479 }
3480 /* Load/store register (immediate and unprivileged) instructions. */
3481 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3482 && !insn_bit21)
3483 {
3484 if (record_debug)
3485 {
3486 debug_printf ("Process record: load/store "
3487 "(immediate and unprivileged)\n");
3488 }
3489 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3490 if (!(opc >> 1))
3491 if (opc & 0x01)
3492 ld_flag = 0x01;
3493 else
3494 ld_flag = 0x0;
3495 else
3496 if (size_bits != 0x03)
3497 ld_flag = 0x01;
3498 else
3499 return AARCH64_RECORD_UNKNOWN;
3500
3501 if (!ld_flag)
3502 {
3503 uint16_t imm9_off;
3504 imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
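/* Likewise the 9-bit immediate is signed: bit 8 selects a
   two's-complement magnitude which is subtracted from the base
   address (this addressing form applies no scaling). */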
3505 offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
3506 datasize = 8 << size_bits;
3507 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3508 &address);
3509 if (insn_bits10_11 != 0x01)
3510 {
3511 if (imm9_off & 0x0100)
3512 address = address - offset;
3513 else
3514 address = address + offset;
3515 }
3516 record_buf_mem[0] = datasize >> 3;
3517 record_buf_mem[1] = address;
3518 aarch64_insn_r->mem_rec_count = 1;
3519 }
3520 else
3521 {
3522 if (vector_flag)
3523 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3524 else
3525 record_buf[0] = reg_rt;
3526 aarch64_insn_r->reg_rec_count = 1;
3527 }
3528 if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
3529 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3530 }
3531 /* Advanced SIMD load/store instructions. */
3532 else
3533 return aarch64_record_asimd_load_store (aarch64_insn_r);
3534
3535 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3536 record_buf_mem);
3537 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3538 record_buf);
3539 return AARCH64_RECORD_SUCCESS;
3540 }
3541
3542 /* Record handler for data processing SIMD and floating point instructions. */
3543
3544 static unsigned int
3545 aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
3546 {
3547 uint8_t insn_bit21, opcode, rmode, reg_rd;
3548 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
3549 uint8_t insn_bits11_14;
3550 uint32_t record_buf[2];
3551
3552 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3553 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3554 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3555 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3556 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
3557 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
3558 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
3559 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3560 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3561
3562 if (record_debug)
3563 debug_printf ("Process record: data processing SIMD/FP: ");
3564
3565 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
3566 {
3567 /* Floating point - fixed point conversion instructions. */
3568 if (!insn_bit21)
3569 {
3570 if (record_debug)
3571 debug_printf ("FP - fixed point conversion");
3572
3573 if ((opcode >> 1) == 0x0 && rmode == 0x03)
3574 record_buf[0] = reg_rd;
3575 else
3576 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3577 }
3578 /* Floating point - conditional compare instructions. */
3579 else if (insn_bits10_11 == 0x01)
3580 {
3581 if (record_debug)
3582 debug_printf ("FP - conditional compare");
3583
3584 record_buf[0] = AARCH64_CPSR_REGNUM;
3585 }
3586 /* Floating point - data processing (2-source) and
3587 conditional select instructions. */
3588 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
3589 {
3590 if (record_debug)
3591 debug_printf ("FP - DP (2-source)");
3592
3593 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3594 }
3595 else if (insn_bits10_11 == 0x00)
3596 {
3597 /* Floating point - immediate instructions. */
3598 if ((insn_bits12_15 & 0x01) == 0x01
3599 || (insn_bits12_15 & 0x07) == 0x04)
3600 {
3601 if (record_debug)
3602 debug_printf ("FP - immediate");
3603 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3604 }
3605 /* Floating point - compare instructions. */
3606 else if ((insn_bits12_15 & 0x03) == 0x02)
3607 {
3608 if (record_debug)
3609 debug_printf ("FP - compare");
3610 record_buf[0] = AARCH64_CPSR_REGNUM;
3611 }
3612 /* Floating point - integer conversion instructions. */
3613 else if (insn_bits12_15 == 0x00)
3614 {
3615 /* Convert float to integer instruction. */
3616 if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
3617 {
3618 if (record_debug)
3619 debug_printf ("float to int conversion");
3620
3621 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3622 }
3623 /* Convert integer to float instruction. */
3624 else if ((opcode >> 1) == 0x01 && !rmode)
3625 {
3626 if (record_debug)
3627 debug_printf ("int to float conversion");
3628
3629 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3630 }
3631 /* Move float to integer instruction. */
3632 else if ((opcode >> 1) == 0x03)
3633 {
3634 if (record_debug)
3635 debug_printf ("move float to int");
3636
3637 if (!(opcode & 0x01))
3638 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3639 else
3640 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3641 }
3642 else
3643 return AARCH64_RECORD_UNKNOWN;
3644 }
3645 else
3646 return AARCH64_RECORD_UNKNOWN;
3647 }
3648 else
3649 return AARCH64_RECORD_UNKNOWN;
3650 }
3651 else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
3652 {
3653 if (record_debug)
3654 debug_printf ("SIMD copy");
3655
3656 /* Advanced SIMD copy instructions. */
3657 if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
3658 && !bit (aarch64_insn_r->aarch64_insn, 15)
3659 && bit (aarch64_insn_r->aarch64_insn, 10))
3660 {
3661 if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
3662 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3663 else
3664 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3665 }
3666 else
3667 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3668 }
3669 /* All remaining floating point or advanced SIMD instructions. */
3670 else
3671 {
3672 if (record_debug)
3673 debug_printf ("all remain");
3674
3675 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3676 }
3677
3678 if (record_debug)
3679 debug_printf ("\n");
3680
3681 aarch64_insn_r->reg_rec_count++;
3682 gdb_assert (aarch64_insn_r->reg_rec_count == 1);
3683 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3684 record_buf);
3685 return AARCH64_RECORD_SUCCESS;
3686 }
3687
3688 /* Decodes the instruction type and invokes the matching record handler. */
3689
3690 static unsigned int
3691 aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
3692 {
3693 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
3694
3695 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
3696 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
3697 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
3698 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
3699
3700 /* Data processing - immediate instructions. */
3701 if (!ins_bit26 && !ins_bit27 && ins_bit28)
3702 return aarch64_record_data_proc_imm (aarch64_insn_r);
3703
3704 /* Branch, exception generation and system instructions. */
3705 if (ins_bit26 && !ins_bit27 && ins_bit28)
3706 return aarch64_record_branch_except_sys (aarch64_insn_r);
3707
3708 /* Load and store instructions. */
3709 if (!ins_bit25 && ins_bit27)
3710 return aarch64_record_load_store (aarch64_insn_r);
3711
3712 /* Data processing - register instructions. */
3713 if (ins_bit25 && !ins_bit26 && ins_bit27)
3714 return aarch64_record_data_proc_reg (aarch64_insn_r);
3715
3716 /* Data processing - SIMD and floating point instructions. */
3717 if (ins_bit25 && ins_bit26 && ins_bit27)
3718 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
3719
3720 return AARCH64_RECORD_UNSUPPORTED;
3721 }
3722
3723 /* Cleans up local record registers and memory allocations. */
3724
3725 static void
3726 deallocate_reg_mem (insn_decode_record *record)
3727 {
3728 xfree (record->aarch64_regs);
3729 xfree (record->aarch64_mems);
3730 }
3731
3732 /* Parse the current instruction, and record the values of the
3733 registers and memory that it will change, to the record_arch_list.
3734 Return -1 if something goes wrong. */
3735
3736 int
3737 aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
3738 CORE_ADDR insn_addr)
3739 {
3740 uint32_t rec_no = 0;
3741 uint8_t insn_size = 4;
3742 int ret = 0;
3743 gdb_byte buf[insn_size];
3744 insn_decode_record aarch64_record;
3745
3746 memset (&buf[0], 0, insn_size);
3747 memset (&aarch64_record, 0, sizeof (insn_decode_record));
3748 target_read_memory (insn_addr, &buf[0], insn_size);
3749 aarch64_record.aarch64_insn
3750 = (uint32_t) extract_unsigned_integer (&buf[0],
3751 insn_size,
3752 gdbarch_byte_order (gdbarch));
3753 aarch64_record.regcache = regcache;
3754 aarch64_record.this_addr = insn_addr;
3755 aarch64_record.gdbarch = gdbarch;
3756
3757 ret = aarch64_record_decode_insn_handler (&aarch64_record);
3758 if (ret == AARCH64_RECORD_UNSUPPORTED)
3759 {
3760 printf_unfiltered (_("Process record does not support instruction "
3761 "0x%0x at address %s.\n"),
3762 aarch64_record.aarch64_insn,
3763 paddress (gdbarch, insn_addr));
3764 ret = -1;
3765 }
3766
3767 if (0 == ret)
3768 {
3769 /* Record registers. */
3770 record_full_arch_list_add_reg (aarch64_record.regcache,
3771 AARCH64_PC_REGNUM);
3772 /* Always record register CPSR. */
3773 record_full_arch_list_add_reg (aarch64_record.regcache,
3774 AARCH64_CPSR_REGNUM);
3775 if (aarch64_record.aarch64_regs)
3776 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
3777 if (record_full_arch_list_add_reg (aarch64_record.regcache,
3778 aarch64_record.aarch64_regs[rec_no]))
3779 ret = -1;
3780
3781 /* Record memories. */
3782 if (aarch64_record.aarch64_mems)
3783 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
3784 if (record_full_arch_list_add_mem
3785 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
3786 aarch64_record.aarch64_mems[rec_no].len))
3787 ret = -1;
3788
3789 if (record_full_arch_list_add_end ())
3790 ret = -1;
3791 }
3792
3793 deallocate_reg_mem (&aarch64_record);
3794 return ret;
3795 }