Fix warning in gdb.base/signals-state-child.c
[deliverable/binutils-gdb.git] / gdb / aarch64-tdep.c
CommitLineData
07b287a0
MS
1/* Common target dependent code for GDB on AArch64 systems.
2
618f726f 3 Copyright (C) 2009-2016 Free Software Foundation, Inc.
07b287a0
MS
4 Contributed by ARM Ltd.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21#include "defs.h"
22
23#include "frame.h"
24#include "inferior.h"
25#include "gdbcmd.h"
26#include "gdbcore.h"
07b287a0
MS
27#include "dis-asm.h"
28#include "regcache.h"
29#include "reggroups.h"
30#include "doublest.h"
31#include "value.h"
32#include "arch-utils.h"
33#include "osabi.h"
34#include "frame-unwind.h"
35#include "frame-base.h"
36#include "trad-frame.h"
37#include "objfiles.h"
38#include "dwarf2-frame.h"
39#include "gdbtypes.h"
40#include "prologue-value.h"
41#include "target-descriptions.h"
42#include "user-regs.h"
43#include "language.h"
44#include "infcall.h"
ea873d8e
PL
45#include "ax.h"
46#include "ax-gdb.h"
07b287a0
MS
47
48#include "aarch64-tdep.h"
49
50#include "elf-bfd.h"
51#include "elf/aarch64.h"
52
07b287a0
MS
53#include "vec.h"
54
99afc88b
OJ
55#include "record.h"
56#include "record-full.h"
57
07b287a0 58#include "features/aarch64.c"
07b287a0 59
787749ea
PL
60#include "arch/aarch64-insn.h"
61
f77ee802
YQ
62#include "opcode/aarch64.h"
63
64#define submask(x) ((1L << ((x) + 1)) - 1)
65#define bit(obj,st) (((obj) >> (st)) & 1)
66#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
67
07b287a0
MS
68/* Pseudo register base numbers. */
69#define AARCH64_Q0_REGNUM 0
70#define AARCH64_D0_REGNUM (AARCH64_Q0_REGNUM + 32)
71#define AARCH64_S0_REGNUM (AARCH64_D0_REGNUM + 32)
72#define AARCH64_H0_REGNUM (AARCH64_S0_REGNUM + 32)
73#define AARCH64_B0_REGNUM (AARCH64_H0_REGNUM + 32)
74
75/* The standard register names, and all the valid aliases for them. */
76static const struct
77{
78 const char *const name;
79 int regnum;
80} aarch64_register_aliases[] =
81{
82 /* 64-bit register names. */
83 {"fp", AARCH64_FP_REGNUM},
84 {"lr", AARCH64_LR_REGNUM},
85 {"sp", AARCH64_SP_REGNUM},
86
87 /* 32-bit register names. */
88 {"w0", AARCH64_X0_REGNUM + 0},
89 {"w1", AARCH64_X0_REGNUM + 1},
90 {"w2", AARCH64_X0_REGNUM + 2},
91 {"w3", AARCH64_X0_REGNUM + 3},
92 {"w4", AARCH64_X0_REGNUM + 4},
93 {"w5", AARCH64_X0_REGNUM + 5},
94 {"w6", AARCH64_X0_REGNUM + 6},
95 {"w7", AARCH64_X0_REGNUM + 7},
96 {"w8", AARCH64_X0_REGNUM + 8},
97 {"w9", AARCH64_X0_REGNUM + 9},
98 {"w10", AARCH64_X0_REGNUM + 10},
99 {"w11", AARCH64_X0_REGNUM + 11},
100 {"w12", AARCH64_X0_REGNUM + 12},
101 {"w13", AARCH64_X0_REGNUM + 13},
102 {"w14", AARCH64_X0_REGNUM + 14},
103 {"w15", AARCH64_X0_REGNUM + 15},
104 {"w16", AARCH64_X0_REGNUM + 16},
105 {"w17", AARCH64_X0_REGNUM + 17},
106 {"w18", AARCH64_X0_REGNUM + 18},
107 {"w19", AARCH64_X0_REGNUM + 19},
108 {"w20", AARCH64_X0_REGNUM + 20},
109 {"w21", AARCH64_X0_REGNUM + 21},
110 {"w22", AARCH64_X0_REGNUM + 22},
111 {"w23", AARCH64_X0_REGNUM + 23},
112 {"w24", AARCH64_X0_REGNUM + 24},
113 {"w25", AARCH64_X0_REGNUM + 25},
114 {"w26", AARCH64_X0_REGNUM + 26},
115 {"w27", AARCH64_X0_REGNUM + 27},
116 {"w28", AARCH64_X0_REGNUM + 28},
117 {"w29", AARCH64_X0_REGNUM + 29},
118 {"w30", AARCH64_X0_REGNUM + 30},
119
120 /* specials */
121 {"ip0", AARCH64_X0_REGNUM + 16},
122 {"ip1", AARCH64_X0_REGNUM + 17}
123};
124
/* The required core 'R' registers.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM!  */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  "pc", "cpsr"
};
140
/* The FP/SIMD 'V' registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM!  */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  "fpsr",
  "fpcr"
};
157
158/* AArch64 prologue cache structure. */
159struct aarch64_prologue_cache
160{
db634143
PL
161 /* The program counter at the start of the function. It is used to
162 identify this frame as a prologue frame. */
163 CORE_ADDR func;
164
165 /* The program counter at the time this frame was created; i.e. where
166 this function was called from. It is used to identify this frame as a
167 stub frame. */
168 CORE_ADDR prev_pc;
169
07b287a0
MS
170 /* The stack pointer at the time this frame was created; i.e. the
171 caller's stack pointer when this function was called. It is used
172 to identify this frame. */
173 CORE_ADDR prev_sp;
174
7dfa3edc
PL
175 /* Is the target available to read from? */
176 int available_p;
177
07b287a0
MS
178 /* The frame base for this frame is just prev_sp - frame size.
179 FRAMESIZE is the distance from the frame pointer to the
180 initial stack pointer. */
181 int framesize;
182
183 /* The register used to hold the frame pointer for this frame. */
184 int framereg;
185
186 /* Saved register offsets. */
187 struct trad_frame_saved_reg *saved_regs;
188};
189
/* Callback for "show debug aarch64": report the current state of the
   AArch64 architecture-specific debugging flag.  */

static void
show_aarch64_debug (struct ui_file *file, int from_tty,
                    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
}
196
07b287a0
MS
197/* Analyze a prologue, looking for a recognizable stack frame
198 and frame pointer. Scan until we encounter a store that could
199 clobber the stack frame unexpectedly, or an unknown instruction. */
200
201static CORE_ADDR
202aarch64_analyze_prologue (struct gdbarch *gdbarch,
203 CORE_ADDR start, CORE_ADDR limit,
204 struct aarch64_prologue_cache *cache)
205{
206 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
207 int i;
208 pv_t regs[AARCH64_X_REGISTER_COUNT];
209 struct pv_area *stack;
210 struct cleanup *back_to;
211
212 for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
213 regs[i] = pv_register (i, 0);
214 stack = make_pv_area (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));
215 back_to = make_cleanup_free_pv_area (stack);
216
217 for (; start < limit; start += 4)
218 {
219 uint32_t insn;
d9ebcbce 220 aarch64_inst inst;
07b287a0
MS
221
222 insn = read_memory_unsigned_integer (start, 4, byte_order_for_code);
223
d9ebcbce
YQ
224 if (aarch64_decode_insn (insn, &inst, 1) != 0)
225 break;
226
227 if (inst.opcode->iclass == addsub_imm
228 && (inst.opcode->op == OP_ADD
229 || strcmp ("sub", inst.opcode->name) == 0))
07b287a0 230 {
d9ebcbce
YQ
231 unsigned rd = inst.operands[0].reg.regno;
232 unsigned rn = inst.operands[1].reg.regno;
233
234 gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
235 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
236 gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
237 gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM);
238
239 if (inst.opcode->op == OP_ADD)
240 {
241 regs[rd] = pv_add_constant (regs[rn],
242 inst.operands[2].imm.value);
243 }
244 else
245 {
246 regs[rd] = pv_add_constant (regs[rn],
247 -inst.operands[2].imm.value);
248 }
249 }
250 else if (inst.opcode->iclass == pcreladdr
251 && inst.operands[1].type == AARCH64_OPND_ADDR_ADRP)
252 {
253 gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
254 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
255
256 regs[inst.operands[0].reg.regno] = pv_unknown ();
07b287a0 257 }
d9ebcbce 258 else if (inst.opcode->iclass == branch_imm)
07b287a0
MS
259 {
260 /* Stop analysis on branch. */
261 break;
262 }
d9ebcbce 263 else if (inst.opcode->iclass == condbranch)
07b287a0
MS
264 {
265 /* Stop analysis on branch. */
266 break;
267 }
d9ebcbce 268 else if (inst.opcode->iclass == branch_reg)
07b287a0
MS
269 {
270 /* Stop analysis on branch. */
271 break;
272 }
d9ebcbce 273 else if (inst.opcode->iclass == compbranch)
07b287a0
MS
274 {
275 /* Stop analysis on branch. */
276 break;
277 }
d9ebcbce
YQ
278 else if (inst.opcode->op == OP_MOVZ)
279 {
280 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
281 regs[inst.operands[0].reg.regno] = pv_unknown ();
282 }
283 else if (inst.opcode->iclass == log_shift
284 && strcmp (inst.opcode->name, "orr") == 0)
07b287a0 285 {
d9ebcbce
YQ
286 unsigned rd = inst.operands[0].reg.regno;
287 unsigned rn = inst.operands[1].reg.regno;
288 unsigned rm = inst.operands[2].reg.regno;
289
290 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
291 gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn);
292 gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT);
293
294 if (inst.operands[2].shifter.amount == 0
295 && rn == AARCH64_SP_REGNUM)
07b287a0
MS
296 regs[rd] = regs[rm];
297 else
298 {
299 if (aarch64_debug)
b277c936
PL
300 {
301 debug_printf ("aarch64: prologue analysis gave up "
0a0da556 302 "addr=%s opcode=0x%x (orr x register)\n",
b277c936
PL
303 core_addr_to_string_nz (start), insn);
304 }
07b287a0
MS
305 break;
306 }
307 }
d9ebcbce 308 else if (inst.opcode->op == OP_STUR)
07b287a0 309 {
d9ebcbce
YQ
310 unsigned rt = inst.operands[0].reg.regno;
311 unsigned rn = inst.operands[1].addr.base_regno;
312 int is64
313 = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);
314
315 gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
316 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
317 gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
318 gdb_assert (!inst.operands[1].addr.offset.is_reg);
319
320 pv_area_store (stack, pv_add_constant (regs[rn],
321 inst.operands[1].addr.offset.imm),
07b287a0
MS
322 is64 ? 8 : 4, regs[rt]);
323 }
d9ebcbce
YQ
324 else if ((inst.opcode->iclass == ldstpair_off
325 || inst.opcode->iclass == ldstpair_indexed)
326 && inst.operands[2].addr.preind
327 && strcmp ("stp", inst.opcode->name) == 0)
07b287a0 328 {
d9ebcbce
YQ
329 unsigned rt1 = inst.operands[0].reg.regno;
330 unsigned rt2 = inst.operands[1].reg.regno;
331 unsigned rn = inst.operands[2].addr.base_regno;
332 int32_t imm = inst.operands[2].addr.offset.imm;
333
334 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
335 gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2);
336 gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7);
337 gdb_assert (!inst.operands[2].addr.offset.is_reg);
338
07b287a0
MS
339 /* If recording this store would invalidate the store area
340 (perhaps because rn is not known) then we should abandon
341 further prologue analysis. */
342 if (pv_area_store_would_trash (stack,
343 pv_add_constant (regs[rn], imm)))
344 break;
345
346 if (pv_area_store_would_trash (stack,
347 pv_add_constant (regs[rn], imm + 8)))
348 break;
349
350 pv_area_store (stack, pv_add_constant (regs[rn], imm), 8,
351 regs[rt1]);
352 pv_area_store (stack, pv_add_constant (regs[rn], imm + 8), 8,
353 regs[rt2]);
14ac654f 354
d9ebcbce 355 if (inst.operands[2].addr.writeback)
93d96012 356 regs[rn] = pv_add_constant (regs[rn], imm);
07b287a0 357
07b287a0 358 }
d9ebcbce 359 else if (inst.opcode->iclass == testbranch)
07b287a0
MS
360 {
361 /* Stop analysis on branch. */
362 break;
363 }
364 else
365 {
366 if (aarch64_debug)
b277c936 367 {
0a0da556 368 debug_printf ("aarch64: prologue analysis gave up addr=%s"
b277c936
PL
369 " opcode=0x%x\n",
370 core_addr_to_string_nz (start), insn);
371 }
07b287a0
MS
372 break;
373 }
374 }
375
376 if (cache == NULL)
377 {
378 do_cleanups (back_to);
379 return start;
380 }
381
382 if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
383 {
384 /* Frame pointer is fp. Frame size is constant. */
385 cache->framereg = AARCH64_FP_REGNUM;
386 cache->framesize = -regs[AARCH64_FP_REGNUM].k;
387 }
388 else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
389 {
390 /* Try the stack pointer. */
391 cache->framesize = -regs[AARCH64_SP_REGNUM].k;
392 cache->framereg = AARCH64_SP_REGNUM;
393 }
394 else
395 {
396 /* We're just out of luck. We don't know where the frame is. */
397 cache->framereg = -1;
398 cache->framesize = 0;
399 }
400
401 for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
402 {
403 CORE_ADDR offset;
404
405 if (pv_area_find_reg (stack, gdbarch, i, &offset))
406 cache->saved_regs[i].addr = offset;
407 }
408
409 do_cleanups (back_to);
410 return start;
411}
412
413/* Implement the "skip_prologue" gdbarch method. */
414
415static CORE_ADDR
416aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
417{
07b287a0 418 CORE_ADDR func_addr, limit_pc;
07b287a0
MS
419
420 /* See if we can determine the end of the prologue via the symbol
421 table. If so, then return either PC, or the PC after the
422 prologue, whichever is greater. */
423 if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
424 {
425 CORE_ADDR post_prologue_pc
426 = skip_prologue_using_sal (gdbarch, func_addr);
427
428 if (post_prologue_pc != 0)
429 return max (pc, post_prologue_pc);
430 }
431
432 /* Can't determine prologue from the symbol table, need to examine
433 instructions. */
434
435 /* Find an upper limit on the function prologue using the debug
436 information. If the debug information could not be used to
437 provide that bound, then use an arbitrary large number as the
438 upper bound. */
439 limit_pc = skip_prologue_using_sal (gdbarch, pc);
440 if (limit_pc == 0)
441 limit_pc = pc + 128; /* Magic. */
442
443 /* Try disassembling prologue. */
444 return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
445}
446
447/* Scan the function prologue for THIS_FRAME and populate the prologue
448 cache CACHE. */
449
450static void
451aarch64_scan_prologue (struct frame_info *this_frame,
452 struct aarch64_prologue_cache *cache)
453{
454 CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
455 CORE_ADDR prologue_start;
456 CORE_ADDR prologue_end;
457 CORE_ADDR prev_pc = get_frame_pc (this_frame);
458 struct gdbarch *gdbarch = get_frame_arch (this_frame);
459
db634143
PL
460 cache->prev_pc = prev_pc;
461
07b287a0
MS
462 /* Assume we do not find a frame. */
463 cache->framereg = -1;
464 cache->framesize = 0;
465
466 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
467 &prologue_end))
468 {
469 struct symtab_and_line sal = find_pc_line (prologue_start, 0);
470
471 if (sal.line == 0)
472 {
473 /* No line info so use the current PC. */
474 prologue_end = prev_pc;
475 }
476 else if (sal.end < prologue_end)
477 {
478 /* The next line begins after the function end. */
479 prologue_end = sal.end;
480 }
481
482 prologue_end = min (prologue_end, prev_pc);
483 aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
484 }
485 else
486 {
487 CORE_ADDR frame_loc;
07b287a0
MS
488
489 frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
490 if (frame_loc == 0)
491 return;
492
493 cache->framereg = AARCH64_FP_REGNUM;
494 cache->framesize = 16;
495 cache->saved_regs[29].addr = 0;
496 cache->saved_regs[30].addr = 8;
497 }
498}
499
7dfa3edc
PL
500/* Fill in *CACHE with information about the prologue of *THIS_FRAME. This
501 function may throw an exception if the inferior's registers or memory is
502 not available. */
07b287a0 503
7dfa3edc
PL
504static void
505aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
506 struct aarch64_prologue_cache *cache)
07b287a0 507{
07b287a0
MS
508 CORE_ADDR unwound_fp;
509 int reg;
510
07b287a0
MS
511 aarch64_scan_prologue (this_frame, cache);
512
513 if (cache->framereg == -1)
7dfa3edc 514 return;
07b287a0
MS
515
516 unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
517 if (unwound_fp == 0)
7dfa3edc 518 return;
07b287a0
MS
519
520 cache->prev_sp = unwound_fp + cache->framesize;
521
522 /* Calculate actual addresses of saved registers using offsets
523 determined by aarch64_analyze_prologue. */
524 for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
525 if (trad_frame_addr_p (cache->saved_regs, reg))
526 cache->saved_regs[reg].addr += cache->prev_sp;
527
db634143
PL
528 cache->func = get_frame_func (this_frame);
529
7dfa3edc
PL
530 cache->available_p = 1;
531}
532
533/* Allocate and fill in *THIS_CACHE with information about the prologue of
534 *THIS_FRAME. Do not do this is if *THIS_CACHE was already allocated.
535 Return a pointer to the current aarch64_prologue_cache in
536 *THIS_CACHE. */
537
538static struct aarch64_prologue_cache *
539aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
540{
541 struct aarch64_prologue_cache *cache;
542
543 if (*this_cache != NULL)
9a3c8263 544 return (struct aarch64_prologue_cache *) *this_cache;
7dfa3edc
PL
545
546 cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
547 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
548 *this_cache = cache;
549
550 TRY
551 {
552 aarch64_make_prologue_cache_1 (this_frame, cache);
553 }
554 CATCH (ex, RETURN_MASK_ERROR)
555 {
556 if (ex.error != NOT_AVAILABLE_ERROR)
557 throw_exception (ex);
558 }
559 END_CATCH
560
07b287a0
MS
561 return cache;
562}
563
7dfa3edc
PL
564/* Implement the "stop_reason" frame_unwind method. */
565
566static enum unwind_stop_reason
567aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
568 void **this_cache)
569{
570 struct aarch64_prologue_cache *cache
571 = aarch64_make_prologue_cache (this_frame, this_cache);
572
573 if (!cache->available_p)
574 return UNWIND_UNAVAILABLE;
575
576 /* Halt the backtrace at "_start". */
577 if (cache->prev_pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
578 return UNWIND_OUTERMOST;
579
580 /* We've hit a wall, stop. */
581 if (cache->prev_sp == 0)
582 return UNWIND_OUTERMOST;
583
584 return UNWIND_NO_REASON;
585}
586
07b287a0
MS
587/* Our frame ID for a normal frame is the current function's starting
588 PC and the caller's SP when we were called. */
589
590static void
591aarch64_prologue_this_id (struct frame_info *this_frame,
592 void **this_cache, struct frame_id *this_id)
593{
7c8edfae
PL
594 struct aarch64_prologue_cache *cache
595 = aarch64_make_prologue_cache (this_frame, this_cache);
07b287a0 596
7dfa3edc
PL
597 if (!cache->available_p)
598 *this_id = frame_id_build_unavailable_stack (cache->func);
599 else
600 *this_id = frame_id_build (cache->prev_sp, cache->func);
07b287a0
MS
601}
602
603/* Implement the "prev_register" frame_unwind method. */
604
605static struct value *
606aarch64_prologue_prev_register (struct frame_info *this_frame,
607 void **this_cache, int prev_regnum)
608{
7c8edfae
PL
609 struct aarch64_prologue_cache *cache
610 = aarch64_make_prologue_cache (this_frame, this_cache);
07b287a0
MS
611
612 /* If we are asked to unwind the PC, then we need to return the LR
613 instead. The prologue may save PC, but it will point into this
614 frame's prologue, not the next frame's resume location. */
615 if (prev_regnum == AARCH64_PC_REGNUM)
616 {
617 CORE_ADDR lr;
618
619 lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
620 return frame_unwind_got_constant (this_frame, prev_regnum, lr);
621 }
622
623 /* SP is generally not saved to the stack, but this frame is
624 identified by the next frame's stack pointer at the time of the
625 call. The value was already reconstructed into PREV_SP. */
626 /*
627 +----------+ ^
628 | saved lr | |
629 +->| saved fp |--+
630 | | |
631 | | | <- Previous SP
632 | +----------+
633 | | saved lr |
634 +--| saved fp |<- FP
635 | |
636 | |<- SP
637 +----------+ */
638 if (prev_regnum == AARCH64_SP_REGNUM)
639 return frame_unwind_got_constant (this_frame, prev_regnum,
640 cache->prev_sp);
641
642 return trad_frame_get_prev_register (this_frame, cache->saved_regs,
643 prev_regnum);
644}
645
646/* AArch64 prologue unwinder. */
647struct frame_unwind aarch64_prologue_unwind =
648{
649 NORMAL_FRAME,
7dfa3edc 650 aarch64_prologue_frame_unwind_stop_reason,
07b287a0
MS
651 aarch64_prologue_this_id,
652 aarch64_prologue_prev_register,
653 NULL,
654 default_frame_sniffer
655};
656
8b61f75d
PL
657/* Allocate and fill in *THIS_CACHE with information about the prologue of
658 *THIS_FRAME. Do not do this is if *THIS_CACHE was already allocated.
659 Return a pointer to the current aarch64_prologue_cache in
660 *THIS_CACHE. */
07b287a0
MS
661
662static struct aarch64_prologue_cache *
8b61f75d 663aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
07b287a0 664{
07b287a0 665 struct aarch64_prologue_cache *cache;
8b61f75d
PL
666
667 if (*this_cache != NULL)
9a3c8263 668 return (struct aarch64_prologue_cache *) *this_cache;
07b287a0
MS
669
670 cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
671 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
8b61f75d 672 *this_cache = cache;
07b287a0 673
02a2a705
PL
674 TRY
675 {
676 cache->prev_sp = get_frame_register_unsigned (this_frame,
677 AARCH64_SP_REGNUM);
678 cache->prev_pc = get_frame_pc (this_frame);
679 cache->available_p = 1;
680 }
681 CATCH (ex, RETURN_MASK_ERROR)
682 {
683 if (ex.error != NOT_AVAILABLE_ERROR)
684 throw_exception (ex);
685 }
686 END_CATCH
07b287a0
MS
687
688 return cache;
689}
690
02a2a705
PL
691/* Implement the "stop_reason" frame_unwind method. */
692
693static enum unwind_stop_reason
694aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame,
695 void **this_cache)
696{
697 struct aarch64_prologue_cache *cache
698 = aarch64_make_stub_cache (this_frame, this_cache);
699
700 if (!cache->available_p)
701 return UNWIND_UNAVAILABLE;
702
703 return UNWIND_NO_REASON;
704}
705
07b287a0
MS
706/* Our frame ID for a stub frame is the current SP and LR. */
707
708static void
709aarch64_stub_this_id (struct frame_info *this_frame,
710 void **this_cache, struct frame_id *this_id)
711{
8b61f75d
PL
712 struct aarch64_prologue_cache *cache
713 = aarch64_make_stub_cache (this_frame, this_cache);
07b287a0 714
02a2a705
PL
715 if (cache->available_p)
716 *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
717 else
718 *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
07b287a0
MS
719}
720
721/* Implement the "sniffer" frame_unwind method. */
722
723static int
724aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
725 struct frame_info *this_frame,
726 void **this_prologue_cache)
727{
728 CORE_ADDR addr_in_block;
729 gdb_byte dummy[4];
730
731 addr_in_block = get_frame_address_in_block (this_frame);
3e5d3a5a 732 if (in_plt_section (addr_in_block)
07b287a0
MS
733 /* We also use the stub winder if the target memory is unreadable
734 to avoid having the prologue unwinder trying to read it. */
735 || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
736 return 1;
737
738 return 0;
739}
740
741/* AArch64 stub unwinder. */
742struct frame_unwind aarch64_stub_unwind =
743{
744 NORMAL_FRAME,
02a2a705 745 aarch64_stub_frame_unwind_stop_reason,
07b287a0
MS
746 aarch64_stub_this_id,
747 aarch64_prologue_prev_register,
748 NULL,
749 aarch64_stub_unwind_sniffer
750};
751
752/* Return the frame base address of *THIS_FRAME. */
753
754static CORE_ADDR
755aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
756{
7c8edfae
PL
757 struct aarch64_prologue_cache *cache
758 = aarch64_make_prologue_cache (this_frame, this_cache);
07b287a0
MS
759
760 return cache->prev_sp - cache->framesize;
761}
762
763/* AArch64 default frame base information. */
764struct frame_base aarch64_normal_base =
765{
766 &aarch64_prologue_unwind,
767 aarch64_normal_frame_base,
768 aarch64_normal_frame_base,
769 aarch64_normal_frame_base
770};
771
772/* Assuming THIS_FRAME is a dummy, return the frame ID of that
773 dummy frame. The frame ID's base needs to match the TOS value
774 saved by save_dummy_frame_tos () and returned from
775 aarch64_push_dummy_call, and the PC needs to match the dummy
776 frame's breakpoint. */
777
778static struct frame_id
779aarch64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
780{
781 return frame_id_build (get_frame_register_unsigned (this_frame,
782 AARCH64_SP_REGNUM),
783 get_frame_pc (this_frame));
784}
785
786/* Implement the "unwind_pc" gdbarch method. */
787
788static CORE_ADDR
789aarch64_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
790{
791 CORE_ADDR pc
792 = frame_unwind_register_unsigned (this_frame, AARCH64_PC_REGNUM);
793
794 return pc;
795}
796
797/* Implement the "unwind_sp" gdbarch method. */
798
799static CORE_ADDR
800aarch64_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
801{
802 return frame_unwind_register_unsigned (this_frame, AARCH64_SP_REGNUM);
803}
804
805/* Return the value of the REGNUM register in the previous frame of
806 *THIS_FRAME. */
807
808static struct value *
809aarch64_dwarf2_prev_register (struct frame_info *this_frame,
810 void **this_cache, int regnum)
811{
07b287a0
MS
812 CORE_ADDR lr;
813
814 switch (regnum)
815 {
816 case AARCH64_PC_REGNUM:
817 lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
818 return frame_unwind_got_constant (this_frame, regnum, lr);
819
820 default:
821 internal_error (__FILE__, __LINE__,
822 _("Unexpected register %d"), regnum);
823 }
824}
825
826/* Implement the "init_reg" dwarf2_frame_ops method. */
827
828static void
829aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
830 struct dwarf2_frame_state_reg *reg,
831 struct frame_info *this_frame)
832{
833 switch (regnum)
834 {
835 case AARCH64_PC_REGNUM:
836 reg->how = DWARF2_FRAME_REG_FN;
837 reg->loc.fn = aarch64_dwarf2_prev_register;
838 break;
839 case AARCH64_SP_REGNUM:
840 reg->how = DWARF2_FRAME_REG_CFA;
841 break;
842 }
843}
844
845/* When arguments must be pushed onto the stack, they go on in reverse
846 order. The code below implements a FILO (stack) to do this. */
847
848typedef struct
849{
c3c87445
YQ
850 /* Value to pass on stack. It can be NULL if this item is for stack
851 padding. */
7c543f7b 852 const gdb_byte *data;
07b287a0
MS
853
854 /* Size in bytes of value to pass on stack. */
855 int len;
856} stack_item_t;
857
858DEF_VEC_O (stack_item_t);
859
860/* Return the alignment (in bytes) of the given type. */
861
862static int
863aarch64_type_align (struct type *t)
864{
865 int n;
866 int align;
867 int falign;
868
869 t = check_typedef (t);
870 switch (TYPE_CODE (t))
871 {
872 default:
873 /* Should never happen. */
874 internal_error (__FILE__, __LINE__, _("unknown type alignment"));
875 return 4;
876
877 case TYPE_CODE_PTR:
878 case TYPE_CODE_ENUM:
879 case TYPE_CODE_INT:
880 case TYPE_CODE_FLT:
881 case TYPE_CODE_SET:
882 case TYPE_CODE_RANGE:
883 case TYPE_CODE_BITSTRING:
884 case TYPE_CODE_REF:
885 case TYPE_CODE_CHAR:
886 case TYPE_CODE_BOOL:
887 return TYPE_LENGTH (t);
888
889 case TYPE_CODE_ARRAY:
238f2452
YQ
890 if (TYPE_VECTOR (t))
891 {
892 /* Use the natural alignment for vector types (the same for
893 scalar type), but the maximum alignment is 128-bit. */
894 if (TYPE_LENGTH (t) > 16)
895 return 16;
896 else
897 return TYPE_LENGTH (t);
898 }
899 else
900 return aarch64_type_align (TYPE_TARGET_TYPE (t));
07b287a0
MS
901 case TYPE_CODE_COMPLEX:
902 return aarch64_type_align (TYPE_TARGET_TYPE (t));
903
904 case TYPE_CODE_STRUCT:
905 case TYPE_CODE_UNION:
906 align = 1;
907 for (n = 0; n < TYPE_NFIELDS (t); n++)
908 {
909 falign = aarch64_type_align (TYPE_FIELD_TYPE (t, n));
910 if (falign > align)
911 align = falign;
912 }
913 return align;
914 }
915}
916
cd635f74
YQ
917/* Return 1 if *TY is a homogeneous floating-point aggregate or
918 homogeneous short-vector aggregate as defined in the AAPCS64 ABI
919 document; otherwise return 0. */
07b287a0
MS
920
921static int
cd635f74 922is_hfa_or_hva (struct type *ty)
07b287a0
MS
923{
924 switch (TYPE_CODE (ty))
925 {
926 case TYPE_CODE_ARRAY:
927 {
928 struct type *target_ty = TYPE_TARGET_TYPE (ty);
238f2452
YQ
929
930 if (TYPE_VECTOR (ty))
931 return 0;
932
cd635f74
YQ
933 if (TYPE_LENGTH (ty) <= 4 /* HFA or HVA has at most 4 members. */
934 && (TYPE_CODE (target_ty) == TYPE_CODE_FLT /* HFA */
935 || (TYPE_CODE (target_ty) == TYPE_CODE_ARRAY /* HVA */
936 && TYPE_VECTOR (target_ty))))
07b287a0
MS
937 return 1;
938 break;
939 }
940
941 case TYPE_CODE_UNION:
942 case TYPE_CODE_STRUCT:
943 {
cd635f74 944 /* HFA or HVA has at most four members. */
07b287a0
MS
945 if (TYPE_NFIELDS (ty) > 0 && TYPE_NFIELDS (ty) <= 4)
946 {
947 struct type *member0_type;
948
949 member0_type = check_typedef (TYPE_FIELD_TYPE (ty, 0));
cd635f74
YQ
950 if (TYPE_CODE (member0_type) == TYPE_CODE_FLT
951 || (TYPE_CODE (member0_type) == TYPE_CODE_ARRAY
952 && TYPE_VECTOR (member0_type)))
07b287a0
MS
953 {
954 int i;
955
956 for (i = 0; i < TYPE_NFIELDS (ty); i++)
957 {
958 struct type *member1_type;
959
960 member1_type = check_typedef (TYPE_FIELD_TYPE (ty, i));
961 if (TYPE_CODE (member0_type) != TYPE_CODE (member1_type)
962 || (TYPE_LENGTH (member0_type)
963 != TYPE_LENGTH (member1_type)))
964 return 0;
965 }
966 return 1;
967 }
968 }
969 return 0;
970 }
971
972 default:
973 break;
974 }
975
976 return 0;
977}
978
979/* AArch64 function call information structure. */
980struct aarch64_call_info
981{
982 /* the current argument number. */
983 unsigned argnum;
984
985 /* The next general purpose register number, equivalent to NGRN as
986 described in the AArch64 Procedure Call Standard. */
987 unsigned ngrn;
988
989 /* The next SIMD and floating point register number, equivalent to
990 NSRN as described in the AArch64 Procedure Call Standard. */
991 unsigned nsrn;
992
993 /* The next stacked argument address, equivalent to NSAA as
994 described in the AArch64 Procedure Call Standard. */
995 unsigned nsaa;
996
997 /* Stack item vector. */
998 VEC(stack_item_t) *si;
999};
1000
1001/* Pass a value in a sequence of consecutive X registers. The caller
1002 is responsbile for ensuring sufficient registers are available. */
1003
1004static void
1005pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
1006 struct aarch64_call_info *info, struct type *type,
8e80f9d1 1007 struct value *arg)
07b287a0
MS
1008{
1009 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1010 int len = TYPE_LENGTH (type);
1011 enum type_code typecode = TYPE_CODE (type);
1012 int regnum = AARCH64_X0_REGNUM + info->ngrn;
8e80f9d1 1013 const bfd_byte *buf = value_contents (arg);
07b287a0
MS
1014
1015 info->argnum++;
1016
1017 while (len > 0)
1018 {
1019 int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
1020 CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
1021 byte_order);
1022
1023
1024 /* Adjust sub-word struct/union args when big-endian. */
1025 if (byte_order == BFD_ENDIAN_BIG
1026 && partial_len < X_REGISTER_SIZE
1027 && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
1028 regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);
1029
1030 if (aarch64_debug)
b277c936
PL
1031 {
1032 debug_printf ("arg %d in %s = 0x%s\n", info->argnum,
1033 gdbarch_register_name (gdbarch, regnum),
1034 phex (regval, X_REGISTER_SIZE));
1035 }
07b287a0
MS
1036 regcache_cooked_write_unsigned (regcache, regnum, regval);
1037 len -= partial_len;
1038 buf += partial_len;
1039 regnum++;
1040 }
1041}
1042
1043/* Attempt to marshall a value in a V register. Return 1 if
1044 successful, or 0 if insufficient registers are available. This
1045 function, unlike the equivalent pass_in_x() function does not
1046 handle arguments spread across multiple registers. */
1047
1048static int
1049pass_in_v (struct gdbarch *gdbarch,
1050 struct regcache *regcache,
1051 struct aarch64_call_info *info,
0735fddd 1052 int len, const bfd_byte *buf)
07b287a0
MS
1053{
1054 if (info->nsrn < 8)
1055 {
07b287a0 1056 int regnum = AARCH64_V0_REGNUM + info->nsrn;
0735fddd 1057 gdb_byte reg[V_REGISTER_SIZE];
07b287a0
MS
1058
1059 info->argnum++;
1060 info->nsrn++;
1061
0735fddd
YQ
1062 memset (reg, 0, sizeof (reg));
1063 /* PCS C.1, the argument is allocated to the least significant
1064 bits of V register. */
1065 memcpy (reg, buf, len);
1066 regcache_cooked_write (regcache, regnum, reg);
1067
07b287a0 1068 if (aarch64_debug)
b277c936
PL
1069 {
1070 debug_printf ("arg %d in %s\n", info->argnum,
1071 gdbarch_register_name (gdbarch, regnum));
1072 }
07b287a0
MS
1073 return 1;
1074 }
1075 info->nsrn = 8;
1076 return 0;
1077}
1078
1079/* Marshall an argument onto the stack. */
1080
1081static void
1082pass_on_stack (struct aarch64_call_info *info, struct type *type,
8e80f9d1 1083 struct value *arg)
07b287a0 1084{
8e80f9d1 1085 const bfd_byte *buf = value_contents (arg);
07b287a0
MS
1086 int len = TYPE_LENGTH (type);
1087 int align;
1088 stack_item_t item;
1089
1090 info->argnum++;
1091
1092 align = aarch64_type_align (type);
1093
1094 /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
1095 Natural alignment of the argument's type. */
1096 align = align_up (align, 8);
1097
1098 /* The AArch64 PCS requires at most doubleword alignment. */
1099 if (align > 16)
1100 align = 16;
1101
1102 if (aarch64_debug)
b277c936
PL
1103 {
1104 debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
1105 info->nsaa);
1106 }
07b287a0
MS
1107
1108 item.len = len;
1109 item.data = buf;
1110 VEC_safe_push (stack_item_t, info->si, &item);
1111
1112 info->nsaa += len;
1113 if (info->nsaa & (align - 1))
1114 {
1115 /* Push stack alignment padding. */
1116 int pad = align - (info->nsaa & (align - 1));
1117
1118 item.len = pad;
c3c87445 1119 item.data = NULL;
07b287a0
MS
1120
1121 VEC_safe_push (stack_item_t, info->si, &item);
1122 info->nsaa += pad;
1123 }
1124}
1125
1126/* Marshall an argument into a sequence of one or more consecutive X
1127 registers or, if insufficient X registers are available then onto
1128 the stack. */
1129
1130static void
1131pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
1132 struct aarch64_call_info *info, struct type *type,
8e80f9d1 1133 struct value *arg)
07b287a0
MS
1134{
1135 int len = TYPE_LENGTH (type);
1136 int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;
1137
1138 /* PCS C.13 - Pass in registers if we have enough spare */
1139 if (info->ngrn + nregs <= 8)
1140 {
8e80f9d1 1141 pass_in_x (gdbarch, regcache, info, type, arg);
07b287a0
MS
1142 info->ngrn += nregs;
1143 }
1144 else
1145 {
1146 info->ngrn = 8;
8e80f9d1 1147 pass_on_stack (info, type, arg);
07b287a0
MS
1148 }
1149}
1150
1151/* Pass a value in a V register, or on the stack if insufficient are
1152 available. */
1153
static void
pass_in_v_or_stack (struct gdbarch *gdbarch,
		    struct regcache *regcache,
		    struct aarch64_call_info *info,
		    struct type *type,
		    struct value *arg)
{
  /* Try a V register first; pass_in_v reports failure once v0-v7 are
     exhausted, in which case the value goes on the stack.  */
  if (!pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (type),
		  value_contents (arg)))
    pass_on_stack (info, type, arg);
}
1165
1166/* Implement the "push_dummy_call" gdbarch method. */
1167
1168static CORE_ADDR
1169aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
1170 struct regcache *regcache, CORE_ADDR bp_addr,
1171 int nargs,
1172 struct value **args, CORE_ADDR sp, int struct_return,
1173 CORE_ADDR struct_addr)
1174{
07b287a0 1175 int argnum;
07b287a0
MS
1176 struct aarch64_call_info info;
1177 struct type *func_type;
1178 struct type *return_type;
1179 int lang_struct_return;
1180
1181 memset (&info, 0, sizeof (info));
1182
1183 /* We need to know what the type of the called function is in order
1184 to determine the number of named/anonymous arguments for the
1185 actual argument placement, and the return type in order to handle
1186 return value correctly.
1187
1188 The generic code above us views the decision of return in memory
1189 or return in registers as a two stage processes. The language
1190 handler is consulted first and may decide to return in memory (eg
1191 class with copy constructor returned by value), this will cause
1192 the generic code to allocate space AND insert an initial leading
1193 argument.
1194
1195 If the language code does not decide to pass in memory then the
1196 target code is consulted.
1197
1198 If the language code decides to pass in memory we want to move
1199 the pointer inserted as the initial argument from the argument
1200 list and into X8, the conventional AArch64 struct return pointer
1201 register.
1202
1203 This is slightly awkward, ideally the flag "lang_struct_return"
1204 would be passed to the targets implementation of push_dummy_call.
1205 Rather that change the target interface we call the language code
1206 directly ourselves. */
1207
1208 func_type = check_typedef (value_type (function));
1209
1210 /* Dereference function pointer types. */
1211 if (TYPE_CODE (func_type) == TYPE_CODE_PTR)
1212 func_type = TYPE_TARGET_TYPE (func_type);
1213
1214 gdb_assert (TYPE_CODE (func_type) == TYPE_CODE_FUNC
1215 || TYPE_CODE (func_type) == TYPE_CODE_METHOD);
1216
1217 /* If language_pass_by_reference () returned true we will have been
1218 given an additional initial argument, a hidden pointer to the
1219 return slot in memory. */
1220 return_type = TYPE_TARGET_TYPE (func_type);
1221 lang_struct_return = language_pass_by_reference (return_type);
1222
1223 /* Set the return address. For the AArch64, the return breakpoint
1224 is always at BP_ADDR. */
1225 regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);
1226
1227 /* If we were given an initial argument for the return slot because
1228 lang_struct_return was true, lose it. */
1229 if (lang_struct_return)
1230 {
1231 args++;
1232 nargs--;
1233 }
1234
1235 /* The struct_return pointer occupies X8. */
1236 if (struct_return || lang_struct_return)
1237 {
1238 if (aarch64_debug)
b277c936
PL
1239 {
1240 debug_printf ("struct return in %s = 0x%s\n",
1241 gdbarch_register_name (gdbarch,
1242 AARCH64_STRUCT_RETURN_REGNUM),
1243 paddress (gdbarch, struct_addr));
1244 }
07b287a0
MS
1245 regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
1246 struct_addr);
1247 }
1248
1249 for (argnum = 0; argnum < nargs; argnum++)
1250 {
1251 struct value *arg = args[argnum];
1252 struct type *arg_type;
1253 int len;
1254
1255 arg_type = check_typedef (value_type (arg));
1256 len = TYPE_LENGTH (arg_type);
1257
1258 switch (TYPE_CODE (arg_type))
1259 {
1260 case TYPE_CODE_INT:
1261 case TYPE_CODE_BOOL:
1262 case TYPE_CODE_CHAR:
1263 case TYPE_CODE_RANGE:
1264 case TYPE_CODE_ENUM:
1265 if (len < 4)
1266 {
1267 /* Promote to 32 bit integer. */
1268 if (TYPE_UNSIGNED (arg_type))
1269 arg_type = builtin_type (gdbarch)->builtin_uint32;
1270 else
1271 arg_type = builtin_type (gdbarch)->builtin_int32;
1272 arg = value_cast (arg_type, arg);
1273 }
8e80f9d1 1274 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
07b287a0
MS
1275 break;
1276
1277 case TYPE_CODE_COMPLEX:
1278 if (info.nsrn <= 6)
1279 {
1280 const bfd_byte *buf = value_contents (arg);
1281 struct type *target_type =
1282 check_typedef (TYPE_TARGET_TYPE (arg_type));
1283
07b287a0 1284 pass_in_v (gdbarch, regcache, &info,
0735fddd
YQ
1285 TYPE_LENGTH (target_type), buf);
1286 pass_in_v (gdbarch, regcache, &info,
1287 TYPE_LENGTH (target_type),
07b287a0
MS
1288 buf + TYPE_LENGTH (target_type));
1289 }
1290 else
1291 {
1292 info.nsrn = 8;
8e80f9d1 1293 pass_on_stack (&info, arg_type, arg);
07b287a0
MS
1294 }
1295 break;
1296 case TYPE_CODE_FLT:
8e80f9d1 1297 pass_in_v_or_stack (gdbarch, regcache, &info, arg_type, arg);
07b287a0
MS
1298 break;
1299
1300 case TYPE_CODE_STRUCT:
1301 case TYPE_CODE_ARRAY:
1302 case TYPE_CODE_UNION:
cd635f74 1303 if (is_hfa_or_hva (arg_type))
07b287a0
MS
1304 {
1305 int elements = TYPE_NFIELDS (arg_type);
1306
1307 /* Homogeneous Aggregates */
1308 if (info.nsrn + elements < 8)
1309 {
1310 int i;
1311
1312 for (i = 0; i < elements; i++)
1313 {
1314 /* We know that we have sufficient registers
1315 available therefore this will never fallback
1316 to the stack. */
1317 struct value *field =
1318 value_primitive_field (arg, 0, i, arg_type);
1319 struct type *field_type =
1320 check_typedef (value_type (field));
1321
8e80f9d1
YQ
1322 pass_in_v_or_stack (gdbarch, regcache, &info,
1323 field_type, field);
07b287a0
MS
1324 }
1325 }
1326 else
1327 {
1328 info.nsrn = 8;
8e80f9d1 1329 pass_on_stack (&info, arg_type, arg);
07b287a0
MS
1330 }
1331 }
238f2452
YQ
1332 else if (TYPE_CODE (arg_type) == TYPE_CODE_ARRAY
1333 && TYPE_VECTOR (arg_type) && (len == 16 || len == 8))
1334 {
1335 /* Short vector types are passed in V registers. */
1336 pass_in_v_or_stack (gdbarch, regcache, &info, arg_type, arg);
1337 }
07b287a0
MS
1338 else if (len > 16)
1339 {
1340 /* PCS B.7 Aggregates larger than 16 bytes are passed by
1341 invisible reference. */
1342
1343 /* Allocate aligned storage. */
1344 sp = align_down (sp - len, 16);
1345
1346 /* Write the real data into the stack. */
1347 write_memory (sp, value_contents (arg), len);
1348
1349 /* Construct the indirection. */
1350 arg_type = lookup_pointer_type (arg_type);
1351 arg = value_from_pointer (arg_type, sp);
8e80f9d1 1352 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
07b287a0
MS
1353 }
1354 else
1355 /* PCS C.15 / C.18 multiple values pass. */
8e80f9d1 1356 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
07b287a0
MS
1357 break;
1358
1359 default:
8e80f9d1 1360 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
07b287a0
MS
1361 break;
1362 }
1363 }
1364
1365 /* Make sure stack retains 16 byte alignment. */
1366 if (info.nsaa & 15)
1367 sp -= 16 - (info.nsaa & 15);
1368
1369 while (!VEC_empty (stack_item_t, info.si))
1370 {
1371 stack_item_t *si = VEC_last (stack_item_t, info.si);
1372
1373 sp -= si->len;
c3c87445
YQ
1374 if (si->data != NULL)
1375 write_memory (sp, si->data, si->len);
07b287a0
MS
1376 VEC_pop (stack_item_t, info.si);
1377 }
1378
1379 VEC_free (stack_item_t, info.si);
1380
1381 /* Finally, update the SP register. */
1382 regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);
1383
1384 return sp;
1385}
1386
1387/* Implement the "frame_align" gdbarch method. */
1388
1389static CORE_ADDR
1390aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
1391{
1392 /* Align the stack to sixteen bytes. */
1393 return sp & ~(CORE_ADDR) 15;
1394}
1395
/* Return the type for an AdvSIMD Q register.  */
1398static struct type *
1399aarch64_vnq_type (struct gdbarch *gdbarch)
1400{
1401 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1402
1403 if (tdep->vnq_type == NULL)
1404 {
1405 struct type *t;
1406 struct type *elem;
1407
1408 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
1409 TYPE_CODE_UNION);
1410
1411 elem = builtin_type (gdbarch)->builtin_uint128;
1412 append_composite_type_field (t, "u", elem);
1413
1414 elem = builtin_type (gdbarch)->builtin_int128;
1415 append_composite_type_field (t, "s", elem);
1416
1417 tdep->vnq_type = t;
1418 }
1419
1420 return tdep->vnq_type;
1421}
1422
/* Return the type for an AdvSIMD D register.  */
1425static struct type *
1426aarch64_vnd_type (struct gdbarch *gdbarch)
1427{
1428 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1429
1430 if (tdep->vnd_type == NULL)
1431 {
1432 struct type *t;
1433 struct type *elem;
1434
1435 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
1436 TYPE_CODE_UNION);
1437
1438 elem = builtin_type (gdbarch)->builtin_double;
1439 append_composite_type_field (t, "f", elem);
1440
1441 elem = builtin_type (gdbarch)->builtin_uint64;
1442 append_composite_type_field (t, "u", elem);
1443
1444 elem = builtin_type (gdbarch)->builtin_int64;
1445 append_composite_type_field (t, "s", elem);
1446
1447 tdep->vnd_type = t;
1448 }
1449
1450 return tdep->vnd_type;
1451}
1452
/* Return the type for an AdvSIMD S register.  */
1455static struct type *
1456aarch64_vns_type (struct gdbarch *gdbarch)
1457{
1458 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1459
1460 if (tdep->vns_type == NULL)
1461 {
1462 struct type *t;
1463 struct type *elem;
1464
1465 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
1466 TYPE_CODE_UNION);
1467
1468 elem = builtin_type (gdbarch)->builtin_float;
1469 append_composite_type_field (t, "f", elem);
1470
1471 elem = builtin_type (gdbarch)->builtin_uint32;
1472 append_composite_type_field (t, "u", elem);
1473
1474 elem = builtin_type (gdbarch)->builtin_int32;
1475 append_composite_type_field (t, "s", elem);
1476
1477 tdep->vns_type = t;
1478 }
1479
1480 return tdep->vns_type;
1481}
1482
/* Return the type for an AdvSIMD H register.  */
1485static struct type *
1486aarch64_vnh_type (struct gdbarch *gdbarch)
1487{
1488 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1489
1490 if (tdep->vnh_type == NULL)
1491 {
1492 struct type *t;
1493 struct type *elem;
1494
1495 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
1496 TYPE_CODE_UNION);
1497
1498 elem = builtin_type (gdbarch)->builtin_uint16;
1499 append_composite_type_field (t, "u", elem);
1500
1501 elem = builtin_type (gdbarch)->builtin_int16;
1502 append_composite_type_field (t, "s", elem);
1503
1504 tdep->vnh_type = t;
1505 }
1506
1507 return tdep->vnh_type;
1508}
1509
/* Return the type for an AdvSIMD B register.  */
1512static struct type *
1513aarch64_vnb_type (struct gdbarch *gdbarch)
1514{
1515 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1516
1517 if (tdep->vnb_type == NULL)
1518 {
1519 struct type *t;
1520 struct type *elem;
1521
1522 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
1523 TYPE_CODE_UNION);
1524
1525 elem = builtin_type (gdbarch)->builtin_uint8;
1526 append_composite_type_field (t, "u", elem);
1527
1528 elem = builtin_type (gdbarch)->builtin_int8;
1529 append_composite_type_field (t, "s", elem);
1530
1531 tdep->vnb_type = t;
1532 }
1533
1534 return tdep->vnb_type;
1535}
1536
1537/* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
1538
1539static int
1540aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
1541{
1542 if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
1543 return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;
1544
1545 if (reg == AARCH64_DWARF_SP)
1546 return AARCH64_SP_REGNUM;
1547
1548 if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
1549 return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;
1550
1551 return -1;
1552}
1553\f
1554
1555/* Implement the "print_insn" gdbarch method. */
1556
1557static int
1558aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
1559{
1560 info->symbols = NULL;
1561 return print_insn_aarch64 (memaddr, info);
1562}
1563
1564/* AArch64 BRK software debug mode instruction.
1565 Note that AArch64 code is always little-endian.
1566 1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000. */
948f8e3d 1567static const gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
07b287a0
MS
1568
1569/* Implement the "breakpoint_from_pc" gdbarch method. */
1570
948f8e3d 1571static const gdb_byte *
07b287a0
MS
1572aarch64_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr,
1573 int *lenptr)
1574{
07b287a0
MS
1575 *lenptr = sizeof (aarch64_default_breakpoint);
1576 return aarch64_default_breakpoint;
1577}
1578
1579/* Extract from an array REGS containing the (raw) register state a
1580 function return value of type TYPE, and copy that, in virtual
1581 format, into VALBUF. */
1582
1583static void
1584aarch64_extract_return_value (struct type *type, struct regcache *regs,
1585 gdb_byte *valbuf)
1586{
1587 struct gdbarch *gdbarch = get_regcache_arch (regs);
1588 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1589
1590 if (TYPE_CODE (type) == TYPE_CODE_FLT)
1591 {
1592 bfd_byte buf[V_REGISTER_SIZE];
1593 int len = TYPE_LENGTH (type);
1594
1595 regcache_cooked_read (regs, AARCH64_V0_REGNUM, buf);
1596 memcpy (valbuf, buf, len);
1597 }
1598 else if (TYPE_CODE (type) == TYPE_CODE_INT
1599 || TYPE_CODE (type) == TYPE_CODE_CHAR
1600 || TYPE_CODE (type) == TYPE_CODE_BOOL
1601 || TYPE_CODE (type) == TYPE_CODE_PTR
1602 || TYPE_CODE (type) == TYPE_CODE_REF
1603 || TYPE_CODE (type) == TYPE_CODE_ENUM)
1604 {
1605 /* If the the type is a plain integer, then the access is
1606 straight-forward. Otherwise we have to play around a bit
1607 more. */
1608 int len = TYPE_LENGTH (type);
1609 int regno = AARCH64_X0_REGNUM;
1610 ULONGEST tmp;
1611
1612 while (len > 0)
1613 {
1614 /* By using store_unsigned_integer we avoid having to do
1615 anything special for small big-endian values. */
1616 regcache_cooked_read_unsigned (regs, regno++, &tmp);
1617 store_unsigned_integer (valbuf,
1618 (len > X_REGISTER_SIZE
1619 ? X_REGISTER_SIZE : len), byte_order, tmp);
1620 len -= X_REGISTER_SIZE;
1621 valbuf += X_REGISTER_SIZE;
1622 }
1623 }
1624 else if (TYPE_CODE (type) == TYPE_CODE_COMPLEX)
1625 {
1626 int regno = AARCH64_V0_REGNUM;
1627 bfd_byte buf[V_REGISTER_SIZE];
1628 struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
1629 int len = TYPE_LENGTH (target_type);
1630
1631 regcache_cooked_read (regs, regno, buf);
1632 memcpy (valbuf, buf, len);
1633 valbuf += len;
1634 regcache_cooked_read (regs, regno + 1, buf);
1635 memcpy (valbuf, buf, len);
1636 valbuf += len;
1637 }
cd635f74 1638 else if (is_hfa_or_hva (type))
07b287a0
MS
1639 {
1640 int elements = TYPE_NFIELDS (type);
1641 struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
1642 int len = TYPE_LENGTH (member_type);
1643 int i;
1644
1645 for (i = 0; i < elements; i++)
1646 {
1647 int regno = AARCH64_V0_REGNUM + i;
db3516bb 1648 bfd_byte buf[V_REGISTER_SIZE];
07b287a0
MS
1649
1650 if (aarch64_debug)
b277c936 1651 {
cd635f74 1652 debug_printf ("read HFA or HVA return value element %d from %s\n",
b277c936
PL
1653 i + 1,
1654 gdbarch_register_name (gdbarch, regno));
1655 }
07b287a0
MS
1656 regcache_cooked_read (regs, regno, buf);
1657
1658 memcpy (valbuf, buf, len);
1659 valbuf += len;
1660 }
1661 }
238f2452
YQ
1662 else if (TYPE_CODE (type) == TYPE_CODE_ARRAY && TYPE_VECTOR (type)
1663 && (TYPE_LENGTH (type) == 16 || TYPE_LENGTH (type) == 8))
1664 {
1665 /* Short vector is returned in V register. */
1666 gdb_byte buf[V_REGISTER_SIZE];
1667
1668 regcache_cooked_read (regs, AARCH64_V0_REGNUM, buf);
1669 memcpy (valbuf, buf, TYPE_LENGTH (type));
1670 }
07b287a0
MS
1671 else
1672 {
1673 /* For a structure or union the behaviour is as if the value had
1674 been stored to word-aligned memory and then loaded into
1675 registers with 64-bit load instruction(s). */
1676 int len = TYPE_LENGTH (type);
1677 int regno = AARCH64_X0_REGNUM;
1678 bfd_byte buf[X_REGISTER_SIZE];
1679
1680 while (len > 0)
1681 {
1682 regcache_cooked_read (regs, regno++, buf);
1683 memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
1684 len -= X_REGISTER_SIZE;
1685 valbuf += X_REGISTER_SIZE;
1686 }
1687 }
1688}
1689
1690
1691/* Will a function return an aggregate type in memory or in a
1692 register? Return 0 if an aggregate type can be returned in a
1693 register, 1 if it must be returned in memory. */
1694
1695static int
1696aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
1697{
f168693b 1698 type = check_typedef (type);
07b287a0 1699
cd635f74 1700 if (is_hfa_or_hva (type))
07b287a0 1701 {
cd635f74
YQ
1702 /* v0-v7 are used to return values and one register is allocated
1703 for one member. However, HFA or HVA has at most four members. */
07b287a0
MS
1704 return 0;
1705 }
1706
1707 if (TYPE_LENGTH (type) > 16)
1708 {
1709 /* PCS B.6 Aggregates larger than 16 bytes are passed by
1710 invisible reference. */
1711
1712 return 1;
1713 }
1714
1715 return 0;
1716}
1717
1718/* Write into appropriate registers a function return value of type
1719 TYPE, given in virtual format. */
1720
1721static void
1722aarch64_store_return_value (struct type *type, struct regcache *regs,
1723 const gdb_byte *valbuf)
1724{
1725 struct gdbarch *gdbarch = get_regcache_arch (regs);
1726 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1727
1728 if (TYPE_CODE (type) == TYPE_CODE_FLT)
1729 {
1730 bfd_byte buf[V_REGISTER_SIZE];
1731 int len = TYPE_LENGTH (type);
1732
1733 memcpy (buf, valbuf, len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
1734 regcache_cooked_write (regs, AARCH64_V0_REGNUM, buf);
1735 }
1736 else if (TYPE_CODE (type) == TYPE_CODE_INT
1737 || TYPE_CODE (type) == TYPE_CODE_CHAR
1738 || TYPE_CODE (type) == TYPE_CODE_BOOL
1739 || TYPE_CODE (type) == TYPE_CODE_PTR
1740 || TYPE_CODE (type) == TYPE_CODE_REF
1741 || TYPE_CODE (type) == TYPE_CODE_ENUM)
1742 {
1743 if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
1744 {
1745 /* Values of one word or less are zero/sign-extended and
1746 returned in r0. */
1747 bfd_byte tmpbuf[X_REGISTER_SIZE];
1748 LONGEST val = unpack_long (type, valbuf);
1749
1750 store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
1751 regcache_cooked_write (regs, AARCH64_X0_REGNUM, tmpbuf);
1752 }
1753 else
1754 {
1755 /* Integral values greater than one word are stored in
1756 consecutive registers starting with r0. This will always
1757 be a multiple of the regiser size. */
1758 int len = TYPE_LENGTH (type);
1759 int regno = AARCH64_X0_REGNUM;
1760
1761 while (len > 0)
1762 {
1763 regcache_cooked_write (regs, regno++, valbuf);
1764 len -= X_REGISTER_SIZE;
1765 valbuf += X_REGISTER_SIZE;
1766 }
1767 }
1768 }
cd635f74 1769 else if (is_hfa_or_hva (type))
07b287a0
MS
1770 {
1771 int elements = TYPE_NFIELDS (type);
1772 struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
1773 int len = TYPE_LENGTH (member_type);
1774 int i;
1775
1776 for (i = 0; i < elements; i++)
1777 {
1778 int regno = AARCH64_V0_REGNUM + i;
1779 bfd_byte tmpbuf[MAX_REGISTER_SIZE];
1780
1781 if (aarch64_debug)
b277c936 1782 {
cd635f74 1783 debug_printf ("write HFA or HVA return value element %d to %s\n",
b277c936
PL
1784 i + 1,
1785 gdbarch_register_name (gdbarch, regno));
1786 }
07b287a0
MS
1787
1788 memcpy (tmpbuf, valbuf, len);
1789 regcache_cooked_write (regs, regno, tmpbuf);
1790 valbuf += len;
1791 }
1792 }
238f2452
YQ
1793 else if (TYPE_CODE (type) == TYPE_CODE_ARRAY && TYPE_VECTOR (type)
1794 && (TYPE_LENGTH (type) == 8 || TYPE_LENGTH (type) == 16))
1795 {
1796 /* Short vector. */
1797 gdb_byte buf[V_REGISTER_SIZE];
1798
1799 memcpy (buf, valbuf, TYPE_LENGTH (type));
1800 regcache_cooked_write (regs, AARCH64_V0_REGNUM, buf);
1801 }
07b287a0
MS
1802 else
1803 {
1804 /* For a structure or union the behaviour is as if the value had
1805 been stored to word-aligned memory and then loaded into
1806 registers with 64-bit load instruction(s). */
1807 int len = TYPE_LENGTH (type);
1808 int regno = AARCH64_X0_REGNUM;
1809 bfd_byte tmpbuf[X_REGISTER_SIZE];
1810
1811 while (len > 0)
1812 {
1813 memcpy (tmpbuf, valbuf,
1814 len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
1815 regcache_cooked_write (regs, regno++, tmpbuf);
1816 len -= X_REGISTER_SIZE;
1817 valbuf += X_REGISTER_SIZE;
1818 }
1819 }
1820}
1821
1822/* Implement the "return_value" gdbarch method. */
1823
1824static enum return_value_convention
1825aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
1826 struct type *valtype, struct regcache *regcache,
1827 gdb_byte *readbuf, const gdb_byte *writebuf)
1828{
07b287a0
MS
1829
1830 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
1831 || TYPE_CODE (valtype) == TYPE_CODE_UNION
1832 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
1833 {
1834 if (aarch64_return_in_memory (gdbarch, valtype))
1835 {
1836 if (aarch64_debug)
b277c936 1837 debug_printf ("return value in memory\n");
07b287a0
MS
1838 return RETURN_VALUE_STRUCT_CONVENTION;
1839 }
1840 }
1841
1842 if (writebuf)
1843 aarch64_store_return_value (valtype, regcache, writebuf);
1844
1845 if (readbuf)
1846 aarch64_extract_return_value (valtype, regcache, readbuf);
1847
1848 if (aarch64_debug)
b277c936 1849 debug_printf ("return value in registers\n");
07b287a0
MS
1850
1851 return RETURN_VALUE_REGISTER_CONVENTION;
1852}
1853
1854/* Implement the "get_longjmp_target" gdbarch method. */
1855
1856static int
1857aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
1858{
1859 CORE_ADDR jb_addr;
1860 gdb_byte buf[X_REGISTER_SIZE];
1861 struct gdbarch *gdbarch = get_frame_arch (frame);
1862 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1863 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1864
1865 jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
1866
1867 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
1868 X_REGISTER_SIZE))
1869 return 0;
1870
1871 *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
1872 return 1;
1873}
ea873d8e
PL
1874
1875/* Implement the "gen_return_address" gdbarch method. */
1876
1877static void
1878aarch64_gen_return_address (struct gdbarch *gdbarch,
1879 struct agent_expr *ax, struct axs_value *value,
1880 CORE_ADDR scope)
1881{
1882 value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
1883 value->kind = axs_lvalue_register;
1884 value->u.reg = AARCH64_LR_REGNUM;
1885}
07b287a0
MS
1886\f
1887
1888/* Return the pseudo register name corresponding to register regnum. */
1889
1890static const char *
1891aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
1892{
1893 static const char *const q_name[] =
1894 {
1895 "q0", "q1", "q2", "q3",
1896 "q4", "q5", "q6", "q7",
1897 "q8", "q9", "q10", "q11",
1898 "q12", "q13", "q14", "q15",
1899 "q16", "q17", "q18", "q19",
1900 "q20", "q21", "q22", "q23",
1901 "q24", "q25", "q26", "q27",
1902 "q28", "q29", "q30", "q31",
1903 };
1904
1905 static const char *const d_name[] =
1906 {
1907 "d0", "d1", "d2", "d3",
1908 "d4", "d5", "d6", "d7",
1909 "d8", "d9", "d10", "d11",
1910 "d12", "d13", "d14", "d15",
1911 "d16", "d17", "d18", "d19",
1912 "d20", "d21", "d22", "d23",
1913 "d24", "d25", "d26", "d27",
1914 "d28", "d29", "d30", "d31",
1915 };
1916
1917 static const char *const s_name[] =
1918 {
1919 "s0", "s1", "s2", "s3",
1920 "s4", "s5", "s6", "s7",
1921 "s8", "s9", "s10", "s11",
1922 "s12", "s13", "s14", "s15",
1923 "s16", "s17", "s18", "s19",
1924 "s20", "s21", "s22", "s23",
1925 "s24", "s25", "s26", "s27",
1926 "s28", "s29", "s30", "s31",
1927 };
1928
1929 static const char *const h_name[] =
1930 {
1931 "h0", "h1", "h2", "h3",
1932 "h4", "h5", "h6", "h7",
1933 "h8", "h9", "h10", "h11",
1934 "h12", "h13", "h14", "h15",
1935 "h16", "h17", "h18", "h19",
1936 "h20", "h21", "h22", "h23",
1937 "h24", "h25", "h26", "h27",
1938 "h28", "h29", "h30", "h31",
1939 };
1940
1941 static const char *const b_name[] =
1942 {
1943 "b0", "b1", "b2", "b3",
1944 "b4", "b5", "b6", "b7",
1945 "b8", "b9", "b10", "b11",
1946 "b12", "b13", "b14", "b15",
1947 "b16", "b17", "b18", "b19",
1948 "b20", "b21", "b22", "b23",
1949 "b24", "b25", "b26", "b27",
1950 "b28", "b29", "b30", "b31",
1951 };
1952
1953 regnum -= gdbarch_num_regs (gdbarch);
1954
1955 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
1956 return q_name[regnum - AARCH64_Q0_REGNUM];
1957
1958 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
1959 return d_name[regnum - AARCH64_D0_REGNUM];
1960
1961 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
1962 return s_name[regnum - AARCH64_S0_REGNUM];
1963
1964 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
1965 return h_name[regnum - AARCH64_H0_REGNUM];
1966
1967 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
1968 return b_name[regnum - AARCH64_B0_REGNUM];
1969
1970 internal_error (__FILE__, __LINE__,
1971 _("aarch64_pseudo_register_name: bad register number %d"),
1972 regnum);
1973}
1974
1975/* Implement the "pseudo_register_type" tdesc_arch_data method. */
1976
1977static struct type *
1978aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
1979{
1980 regnum -= gdbarch_num_regs (gdbarch);
1981
1982 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
1983 return aarch64_vnq_type (gdbarch);
1984
1985 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
1986 return aarch64_vnd_type (gdbarch);
1987
1988 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
1989 return aarch64_vns_type (gdbarch);
1990
1991 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
1992 return aarch64_vnh_type (gdbarch);
1993
1994 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
1995 return aarch64_vnb_type (gdbarch);
1996
1997 internal_error (__FILE__, __LINE__,
1998 _("aarch64_pseudo_register_type: bad register number %d"),
1999 regnum);
2000}
2001
2002/* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2003
2004static int
2005aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
2006 struct reggroup *group)
2007{
2008 regnum -= gdbarch_num_regs (gdbarch);
2009
2010 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2011 return group == all_reggroup || group == vector_reggroup;
2012 else if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2013 return (group == all_reggroup || group == vector_reggroup
2014 || group == float_reggroup);
2015 else if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2016 return (group == all_reggroup || group == vector_reggroup
2017 || group == float_reggroup);
2018 else if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2019 return group == all_reggroup || group == vector_reggroup;
2020 else if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2021 return group == all_reggroup || group == vector_reggroup;
2022
2023 return group == all_reggroup;
2024}
2025
2026/* Implement the "pseudo_register_read_value" gdbarch method. */
2027
2028static struct value *
2029aarch64_pseudo_read_value (struct gdbarch *gdbarch,
2030 struct regcache *regcache,
2031 int regnum)
2032{
2033 gdb_byte reg_buf[MAX_REGISTER_SIZE];
2034 struct value *result_value;
2035 gdb_byte *buf;
2036
2037 result_value = allocate_value (register_type (gdbarch, regnum));
2038 VALUE_LVAL (result_value) = lval_register;
2039 VALUE_REGNUM (result_value) = regnum;
2040 buf = value_contents_raw (result_value);
2041
2042 regnum -= gdbarch_num_regs (gdbarch);
2043
2044 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2045 {
2046 enum register_status status;
2047 unsigned v_regnum;
2048
2049 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2050 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2051 if (status != REG_VALID)
2052 mark_value_bytes_unavailable (result_value, 0,
2053 TYPE_LENGTH (value_type (result_value)));
2054 else
2055 memcpy (buf, reg_buf, Q_REGISTER_SIZE);
2056 return result_value;
2057 }
2058
2059 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2060 {
2061 enum register_status status;
2062 unsigned v_regnum;
2063
2064 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2065 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2066 if (status != REG_VALID)
2067 mark_value_bytes_unavailable (result_value, 0,
2068 TYPE_LENGTH (value_type (result_value)));
2069 else
2070 memcpy (buf, reg_buf, D_REGISTER_SIZE);
2071 return result_value;
2072 }
2073
2074 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2075 {
2076 enum register_status status;
2077 unsigned v_regnum;
2078
2079 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2080 status = regcache_raw_read (regcache, v_regnum, reg_buf);
4bcddace
PL
2081 if (status != REG_VALID)
2082 mark_value_bytes_unavailable (result_value, 0,
2083 TYPE_LENGTH (value_type (result_value)));
2084 else
2085 memcpy (buf, reg_buf, S_REGISTER_SIZE);
07b287a0
MS
2086 return result_value;
2087 }
2088
2089 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2090 {
2091 enum register_status status;
2092 unsigned v_regnum;
2093
2094 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2095 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2096 if (status != REG_VALID)
2097 mark_value_bytes_unavailable (result_value, 0,
2098 TYPE_LENGTH (value_type (result_value)));
2099 else
2100 memcpy (buf, reg_buf, H_REGISTER_SIZE);
2101 return result_value;
2102 }
2103
2104 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2105 {
2106 enum register_status status;
2107 unsigned v_regnum;
2108
2109 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2110 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2111 if (status != REG_VALID)
2112 mark_value_bytes_unavailable (result_value, 0,
2113 TYPE_LENGTH (value_type (result_value)));
2114 else
2115 memcpy (buf, reg_buf, B_REGISTER_SIZE);
2116 return result_value;
2117 }
2118
2119 gdb_assert_not_reached ("regnum out of bound");
2120}
2121
2122/* Implement the "pseudo_register_write" gdbarch method. */
2123
2124static void
2125aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2126 int regnum, const gdb_byte *buf)
2127{
2128 gdb_byte reg_buf[MAX_REGISTER_SIZE];
2129
2130 /* Ensure the register buffer is zero, we want gdb writes of the
2131 various 'scalar' pseudo registers to behavior like architectural
2132 writes, register width bytes are written the remainder are set to
2133 zero. */
2134 memset (reg_buf, 0, sizeof (reg_buf));
2135
2136 regnum -= gdbarch_num_regs (gdbarch);
2137
2138 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2139 {
2140 /* pseudo Q registers */
2141 unsigned v_regnum;
2142
2143 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2144 memcpy (reg_buf, buf, Q_REGISTER_SIZE);
2145 regcache_raw_write (regcache, v_regnum, reg_buf);
2146 return;
2147 }
2148
2149 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2150 {
2151 /* pseudo D registers */
2152 unsigned v_regnum;
2153
2154 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2155 memcpy (reg_buf, buf, D_REGISTER_SIZE);
2156 regcache_raw_write (regcache, v_regnum, reg_buf);
2157 return;
2158 }
2159
2160 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2161 {
2162 unsigned v_regnum;
2163
2164 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2165 memcpy (reg_buf, buf, S_REGISTER_SIZE);
2166 regcache_raw_write (regcache, v_regnum, reg_buf);
2167 return;
2168 }
2169
2170 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2171 {
2172 /* pseudo H registers */
2173 unsigned v_regnum;
2174
2175 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2176 memcpy (reg_buf, buf, H_REGISTER_SIZE);
2177 regcache_raw_write (regcache, v_regnum, reg_buf);
2178 return;
2179 }
2180
2181 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2182 {
2183 /* pseudo B registers */
2184 unsigned v_regnum;
2185
2186 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2187 memcpy (reg_buf, buf, B_REGISTER_SIZE);
2188 regcache_raw_write (regcache, v_regnum, reg_buf);
2189 return;
2190 }
2191
2192 gdb_assert_not_reached ("regnum out of bound");
2193}
2194
/* Callback function for user_reg_add.  BATON points at the (constant)
   register number of the alias being read; return that register's
   value in FRAME.  */

static struct value *
value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
{
  const int *reg_p = (const int *) baton;

  return value_of_register (*reg_p, frame);
}
2204\f
2205
9404b58f
KM
2206/* Implement the "software_single_step" gdbarch method, needed to
2207 single step through atomic sequences on AArch64. */
2208
2209static int
2210aarch64_software_single_step (struct frame_info *frame)
2211{
2212 struct gdbarch *gdbarch = get_frame_arch (frame);
2213 struct address_space *aspace = get_frame_address_space (frame);
2214 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2215 const int insn_size = 4;
2216 const int atomic_sequence_length = 16; /* Instruction sequence length. */
2217 CORE_ADDR pc = get_frame_pc (frame);
2218 CORE_ADDR breaks[2] = { -1, -1 };
2219 CORE_ADDR loc = pc;
2220 CORE_ADDR closing_insn = 0;
2221 uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
2222 byte_order_for_code);
2223 int index;
2224 int insn_count;
2225 int bc_insn_count = 0; /* Conditional branch instruction count. */
2226 int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed). */
f77ee802
YQ
2227 aarch64_inst inst;
2228
43cdf5ae 2229 if (aarch64_decode_insn (insn, &inst, 1) != 0)
f77ee802 2230 return 0;
9404b58f
KM
2231
2232 /* Look for a Load Exclusive instruction which begins the sequence. */
f77ee802 2233 if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
9404b58f
KM
2234 return 0;
2235
2236 for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
2237 {
9404b58f
KM
2238 loc += insn_size;
2239 insn = read_memory_unsigned_integer (loc, insn_size,
2240 byte_order_for_code);
2241
43cdf5ae 2242 if (aarch64_decode_insn (insn, &inst, 1) != 0)
f77ee802 2243 return 0;
9404b58f 2244 /* Check if the instruction is a conditional branch. */
f77ee802 2245 if (inst.opcode->iclass == condbranch)
9404b58f 2246 {
f77ee802
YQ
2247 gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);
2248
9404b58f
KM
2249 if (bc_insn_count >= 1)
2250 return 0;
2251
2252 /* It is, so we'll try to set a breakpoint at the destination. */
f77ee802 2253 breaks[1] = loc + inst.operands[0].imm.value;
9404b58f
KM
2254
2255 bc_insn_count++;
2256 last_breakpoint++;
2257 }
2258
2259 /* Look for the Store Exclusive which closes the atomic sequence. */
f77ee802 2260 if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
9404b58f
KM
2261 {
2262 closing_insn = loc;
2263 break;
2264 }
2265 }
2266
2267 /* We didn't find a closing Store Exclusive instruction, fall back. */
2268 if (!closing_insn)
2269 return 0;
2270
2271 /* Insert breakpoint after the end of the atomic sequence. */
2272 breaks[0] = loc + insn_size;
2273
2274 /* Check for duplicated breakpoints, and also check that the second
2275 breakpoint is not within the atomic sequence. */
2276 if (last_breakpoint
2277 && (breaks[1] == breaks[0]
2278 || (breaks[1] >= pc && breaks[1] <= closing_insn)))
2279 last_breakpoint = 0;
2280
2281 /* Insert the breakpoint at the end of the sequence, and one at the
2282 destination of the conditional branch, if it exists. */
2283 for (index = 0; index <= last_breakpoint; index++)
2284 insert_single_step_breakpoint (gdbarch, aspace, breaks[index]);
2285
2286 return 1;
2287}
2288
/* Per-instruction state carried from displaced_step_copy_insn to
   displaced_step_fixup.  */

struct displaced_step_closure
{
  /* It is true when condition instruction, such as B.CON, TBZ, etc,
     is being displaced stepping.  */
  int cond;

  /* PC adjustment offset after displaced stepping.  */
  int32_t pc_adjust;
};
2298
2299/* Data when visiting instructions for displaced stepping. */
2300
2301struct aarch64_displaced_step_data
2302{
2303 struct aarch64_insn_data base;
2304
2305 /* The address where the instruction will be executed at. */
2306 CORE_ADDR new_addr;
2307 /* Buffer of instructions to be copied to NEW_ADDR to execute. */
2308 uint32_t insn_buf[DISPLACED_MODIFIED_INSNS];
2309 /* Number of instructions in INSN_BUF. */
2310 unsigned insn_count;
2311 /* Registers when doing displaced stepping. */
2312 struct regcache *regs;
2313
2314 struct displaced_step_closure *dsc;
2315};
2316
2317/* Implementation of aarch64_insn_visitor method "b". */
2318
2319static void
2320aarch64_displaced_step_b (const int is_bl, const int32_t offset,
2321 struct aarch64_insn_data *data)
2322{
2323 struct aarch64_displaced_step_data *dsd
2324 = (struct aarch64_displaced_step_data *) data;
2ac09a5b 2325 int64_t new_offset = data->insn_addr - dsd->new_addr + offset;
b6542f81
YQ
2326
2327 if (can_encode_int32 (new_offset, 28))
2328 {
2329 /* Emit B rather than BL, because executing BL on a new address
2330 will get the wrong address into LR. In order to avoid this,
2331 we emit B, and update LR if the instruction is BL. */
2332 emit_b (dsd->insn_buf, 0, new_offset);
2333 dsd->insn_count++;
2334 }
2335 else
2336 {
2337 /* Write NOP. */
2338 emit_nop (dsd->insn_buf);
2339 dsd->insn_count++;
2340 dsd->dsc->pc_adjust = offset;
2341 }
2342
2343 if (is_bl)
2344 {
2345 /* Update LR. */
2346 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
2347 data->insn_addr + 4);
2348 }
2349}
2350
2351/* Implementation of aarch64_insn_visitor method "b_cond". */
2352
2353static void
2354aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
2355 struct aarch64_insn_data *data)
2356{
2357 struct aarch64_displaced_step_data *dsd
2358 = (struct aarch64_displaced_step_data *) data;
b6542f81
YQ
2359
2360 /* GDB has to fix up PC after displaced step this instruction
2361 differently according to the condition is true or false. Instead
2362 of checking COND against conditional flags, we can use
2363 the following instructions, and GDB can tell how to fix up PC
2364 according to the PC value.
2365
2366 B.COND TAKEN ; If cond is true, then jump to TAKEN.
2367 INSN1 ;
2368 TAKEN:
2369 INSN2
2370 */
2371
2372 emit_bcond (dsd->insn_buf, cond, 8);
2373 dsd->dsc->cond = 1;
2374 dsd->dsc->pc_adjust = offset;
2375 dsd->insn_count = 1;
2376}
2377
2378/* Dynamically allocate a new register. If we know the register
2379 statically, we should make it a global as above instead of using this
2380 helper function. */
2381
2382static struct aarch64_register
2383aarch64_register (unsigned num, int is64)
2384{
2385 return (struct aarch64_register) { num, is64 };
2386}
2387
2388/* Implementation of aarch64_insn_visitor method "cb". */
2389
2390static void
2391aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
2392 const unsigned rn, int is64,
2393 struct aarch64_insn_data *data)
2394{
2395 struct aarch64_displaced_step_data *dsd
2396 = (struct aarch64_displaced_step_data *) data;
b6542f81
YQ
2397
2398 /* The offset is out of range for a compare and branch
2399 instruction. We can use the following instructions instead:
2400
2401 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
2402 INSN1 ;
2403 TAKEN:
2404 INSN2
2405 */
2406 emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
2407 dsd->insn_count = 1;
2408 dsd->dsc->cond = 1;
2409 dsd->dsc->pc_adjust = offset;
2410}
2411
2412/* Implementation of aarch64_insn_visitor method "tb". */
2413
2414static void
2415aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
2416 const unsigned rt, unsigned bit,
2417 struct aarch64_insn_data *data)
2418{
2419 struct aarch64_displaced_step_data *dsd
2420 = (struct aarch64_displaced_step_data *) data;
b6542f81
YQ
2421
2422 /* The offset is out of range for a test bit and branch
2423 instruction We can use the following instructions instead:
2424
2425 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
2426 INSN1 ;
2427 TAKEN:
2428 INSN2
2429
2430 */
2431 emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
2432 dsd->insn_count = 1;
2433 dsd->dsc->cond = 1;
2434 dsd->dsc->pc_adjust = offset;
2435}
2436
2437/* Implementation of aarch64_insn_visitor method "adr". */
2438
2439static void
2440aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
2441 const int is_adrp, struct aarch64_insn_data *data)
2442{
2443 struct aarch64_displaced_step_data *dsd
2444 = (struct aarch64_displaced_step_data *) data;
2445 /* We know exactly the address the ADR{P,} instruction will compute.
2446 We can just write it to the destination register. */
2447 CORE_ADDR address = data->insn_addr + offset;
2448
2449 if (is_adrp)
2450 {
2451 /* Clear the lower 12 bits of the offset to get the 4K page. */
2452 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2453 address & ~0xfff);
2454 }
2455 else
2456 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2457 address);
2458
2459 dsd->dsc->pc_adjust = 4;
2460 emit_nop (dsd->insn_buf);
2461 dsd->insn_count = 1;
2462}
2463
2464/* Implementation of aarch64_insn_visitor method "ldr_literal". */
2465
2466static void
2467aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
2468 const unsigned rt, const int is64,
2469 struct aarch64_insn_data *data)
2470{
2471 struct aarch64_displaced_step_data *dsd
2472 = (struct aarch64_displaced_step_data *) data;
2473 CORE_ADDR address = data->insn_addr + offset;
2474 struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };
2475
2476 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
2477 address);
2478
2479 if (is_sw)
2480 dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
2481 aarch64_register (rt, 1), zero);
2482 else
2483 dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
2484 aarch64_register (rt, 1), zero);
2485
2486 dsd->dsc->pc_adjust = 4;
2487}
2488
2489/* Implementation of aarch64_insn_visitor method "others". */
2490
2491static void
2492aarch64_displaced_step_others (const uint32_t insn,
2493 struct aarch64_insn_data *data)
2494{
2495 struct aarch64_displaced_step_data *dsd
2496 = (struct aarch64_displaced_step_data *) data;
2497
e1c587c3 2498 aarch64_emit_insn (dsd->insn_buf, insn);
b6542f81
YQ
2499 dsd->insn_count = 1;
2500
2501 if ((insn & 0xfffffc1f) == 0xd65f0000)
2502 {
2503 /* RET */
2504 dsd->dsc->pc_adjust = 0;
2505 }
2506 else
2507 dsd->dsc->pc_adjust = 4;
2508}
2509
2510static const struct aarch64_insn_visitor visitor =
2511{
2512 aarch64_displaced_step_b,
2513 aarch64_displaced_step_b_cond,
2514 aarch64_displaced_step_cb,
2515 aarch64_displaced_step_tb,
2516 aarch64_displaced_step_adr,
2517 aarch64_displaced_step_ldr_literal,
2518 aarch64_displaced_step_others,
2519};
2520
2521/* Implement the "displaced_step_copy_insn" gdbarch method. */
2522
2523struct displaced_step_closure *
2524aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
2525 CORE_ADDR from, CORE_ADDR to,
2526 struct regcache *regs)
2527{
2528 struct displaced_step_closure *dsc = NULL;
2529 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2530 uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
2531 struct aarch64_displaced_step_data dsd;
c86a40c6
YQ
2532 aarch64_inst inst;
2533
2534 if (aarch64_decode_insn (insn, &inst, 1) != 0)
2535 return NULL;
b6542f81
YQ
2536
2537 /* Look for a Load Exclusive instruction which begins the sequence. */
c86a40c6 2538 if (inst.opcode->iclass == ldstexcl && bit (insn, 22))
b6542f81
YQ
2539 {
2540 /* We can't displaced step atomic sequences. */
2541 return NULL;
2542 }
2543
2544 dsc = XCNEW (struct displaced_step_closure);
2545 dsd.base.insn_addr = from;
2546 dsd.new_addr = to;
2547 dsd.regs = regs;
2548 dsd.dsc = dsc;
034f1a81 2549 dsd.insn_count = 0;
b6542f81
YQ
2550 aarch64_relocate_instruction (insn, &visitor,
2551 (struct aarch64_insn_data *) &dsd);
2552 gdb_assert (dsd.insn_count <= DISPLACED_MODIFIED_INSNS);
2553
2554 if (dsd.insn_count != 0)
2555 {
2556 int i;
2557
2558 /* Instruction can be relocated to scratch pad. Copy
2559 relocated instruction(s) there. */
2560 for (i = 0; i < dsd.insn_count; i++)
2561 {
2562 if (debug_displaced)
2563 {
2564 debug_printf ("displaced: writing insn ");
2565 debug_printf ("%.8x", dsd.insn_buf[i]);
2566 debug_printf (" at %s\n", paddress (gdbarch, to + i * 4));
2567 }
2568 write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
2569 (ULONGEST) dsd.insn_buf[i]);
2570 }
2571 }
2572 else
2573 {
2574 xfree (dsc);
2575 dsc = NULL;
2576 }
2577
2578 return dsc;
2579}
2580
2581/* Implement the "displaced_step_fixup" gdbarch method. */
2582
2583void
2584aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
2585 struct displaced_step_closure *dsc,
2586 CORE_ADDR from, CORE_ADDR to,
2587 struct regcache *regs)
2588{
2589 if (dsc->cond)
2590 {
2591 ULONGEST pc;
2592
2593 regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc);
2594 if (pc - to == 8)
2595 {
2596 /* Condition is true. */
2597 }
2598 else if (pc - to == 4)
2599 {
2600 /* Condition is false. */
2601 dsc->pc_adjust = 4;
2602 }
2603 else
2604 gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
2605 }
2606
2607 if (dsc->pc_adjust != 0)
2608 {
2609 if (debug_displaced)
2610 {
2611 debug_printf ("displaced: fixup: set PC to %s:%d\n",
2612 paddress (gdbarch, from), dsc->pc_adjust);
2613 }
2614 regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
2615 from + dsc->pc_adjust);
2616 }
2617}
2618
/* Implement the "displaced_step_hw_singlestep" gdbarch method.
   Always use hardware single-stepping over the scratch pad; CLOSURE
   is unused.  */

int
aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch,
				      struct displaced_step_closure *closure)
{
  return 1;
}
2627
07b287a0
MS
2628/* Initialize the current architecture based on INFO. If possible,
2629 re-use an architecture from ARCHES, which is a list of
2630 architectures already created during this debugging session.
2631
2632 Called e.g. at program startup, when reading a core file, and when
2633 reading a binary file. */
2634
2635static struct gdbarch *
2636aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
2637{
2638 struct gdbarch_tdep *tdep;
2639 struct gdbarch *gdbarch;
2640 struct gdbarch_list *best_arch;
2641 struct tdesc_arch_data *tdesc_data = NULL;
2642 const struct target_desc *tdesc = info.target_desc;
2643 int i;
07b287a0
MS
2644 int valid_p = 1;
2645 const struct tdesc_feature *feature;
2646 int num_regs = 0;
2647 int num_pseudo_regs = 0;
2648
2649 /* Ensure we always have a target descriptor. */
2650 if (!tdesc_has_registers (tdesc))
2651 tdesc = tdesc_aarch64;
2652
2653 gdb_assert (tdesc);
2654
2655 feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");
2656
2657 if (feature == NULL)
2658 return NULL;
2659
2660 tdesc_data = tdesc_data_alloc ();
2661
2662 /* Validate the descriptor provides the mandatory core R registers
2663 and allocate their numbers. */
2664 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
2665 valid_p &=
2666 tdesc_numbered_register (feature, tdesc_data, AARCH64_X0_REGNUM + i,
2667 aarch64_r_register_names[i]);
2668
2669 num_regs = AARCH64_X0_REGNUM + i;
2670
2671 /* Look for the V registers. */
2672 feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
2673 if (feature)
2674 {
2675 /* Validate the descriptor provides the mandatory V registers
2676 and allocate their numbers. */
2677 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
2678 valid_p &=
2679 tdesc_numbered_register (feature, tdesc_data, AARCH64_V0_REGNUM + i,
2680 aarch64_v_register_names[i]);
2681
2682 num_regs = AARCH64_V0_REGNUM + i;
2683
2684 num_pseudo_regs += 32; /* add the Qn scalar register pseudos */
2685 num_pseudo_regs += 32; /* add the Dn scalar register pseudos */
2686 num_pseudo_regs += 32; /* add the Sn scalar register pseudos */
2687 num_pseudo_regs += 32; /* add the Hn scalar register pseudos */
2688 num_pseudo_regs += 32; /* add the Bn scalar register pseudos */
2689 }
2690
2691 if (!valid_p)
2692 {
2693 tdesc_data_cleanup (tdesc_data);
2694 return NULL;
2695 }
2696
2697 /* AArch64 code is always little-endian. */
2698 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
2699
2700 /* If there is already a candidate, use it. */
2701 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
2702 best_arch != NULL;
2703 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
2704 {
2705 /* Found a match. */
2706 break;
2707 }
2708
2709 if (best_arch != NULL)
2710 {
2711 if (tdesc_data != NULL)
2712 tdesc_data_cleanup (tdesc_data);
2713 return best_arch->gdbarch;
2714 }
2715
8d749320 2716 tdep = XCNEW (struct gdbarch_tdep);
07b287a0
MS
2717 gdbarch = gdbarch_alloc (&info, tdep);
2718
2719 /* This should be low enough for everything. */
2720 tdep->lowest_pc = 0x20;
2721 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
2722 tdep->jb_elt_size = 8;
2723
2724 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
2725 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
2726
07b287a0
MS
2727 /* Frame handling. */
2728 set_gdbarch_dummy_id (gdbarch, aarch64_dummy_id);
2729 set_gdbarch_unwind_pc (gdbarch, aarch64_unwind_pc);
2730 set_gdbarch_unwind_sp (gdbarch, aarch64_unwind_sp);
2731
2732 /* Advance PC across function entry code. */
2733 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
2734
2735 /* The stack grows downward. */
2736 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
2737
2738 /* Breakpoint manipulation. */
2739 set_gdbarch_breakpoint_from_pc (gdbarch, aarch64_breakpoint_from_pc);
07b287a0 2740 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
9404b58f 2741 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
07b287a0
MS
2742
2743 /* Information about registers, etc. */
2744 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
2745 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
2746 set_gdbarch_num_regs (gdbarch, num_regs);
2747
2748 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
2749 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
2750 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
2751 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
2752 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
2753 set_tdesc_pseudo_register_reggroup_p (gdbarch,
2754 aarch64_pseudo_register_reggroup_p);
2755
2756 /* ABI */
2757 set_gdbarch_short_bit (gdbarch, 16);
2758 set_gdbarch_int_bit (gdbarch, 32);
2759 set_gdbarch_float_bit (gdbarch, 32);
2760 set_gdbarch_double_bit (gdbarch, 64);
2761 set_gdbarch_long_double_bit (gdbarch, 128);
2762 set_gdbarch_long_bit (gdbarch, 64);
2763 set_gdbarch_long_long_bit (gdbarch, 64);
2764 set_gdbarch_ptr_bit (gdbarch, 64);
2765 set_gdbarch_char_signed (gdbarch, 0);
2766 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
2767 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
2768 set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);
2769
2770 /* Internal <-> external register number maps. */
2771 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
2772
2773 /* Returning results. */
2774 set_gdbarch_return_value (gdbarch, aarch64_return_value);
2775
2776 /* Disassembly. */
2777 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
2778
2779 /* Virtual tables. */
2780 set_gdbarch_vbit_in_delta (gdbarch, 1);
2781
2782 /* Hook in the ABI-specific overrides, if they have been registered. */
2783 info.target_desc = tdesc;
2784 info.tdep_info = (void *) tdesc_data;
2785 gdbarch_init_osabi (info, gdbarch);
2786
2787 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
2788
2789 /* Add some default predicates. */
2790 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
2791 dwarf2_append_unwinders (gdbarch);
2792 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
2793
2794 frame_base_set_default (gdbarch, &aarch64_normal_base);
2795
2796 /* Now we have tuned the configuration, set a few final things,
2797 based on what the OS ABI has told us. */
2798
2799 if (tdep->jb_pc >= 0)
2800 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
2801
ea873d8e
PL
2802 set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);
2803
07b287a0
MS
2804 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
2805
2806 /* Add standard register aliases. */
2807 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
2808 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
2809 value_of_aarch64_user_reg,
2810 &aarch64_register_aliases[i].regnum);
2811
2812 return gdbarch;
2813}
2814
2815static void
2816aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
2817{
2818 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2819
2820 if (tdep == NULL)
2821 return;
2822
2823 fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
2824 paddress (gdbarch, tdep->lowest_pc));
2825}
2826
2827/* Suppress warning from -Wmissing-prototypes. */
2828extern initialize_file_ftype _initialize_aarch64_tdep;
2829
2830void
2831_initialize_aarch64_tdep (void)
2832{
2833 gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
2834 aarch64_dump_tdep);
2835
2836 initialize_tdesc_aarch64 ();
07b287a0
MS
2837
2838 /* Debug this file's internals. */
2839 add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
2840Set AArch64 debugging."), _("\
2841Show AArch64 debugging."), _("\
2842When on, AArch64 specific debugging is enabled."),
2843 NULL,
2844 show_aarch64_debug,
2845 &setdebuglist, &showdebuglist);
2846}
99afc88b
OJ
2847
/* AArch64 process record-replay related structures, defines etc.  */

/* Allocate a uint32_t array of LENGTH register numbers into REGS and
   fill it from RECORD_BUF.  A LENGTH of zero leaves REGS untouched.  */

#define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
        do  \
          { \
            unsigned int reg_len = LENGTH; \
            if (reg_len) \
              { \
                REGS = XNEWVEC (uint32_t, reg_len); \
                memcpy(&REGS[0], &RECORD_BUF[0], sizeof(uint32_t)*LENGTH); \
              } \
          } \
        while (0)

/* Allocate an aarch64_mem_r array of LENGTH records into MEMS and fill
   it from RECORD_BUF.  A LENGTH of zero leaves MEMS untouched.  */

#define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
        do  \
          { \
            unsigned int mem_len = LENGTH; \
            if (mem_len) \
            { \
              MEMS =  XNEWVEC (struct aarch64_mem_r, mem_len);  \
              memcpy(&MEMS->len, &RECORD_BUF[0], \
                     sizeof(struct aarch64_mem_r) * LENGTH); \
            } \
          } \
          while (0)
2874
/* AArch64 record/replay structures and enumerations.  */

struct aarch64_mem_r
{
  uint64_t len;    /* Record length.  */
  uint64_t addr;   /* Memory address.  */
};
2882
/* Result codes returned by the aarch64_record_* decode handlers.  */

enum aarch64_record_result
{
  AARCH64_RECORD_SUCCESS,
  AARCH64_RECORD_FAILURE,
  AARCH64_RECORD_UNSUPPORTED,
  AARCH64_RECORD_UNKNOWN
};
2890
2891typedef struct insn_decode_record_t
2892{
2893 struct gdbarch *gdbarch;
2894 struct regcache *regcache;
2895 CORE_ADDR this_addr; /* Address of insn to be recorded. */
2896 uint32_t aarch64_insn; /* Insn to be recorded. */
2897 uint32_t mem_rec_count; /* Count of memory records. */
2898 uint32_t reg_rec_count; /* Count of register records. */
2899 uint32_t *aarch64_regs; /* Registers to be recorded. */
2900 struct aarch64_mem_r *aarch64_mems; /* Memory locations to be recorded. */
2901} insn_decode_record;
2902
2903/* Record handler for data processing - register instructions. */
2904
2905static unsigned int
2906aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
2907{
2908 uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
2909 uint32_t record_buf[4];
2910
2911 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
2912 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
2913 insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);
2914
2915 if (!bit (aarch64_insn_r->aarch64_insn, 28))
2916 {
2917 uint8_t setflags;
2918
2919 /* Logical (shifted register). */
2920 if (insn_bits24_27 == 0x0a)
2921 setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
2922 /* Add/subtract. */
2923 else if (insn_bits24_27 == 0x0b)
2924 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
2925 else
2926 return AARCH64_RECORD_UNKNOWN;
2927
2928 record_buf[0] = reg_rd;
2929 aarch64_insn_r->reg_rec_count = 1;
2930 if (setflags)
2931 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
2932 }
2933 else
2934 {
2935 if (insn_bits24_27 == 0x0b)
2936 {
2937 /* Data-processing (3 source). */
2938 record_buf[0] = reg_rd;
2939 aarch64_insn_r->reg_rec_count = 1;
2940 }
2941 else if (insn_bits24_27 == 0x0a)
2942 {
2943 if (insn_bits21_23 == 0x00)
2944 {
2945 /* Add/subtract (with carry). */
2946 record_buf[0] = reg_rd;
2947 aarch64_insn_r->reg_rec_count = 1;
2948 if (bit (aarch64_insn_r->aarch64_insn, 29))
2949 {
2950 record_buf[1] = AARCH64_CPSR_REGNUM;
2951 aarch64_insn_r->reg_rec_count = 2;
2952 }
2953 }
2954 else if (insn_bits21_23 == 0x02)
2955 {
2956 /* Conditional compare (register) and conditional compare
2957 (immediate) instructions. */
2958 record_buf[0] = AARCH64_CPSR_REGNUM;
2959 aarch64_insn_r->reg_rec_count = 1;
2960 }
2961 else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
2962 {
2963 /* CConditional select. */
2964 /* Data-processing (2 source). */
2965 /* Data-processing (1 source). */
2966 record_buf[0] = reg_rd;
2967 aarch64_insn_r->reg_rec_count = 1;
2968 }
2969 else
2970 return AARCH64_RECORD_UNKNOWN;
2971 }
2972 }
2973
2974 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
2975 record_buf);
2976 return AARCH64_RECORD_SUCCESS;
2977}
2978
2979/* Record handler for data processing - immediate instructions. */
2980
2981static unsigned int
2982aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
2983{
78cc6c2d 2984 uint8_t reg_rd, insn_bit23, insn_bits24_27, setflags;
99afc88b
OJ
2985 uint32_t record_buf[4];
2986
2987 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
99afc88b
OJ
2988 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
2989 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
2990
2991 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
2992 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
2993 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
2994 {
2995 record_buf[0] = reg_rd;
2996 aarch64_insn_r->reg_rec_count = 1;
2997 }
2998 else if (insn_bits24_27 == 0x01)
2999 {
3000 /* Add/Subtract (immediate). */
3001 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3002 record_buf[0] = reg_rd;
3003 aarch64_insn_r->reg_rec_count = 1;
3004 if (setflags)
3005 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3006 }
3007 else if (insn_bits24_27 == 0x02 && !insn_bit23)
3008 {
3009 /* Logical (immediate). */
3010 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
3011 record_buf[0] = reg_rd;
3012 aarch64_insn_r->reg_rec_count = 1;
3013 if (setflags)
3014 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3015 }
3016 else
3017 return AARCH64_RECORD_UNKNOWN;
3018
3019 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3020 record_buf);
3021 return AARCH64_RECORD_SUCCESS;
3022}
3023
/* Record handler for branch, exception generation and system instructions.
   Decodes the major group with bits 28-31 == 0x0d (exception/system) versus
   the branch encodings, and stores the registers the instruction will
   modify into AARCH64_INSN_R->aarch64_regs.  Returns
   AARCH64_RECORD_SUCCESS, AARCH64_RECORD_UNSUPPORTED or
   AARCH64_RECORD_UNKNOWN.  */

static unsigned int
aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
  uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
  uint32_t record_buf[4];

  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
  insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);

  if (insn_bits28_31 == 0x0d)
    {
      /* Exception generation instructions. */
      if (insn_bits24_27 == 0x04)
        {
          /* Only SVC is handled: opc (bits 21-23) == 000, op2 (bits 2-4)
             == 000 and LL (bits 0-1) == 01.  All other exception
             generating instructions (HVC, SMC, BRK, HLT, ...) are
             reported as unsupported.  */
          if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
              && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
              && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
            {
              ULONGEST svc_number;

              /* Raw register 8 (x8) is read as the syscall number --
                 the AArch64 Linux convention; the OS-specific tdep hook
                 interprets it and records the syscall's side effects.  */
              regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
                                          &svc_number);
              return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
                                                   svc_number);
            }
          else
            return AARCH64_RECORD_UNSUPPORTED;
        }
      /* System instructions. */
      else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
        {
          uint32_t reg_rt, reg_crn;

          reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
          reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);

          /* Record rt in case of sysl and mrs instructions. */
          if (bit (aarch64_insn_r->aarch64_insn, 21))
            {
              record_buf[0] = reg_rt;
              aarch64_insn_r->reg_rec_count = 1;
            }
          /* Record cpsr for hint and msr(immediate) instructions. */
          else if (reg_crn == 0x02 || reg_crn == 0x04)
            {
              record_buf[0] = AARCH64_CPSR_REGNUM;
              aarch64_insn_r->reg_rec_count = 1;
            }
          /* Other system instructions (e.g. SYS) fall through with
             reg_rec_count == 0: nothing extra to record.  */
        }
      /* Unconditional branch (register). */
      else if((insn_bits24_27 & 0x0e) == 0x06)
        {
          record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
          /* BLR also clobbers the link register.  */
          if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
            record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
        }
      else
        return AARCH64_RECORD_UNKNOWN;
    }
  /* Unconditional branch (immediate). */
  else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
    {
      record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
      /* Bit 31 distinguishes BL (writes LR) from B.  */
      if (bit (aarch64_insn_r->aarch64_insn, 31))
        record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
    }
  else
    /* Compare & branch (immediate), Test & branch (immediate) and
       Conditional branch (immediate). */
    record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;

  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
             record_buf);
  return AARCH64_RECORD_SUCCESS;
}
3103
/* Record handler for advanced SIMD load and store instructions.
   Covers both the "single structure" (bit 24 set: LD1..LD4/ST1..ST4 to
   one element or replicating lanes) and "multiple structures" forms.
   Stores, per element, the length/address pairs of memory that will be
   written; loads record the destination vector registers instead.  */

static unsigned int
aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
{
  /* NOTE(review): ADDRESS is declared CORE_ADDR but is filled in via
     regcache_raw_read_unsigned, which takes a ULONGEST * -- confirm the
     two typedefs are layout-compatible on all supported hosts.  */
  CORE_ADDR address;
  uint64_t addr_offset = 0;
  uint32_t record_buf[24];
  uint64_t record_buf_mem[24];
  uint32_t reg_rn, reg_rt;
  uint32_t reg_index = 0, mem_index = 0;
  uint8_t opcode_bits, size_bits;

  reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
  size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
  opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
  /* Base register supplies the transfer address.  */
  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);

  if (record_debug)
    debug_printf ("Process record: Advanced SIMD load/store\n");

  /* Load/store single structure. */
  if (bit (aarch64_insn_r->aarch64_insn, 24))
    {
      uint8_t sindex, scale, selem, esize, replicate = 0;
      /* scale selects the element size; selem is the number of
         structure registers transferred per element (1..4).  */
      scale = opcode_bits >> 2;
      selem = ((opcode_bits & 0x02) |
              bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
      switch (scale)
        {
        case 1:
          /* 16-bit element: size bit 0 must be clear.  */
          if (size_bits & 0x01)
            return AARCH64_RECORD_UNKNOWN;
          break;
        case 2:
          /* 32-bit element, or 64-bit when size bit 0 is set and the
             opcode permits widening to scale 3.  */
          if ((size_bits >> 1) & 0x01)
            return AARCH64_RECORD_UNKNOWN;
          if (size_bits & 0x01)
            {
              if (!((opcode_bits >> 1) & 0x01))
                scale = 3;
              else
                return AARCH64_RECORD_UNKNOWN;
            }
          break;
        case 3:
          /* Replicating form (LD1R..LD4R): element size comes from
             size_bits directly.  */
          if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
            {
              scale = size_bits;
              replicate = 1;
              break;
            }
          else
            return AARCH64_RECORD_UNKNOWN;
        default:
          /* scale == 0: 8-bit element, always valid.  */
          break;
        }
      esize = 8 << scale;   /* Element size in bits.  */
      if (replicate)
        for (sindex = 0; sindex < selem; sindex++)
          {
            record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
            reg_rt = (reg_rt + 1) % 32;   /* Register list wraps at v31.  */
          }
      else
        {
          for (sindex = 0; sindex < selem; sindex++)
            {
              /* Bit 22 set => load (record register); clear => store
                 (record the memory that will be overwritten).  */
              if (bit (aarch64_insn_r->aarch64_insn, 22))
                record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
              else
                {
                  record_buf_mem[mem_index++] = esize / 8;
                  record_buf_mem[mem_index++] = address + addr_offset;
                }
              addr_offset = addr_offset + (esize / 8);
              reg_rt = (reg_rt + 1) % 32;
            }
        }
    }
  /* Load/store multiple structure. */
  else
    {
      uint8_t selem, esize, rpt, elements;
      uint8_t eindex, rindex;

      esize = 8 << size_bits;
      /* Q bit (30) selects 128-bit vs 64-bit vector width.  */
      if (bit (aarch64_insn_r->aarch64_insn, 30))
        elements = 128 / esize;
      else
        elements = 64 / esize;

      /* rpt = register-list repetitions, selem = structure elements
         interleaved per access (see ARMv8 ARM, ASIMD load/store
         multiple structures encoding table).  */
      switch (opcode_bits)
        {
        /*LD/ST4 (4 Registers). */
        case 0:
          rpt = 1;
          selem = 4;
          break;
        /*LD/ST1 (4 Registers). */
        case 2:
          rpt = 4;
          selem = 1;
          break;
        /*LD/ST3 (3 Registers). */
        case 4:
          rpt = 1;
          selem = 3;
          break;
        /*LD/ST1 (3 Registers). */
        case 6:
          rpt = 3;
          selem = 1;
          break;
        /*LD/ST1 (1 Register). */
        case 7:
          rpt = 1;
          selem = 1;
          break;
        /*LD/ST2 (2 Registers). */
        case 8:
          rpt = 1;
          selem = 2;
          break;
        /*LD/ST1 (2 Registers). */
        case 10:
          rpt = 2;
          selem = 1;
          break;
        default:
          return AARCH64_RECORD_UNSUPPORTED;
          break;
        }
      for (rindex = 0; rindex < rpt; rindex++)
        for (eindex = 0; eindex < elements; eindex++)
          {
            uint8_t reg_tt, sindex;
            reg_tt = (reg_rt + rindex) % 32;
            for (sindex = 0; sindex < selem; sindex++)
              {
                /* Bit 22 set => load; clear => store (see above).  */
                if (bit (aarch64_insn_r->aarch64_insn, 22))
                  record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
                else
                  {
                    record_buf_mem[mem_index++] = esize / 8;
                    record_buf_mem[mem_index++] = address + addr_offset;
                  }
                addr_offset = addr_offset + (esize / 8);
                reg_tt = (reg_tt + 1) % 32;
              }
          }
    }

  /* Post-indexed forms (bit 23) also write back the base register.  */
  if (bit (aarch64_insn_r->aarch64_insn, 23))
    record_buf[reg_index++] = reg_rn;

  aarch64_insn_r->reg_rec_count = reg_index;
  aarch64_insn_r->mem_rec_count = mem_index / 2;
  MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
             record_buf_mem);
  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
             record_buf);
  return AARCH64_RECORD_SUCCESS;
}
3269
/* Record handler for load and store instructions.
   Dispatches on bits 24-29 to the scalar load/store encoding classes
   (exclusive, literal, pair, unsigned-immediate, register-offset,
   immediate/unprivileged); anything else is delegated to the advanced
   SIMD load/store handler.  For loads the destination register(s) are
   recorded; for stores, the length/address of the memory about to be
   written.  */

static unsigned int
aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
{
  uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
  uint8_t insn_bit23, insn_bit21;
  uint8_t opc, size_bits, ld_flag, vector_flag;
  uint32_t reg_rn, reg_rt, reg_rt2;
  uint64_t datasize, offset;
  uint32_t record_buf[8];
  uint64_t record_buf_mem[8];
  CORE_ADDR address;

  insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
  insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
  insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
  ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
  vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
  reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
  reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
  size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);

  /* Load/store exclusive. */
  if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
    {
      if (record_debug)
        debug_printf ("Process record: load/store exclusive\n");

      if (ld_flag)
        {
          record_buf[0] = reg_rt;
          aarch64_insn_r->reg_rec_count = 1;
          /* Bit 21: pair form (LDXP/LDAXP) also writes rt2.  */
          if (insn_bit21)
            {
              record_buf[1] = reg_rt2;
              aarch64_insn_r->reg_rec_count = 2;
            }
        }
      else
        {
          if (insn_bit21)
            datasize = (8 << size_bits) * 2;
          else
            datasize = (8 << size_bits);
          regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
                                      &address);
          record_buf_mem[0] = datasize / 8;
          record_buf_mem[1] = address;
          aarch64_insn_r->mem_rec_count = 1;
          if (!insn_bit23)
            {
              /* Save register rs. */
              record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
              aarch64_insn_r->reg_rec_count = 1;
            }
        }
    }
  /* Load register (literal) instructions decoding. */
  else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
    {
      if (record_debug)
        debug_printf ("Process record: load register (literal)\n");
      if (vector_flag)
        record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
      else
        record_buf[0] = reg_rt;
      aarch64_insn_r->reg_rec_count = 1;
    }
  /* All types of load/store pair instructions decoding. */
  else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
    {
      if (record_debug)
        debug_printf ("Process record: load/store pair\n");

      if (ld_flag)
        {
          if (vector_flag)
            {
              record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
              record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
            }
          else
            {
              record_buf[0] = reg_rt;
              record_buf[1] = reg_rt2;
            }
          aarch64_insn_r->reg_rec_count = 2;
        }
      else
        {
          /* imm7 is a signed, scaled offset; decode its two's
             complement by hand.  */
          uint16_t imm7_off;
          imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
          if (!vector_flag)
            size_bits = size_bits >> 1;
          datasize = 8 << (2 + size_bits);
          offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
          offset = offset << (2 + size_bits);
          regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
                                      &address);
          /* Post-indexed pair stores use the unmodified base address;
             every other addressing mode applies the offset first.  */
          if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
            {
              if (imm7_off & 0x40)
                address = address - offset;
              else
                address = address + offset;
            }

          record_buf_mem[0] = datasize / 8;
          record_buf_mem[1] = address;
          record_buf_mem[2] = datasize / 8;
          record_buf_mem[3] = address + (datasize / 8);
          aarch64_insn_r->mem_rec_count = 2;
        }
      /* Writeback forms also modify the base register.  */
      if (bit (aarch64_insn_r->aarch64_insn, 23))
        record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
    }
  /* Load/store register (unsigned immediate) instructions. */
  else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
    {
      /* opc: low bit distinguishes store/load; opc >= 2 is a
         sign-extending load, except size == 3 which encodes PRFM
         (prefetch) -- nothing to record, flagged as unknown.  */
      opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
      if (!(opc >> 1))
        if (opc & 0x01)
          ld_flag = 0x01;
        else
          ld_flag = 0x0;
      else
        if (size_bits != 0x03)
          ld_flag = 0x01;
        else
          return AARCH64_RECORD_UNKNOWN;

      if (record_debug)
        {
          debug_printf ("Process record: load/store (unsigned immediate):"
                        " size %x V %d opc %x\n", size_bits, vector_flag,
                        opc);
        }

      if (!ld_flag)
        {
          offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
          datasize = 8 << size_bits;
          regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
                                      &address);
          offset = offset << size_bits;   /* imm12 is scaled by size.  */
          address = address + offset;

          record_buf_mem[0] = datasize >> 3;
          record_buf_mem[1] = address;
          aarch64_insn_r->mem_rec_count = 1;
        }
      else
        {
          if (vector_flag)
            record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
          else
            record_buf[0] = reg_rt;
          aarch64_insn_r->reg_rec_count = 1;
        }
    }
  /* Load/store register (register offset) instructions. */
  else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
           && insn_bits10_11 == 0x02 && insn_bit21)
    {
      if (record_debug)
        debug_printf ("Process record: load/store (register offset)\n");
      /* Same opc decoding as the unsigned-immediate class above.  */
      opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
      if (!(opc >> 1))
        if (opc & 0x01)
          ld_flag = 0x01;
        else
          ld_flag = 0x0;
      else
        if (size_bits != 0x03)
          ld_flag = 0x01;
        else
          return AARCH64_RECORD_UNKNOWN;

      if (!ld_flag)
        {
          ULONGEST reg_rm_val;

          regcache_raw_read_unsigned (aarch64_insn_r->regcache,
                     bits (aarch64_insn_r->aarch64_insn, 16, 20), &reg_rm_val);
          /* Bit 12 (S) requests scaling of the index register.  */
          if (bit (aarch64_insn_r->aarch64_insn, 12))
            offset = reg_rm_val << size_bits;
          else
            offset = reg_rm_val;
          datasize = 8 << size_bits;
          regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
                                      &address);
          address = address + offset;
          record_buf_mem[0] = datasize >> 3;
          record_buf_mem[1] = address;
          aarch64_insn_r->mem_rec_count = 1;
        }
      else
        {
          if (vector_flag)
            record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
          else
            record_buf[0] = reg_rt;
          aarch64_insn_r->reg_rec_count = 1;
        }
    }
  /* Load/store register (immediate and unprivileged) instructions. */
  else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
           && !insn_bit21)
    {
      if (record_debug)
        {
          debug_printf ("Process record: load/store "
                        "(immediate and unprivileged)\n");
        }
      /* Same opc decoding as the unsigned-immediate class above.  */
      opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
      if (!(opc >> 1))
        if (opc & 0x01)
          ld_flag = 0x01;
        else
          ld_flag = 0x0;
      else
        if (size_bits != 0x03)
          ld_flag = 0x01;
        else
          return AARCH64_RECORD_UNKNOWN;

      if (!ld_flag)
        {
          /* imm9 is signed; decode its two's complement by hand.  */
          uint16_t imm9_off;
          imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
          offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
          datasize = 8 << size_bits;
          regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
                                      &address);
          /* Post-indexed (bits 10-11 == 01) writes at the unmodified
             base; other forms apply the offset first.  */
          if (insn_bits10_11 != 0x01)
            {
              if (imm9_off & 0x0100)
                address = address - offset;
              else
                address = address + offset;
            }
          record_buf_mem[0] = datasize >> 3;
          record_buf_mem[1] = address;
          aarch64_insn_r->mem_rec_count = 1;
        }
      else
        {
          if (vector_flag)
            record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
          else
            record_buf[0] = reg_rt;
          aarch64_insn_r->reg_rec_count = 1;
        }
      /* Pre/post-indexed forms write back the base register.  */
      if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
        record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
    }
  /* Advanced SIMD load/store instructions. */
  else
    return aarch64_record_asimd_load_store (aarch64_insn_r);

  MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
             record_buf_mem);
  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
             record_buf);
  return AARCH64_RECORD_SUCCESS;
}
3540
3541/* Record handler for data processing SIMD and floating point instructions. */
3542
3543static unsigned int
3544aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
3545{
3546 uint8_t insn_bit21, opcode, rmode, reg_rd;
3547 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
3548 uint8_t insn_bits11_14;
3549 uint32_t record_buf[2];
3550
3551 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3552 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3553 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3554 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3555 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
3556 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
3557 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
3558 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3559 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3560
3561 if (record_debug)
b277c936 3562 debug_printf ("Process record: data processing SIMD/FP: ");
99afc88b
OJ
3563
3564 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
3565 {
3566 /* Floating point - fixed point conversion instructions. */
3567 if (!insn_bit21)
3568 {
3569 if (record_debug)
b277c936 3570 debug_printf ("FP - fixed point conversion");
99afc88b
OJ
3571
3572 if ((opcode >> 1) == 0x0 && rmode == 0x03)
3573 record_buf[0] = reg_rd;
3574 else
3575 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3576 }
3577 /* Floating point - conditional compare instructions. */
3578 else if (insn_bits10_11 == 0x01)
3579 {
3580 if (record_debug)
b277c936 3581 debug_printf ("FP - conditional compare");
99afc88b
OJ
3582
3583 record_buf[0] = AARCH64_CPSR_REGNUM;
3584 }
3585 /* Floating point - data processing (2-source) and
3586 conditional select instructions. */
3587 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
3588 {
3589 if (record_debug)
b277c936 3590 debug_printf ("FP - DP (2-source)");
99afc88b
OJ
3591
3592 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3593 }
3594 else if (insn_bits10_11 == 0x00)
3595 {
3596 /* Floating point - immediate instructions. */
3597 if ((insn_bits12_15 & 0x01) == 0x01
3598 || (insn_bits12_15 & 0x07) == 0x04)
3599 {
3600 if (record_debug)
b277c936 3601 debug_printf ("FP - immediate");
99afc88b
OJ
3602 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3603 }
3604 /* Floating point - compare instructions. */
3605 else if ((insn_bits12_15 & 0x03) == 0x02)
3606 {
3607 if (record_debug)
b277c936 3608 debug_printf ("FP - immediate");
99afc88b
OJ
3609 record_buf[0] = AARCH64_CPSR_REGNUM;
3610 }
3611 /* Floating point - integer conversions instructions. */
f62fce35 3612 else if (insn_bits12_15 == 0x00)
99afc88b
OJ
3613 {
3614 /* Convert float to integer instruction. */
3615 if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
3616 {
3617 if (record_debug)
b277c936 3618 debug_printf ("float to int conversion");
99afc88b
OJ
3619
3620 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3621 }
3622 /* Convert integer to float instruction. */
3623 else if ((opcode >> 1) == 0x01 && !rmode)
3624 {
3625 if (record_debug)
b277c936 3626 debug_printf ("int to float conversion");
99afc88b
OJ
3627
3628 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3629 }
3630 /* Move float to integer instruction. */
3631 else if ((opcode >> 1) == 0x03)
3632 {
3633 if (record_debug)
b277c936 3634 debug_printf ("move float to int");
99afc88b
OJ
3635
3636 if (!(opcode & 0x01))
3637 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3638 else
3639 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3640 }
f62fce35
YQ
3641 else
3642 return AARCH64_RECORD_UNKNOWN;
99afc88b 3643 }
f62fce35
YQ
3644 else
3645 return AARCH64_RECORD_UNKNOWN;
99afc88b 3646 }
f62fce35
YQ
3647 else
3648 return AARCH64_RECORD_UNKNOWN;
99afc88b
OJ
3649 }
3650 else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
3651 {
3652 if (record_debug)
b277c936 3653 debug_printf ("SIMD copy");
99afc88b
OJ
3654
3655 /* Advanced SIMD copy instructions. */
3656 if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
3657 && !bit (aarch64_insn_r->aarch64_insn, 15)
3658 && bit (aarch64_insn_r->aarch64_insn, 10))
3659 {
3660 if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
3661 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3662 else
3663 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3664 }
3665 else
3666 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3667 }
3668 /* All remaining floating point or advanced SIMD instructions. */
3669 else
3670 {
3671 if (record_debug)
b277c936 3672 debug_printf ("all remain");
99afc88b
OJ
3673
3674 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3675 }
3676
3677 if (record_debug)
b277c936 3678 debug_printf ("\n");
99afc88b
OJ
3679
3680 aarch64_insn_r->reg_rec_count++;
3681 gdb_assert (aarch64_insn_r->reg_rec_count == 1);
3682 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3683 record_buf);
3684 return AARCH64_RECORD_SUCCESS;
3685}
3686
3687/* Decodes insns type and invokes its record handler. */
3688
3689static unsigned int
3690aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
3691{
3692 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
3693
3694 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
3695 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
3696 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
3697 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
3698
3699 /* Data processing - immediate instructions. */
3700 if (!ins_bit26 && !ins_bit27 && ins_bit28)
3701 return aarch64_record_data_proc_imm (aarch64_insn_r);
3702
3703 /* Branch, exception generation and system instructions. */
3704 if (ins_bit26 && !ins_bit27 && ins_bit28)
3705 return aarch64_record_branch_except_sys (aarch64_insn_r);
3706
3707 /* Load and store instructions. */
3708 if (!ins_bit25 && ins_bit27)
3709 return aarch64_record_load_store (aarch64_insn_r);
3710
3711 /* Data processing - register instructions. */
3712 if (ins_bit25 && !ins_bit26 && ins_bit27)
3713 return aarch64_record_data_proc_reg (aarch64_insn_r);
3714
3715 /* Data processing - SIMD and floating point instructions. */
3716 if (ins_bit25 && ins_bit26 && ins_bit27)
3717 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
3718
3719 return AARCH64_RECORD_UNSUPPORTED;
3720}
3721
3722/* Cleans up local record registers and memory allocations. */
3723
3724static void
3725deallocate_reg_mem (insn_decode_record *record)
3726{
3727 xfree (record->aarch64_regs);
3728 xfree (record->aarch64_mems);
3729}
3730
3731/* Parse the current instruction and record the values of the registers and
3732 memory that will be changed in current instruction to record_arch_list
3733 return -1 if something is wrong. */
3734
3735int
3736aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
3737 CORE_ADDR insn_addr)
3738{
3739 uint32_t rec_no = 0;
3740 uint8_t insn_size = 4;
3741 uint32_t ret = 0;
99afc88b
OJ
3742 gdb_byte buf[insn_size];
3743 insn_decode_record aarch64_record;
3744
3745 memset (&buf[0], 0, insn_size);
3746 memset (&aarch64_record, 0, sizeof (insn_decode_record));
3747 target_read_memory (insn_addr, &buf[0], insn_size);
3748 aarch64_record.aarch64_insn
3749 = (uint32_t) extract_unsigned_integer (&buf[0],
3750 insn_size,
3751 gdbarch_byte_order (gdbarch));
3752 aarch64_record.regcache = regcache;
3753 aarch64_record.this_addr = insn_addr;
3754 aarch64_record.gdbarch = gdbarch;
3755
3756 ret = aarch64_record_decode_insn_handler (&aarch64_record);
3757 if (ret == AARCH64_RECORD_UNSUPPORTED)
3758 {
3759 printf_unfiltered (_("Process record does not support instruction "
3760 "0x%0x at address %s.\n"),
3761 aarch64_record.aarch64_insn,
3762 paddress (gdbarch, insn_addr));
3763 ret = -1;
3764 }
3765
3766 if (0 == ret)
3767 {
3768 /* Record registers. */
3769 record_full_arch_list_add_reg (aarch64_record.regcache,
3770 AARCH64_PC_REGNUM);
3771 /* Always record register CPSR. */
3772 record_full_arch_list_add_reg (aarch64_record.regcache,
3773 AARCH64_CPSR_REGNUM);
3774 if (aarch64_record.aarch64_regs)
3775 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
3776 if (record_full_arch_list_add_reg (aarch64_record.regcache,
3777 aarch64_record.aarch64_regs[rec_no]))
3778 ret = -1;
3779
3780 /* Record memories. */
3781 if (aarch64_record.aarch64_mems)
3782 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
3783 if (record_full_arch_list_add_mem
3784 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
3785 aarch64_record.aarch64_mems[rec_no].len))
3786 ret = -1;
3787
3788 if (record_full_arch_list_add_end ())
3789 ret = -1;
3790 }
3791
3792 deallocate_reg_mem (&aarch64_record);
3793 return ret;
3794}
This page took 0.407268 seconds and 4 git commands to generate.