S390: Hardware breakpoint support
[deliverable/binutils-gdb.git] / gdb / aarch64-tdep.c
CommitLineData
07b287a0
MS
1/* Common target dependent code for GDB on AArch64 systems.
2
618f726f 3 Copyright (C) 2009-2016 Free Software Foundation, Inc.
07b287a0
MS
4 Contributed by ARM Ltd.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21#include "defs.h"
22
23#include "frame.h"
24#include "inferior.h"
25#include "gdbcmd.h"
26#include "gdbcore.h"
07b287a0
MS
27#include "dis-asm.h"
28#include "regcache.h"
29#include "reggroups.h"
30#include "doublest.h"
31#include "value.h"
32#include "arch-utils.h"
33#include "osabi.h"
34#include "frame-unwind.h"
35#include "frame-base.h"
36#include "trad-frame.h"
37#include "objfiles.h"
38#include "dwarf2-frame.h"
39#include "gdbtypes.h"
40#include "prologue-value.h"
41#include "target-descriptions.h"
42#include "user-regs.h"
43#include "language.h"
44#include "infcall.h"
ea873d8e
PL
45#include "ax.h"
46#include "ax-gdb.h"
07b287a0
MS
47
48#include "aarch64-tdep.h"
49
50#include "elf-bfd.h"
51#include "elf/aarch64.h"
52
07b287a0
MS
53#include "vec.h"
54
99afc88b
OJ
55#include "record.h"
56#include "record-full.h"
57
07b287a0 58#include "features/aarch64.c"
07b287a0 59
787749ea
PL
60#include "arch/aarch64-insn.h"
61
f77ee802
YQ
62#include "opcode/aarch64.h"
63
/* Bit-field extraction helpers: SUBMASK builds a mask covering bits
   [0, x], BIT extracts a single bit ST, and BITS extracts the
   inclusive bit range [ST, FN].  */
#define submask(x) ((1L << ((x) + 1)) - 1)
#define bit(obj,st) (((obj) >> (st)) & 1)
#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))

/* Pseudo register base numbers.  Each bank (Q, D, S, H, B) aliases the
   32 V registers at progressively narrower widths.  */
#define AARCH64_Q0_REGNUM 0
#define AARCH64_D0_REGNUM (AARCH64_Q0_REGNUM + 32)
#define AARCH64_S0_REGNUM (AARCH64_D0_REGNUM + 32)
#define AARCH64_H0_REGNUM (AARCH64_S0_REGNUM + 32)
#define AARCH64_B0_REGNUM (AARCH64_H0_REGNUM + 32)
75/* The standard register names, and all the valid aliases for them. */
76static const struct
77{
78 const char *const name;
79 int regnum;
80} aarch64_register_aliases[] =
81{
82 /* 64-bit register names. */
83 {"fp", AARCH64_FP_REGNUM},
84 {"lr", AARCH64_LR_REGNUM},
85 {"sp", AARCH64_SP_REGNUM},
86
87 /* 32-bit register names. */
88 {"w0", AARCH64_X0_REGNUM + 0},
89 {"w1", AARCH64_X0_REGNUM + 1},
90 {"w2", AARCH64_X0_REGNUM + 2},
91 {"w3", AARCH64_X0_REGNUM + 3},
92 {"w4", AARCH64_X0_REGNUM + 4},
93 {"w5", AARCH64_X0_REGNUM + 5},
94 {"w6", AARCH64_X0_REGNUM + 6},
95 {"w7", AARCH64_X0_REGNUM + 7},
96 {"w8", AARCH64_X0_REGNUM + 8},
97 {"w9", AARCH64_X0_REGNUM + 9},
98 {"w10", AARCH64_X0_REGNUM + 10},
99 {"w11", AARCH64_X0_REGNUM + 11},
100 {"w12", AARCH64_X0_REGNUM + 12},
101 {"w13", AARCH64_X0_REGNUM + 13},
102 {"w14", AARCH64_X0_REGNUM + 14},
103 {"w15", AARCH64_X0_REGNUM + 15},
104 {"w16", AARCH64_X0_REGNUM + 16},
105 {"w17", AARCH64_X0_REGNUM + 17},
106 {"w18", AARCH64_X0_REGNUM + 18},
107 {"w19", AARCH64_X0_REGNUM + 19},
108 {"w20", AARCH64_X0_REGNUM + 20},
109 {"w21", AARCH64_X0_REGNUM + 21},
110 {"w22", AARCH64_X0_REGNUM + 22},
111 {"w23", AARCH64_X0_REGNUM + 23},
112 {"w24", AARCH64_X0_REGNUM + 24},
113 {"w25", AARCH64_X0_REGNUM + 25},
114 {"w26", AARCH64_X0_REGNUM + 26},
115 {"w27", AARCH64_X0_REGNUM + 27},
116 {"w28", AARCH64_X0_REGNUM + 28},
117 {"w29", AARCH64_X0_REGNUM + 29},
118 {"w30", AARCH64_X0_REGNUM + 30},
119
120 /* specials */
121 {"ip0", AARCH64_X0_REGNUM + 16},
122 {"ip1", AARCH64_X0_REGNUM + 17}
123};
124
/* The required core 'R' registers.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM!  */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  "pc", "cpsr"
};
140
/* The FP/SIMD 'V' registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM!  */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  "fpsr",
  "fpcr"
};
157
158/* AArch64 prologue cache structure. */
159struct aarch64_prologue_cache
160{
db634143
PL
161 /* The program counter at the start of the function. It is used to
162 identify this frame as a prologue frame. */
163 CORE_ADDR func;
164
165 /* The program counter at the time this frame was created; i.e. where
166 this function was called from. It is used to identify this frame as a
167 stub frame. */
168 CORE_ADDR prev_pc;
169
07b287a0
MS
170 /* The stack pointer at the time this frame was created; i.e. the
171 caller's stack pointer when this function was called. It is used
172 to identify this frame. */
173 CORE_ADDR prev_sp;
174
7dfa3edc
PL
175 /* Is the target available to read from? */
176 int available_p;
177
07b287a0
MS
178 /* The frame base for this frame is just prev_sp - frame size.
179 FRAMESIZE is the distance from the frame pointer to the
180 initial stack pointer. */
181 int framesize;
182
183 /* The register used to hold the frame pointer for this frame. */
184 int framereg;
185
186 /* Saved register offsets. */
187 struct trad_frame_saved_reg *saved_regs;
188};
189
07b287a0
MS
/* Callback for "show debug aarch64": print the current state of the
   AArch64 debugging flag.  */

static void
show_aarch64_debug (struct ui_file *file, int from_tty,
		    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
}
196
07b287a0
MS
197/* Analyze a prologue, looking for a recognizable stack frame
198 and frame pointer. Scan until we encounter a store that could
199 clobber the stack frame unexpectedly, or an unknown instruction. */
200
201static CORE_ADDR
202aarch64_analyze_prologue (struct gdbarch *gdbarch,
203 CORE_ADDR start, CORE_ADDR limit,
204 struct aarch64_prologue_cache *cache)
205{
206 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
207 int i;
208 pv_t regs[AARCH64_X_REGISTER_COUNT];
209 struct pv_area *stack;
210 struct cleanup *back_to;
211
212 for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
213 regs[i] = pv_register (i, 0);
214 stack = make_pv_area (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));
215 back_to = make_cleanup_free_pv_area (stack);
216
217 for (; start < limit; start += 4)
218 {
219 uint32_t insn;
d9ebcbce 220 aarch64_inst inst;
07b287a0
MS
221
222 insn = read_memory_unsigned_integer (start, 4, byte_order_for_code);
223
d9ebcbce
YQ
224 if (aarch64_decode_insn (insn, &inst, 1) != 0)
225 break;
226
227 if (inst.opcode->iclass == addsub_imm
228 && (inst.opcode->op == OP_ADD
229 || strcmp ("sub", inst.opcode->name) == 0))
07b287a0 230 {
d9ebcbce
YQ
231 unsigned rd = inst.operands[0].reg.regno;
232 unsigned rn = inst.operands[1].reg.regno;
233
234 gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
235 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
236 gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
237 gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM);
238
239 if (inst.opcode->op == OP_ADD)
240 {
241 regs[rd] = pv_add_constant (regs[rn],
242 inst.operands[2].imm.value);
243 }
244 else
245 {
246 regs[rd] = pv_add_constant (regs[rn],
247 -inst.operands[2].imm.value);
248 }
249 }
250 else if (inst.opcode->iclass == pcreladdr
251 && inst.operands[1].type == AARCH64_OPND_ADDR_ADRP)
252 {
253 gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
254 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
255
256 regs[inst.operands[0].reg.regno] = pv_unknown ();
07b287a0 257 }
d9ebcbce 258 else if (inst.opcode->iclass == branch_imm)
07b287a0
MS
259 {
260 /* Stop analysis on branch. */
261 break;
262 }
d9ebcbce 263 else if (inst.opcode->iclass == condbranch)
07b287a0
MS
264 {
265 /* Stop analysis on branch. */
266 break;
267 }
d9ebcbce 268 else if (inst.opcode->iclass == branch_reg)
07b287a0
MS
269 {
270 /* Stop analysis on branch. */
271 break;
272 }
d9ebcbce 273 else if (inst.opcode->iclass == compbranch)
07b287a0
MS
274 {
275 /* Stop analysis on branch. */
276 break;
277 }
d9ebcbce
YQ
278 else if (inst.opcode->op == OP_MOVZ)
279 {
280 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
281 regs[inst.operands[0].reg.regno] = pv_unknown ();
282 }
283 else if (inst.opcode->iclass == log_shift
284 && strcmp (inst.opcode->name, "orr") == 0)
07b287a0 285 {
d9ebcbce
YQ
286 unsigned rd = inst.operands[0].reg.regno;
287 unsigned rn = inst.operands[1].reg.regno;
288 unsigned rm = inst.operands[2].reg.regno;
289
290 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
291 gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn);
292 gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT);
293
294 if (inst.operands[2].shifter.amount == 0
295 && rn == AARCH64_SP_REGNUM)
07b287a0
MS
296 regs[rd] = regs[rm];
297 else
298 {
299 if (aarch64_debug)
b277c936
PL
300 {
301 debug_printf ("aarch64: prologue analysis gave up "
0a0da556 302 "addr=%s opcode=0x%x (orr x register)\n",
b277c936
PL
303 core_addr_to_string_nz (start), insn);
304 }
07b287a0
MS
305 break;
306 }
307 }
d9ebcbce 308 else if (inst.opcode->op == OP_STUR)
07b287a0 309 {
d9ebcbce
YQ
310 unsigned rt = inst.operands[0].reg.regno;
311 unsigned rn = inst.operands[1].addr.base_regno;
312 int is64
313 = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);
314
315 gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
316 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
317 gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
318 gdb_assert (!inst.operands[1].addr.offset.is_reg);
319
320 pv_area_store (stack, pv_add_constant (regs[rn],
321 inst.operands[1].addr.offset.imm),
07b287a0
MS
322 is64 ? 8 : 4, regs[rt]);
323 }
d9ebcbce 324 else if ((inst.opcode->iclass == ldstpair_off
03bcd739
YQ
325 || (inst.opcode->iclass == ldstpair_indexed
326 && inst.operands[2].addr.preind))
d9ebcbce 327 && strcmp ("stp", inst.opcode->name) == 0)
07b287a0 328 {
03bcd739 329 /* STP with addressing mode Pre-indexed and Base register. */
d9ebcbce
YQ
330 unsigned rt1 = inst.operands[0].reg.regno;
331 unsigned rt2 = inst.operands[1].reg.regno;
332 unsigned rn = inst.operands[2].addr.base_regno;
333 int32_t imm = inst.operands[2].addr.offset.imm;
334
335 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
336 gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2);
337 gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7);
338 gdb_assert (!inst.operands[2].addr.offset.is_reg);
339
07b287a0
MS
340 /* If recording this store would invalidate the store area
341 (perhaps because rn is not known) then we should abandon
342 further prologue analysis. */
343 if (pv_area_store_would_trash (stack,
344 pv_add_constant (regs[rn], imm)))
345 break;
346
347 if (pv_area_store_would_trash (stack,
348 pv_add_constant (regs[rn], imm + 8)))
349 break;
350
351 pv_area_store (stack, pv_add_constant (regs[rn], imm), 8,
352 regs[rt1]);
353 pv_area_store (stack, pv_add_constant (regs[rn], imm + 8), 8,
354 regs[rt2]);
14ac654f 355
d9ebcbce 356 if (inst.operands[2].addr.writeback)
93d96012 357 regs[rn] = pv_add_constant (regs[rn], imm);
07b287a0 358
07b287a0 359 }
d9ebcbce 360 else if (inst.opcode->iclass == testbranch)
07b287a0
MS
361 {
362 /* Stop analysis on branch. */
363 break;
364 }
365 else
366 {
367 if (aarch64_debug)
b277c936 368 {
0a0da556 369 debug_printf ("aarch64: prologue analysis gave up addr=%s"
b277c936
PL
370 " opcode=0x%x\n",
371 core_addr_to_string_nz (start), insn);
372 }
07b287a0
MS
373 break;
374 }
375 }
376
377 if (cache == NULL)
378 {
379 do_cleanups (back_to);
380 return start;
381 }
382
383 if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
384 {
385 /* Frame pointer is fp. Frame size is constant. */
386 cache->framereg = AARCH64_FP_REGNUM;
387 cache->framesize = -regs[AARCH64_FP_REGNUM].k;
388 }
389 else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
390 {
391 /* Try the stack pointer. */
392 cache->framesize = -regs[AARCH64_SP_REGNUM].k;
393 cache->framereg = AARCH64_SP_REGNUM;
394 }
395 else
396 {
397 /* We're just out of luck. We don't know where the frame is. */
398 cache->framereg = -1;
399 cache->framesize = 0;
400 }
401
402 for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
403 {
404 CORE_ADDR offset;
405
406 if (pv_area_find_reg (stack, gdbarch, i, &offset))
407 cache->saved_regs[i].addr = offset;
408 }
409
410 do_cleanups (back_to);
411 return start;
412}
413
414/* Implement the "skip_prologue" gdbarch method. */
415
416static CORE_ADDR
417aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
418{
07b287a0 419 CORE_ADDR func_addr, limit_pc;
07b287a0
MS
420
421 /* See if we can determine the end of the prologue via the symbol
422 table. If so, then return either PC, or the PC after the
423 prologue, whichever is greater. */
424 if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
425 {
426 CORE_ADDR post_prologue_pc
427 = skip_prologue_using_sal (gdbarch, func_addr);
428
429 if (post_prologue_pc != 0)
430 return max (pc, post_prologue_pc);
431 }
432
433 /* Can't determine prologue from the symbol table, need to examine
434 instructions. */
435
436 /* Find an upper limit on the function prologue using the debug
437 information. If the debug information could not be used to
438 provide that bound, then use an arbitrary large number as the
439 upper bound. */
440 limit_pc = skip_prologue_using_sal (gdbarch, pc);
441 if (limit_pc == 0)
442 limit_pc = pc + 128; /* Magic. */
443
444 /* Try disassembling prologue. */
445 return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
446}
447
448/* Scan the function prologue for THIS_FRAME and populate the prologue
449 cache CACHE. */
450
451static void
452aarch64_scan_prologue (struct frame_info *this_frame,
453 struct aarch64_prologue_cache *cache)
454{
455 CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
456 CORE_ADDR prologue_start;
457 CORE_ADDR prologue_end;
458 CORE_ADDR prev_pc = get_frame_pc (this_frame);
459 struct gdbarch *gdbarch = get_frame_arch (this_frame);
460
db634143
PL
461 cache->prev_pc = prev_pc;
462
07b287a0
MS
463 /* Assume we do not find a frame. */
464 cache->framereg = -1;
465 cache->framesize = 0;
466
467 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
468 &prologue_end))
469 {
470 struct symtab_and_line sal = find_pc_line (prologue_start, 0);
471
472 if (sal.line == 0)
473 {
474 /* No line info so use the current PC. */
475 prologue_end = prev_pc;
476 }
477 else if (sal.end < prologue_end)
478 {
479 /* The next line begins after the function end. */
480 prologue_end = sal.end;
481 }
482
483 prologue_end = min (prologue_end, prev_pc);
484 aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
485 }
486 else
487 {
488 CORE_ADDR frame_loc;
07b287a0
MS
489
490 frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
491 if (frame_loc == 0)
492 return;
493
494 cache->framereg = AARCH64_FP_REGNUM;
495 cache->framesize = 16;
496 cache->saved_regs[29].addr = 0;
497 cache->saved_regs[30].addr = 8;
498 }
499}
500
7dfa3edc
PL
501/* Fill in *CACHE with information about the prologue of *THIS_FRAME. This
502 function may throw an exception if the inferior's registers or memory is
503 not available. */
07b287a0 504
7dfa3edc
PL
505static void
506aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
507 struct aarch64_prologue_cache *cache)
07b287a0 508{
07b287a0
MS
509 CORE_ADDR unwound_fp;
510 int reg;
511
07b287a0
MS
512 aarch64_scan_prologue (this_frame, cache);
513
514 if (cache->framereg == -1)
7dfa3edc 515 return;
07b287a0
MS
516
517 unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
518 if (unwound_fp == 0)
7dfa3edc 519 return;
07b287a0
MS
520
521 cache->prev_sp = unwound_fp + cache->framesize;
522
523 /* Calculate actual addresses of saved registers using offsets
524 determined by aarch64_analyze_prologue. */
525 for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
526 if (trad_frame_addr_p (cache->saved_regs, reg))
527 cache->saved_regs[reg].addr += cache->prev_sp;
528
db634143
PL
529 cache->func = get_frame_func (this_frame);
530
7dfa3edc
PL
531 cache->available_p = 1;
532}
533
534/* Allocate and fill in *THIS_CACHE with information about the prologue of
535 *THIS_FRAME. Do not do this is if *THIS_CACHE was already allocated.
536 Return a pointer to the current aarch64_prologue_cache in
537 *THIS_CACHE. */
538
539static struct aarch64_prologue_cache *
540aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
541{
542 struct aarch64_prologue_cache *cache;
543
544 if (*this_cache != NULL)
9a3c8263 545 return (struct aarch64_prologue_cache *) *this_cache;
7dfa3edc
PL
546
547 cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
548 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
549 *this_cache = cache;
550
551 TRY
552 {
553 aarch64_make_prologue_cache_1 (this_frame, cache);
554 }
555 CATCH (ex, RETURN_MASK_ERROR)
556 {
557 if (ex.error != NOT_AVAILABLE_ERROR)
558 throw_exception (ex);
559 }
560 END_CATCH
561
07b287a0
MS
562 return cache;
563}
564
7dfa3edc
PL
565/* Implement the "stop_reason" frame_unwind method. */
566
567static enum unwind_stop_reason
568aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
569 void **this_cache)
570{
571 struct aarch64_prologue_cache *cache
572 = aarch64_make_prologue_cache (this_frame, this_cache);
573
574 if (!cache->available_p)
575 return UNWIND_UNAVAILABLE;
576
577 /* Halt the backtrace at "_start". */
578 if (cache->prev_pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
579 return UNWIND_OUTERMOST;
580
581 /* We've hit a wall, stop. */
582 if (cache->prev_sp == 0)
583 return UNWIND_OUTERMOST;
584
585 return UNWIND_NO_REASON;
586}
587
07b287a0
MS
588/* Our frame ID for a normal frame is the current function's starting
589 PC and the caller's SP when we were called. */
590
591static void
592aarch64_prologue_this_id (struct frame_info *this_frame,
593 void **this_cache, struct frame_id *this_id)
594{
7c8edfae
PL
595 struct aarch64_prologue_cache *cache
596 = aarch64_make_prologue_cache (this_frame, this_cache);
07b287a0 597
7dfa3edc
PL
598 if (!cache->available_p)
599 *this_id = frame_id_build_unavailable_stack (cache->func);
600 else
601 *this_id = frame_id_build (cache->prev_sp, cache->func);
07b287a0
MS
602}
603
604/* Implement the "prev_register" frame_unwind method. */
605
606static struct value *
607aarch64_prologue_prev_register (struct frame_info *this_frame,
608 void **this_cache, int prev_regnum)
609{
7c8edfae
PL
610 struct aarch64_prologue_cache *cache
611 = aarch64_make_prologue_cache (this_frame, this_cache);
07b287a0
MS
612
613 /* If we are asked to unwind the PC, then we need to return the LR
614 instead. The prologue may save PC, but it will point into this
615 frame's prologue, not the next frame's resume location. */
616 if (prev_regnum == AARCH64_PC_REGNUM)
617 {
618 CORE_ADDR lr;
619
620 lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
621 return frame_unwind_got_constant (this_frame, prev_regnum, lr);
622 }
623
624 /* SP is generally not saved to the stack, but this frame is
625 identified by the next frame's stack pointer at the time of the
626 call. The value was already reconstructed into PREV_SP. */
627 /*
628 +----------+ ^
629 | saved lr | |
630 +->| saved fp |--+
631 | | |
632 | | | <- Previous SP
633 | +----------+
634 | | saved lr |
635 +--| saved fp |<- FP
636 | |
637 | |<- SP
638 +----------+ */
639 if (prev_regnum == AARCH64_SP_REGNUM)
640 return frame_unwind_got_constant (this_frame, prev_regnum,
641 cache->prev_sp);
642
643 return trad_frame_get_prev_register (this_frame, cache->saved_regs,
644 prev_regnum);
645}
646
647/* AArch64 prologue unwinder. */
648struct frame_unwind aarch64_prologue_unwind =
649{
650 NORMAL_FRAME,
7dfa3edc 651 aarch64_prologue_frame_unwind_stop_reason,
07b287a0
MS
652 aarch64_prologue_this_id,
653 aarch64_prologue_prev_register,
654 NULL,
655 default_frame_sniffer
656};
657
8b61f75d
PL
658/* Allocate and fill in *THIS_CACHE with information about the prologue of
659 *THIS_FRAME. Do not do this is if *THIS_CACHE was already allocated.
660 Return a pointer to the current aarch64_prologue_cache in
661 *THIS_CACHE. */
07b287a0
MS
662
663static struct aarch64_prologue_cache *
8b61f75d 664aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
07b287a0 665{
07b287a0 666 struct aarch64_prologue_cache *cache;
8b61f75d
PL
667
668 if (*this_cache != NULL)
9a3c8263 669 return (struct aarch64_prologue_cache *) *this_cache;
07b287a0
MS
670
671 cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
672 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
8b61f75d 673 *this_cache = cache;
07b287a0 674
02a2a705
PL
675 TRY
676 {
677 cache->prev_sp = get_frame_register_unsigned (this_frame,
678 AARCH64_SP_REGNUM);
679 cache->prev_pc = get_frame_pc (this_frame);
680 cache->available_p = 1;
681 }
682 CATCH (ex, RETURN_MASK_ERROR)
683 {
684 if (ex.error != NOT_AVAILABLE_ERROR)
685 throw_exception (ex);
686 }
687 END_CATCH
07b287a0
MS
688
689 return cache;
690}
691
02a2a705
PL
692/* Implement the "stop_reason" frame_unwind method. */
693
694static enum unwind_stop_reason
695aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame,
696 void **this_cache)
697{
698 struct aarch64_prologue_cache *cache
699 = aarch64_make_stub_cache (this_frame, this_cache);
700
701 if (!cache->available_p)
702 return UNWIND_UNAVAILABLE;
703
704 return UNWIND_NO_REASON;
705}
706
07b287a0
MS
707/* Our frame ID for a stub frame is the current SP and LR. */
708
709static void
710aarch64_stub_this_id (struct frame_info *this_frame,
711 void **this_cache, struct frame_id *this_id)
712{
8b61f75d
PL
713 struct aarch64_prologue_cache *cache
714 = aarch64_make_stub_cache (this_frame, this_cache);
07b287a0 715
02a2a705
PL
716 if (cache->available_p)
717 *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
718 else
719 *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
07b287a0
MS
720}
721
722/* Implement the "sniffer" frame_unwind method. */
723
724static int
725aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
726 struct frame_info *this_frame,
727 void **this_prologue_cache)
728{
729 CORE_ADDR addr_in_block;
730 gdb_byte dummy[4];
731
732 addr_in_block = get_frame_address_in_block (this_frame);
3e5d3a5a 733 if (in_plt_section (addr_in_block)
07b287a0
MS
734 /* We also use the stub winder if the target memory is unreadable
735 to avoid having the prologue unwinder trying to read it. */
736 || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
737 return 1;
738
739 return 0;
740}
741
742/* AArch64 stub unwinder. */
743struct frame_unwind aarch64_stub_unwind =
744{
745 NORMAL_FRAME,
02a2a705 746 aarch64_stub_frame_unwind_stop_reason,
07b287a0
MS
747 aarch64_stub_this_id,
748 aarch64_prologue_prev_register,
749 NULL,
750 aarch64_stub_unwind_sniffer
751};
752
753/* Return the frame base address of *THIS_FRAME. */
754
755static CORE_ADDR
756aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
757{
7c8edfae
PL
758 struct aarch64_prologue_cache *cache
759 = aarch64_make_prologue_cache (this_frame, this_cache);
07b287a0
MS
760
761 return cache->prev_sp - cache->framesize;
762}
763
764/* AArch64 default frame base information. */
765struct frame_base aarch64_normal_base =
766{
767 &aarch64_prologue_unwind,
768 aarch64_normal_frame_base,
769 aarch64_normal_frame_base,
770 aarch64_normal_frame_base
771};
772
773/* Assuming THIS_FRAME is a dummy, return the frame ID of that
774 dummy frame. The frame ID's base needs to match the TOS value
775 saved by save_dummy_frame_tos () and returned from
776 aarch64_push_dummy_call, and the PC needs to match the dummy
777 frame's breakpoint. */
778
779static struct frame_id
780aarch64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
781{
782 return frame_id_build (get_frame_register_unsigned (this_frame,
783 AARCH64_SP_REGNUM),
784 get_frame_pc (this_frame));
785}
786
787/* Implement the "unwind_pc" gdbarch method. */
788
789static CORE_ADDR
790aarch64_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
791{
792 CORE_ADDR pc
793 = frame_unwind_register_unsigned (this_frame, AARCH64_PC_REGNUM);
794
795 return pc;
796}
797
798/* Implement the "unwind_sp" gdbarch method. */
799
800static CORE_ADDR
801aarch64_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
802{
803 return frame_unwind_register_unsigned (this_frame, AARCH64_SP_REGNUM);
804}
805
806/* Return the value of the REGNUM register in the previous frame of
807 *THIS_FRAME. */
808
809static struct value *
810aarch64_dwarf2_prev_register (struct frame_info *this_frame,
811 void **this_cache, int regnum)
812{
07b287a0
MS
813 CORE_ADDR lr;
814
815 switch (regnum)
816 {
817 case AARCH64_PC_REGNUM:
818 lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
819 return frame_unwind_got_constant (this_frame, regnum, lr);
820
821 default:
822 internal_error (__FILE__, __LINE__,
823 _("Unexpected register %d"), regnum);
824 }
825}
826
827/* Implement the "init_reg" dwarf2_frame_ops method. */
828
829static void
830aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
831 struct dwarf2_frame_state_reg *reg,
832 struct frame_info *this_frame)
833{
834 switch (regnum)
835 {
836 case AARCH64_PC_REGNUM:
837 reg->how = DWARF2_FRAME_REG_FN;
838 reg->loc.fn = aarch64_dwarf2_prev_register;
839 break;
840 case AARCH64_SP_REGNUM:
841 reg->how = DWARF2_FRAME_REG_CFA;
842 break;
843 }
844}
845
846/* When arguments must be pushed onto the stack, they go on in reverse
847 order. The code below implements a FILO (stack) to do this. */
848
849typedef struct
850{
c3c87445
YQ
851 /* Value to pass on stack. It can be NULL if this item is for stack
852 padding. */
7c543f7b 853 const gdb_byte *data;
07b287a0
MS
854
855 /* Size in bytes of value to pass on stack. */
856 int len;
857} stack_item_t;
858
859DEF_VEC_O (stack_item_t);
860
861/* Return the alignment (in bytes) of the given type. */
862
863static int
864aarch64_type_align (struct type *t)
865{
866 int n;
867 int align;
868 int falign;
869
870 t = check_typedef (t);
871 switch (TYPE_CODE (t))
872 {
873 default:
874 /* Should never happen. */
875 internal_error (__FILE__, __LINE__, _("unknown type alignment"));
876 return 4;
877
878 case TYPE_CODE_PTR:
879 case TYPE_CODE_ENUM:
880 case TYPE_CODE_INT:
881 case TYPE_CODE_FLT:
882 case TYPE_CODE_SET:
883 case TYPE_CODE_RANGE:
884 case TYPE_CODE_BITSTRING:
885 case TYPE_CODE_REF:
886 case TYPE_CODE_CHAR:
887 case TYPE_CODE_BOOL:
888 return TYPE_LENGTH (t);
889
890 case TYPE_CODE_ARRAY:
238f2452
YQ
891 if (TYPE_VECTOR (t))
892 {
893 /* Use the natural alignment for vector types (the same for
894 scalar type), but the maximum alignment is 128-bit. */
895 if (TYPE_LENGTH (t) > 16)
896 return 16;
897 else
898 return TYPE_LENGTH (t);
899 }
900 else
901 return aarch64_type_align (TYPE_TARGET_TYPE (t));
07b287a0
MS
902 case TYPE_CODE_COMPLEX:
903 return aarch64_type_align (TYPE_TARGET_TYPE (t));
904
905 case TYPE_CODE_STRUCT:
906 case TYPE_CODE_UNION:
907 align = 1;
908 for (n = 0; n < TYPE_NFIELDS (t); n++)
909 {
910 falign = aarch64_type_align (TYPE_FIELD_TYPE (t, n));
911 if (falign > align)
912 align = falign;
913 }
914 return align;
915 }
916}
917
cd635f74
YQ
918/* Return 1 if *TY is a homogeneous floating-point aggregate or
919 homogeneous short-vector aggregate as defined in the AAPCS64 ABI
920 document; otherwise return 0. */
07b287a0
MS
921
922static int
cd635f74 923is_hfa_or_hva (struct type *ty)
07b287a0
MS
924{
925 switch (TYPE_CODE (ty))
926 {
927 case TYPE_CODE_ARRAY:
928 {
929 struct type *target_ty = TYPE_TARGET_TYPE (ty);
238f2452
YQ
930
931 if (TYPE_VECTOR (ty))
932 return 0;
933
cd635f74
YQ
934 if (TYPE_LENGTH (ty) <= 4 /* HFA or HVA has at most 4 members. */
935 && (TYPE_CODE (target_ty) == TYPE_CODE_FLT /* HFA */
936 || (TYPE_CODE (target_ty) == TYPE_CODE_ARRAY /* HVA */
937 && TYPE_VECTOR (target_ty))))
07b287a0
MS
938 return 1;
939 break;
940 }
941
942 case TYPE_CODE_UNION:
943 case TYPE_CODE_STRUCT:
944 {
cd635f74 945 /* HFA or HVA has at most four members. */
07b287a0
MS
946 if (TYPE_NFIELDS (ty) > 0 && TYPE_NFIELDS (ty) <= 4)
947 {
948 struct type *member0_type;
949
950 member0_type = check_typedef (TYPE_FIELD_TYPE (ty, 0));
cd635f74
YQ
951 if (TYPE_CODE (member0_type) == TYPE_CODE_FLT
952 || (TYPE_CODE (member0_type) == TYPE_CODE_ARRAY
953 && TYPE_VECTOR (member0_type)))
07b287a0
MS
954 {
955 int i;
956
957 for (i = 0; i < TYPE_NFIELDS (ty); i++)
958 {
959 struct type *member1_type;
960
961 member1_type = check_typedef (TYPE_FIELD_TYPE (ty, i));
962 if (TYPE_CODE (member0_type) != TYPE_CODE (member1_type)
963 || (TYPE_LENGTH (member0_type)
964 != TYPE_LENGTH (member1_type)))
965 return 0;
966 }
967 return 1;
968 }
969 }
970 return 0;
971 }
972
973 default:
974 break;
975 }
976
977 return 0;
978}
979
980/* AArch64 function call information structure. */
981struct aarch64_call_info
982{
983 /* the current argument number. */
984 unsigned argnum;
985
986 /* The next general purpose register number, equivalent to NGRN as
987 described in the AArch64 Procedure Call Standard. */
988 unsigned ngrn;
989
990 /* The next SIMD and floating point register number, equivalent to
991 NSRN as described in the AArch64 Procedure Call Standard. */
992 unsigned nsrn;
993
994 /* The next stacked argument address, equivalent to NSAA as
995 described in the AArch64 Procedure Call Standard. */
996 unsigned nsaa;
997
998 /* Stack item vector. */
999 VEC(stack_item_t) *si;
1000};
1001
1002/* Pass a value in a sequence of consecutive X registers. The caller
1003 is responsbile for ensuring sufficient registers are available. */
1004
1005static void
1006pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
1007 struct aarch64_call_info *info, struct type *type,
8e80f9d1 1008 struct value *arg)
07b287a0
MS
1009{
1010 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1011 int len = TYPE_LENGTH (type);
1012 enum type_code typecode = TYPE_CODE (type);
1013 int regnum = AARCH64_X0_REGNUM + info->ngrn;
8e80f9d1 1014 const bfd_byte *buf = value_contents (arg);
07b287a0
MS
1015
1016 info->argnum++;
1017
1018 while (len > 0)
1019 {
1020 int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
1021 CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
1022 byte_order);
1023
1024
1025 /* Adjust sub-word struct/union args when big-endian. */
1026 if (byte_order == BFD_ENDIAN_BIG
1027 && partial_len < X_REGISTER_SIZE
1028 && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
1029 regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);
1030
1031 if (aarch64_debug)
b277c936
PL
1032 {
1033 debug_printf ("arg %d in %s = 0x%s\n", info->argnum,
1034 gdbarch_register_name (gdbarch, regnum),
1035 phex (regval, X_REGISTER_SIZE));
1036 }
07b287a0
MS
1037 regcache_cooked_write_unsigned (regcache, regnum, regval);
1038 len -= partial_len;
1039 buf += partial_len;
1040 regnum++;
1041 }
1042}
1043
1044/* Attempt to marshall a value in a V register. Return 1 if
1045 successful, or 0 if insufficient registers are available. This
1046 function, unlike the equivalent pass_in_x() function does not
1047 handle arguments spread across multiple registers. */
1048
1049static int
1050pass_in_v (struct gdbarch *gdbarch,
1051 struct regcache *regcache,
1052 struct aarch64_call_info *info,
0735fddd 1053 int len, const bfd_byte *buf)
07b287a0
MS
1054{
1055 if (info->nsrn < 8)
1056 {
07b287a0 1057 int regnum = AARCH64_V0_REGNUM + info->nsrn;
0735fddd 1058 gdb_byte reg[V_REGISTER_SIZE];
07b287a0
MS
1059
1060 info->argnum++;
1061 info->nsrn++;
1062
0735fddd
YQ
1063 memset (reg, 0, sizeof (reg));
1064 /* PCS C.1, the argument is allocated to the least significant
1065 bits of V register. */
1066 memcpy (reg, buf, len);
1067 regcache_cooked_write (regcache, regnum, reg);
1068
07b287a0 1069 if (aarch64_debug)
b277c936
PL
1070 {
1071 debug_printf ("arg %d in %s\n", info->argnum,
1072 gdbarch_register_name (gdbarch, regnum));
1073 }
07b287a0
MS
1074 return 1;
1075 }
1076 info->nsrn = 8;
1077 return 0;
1078}
1079
1080/* Marshall an argument onto the stack. */
1081
1082static void
1083pass_on_stack (struct aarch64_call_info *info, struct type *type,
8e80f9d1 1084 struct value *arg)
07b287a0 1085{
8e80f9d1 1086 const bfd_byte *buf = value_contents (arg);
07b287a0
MS
1087 int len = TYPE_LENGTH (type);
1088 int align;
1089 stack_item_t item;
1090
1091 info->argnum++;
1092
1093 align = aarch64_type_align (type);
1094
1095 /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
1096 Natural alignment of the argument's type. */
1097 align = align_up (align, 8);
1098
1099 /* The AArch64 PCS requires at most doubleword alignment. */
1100 if (align > 16)
1101 align = 16;
1102
1103 if (aarch64_debug)
b277c936
PL
1104 {
1105 debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
1106 info->nsaa);
1107 }
07b287a0
MS
1108
1109 item.len = len;
1110 item.data = buf;
1111 VEC_safe_push (stack_item_t, info->si, &item);
1112
1113 info->nsaa += len;
1114 if (info->nsaa & (align - 1))
1115 {
1116 /* Push stack alignment padding. */
1117 int pad = align - (info->nsaa & (align - 1));
1118
1119 item.len = pad;
c3c87445 1120 item.data = NULL;
07b287a0
MS
1121
1122 VEC_safe_push (stack_item_t, info->si, &item);
1123 info->nsaa += pad;
1124 }
1125}
1126
1127/* Marshall an argument into a sequence of one or more consecutive X
1128 registers or, if insufficient X registers are available then onto
1129 the stack. */
1130
1131static void
1132pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
1133 struct aarch64_call_info *info, struct type *type,
8e80f9d1 1134 struct value *arg)
07b287a0
MS
1135{
1136 int len = TYPE_LENGTH (type);
1137 int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;
1138
1139 /* PCS C.13 - Pass in registers if we have enough spare */
1140 if (info->ngrn + nregs <= 8)
1141 {
8e80f9d1 1142 pass_in_x (gdbarch, regcache, info, type, arg);
07b287a0
MS
1143 info->ngrn += nregs;
1144 }
1145 else
1146 {
1147 info->ngrn = 8;
8e80f9d1 1148 pass_on_stack (info, type, arg);
07b287a0
MS
1149 }
1150}
1151
/* Pass a value in a V register, or on the stack if insufficient are
   available.  */

static void
pass_in_v_or_stack (struct gdbarch *gdbarch,
		    struct regcache *regcache,
		    struct aarch64_call_info *info,
		    struct type *type,
		    struct value *arg)
{
  int in_reg = pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (type),
			  value_contents (arg));

  if (!in_reg)
    pass_on_stack (info, type, arg);
}
1166
1167/* Implement the "push_dummy_call" gdbarch method. */
1168
1169static CORE_ADDR
1170aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
1171 struct regcache *regcache, CORE_ADDR bp_addr,
1172 int nargs,
1173 struct value **args, CORE_ADDR sp, int struct_return,
1174 CORE_ADDR struct_addr)
1175{
07b287a0 1176 int argnum;
07b287a0
MS
1177 struct aarch64_call_info info;
1178 struct type *func_type;
1179 struct type *return_type;
1180 int lang_struct_return;
1181
1182 memset (&info, 0, sizeof (info));
1183
1184 /* We need to know what the type of the called function is in order
1185 to determine the number of named/anonymous arguments for the
1186 actual argument placement, and the return type in order to handle
1187 return value correctly.
1188
1189 The generic code above us views the decision of return in memory
1190 or return in registers as a two stage processes. The language
1191 handler is consulted first and may decide to return in memory (eg
1192 class with copy constructor returned by value), this will cause
1193 the generic code to allocate space AND insert an initial leading
1194 argument.
1195
1196 If the language code does not decide to pass in memory then the
1197 target code is consulted.
1198
1199 If the language code decides to pass in memory we want to move
1200 the pointer inserted as the initial argument from the argument
1201 list and into X8, the conventional AArch64 struct return pointer
1202 register.
1203
1204 This is slightly awkward, ideally the flag "lang_struct_return"
1205 would be passed to the targets implementation of push_dummy_call.
1206 Rather that change the target interface we call the language code
1207 directly ourselves. */
1208
1209 func_type = check_typedef (value_type (function));
1210
1211 /* Dereference function pointer types. */
1212 if (TYPE_CODE (func_type) == TYPE_CODE_PTR)
1213 func_type = TYPE_TARGET_TYPE (func_type);
1214
1215 gdb_assert (TYPE_CODE (func_type) == TYPE_CODE_FUNC
1216 || TYPE_CODE (func_type) == TYPE_CODE_METHOD);
1217
1218 /* If language_pass_by_reference () returned true we will have been
1219 given an additional initial argument, a hidden pointer to the
1220 return slot in memory. */
1221 return_type = TYPE_TARGET_TYPE (func_type);
1222 lang_struct_return = language_pass_by_reference (return_type);
1223
1224 /* Set the return address. For the AArch64, the return breakpoint
1225 is always at BP_ADDR. */
1226 regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);
1227
1228 /* If we were given an initial argument for the return slot because
1229 lang_struct_return was true, lose it. */
1230 if (lang_struct_return)
1231 {
1232 args++;
1233 nargs--;
1234 }
1235
1236 /* The struct_return pointer occupies X8. */
1237 if (struct_return || lang_struct_return)
1238 {
1239 if (aarch64_debug)
b277c936
PL
1240 {
1241 debug_printf ("struct return in %s = 0x%s\n",
1242 gdbarch_register_name (gdbarch,
1243 AARCH64_STRUCT_RETURN_REGNUM),
1244 paddress (gdbarch, struct_addr));
1245 }
07b287a0
MS
1246 regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
1247 struct_addr);
1248 }
1249
1250 for (argnum = 0; argnum < nargs; argnum++)
1251 {
1252 struct value *arg = args[argnum];
1253 struct type *arg_type;
1254 int len;
1255
1256 arg_type = check_typedef (value_type (arg));
1257 len = TYPE_LENGTH (arg_type);
1258
1259 switch (TYPE_CODE (arg_type))
1260 {
1261 case TYPE_CODE_INT:
1262 case TYPE_CODE_BOOL:
1263 case TYPE_CODE_CHAR:
1264 case TYPE_CODE_RANGE:
1265 case TYPE_CODE_ENUM:
1266 if (len < 4)
1267 {
1268 /* Promote to 32 bit integer. */
1269 if (TYPE_UNSIGNED (arg_type))
1270 arg_type = builtin_type (gdbarch)->builtin_uint32;
1271 else
1272 arg_type = builtin_type (gdbarch)->builtin_int32;
1273 arg = value_cast (arg_type, arg);
1274 }
8e80f9d1 1275 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
07b287a0
MS
1276 break;
1277
1278 case TYPE_CODE_COMPLEX:
1279 if (info.nsrn <= 6)
1280 {
1281 const bfd_byte *buf = value_contents (arg);
1282 struct type *target_type =
1283 check_typedef (TYPE_TARGET_TYPE (arg_type));
1284
07b287a0 1285 pass_in_v (gdbarch, regcache, &info,
0735fddd
YQ
1286 TYPE_LENGTH (target_type), buf);
1287 pass_in_v (gdbarch, regcache, &info,
1288 TYPE_LENGTH (target_type),
07b287a0
MS
1289 buf + TYPE_LENGTH (target_type));
1290 }
1291 else
1292 {
1293 info.nsrn = 8;
8e80f9d1 1294 pass_on_stack (&info, arg_type, arg);
07b287a0
MS
1295 }
1296 break;
1297 case TYPE_CODE_FLT:
8e80f9d1 1298 pass_in_v_or_stack (gdbarch, regcache, &info, arg_type, arg);
07b287a0
MS
1299 break;
1300
1301 case TYPE_CODE_STRUCT:
1302 case TYPE_CODE_ARRAY:
1303 case TYPE_CODE_UNION:
cd635f74 1304 if (is_hfa_or_hva (arg_type))
07b287a0
MS
1305 {
1306 int elements = TYPE_NFIELDS (arg_type);
1307
1308 /* Homogeneous Aggregates */
1309 if (info.nsrn + elements < 8)
1310 {
1311 int i;
1312
1313 for (i = 0; i < elements; i++)
1314 {
1315 /* We know that we have sufficient registers
1316 available therefore this will never fallback
1317 to the stack. */
1318 struct value *field =
1319 value_primitive_field (arg, 0, i, arg_type);
1320 struct type *field_type =
1321 check_typedef (value_type (field));
1322
8e80f9d1
YQ
1323 pass_in_v_or_stack (gdbarch, regcache, &info,
1324 field_type, field);
07b287a0
MS
1325 }
1326 }
1327 else
1328 {
1329 info.nsrn = 8;
8e80f9d1 1330 pass_on_stack (&info, arg_type, arg);
07b287a0
MS
1331 }
1332 }
238f2452
YQ
1333 else if (TYPE_CODE (arg_type) == TYPE_CODE_ARRAY
1334 && TYPE_VECTOR (arg_type) && (len == 16 || len == 8))
1335 {
1336 /* Short vector types are passed in V registers. */
1337 pass_in_v_or_stack (gdbarch, regcache, &info, arg_type, arg);
1338 }
07b287a0
MS
1339 else if (len > 16)
1340 {
1341 /* PCS B.7 Aggregates larger than 16 bytes are passed by
1342 invisible reference. */
1343
1344 /* Allocate aligned storage. */
1345 sp = align_down (sp - len, 16);
1346
1347 /* Write the real data into the stack. */
1348 write_memory (sp, value_contents (arg), len);
1349
1350 /* Construct the indirection. */
1351 arg_type = lookup_pointer_type (arg_type);
1352 arg = value_from_pointer (arg_type, sp);
8e80f9d1 1353 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
07b287a0
MS
1354 }
1355 else
1356 /* PCS C.15 / C.18 multiple values pass. */
8e80f9d1 1357 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
07b287a0
MS
1358 break;
1359
1360 default:
8e80f9d1 1361 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
07b287a0
MS
1362 break;
1363 }
1364 }
1365
1366 /* Make sure stack retains 16 byte alignment. */
1367 if (info.nsaa & 15)
1368 sp -= 16 - (info.nsaa & 15);
1369
1370 while (!VEC_empty (stack_item_t, info.si))
1371 {
1372 stack_item_t *si = VEC_last (stack_item_t, info.si);
1373
1374 sp -= si->len;
c3c87445
YQ
1375 if (si->data != NULL)
1376 write_memory (sp, si->data, si->len);
07b287a0
MS
1377 VEC_pop (stack_item_t, info.si);
1378 }
1379
1380 VEC_free (stack_item_t, info.si);
1381
1382 /* Finally, update the SP register. */
1383 regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);
1384
1385 return sp;
1386}
1387
1388/* Implement the "frame_align" gdbarch method. */
1389
1390static CORE_ADDR
1391aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
1392{
1393 /* Align the stack to sixteen bytes. */
1394 return sp & ~(CORE_ADDR) 15;
1395}
1396
1397/* Return the type for an AdvSISD Q register. */
1398
1399static struct type *
1400aarch64_vnq_type (struct gdbarch *gdbarch)
1401{
1402 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1403
1404 if (tdep->vnq_type == NULL)
1405 {
1406 struct type *t;
1407 struct type *elem;
1408
1409 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
1410 TYPE_CODE_UNION);
1411
1412 elem = builtin_type (gdbarch)->builtin_uint128;
1413 append_composite_type_field (t, "u", elem);
1414
1415 elem = builtin_type (gdbarch)->builtin_int128;
1416 append_composite_type_field (t, "s", elem);
1417
1418 tdep->vnq_type = t;
1419 }
1420
1421 return tdep->vnq_type;
1422}
1423
1424/* Return the type for an AdvSISD D register. */
1425
1426static struct type *
1427aarch64_vnd_type (struct gdbarch *gdbarch)
1428{
1429 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1430
1431 if (tdep->vnd_type == NULL)
1432 {
1433 struct type *t;
1434 struct type *elem;
1435
1436 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
1437 TYPE_CODE_UNION);
1438
1439 elem = builtin_type (gdbarch)->builtin_double;
1440 append_composite_type_field (t, "f", elem);
1441
1442 elem = builtin_type (gdbarch)->builtin_uint64;
1443 append_composite_type_field (t, "u", elem);
1444
1445 elem = builtin_type (gdbarch)->builtin_int64;
1446 append_composite_type_field (t, "s", elem);
1447
1448 tdep->vnd_type = t;
1449 }
1450
1451 return tdep->vnd_type;
1452}
1453
1454/* Return the type for an AdvSISD S register. */
1455
1456static struct type *
1457aarch64_vns_type (struct gdbarch *gdbarch)
1458{
1459 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1460
1461 if (tdep->vns_type == NULL)
1462 {
1463 struct type *t;
1464 struct type *elem;
1465
1466 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
1467 TYPE_CODE_UNION);
1468
1469 elem = builtin_type (gdbarch)->builtin_float;
1470 append_composite_type_field (t, "f", elem);
1471
1472 elem = builtin_type (gdbarch)->builtin_uint32;
1473 append_composite_type_field (t, "u", elem);
1474
1475 elem = builtin_type (gdbarch)->builtin_int32;
1476 append_composite_type_field (t, "s", elem);
1477
1478 tdep->vns_type = t;
1479 }
1480
1481 return tdep->vns_type;
1482}
1483
1484/* Return the type for an AdvSISD H register. */
1485
1486static struct type *
1487aarch64_vnh_type (struct gdbarch *gdbarch)
1488{
1489 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1490
1491 if (tdep->vnh_type == NULL)
1492 {
1493 struct type *t;
1494 struct type *elem;
1495
1496 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
1497 TYPE_CODE_UNION);
1498
1499 elem = builtin_type (gdbarch)->builtin_uint16;
1500 append_composite_type_field (t, "u", elem);
1501
1502 elem = builtin_type (gdbarch)->builtin_int16;
1503 append_composite_type_field (t, "s", elem);
1504
1505 tdep->vnh_type = t;
1506 }
1507
1508 return tdep->vnh_type;
1509}
1510
1511/* Return the type for an AdvSISD B register. */
1512
1513static struct type *
1514aarch64_vnb_type (struct gdbarch *gdbarch)
1515{
1516 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1517
1518 if (tdep->vnb_type == NULL)
1519 {
1520 struct type *t;
1521 struct type *elem;
1522
1523 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
1524 TYPE_CODE_UNION);
1525
1526 elem = builtin_type (gdbarch)->builtin_uint8;
1527 append_composite_type_field (t, "u", elem);
1528
1529 elem = builtin_type (gdbarch)->builtin_int8;
1530 append_composite_type_field (t, "s", elem);
1531
1532 tdep->vnb_type = t;
1533 }
1534
1535 return tdep->vnb_type;
1536}
1537
1538/* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
1539
1540static int
1541aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
1542{
1543 if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
1544 return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;
1545
1546 if (reg == AARCH64_DWARF_SP)
1547 return AARCH64_SP_REGNUM;
1548
1549 if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
1550 return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;
1551
1552 return -1;
1553}
1554\f
1555
1556/* Implement the "print_insn" gdbarch method. */
1557
1558static int
1559aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
1560{
1561 info->symbols = NULL;
1562 return print_insn_aarch64 (memaddr, info);
1563}
1564
1565/* AArch64 BRK software debug mode instruction.
1566 Note that AArch64 code is always little-endian.
1567 1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000. */
948f8e3d 1568static const gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
07b287a0
MS
1569
1570/* Implement the "breakpoint_from_pc" gdbarch method. */
1571
948f8e3d 1572static const gdb_byte *
07b287a0
MS
1573aarch64_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr,
1574 int *lenptr)
1575{
07b287a0
MS
1576 *lenptr = sizeof (aarch64_default_breakpoint);
1577 return aarch64_default_breakpoint;
1578}
1579
1580/* Extract from an array REGS containing the (raw) register state a
1581 function return value of type TYPE, and copy that, in virtual
1582 format, into VALBUF. */
1583
1584static void
1585aarch64_extract_return_value (struct type *type, struct regcache *regs,
1586 gdb_byte *valbuf)
1587{
1588 struct gdbarch *gdbarch = get_regcache_arch (regs);
1589 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1590
1591 if (TYPE_CODE (type) == TYPE_CODE_FLT)
1592 {
1593 bfd_byte buf[V_REGISTER_SIZE];
1594 int len = TYPE_LENGTH (type);
1595
1596 regcache_cooked_read (regs, AARCH64_V0_REGNUM, buf);
1597 memcpy (valbuf, buf, len);
1598 }
1599 else if (TYPE_CODE (type) == TYPE_CODE_INT
1600 || TYPE_CODE (type) == TYPE_CODE_CHAR
1601 || TYPE_CODE (type) == TYPE_CODE_BOOL
1602 || TYPE_CODE (type) == TYPE_CODE_PTR
1603 || TYPE_CODE (type) == TYPE_CODE_REF
1604 || TYPE_CODE (type) == TYPE_CODE_ENUM)
1605 {
1606 /* If the the type is a plain integer, then the access is
1607 straight-forward. Otherwise we have to play around a bit
1608 more. */
1609 int len = TYPE_LENGTH (type);
1610 int regno = AARCH64_X0_REGNUM;
1611 ULONGEST tmp;
1612
1613 while (len > 0)
1614 {
1615 /* By using store_unsigned_integer we avoid having to do
1616 anything special for small big-endian values. */
1617 regcache_cooked_read_unsigned (regs, regno++, &tmp);
1618 store_unsigned_integer (valbuf,
1619 (len > X_REGISTER_SIZE
1620 ? X_REGISTER_SIZE : len), byte_order, tmp);
1621 len -= X_REGISTER_SIZE;
1622 valbuf += X_REGISTER_SIZE;
1623 }
1624 }
1625 else if (TYPE_CODE (type) == TYPE_CODE_COMPLEX)
1626 {
1627 int regno = AARCH64_V0_REGNUM;
1628 bfd_byte buf[V_REGISTER_SIZE];
1629 struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
1630 int len = TYPE_LENGTH (target_type);
1631
1632 regcache_cooked_read (regs, regno, buf);
1633 memcpy (valbuf, buf, len);
1634 valbuf += len;
1635 regcache_cooked_read (regs, regno + 1, buf);
1636 memcpy (valbuf, buf, len);
1637 valbuf += len;
1638 }
cd635f74 1639 else if (is_hfa_or_hva (type))
07b287a0
MS
1640 {
1641 int elements = TYPE_NFIELDS (type);
1642 struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
1643 int len = TYPE_LENGTH (member_type);
1644 int i;
1645
1646 for (i = 0; i < elements; i++)
1647 {
1648 int regno = AARCH64_V0_REGNUM + i;
db3516bb 1649 bfd_byte buf[V_REGISTER_SIZE];
07b287a0
MS
1650
1651 if (aarch64_debug)
b277c936 1652 {
cd635f74 1653 debug_printf ("read HFA or HVA return value element %d from %s\n",
b277c936
PL
1654 i + 1,
1655 gdbarch_register_name (gdbarch, regno));
1656 }
07b287a0
MS
1657 regcache_cooked_read (regs, regno, buf);
1658
1659 memcpy (valbuf, buf, len);
1660 valbuf += len;
1661 }
1662 }
238f2452
YQ
1663 else if (TYPE_CODE (type) == TYPE_CODE_ARRAY && TYPE_VECTOR (type)
1664 && (TYPE_LENGTH (type) == 16 || TYPE_LENGTH (type) == 8))
1665 {
1666 /* Short vector is returned in V register. */
1667 gdb_byte buf[V_REGISTER_SIZE];
1668
1669 regcache_cooked_read (regs, AARCH64_V0_REGNUM, buf);
1670 memcpy (valbuf, buf, TYPE_LENGTH (type));
1671 }
07b287a0
MS
1672 else
1673 {
1674 /* For a structure or union the behaviour is as if the value had
1675 been stored to word-aligned memory and then loaded into
1676 registers with 64-bit load instruction(s). */
1677 int len = TYPE_LENGTH (type);
1678 int regno = AARCH64_X0_REGNUM;
1679 bfd_byte buf[X_REGISTER_SIZE];
1680
1681 while (len > 0)
1682 {
1683 regcache_cooked_read (regs, regno++, buf);
1684 memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
1685 len -= X_REGISTER_SIZE;
1686 valbuf += X_REGISTER_SIZE;
1687 }
1688 }
1689}
1690
1691
/* Will a function return an aggregate type in memory or in a
   register?  Return 0 if an aggregate type can be returned in a
   register, 1 if it must be returned in memory.  */

static int
aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
{
  type = check_typedef (type);

  /* v0-v7 are used to return values and one register is allocated
     for one member.  However, HFA or HVA has at most four members,
     so such an aggregate always fits in registers.  */
  if (is_hfa_or_hva (type))
    return 0;

  /* PCS B.6 Aggregates larger than 16 bytes are passed by invisible
     reference.  */
  if (TYPE_LENGTH (type) > 16)
    return 1;

  return 0;
}
1718
1719/* Write into appropriate registers a function return value of type
1720 TYPE, given in virtual format. */
1721
1722static void
1723aarch64_store_return_value (struct type *type, struct regcache *regs,
1724 const gdb_byte *valbuf)
1725{
1726 struct gdbarch *gdbarch = get_regcache_arch (regs);
1727 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1728
1729 if (TYPE_CODE (type) == TYPE_CODE_FLT)
1730 {
1731 bfd_byte buf[V_REGISTER_SIZE];
1732 int len = TYPE_LENGTH (type);
1733
1734 memcpy (buf, valbuf, len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
1735 regcache_cooked_write (regs, AARCH64_V0_REGNUM, buf);
1736 }
1737 else if (TYPE_CODE (type) == TYPE_CODE_INT
1738 || TYPE_CODE (type) == TYPE_CODE_CHAR
1739 || TYPE_CODE (type) == TYPE_CODE_BOOL
1740 || TYPE_CODE (type) == TYPE_CODE_PTR
1741 || TYPE_CODE (type) == TYPE_CODE_REF
1742 || TYPE_CODE (type) == TYPE_CODE_ENUM)
1743 {
1744 if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
1745 {
1746 /* Values of one word or less are zero/sign-extended and
1747 returned in r0. */
1748 bfd_byte tmpbuf[X_REGISTER_SIZE];
1749 LONGEST val = unpack_long (type, valbuf);
1750
1751 store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
1752 regcache_cooked_write (regs, AARCH64_X0_REGNUM, tmpbuf);
1753 }
1754 else
1755 {
1756 /* Integral values greater than one word are stored in
1757 consecutive registers starting with r0. This will always
1758 be a multiple of the regiser size. */
1759 int len = TYPE_LENGTH (type);
1760 int regno = AARCH64_X0_REGNUM;
1761
1762 while (len > 0)
1763 {
1764 regcache_cooked_write (regs, regno++, valbuf);
1765 len -= X_REGISTER_SIZE;
1766 valbuf += X_REGISTER_SIZE;
1767 }
1768 }
1769 }
cd635f74 1770 else if (is_hfa_or_hva (type))
07b287a0
MS
1771 {
1772 int elements = TYPE_NFIELDS (type);
1773 struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
1774 int len = TYPE_LENGTH (member_type);
1775 int i;
1776
1777 for (i = 0; i < elements; i++)
1778 {
1779 int regno = AARCH64_V0_REGNUM + i;
1780 bfd_byte tmpbuf[MAX_REGISTER_SIZE];
1781
1782 if (aarch64_debug)
b277c936 1783 {
cd635f74 1784 debug_printf ("write HFA or HVA return value element %d to %s\n",
b277c936
PL
1785 i + 1,
1786 gdbarch_register_name (gdbarch, regno));
1787 }
07b287a0
MS
1788
1789 memcpy (tmpbuf, valbuf, len);
1790 regcache_cooked_write (regs, regno, tmpbuf);
1791 valbuf += len;
1792 }
1793 }
238f2452
YQ
1794 else if (TYPE_CODE (type) == TYPE_CODE_ARRAY && TYPE_VECTOR (type)
1795 && (TYPE_LENGTH (type) == 8 || TYPE_LENGTH (type) == 16))
1796 {
1797 /* Short vector. */
1798 gdb_byte buf[V_REGISTER_SIZE];
1799
1800 memcpy (buf, valbuf, TYPE_LENGTH (type));
1801 regcache_cooked_write (regs, AARCH64_V0_REGNUM, buf);
1802 }
07b287a0
MS
1803 else
1804 {
1805 /* For a structure or union the behaviour is as if the value had
1806 been stored to word-aligned memory and then loaded into
1807 registers with 64-bit load instruction(s). */
1808 int len = TYPE_LENGTH (type);
1809 int regno = AARCH64_X0_REGNUM;
1810 bfd_byte tmpbuf[X_REGISTER_SIZE];
1811
1812 while (len > 0)
1813 {
1814 memcpy (tmpbuf, valbuf,
1815 len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
1816 regcache_cooked_write (regs, regno++, tmpbuf);
1817 len -= X_REGISTER_SIZE;
1818 valbuf += X_REGISTER_SIZE;
1819 }
1820 }
1821}
1822
1823/* Implement the "return_value" gdbarch method. */
1824
1825static enum return_value_convention
1826aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
1827 struct type *valtype, struct regcache *regcache,
1828 gdb_byte *readbuf, const gdb_byte *writebuf)
1829{
07b287a0
MS
1830
1831 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
1832 || TYPE_CODE (valtype) == TYPE_CODE_UNION
1833 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
1834 {
1835 if (aarch64_return_in_memory (gdbarch, valtype))
1836 {
1837 if (aarch64_debug)
b277c936 1838 debug_printf ("return value in memory\n");
07b287a0
MS
1839 return RETURN_VALUE_STRUCT_CONVENTION;
1840 }
1841 }
1842
1843 if (writebuf)
1844 aarch64_store_return_value (valtype, regcache, writebuf);
1845
1846 if (readbuf)
1847 aarch64_extract_return_value (valtype, regcache, readbuf);
1848
1849 if (aarch64_debug)
b277c936 1850 debug_printf ("return value in registers\n");
07b287a0
MS
1851
1852 return RETURN_VALUE_REGISTER_CONVENTION;
1853}
1854
1855/* Implement the "get_longjmp_target" gdbarch method. */
1856
1857static int
1858aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
1859{
1860 CORE_ADDR jb_addr;
1861 gdb_byte buf[X_REGISTER_SIZE];
1862 struct gdbarch *gdbarch = get_frame_arch (frame);
1863 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1864 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1865
1866 jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
1867
1868 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
1869 X_REGISTER_SIZE))
1870 return 0;
1871
1872 *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
1873 return 1;
1874}
ea873d8e
PL
1875
1876/* Implement the "gen_return_address" gdbarch method. */
1877
1878static void
1879aarch64_gen_return_address (struct gdbarch *gdbarch,
1880 struct agent_expr *ax, struct axs_value *value,
1881 CORE_ADDR scope)
1882{
1883 value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
1884 value->kind = axs_lvalue_register;
1885 value->u.reg = AARCH64_LR_REGNUM;
1886}
07b287a0
MS
1887\f
1888
1889/* Return the pseudo register name corresponding to register regnum. */
1890
1891static const char *
1892aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
1893{
1894 static const char *const q_name[] =
1895 {
1896 "q0", "q1", "q2", "q3",
1897 "q4", "q5", "q6", "q7",
1898 "q8", "q9", "q10", "q11",
1899 "q12", "q13", "q14", "q15",
1900 "q16", "q17", "q18", "q19",
1901 "q20", "q21", "q22", "q23",
1902 "q24", "q25", "q26", "q27",
1903 "q28", "q29", "q30", "q31",
1904 };
1905
1906 static const char *const d_name[] =
1907 {
1908 "d0", "d1", "d2", "d3",
1909 "d4", "d5", "d6", "d7",
1910 "d8", "d9", "d10", "d11",
1911 "d12", "d13", "d14", "d15",
1912 "d16", "d17", "d18", "d19",
1913 "d20", "d21", "d22", "d23",
1914 "d24", "d25", "d26", "d27",
1915 "d28", "d29", "d30", "d31",
1916 };
1917
1918 static const char *const s_name[] =
1919 {
1920 "s0", "s1", "s2", "s3",
1921 "s4", "s5", "s6", "s7",
1922 "s8", "s9", "s10", "s11",
1923 "s12", "s13", "s14", "s15",
1924 "s16", "s17", "s18", "s19",
1925 "s20", "s21", "s22", "s23",
1926 "s24", "s25", "s26", "s27",
1927 "s28", "s29", "s30", "s31",
1928 };
1929
1930 static const char *const h_name[] =
1931 {
1932 "h0", "h1", "h2", "h3",
1933 "h4", "h5", "h6", "h7",
1934 "h8", "h9", "h10", "h11",
1935 "h12", "h13", "h14", "h15",
1936 "h16", "h17", "h18", "h19",
1937 "h20", "h21", "h22", "h23",
1938 "h24", "h25", "h26", "h27",
1939 "h28", "h29", "h30", "h31",
1940 };
1941
1942 static const char *const b_name[] =
1943 {
1944 "b0", "b1", "b2", "b3",
1945 "b4", "b5", "b6", "b7",
1946 "b8", "b9", "b10", "b11",
1947 "b12", "b13", "b14", "b15",
1948 "b16", "b17", "b18", "b19",
1949 "b20", "b21", "b22", "b23",
1950 "b24", "b25", "b26", "b27",
1951 "b28", "b29", "b30", "b31",
1952 };
1953
1954 regnum -= gdbarch_num_regs (gdbarch);
1955
1956 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
1957 return q_name[regnum - AARCH64_Q0_REGNUM];
1958
1959 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
1960 return d_name[regnum - AARCH64_D0_REGNUM];
1961
1962 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
1963 return s_name[regnum - AARCH64_S0_REGNUM];
1964
1965 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
1966 return h_name[regnum - AARCH64_H0_REGNUM];
1967
1968 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
1969 return b_name[regnum - AARCH64_B0_REGNUM];
1970
1971 internal_error (__FILE__, __LINE__,
1972 _("aarch64_pseudo_register_name: bad register number %d"),
1973 regnum);
1974}
1975
1976/* Implement the "pseudo_register_type" tdesc_arch_data method. */
1977
1978static struct type *
1979aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
1980{
1981 regnum -= gdbarch_num_regs (gdbarch);
1982
1983 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
1984 return aarch64_vnq_type (gdbarch);
1985
1986 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
1987 return aarch64_vnd_type (gdbarch);
1988
1989 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
1990 return aarch64_vns_type (gdbarch);
1991
1992 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
1993 return aarch64_vnh_type (gdbarch);
1994
1995 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
1996 return aarch64_vnb_type (gdbarch);
1997
1998 internal_error (__FILE__, __LINE__,
1999 _("aarch64_pseudo_register_type: bad register number %d"),
2000 regnum);
2001}
2002
2003/* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2004
2005static int
2006aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
2007 struct reggroup *group)
2008{
2009 regnum -= gdbarch_num_regs (gdbarch);
2010
2011 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2012 return group == all_reggroup || group == vector_reggroup;
2013 else if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2014 return (group == all_reggroup || group == vector_reggroup
2015 || group == float_reggroup);
2016 else if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2017 return (group == all_reggroup || group == vector_reggroup
2018 || group == float_reggroup);
2019 else if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2020 return group == all_reggroup || group == vector_reggroup;
2021 else if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2022 return group == all_reggroup || group == vector_reggroup;
2023
2024 return group == all_reggroup;
2025}
2026
2027/* Implement the "pseudo_register_read_value" gdbarch method. */
2028
2029static struct value *
2030aarch64_pseudo_read_value (struct gdbarch *gdbarch,
2031 struct regcache *regcache,
2032 int regnum)
2033{
2034 gdb_byte reg_buf[MAX_REGISTER_SIZE];
2035 struct value *result_value;
2036 gdb_byte *buf;
2037
2038 result_value = allocate_value (register_type (gdbarch, regnum));
2039 VALUE_LVAL (result_value) = lval_register;
2040 VALUE_REGNUM (result_value) = regnum;
2041 buf = value_contents_raw (result_value);
2042
2043 regnum -= gdbarch_num_regs (gdbarch);
2044
2045 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2046 {
2047 enum register_status status;
2048 unsigned v_regnum;
2049
2050 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2051 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2052 if (status != REG_VALID)
2053 mark_value_bytes_unavailable (result_value, 0,
2054 TYPE_LENGTH (value_type (result_value)));
2055 else
2056 memcpy (buf, reg_buf, Q_REGISTER_SIZE);
2057 return result_value;
2058 }
2059
2060 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2061 {
2062 enum register_status status;
2063 unsigned v_regnum;
2064
2065 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2066 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2067 if (status != REG_VALID)
2068 mark_value_bytes_unavailable (result_value, 0,
2069 TYPE_LENGTH (value_type (result_value)));
2070 else
2071 memcpy (buf, reg_buf, D_REGISTER_SIZE);
2072 return result_value;
2073 }
2074
2075 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2076 {
2077 enum register_status status;
2078 unsigned v_regnum;
2079
2080 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2081 status = regcache_raw_read (regcache, v_regnum, reg_buf);
4bcddace
PL
2082 if (status != REG_VALID)
2083 mark_value_bytes_unavailable (result_value, 0,
2084 TYPE_LENGTH (value_type (result_value)));
2085 else
2086 memcpy (buf, reg_buf, S_REGISTER_SIZE);
07b287a0
MS
2087 return result_value;
2088 }
2089
2090 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2091 {
2092 enum register_status status;
2093 unsigned v_regnum;
2094
2095 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2096 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2097 if (status != REG_VALID)
2098 mark_value_bytes_unavailable (result_value, 0,
2099 TYPE_LENGTH (value_type (result_value)));
2100 else
2101 memcpy (buf, reg_buf, H_REGISTER_SIZE);
2102 return result_value;
2103 }
2104
2105 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2106 {
2107 enum register_status status;
2108 unsigned v_regnum;
2109
2110 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2111 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2112 if (status != REG_VALID)
2113 mark_value_bytes_unavailable (result_value, 0,
2114 TYPE_LENGTH (value_type (result_value)));
2115 else
2116 memcpy (buf, reg_buf, B_REGISTER_SIZE);
2117 return result_value;
2118 }
2119
2120 gdb_assert_not_reached ("regnum out of bound");
2121}
2122
2123/* Implement the "pseudo_register_write" gdbarch method. */
2124
2125static void
2126aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2127 int regnum, const gdb_byte *buf)
2128{
2129 gdb_byte reg_buf[MAX_REGISTER_SIZE];
2130
2131 /* Ensure the register buffer is zero, we want gdb writes of the
2132 various 'scalar' pseudo registers to behavior like architectural
2133 writes, register width bytes are written the remainder are set to
2134 zero. */
2135 memset (reg_buf, 0, sizeof (reg_buf));
2136
2137 regnum -= gdbarch_num_regs (gdbarch);
2138
2139 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2140 {
2141 /* pseudo Q registers */
2142 unsigned v_regnum;
2143
2144 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2145 memcpy (reg_buf, buf, Q_REGISTER_SIZE);
2146 regcache_raw_write (regcache, v_regnum, reg_buf);
2147 return;
2148 }
2149
2150 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2151 {
2152 /* pseudo D registers */
2153 unsigned v_regnum;
2154
2155 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2156 memcpy (reg_buf, buf, D_REGISTER_SIZE);
2157 regcache_raw_write (regcache, v_regnum, reg_buf);
2158 return;
2159 }
2160
2161 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2162 {
2163 unsigned v_regnum;
2164
2165 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2166 memcpy (reg_buf, buf, S_REGISTER_SIZE);
2167 regcache_raw_write (regcache, v_regnum, reg_buf);
2168 return;
2169 }
2170
2171 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2172 {
2173 /* pseudo H registers */
2174 unsigned v_regnum;
2175
2176 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2177 memcpy (reg_buf, buf, H_REGISTER_SIZE);
2178 regcache_raw_write (regcache, v_regnum, reg_buf);
2179 return;
2180 }
2181
2182 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2183 {
2184 /* pseudo B registers */
2185 unsigned v_regnum;
2186
2187 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2188 memcpy (reg_buf, buf, B_REGISTER_SIZE);
2189 regcache_raw_write (regcache, v_regnum, reg_buf);
2190 return;
2191 }
2192
2193 gdb_assert_not_reached ("regnum out of bound");
2194}
2195
07b287a0
MS
/* Callback function for user_reg_add.  BATON points at the register
   number of the alias being read; return that register's value in
   FRAME.  */

static struct value *
value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
{
  int regnum = *(const int *) baton;

  return value_of_register (regnum, frame);
}
2205\f
2206
9404b58f
KM
/* Implement the "software_single_step" gdbarch method, needed to
   single step through atomic sequences on AArch64.

   A load-exclusive/store-exclusive sequence must not be interrupted by
   a breakpoint, or the store-exclusive would always fail.  Scan forward
   from PC for the closing store-exclusive and place the single-step
   breakpoint after it (plus one at the target of an intervening
   conditional branch, if any).  Returns 1 if breakpoints were placed,
   0 to fall back to the default single-step behaviour.  */

static int
aarch64_software_single_step (struct frame_info *frame)
{
  struct gdbarch *gdbarch = get_frame_arch (frame);
  struct address_space *aspace = get_frame_address_space (frame);
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  const int insn_size = 4;
  const int atomic_sequence_length = 16; /* Instruction sequence length.  */
  CORE_ADDR pc = get_frame_pc (frame);
  CORE_ADDR breaks[2] = { -1, -1 };
  CORE_ADDR loc = pc;
  CORE_ADDR closing_insn = 0;
  uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
						byte_order_for_code);
  int index;
  int insn_count;
  int bc_insn_count = 0; /* Conditional branch instruction count.  */
  int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed).  */
  aarch64_inst inst;

  if (aarch64_decode_insn (insn, &inst, 1) != 0)
    return 0;

  /* Look for a Load Exclusive instruction which begins the sequence.
     Bit 22 distinguishes load-exclusive from store-exclusive within
     the ldstexcl class.  */
  if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
    return 0;

  for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
    {
      loc += insn_size;
      insn = read_memory_unsigned_integer (loc, insn_size,
					   byte_order_for_code);

      if (aarch64_decode_insn (insn, &inst, 1) != 0)
	return 0;
      /* Check if the instruction is a conditional branch.  */
      if (inst.opcode->iclass == condbranch)
	{
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);

	  /* More than one conditional branch in the sequence: give up.  */
	  if (bc_insn_count >= 1)
	    return 0;

	  /* It is, so we'll try to set a breakpoint at the destination.  */
	  breaks[1] = loc + inst.operands[0].imm.value;

	  bc_insn_count++;
	  last_breakpoint++;
	}

      /* Look for the Store Exclusive which closes the atomic sequence.  */
      if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
	{
	  closing_insn = loc;
	  break;
	}
    }

  /* We didn't find a closing Store Exclusive instruction, fall back.  */
  if (!closing_insn)
    return 0;

  /* Insert breakpoint after the end of the atomic sequence.  */
  breaks[0] = loc + insn_size;

  /* Check for duplicated breakpoints, and also check that the second
     breakpoint is not within the atomic sequence.  */
  if (last_breakpoint
      && (breaks[1] == breaks[0]
	  || (breaks[1] >= pc && breaks[1] <= closing_insn)))
    last_breakpoint = 0;

  /* Insert the breakpoint at the end of the sequence, and one at the
     destination of the conditional branch, if it exists.  */
  for (index = 0; index <= last_breakpoint; index++)
    insert_single_step_breakpoint (gdbarch, aspace, breaks[index]);

  return 1;
}
2289
b6542f81
YQ
/* Per-instruction state carried from displaced-step copy to fixup.  */

struct displaced_step_closure
{
  /* It is true when condition instruction, such as B.CON, TBZ, etc,
     is being displaced stepping.  */
  int cond;

  /* PC adjustment offset after displaced stepping.  0 means the
     instruction already set the PC (e.g. RET).  */
  int32_t pc_adjust;
};
2299
/* Data when visiting instructions for displaced stepping.  Extends
   aarch64_insn_data (which supplies the original insn address) with
   the output buffer and register context the visitor callbacks
   need.  */

struct aarch64_displaced_step_data
{
  struct aarch64_insn_data base;

  /* The address where the instruction will be executed at.  */
  CORE_ADDR new_addr;
  /* Buffer of instructions to be copied to NEW_ADDR to execute.  */
  uint32_t insn_buf[DISPLACED_MODIFIED_INSNS];
  /* Number of instructions in INSN_BUF.  */
  unsigned insn_count;
  /* Registers when doing displaced stepping.  */
  struct regcache *regs;

  /* Closure shared with the later fixup phase.  */
  struct displaced_step_closure *dsc;
};
2317
2318/* Implementation of aarch64_insn_visitor method "b". */
2319
2320static void
2321aarch64_displaced_step_b (const int is_bl, const int32_t offset,
2322 struct aarch64_insn_data *data)
2323{
2324 struct aarch64_displaced_step_data *dsd
2325 = (struct aarch64_displaced_step_data *) data;
2ac09a5b 2326 int64_t new_offset = data->insn_addr - dsd->new_addr + offset;
b6542f81
YQ
2327
2328 if (can_encode_int32 (new_offset, 28))
2329 {
2330 /* Emit B rather than BL, because executing BL on a new address
2331 will get the wrong address into LR. In order to avoid this,
2332 we emit B, and update LR if the instruction is BL. */
2333 emit_b (dsd->insn_buf, 0, new_offset);
2334 dsd->insn_count++;
2335 }
2336 else
2337 {
2338 /* Write NOP. */
2339 emit_nop (dsd->insn_buf);
2340 dsd->insn_count++;
2341 dsd->dsc->pc_adjust = offset;
2342 }
2343
2344 if (is_bl)
2345 {
2346 /* Update LR. */
2347 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
2348 data->insn_addr + 4);
2349 }
2350}
2351
2352/* Implementation of aarch64_insn_visitor method "b_cond". */
2353
2354static void
2355aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
2356 struct aarch64_insn_data *data)
2357{
2358 struct aarch64_displaced_step_data *dsd
2359 = (struct aarch64_displaced_step_data *) data;
b6542f81
YQ
2360
2361 /* GDB has to fix up PC after displaced step this instruction
2362 differently according to the condition is true or false. Instead
2363 of checking COND against conditional flags, we can use
2364 the following instructions, and GDB can tell how to fix up PC
2365 according to the PC value.
2366
2367 B.COND TAKEN ; If cond is true, then jump to TAKEN.
2368 INSN1 ;
2369 TAKEN:
2370 INSN2
2371 */
2372
2373 emit_bcond (dsd->insn_buf, cond, 8);
2374 dsd->dsc->cond = 1;
2375 dsd->dsc->pc_adjust = offset;
2376 dsd->insn_count = 1;
2377}
2378
2379/* Dynamically allocate a new register. If we know the register
2380 statically, we should make it a global as above instead of using this
2381 helper function. */
2382
2383static struct aarch64_register
2384aarch64_register (unsigned num, int is64)
2385{
2386 return (struct aarch64_register) { num, is64 };
2387}
2388
2389/* Implementation of aarch64_insn_visitor method "cb". */
2390
2391static void
2392aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
2393 const unsigned rn, int is64,
2394 struct aarch64_insn_data *data)
2395{
2396 struct aarch64_displaced_step_data *dsd
2397 = (struct aarch64_displaced_step_data *) data;
b6542f81
YQ
2398
2399 /* The offset is out of range for a compare and branch
2400 instruction. We can use the following instructions instead:
2401
2402 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
2403 INSN1 ;
2404 TAKEN:
2405 INSN2
2406 */
2407 emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
2408 dsd->insn_count = 1;
2409 dsd->dsc->cond = 1;
2410 dsd->dsc->pc_adjust = offset;
2411}
2412
2413/* Implementation of aarch64_insn_visitor method "tb". */
2414
2415static void
2416aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
2417 const unsigned rt, unsigned bit,
2418 struct aarch64_insn_data *data)
2419{
2420 struct aarch64_displaced_step_data *dsd
2421 = (struct aarch64_displaced_step_data *) data;
b6542f81
YQ
2422
2423 /* The offset is out of range for a test bit and branch
2424 instruction We can use the following instructions instead:
2425
2426 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
2427 INSN1 ;
2428 TAKEN:
2429 INSN2
2430
2431 */
2432 emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
2433 dsd->insn_count = 1;
2434 dsd->dsc->cond = 1;
2435 dsd->dsc->pc_adjust = offset;
2436}
2437
2438/* Implementation of aarch64_insn_visitor method "adr". */
2439
2440static void
2441aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
2442 const int is_adrp, struct aarch64_insn_data *data)
2443{
2444 struct aarch64_displaced_step_data *dsd
2445 = (struct aarch64_displaced_step_data *) data;
2446 /* We know exactly the address the ADR{P,} instruction will compute.
2447 We can just write it to the destination register. */
2448 CORE_ADDR address = data->insn_addr + offset;
2449
2450 if (is_adrp)
2451 {
2452 /* Clear the lower 12 bits of the offset to get the 4K page. */
2453 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2454 address & ~0xfff);
2455 }
2456 else
2457 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2458 address);
2459
2460 dsd->dsc->pc_adjust = 4;
2461 emit_nop (dsd->insn_buf);
2462 dsd->insn_count = 1;
2463}
2464
2465/* Implementation of aarch64_insn_visitor method "ldr_literal". */
2466
2467static void
2468aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
2469 const unsigned rt, const int is64,
2470 struct aarch64_insn_data *data)
2471{
2472 struct aarch64_displaced_step_data *dsd
2473 = (struct aarch64_displaced_step_data *) data;
2474 CORE_ADDR address = data->insn_addr + offset;
2475 struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };
2476
2477 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
2478 address);
2479
2480 if (is_sw)
2481 dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
2482 aarch64_register (rt, 1), zero);
2483 else
2484 dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
2485 aarch64_register (rt, 1), zero);
2486
2487 dsd->dsc->pc_adjust = 4;
2488}
2489
2490/* Implementation of aarch64_insn_visitor method "others". */
2491
2492static void
2493aarch64_displaced_step_others (const uint32_t insn,
2494 struct aarch64_insn_data *data)
2495{
2496 struct aarch64_displaced_step_data *dsd
2497 = (struct aarch64_displaced_step_data *) data;
2498
e1c587c3 2499 aarch64_emit_insn (dsd->insn_buf, insn);
b6542f81
YQ
2500 dsd->insn_count = 1;
2501
2502 if ((insn & 0xfffffc1f) == 0xd65f0000)
2503 {
2504 /* RET */
2505 dsd->dsc->pc_adjust = 0;
2506 }
2507 else
2508 dsd->dsc->pc_adjust = 4;
2509}
2510
/* Visitor callbacks used by aarch64_relocate_instruction when copying
   an instruction to the displaced-stepping scratch pad.  */

static const struct aarch64_insn_visitor visitor =
{
  aarch64_displaced_step_b,
  aarch64_displaced_step_b_cond,
  aarch64_displaced_step_cb,
  aarch64_displaced_step_tb,
  aarch64_displaced_step_adr,
  aarch64_displaced_step_ldr_literal,
  aarch64_displaced_step_others,
};
2521
/* Implement the "displaced_step_copy_insn" gdbarch method.

   Relocate the instruction at FROM into the scratch pad at TO,
   rewriting it via VISITOR as needed.  Returns a heap-allocated
   closure (freed by the caller / fixup machinery) on success, or NULL
   to tell GDB displaced stepping cannot be used for this
   instruction.  */

struct displaced_step_closure *
aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
				  CORE_ADDR from, CORE_ADDR to,
				  struct regcache *regs)
{
  struct displaced_step_closure *dsc = NULL;
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
  struct aarch64_displaced_step_data dsd;
  aarch64_inst inst;

  if (aarch64_decode_insn (insn, &inst, 1) != 0)
    return NULL;

  /* Look for a Load Exclusive instruction which begins the sequence.  */
  if (inst.opcode->iclass == ldstexcl && bit (insn, 22))
    {
      /* We can't displaced step atomic sequences.  */
      return NULL;
    }

  dsc = XCNEW (struct displaced_step_closure);
  dsd.base.insn_addr = from;
  dsd.new_addr = to;
  dsd.regs = regs;
  dsd.dsc = dsc;
  dsd.insn_count = 0;
  aarch64_relocate_instruction (insn, &visitor,
				(struct aarch64_insn_data *) &dsd);
  gdb_assert (dsd.insn_count <= DISPLACED_MODIFIED_INSNS);

  if (dsd.insn_count != 0)
    {
      int i;

      /* Instruction can be relocated to scratch pad.  Copy
	 relocated instruction(s) there.  */
      for (i = 0; i < dsd.insn_count; i++)
	{
	  if (debug_displaced)
	    {
	      debug_printf ("displaced: writing insn ");
	      debug_printf ("%.8x", dsd.insn_buf[i]);
	      debug_printf (" at %s\n", paddress (gdbarch, to + i * 4));
	    }
	  write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
					 (ULONGEST) dsd.insn_buf[i]);
	}
    }
  else
    {
      /* The visitor produced no instructions: displaced stepping is
	 not possible, release the closure.  */
      xfree (dsc);
      dsc = NULL;
    }

  return dsc;
}
2581
/* Implement the "displaced_step_fixup" gdbarch method.

   After the relocated instruction has executed at TO, repair the PC
   so execution resumes at the right place relative to FROM.  For
   conditional instructions (DSC->cond), the distance PC - TO reveals
   whether the emitted B.COND/CBZ/TBZ was taken (8) or fell through
   (4) — see the layout in aarch64_displaced_step_b_cond.  */

void
aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
			      struct displaced_step_closure *dsc,
			      CORE_ADDR from, CORE_ADDR to,
			      struct regcache *regs)
{
  if (dsc->cond)
    {
      ULONGEST pc;

      regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc);
      if (pc - to == 8)
	{
	  /* Condition is true.  Keep the pc_adjust chosen at copy
	     time (the branch offset).  */
	}
      else if (pc - to == 4)
	{
	  /* Condition is false.  Step over the original branch.  */
	  dsc->pc_adjust = 4;
	}
      else
	gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
    }

  if (dsc->pc_adjust != 0)
    {
      if (debug_displaced)
	{
	  debug_printf ("displaced: fixup: set PC to %s:%d\n",
			paddress (gdbarch, from), dsc->pc_adjust);
	}
      regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
				      from + dsc->pc_adjust);
    }
}
2619
/* Implement the "displaced_step_hw_singlestep" gdbarch method.

   Always returns 1: hardware single-step is used to execute the
   relocated instruction in the scratch pad.  */

int
aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch,
				      struct displaced_step_closure *closure)
{
  return 1;
}
2628
07b287a0
MS
/* Initialize the current architecture based on INFO.  If possible,
   re-use an architecture from ARCHES, which is a list of
   architectures already created during this debugging session.

   Called e.g. at program startup, when reading a core file, and when
   reading a binary file.  */

static struct gdbarch *
aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
{
  struct gdbarch_tdep *tdep;
  struct gdbarch *gdbarch;
  struct gdbarch_list *best_arch;
  struct tdesc_arch_data *tdesc_data = NULL;
  const struct target_desc *tdesc = info.target_desc;
  int i;
  int valid_p = 1;
  const struct tdesc_feature *feature;
  int num_regs = 0;
  int num_pseudo_regs = 0;

  /* Ensure we always have a target descriptor.  */
  if (!tdesc_has_registers (tdesc))
    tdesc = tdesc_aarch64;

  gdb_assert (tdesc);

  feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");

  /* The core feature is mandatory; without it we cannot build an
     architecture.  */
  if (feature == NULL)
    return NULL;

  tdesc_data = tdesc_data_alloc ();

  /* Validate the descriptor provides the mandatory core R registers
     and allocate their numbers.  */
  for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
    valid_p &=
      tdesc_numbered_register (feature, tdesc_data, AARCH64_X0_REGNUM + i,
			       aarch64_r_register_names[i]);

  num_regs = AARCH64_X0_REGNUM + i;

  /* Look for the V registers.  */
  feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
  if (feature)
    {
      /* Validate the descriptor provides the mandatory V registers
	 and allocate their numbers.  */
      for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
	valid_p &=
	  tdesc_numbered_register (feature, tdesc_data, AARCH64_V0_REGNUM + i,
				   aarch64_v_register_names[i]);

      num_regs = AARCH64_V0_REGNUM + i;

      num_pseudo_regs += 32;	/* add the Qn scalar register pseudos */
      num_pseudo_regs += 32;	/* add the Dn scalar register pseudos */
      num_pseudo_regs += 32;	/* add the Sn scalar register pseudos */
      num_pseudo_regs += 32;	/* add the Hn scalar register pseudos */
      num_pseudo_regs += 32;	/* add the Bn scalar register pseudos */
    }

  if (!valid_p)
    {
      tdesc_data_cleanup (tdesc_data);
      return NULL;
    }

  /* AArch64 code is always little-endian.  */
  info.byte_order_for_code = BFD_ENDIAN_LITTLE;

  /* If there is already a candidate, use it.  */
  for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
       best_arch != NULL;
       best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
    {
      /* Found a match.  */
      break;
    }

  if (best_arch != NULL)
    {
      if (tdesc_data != NULL)
	tdesc_data_cleanup (tdesc_data);
      return best_arch->gdbarch;
    }

  tdep = XCNEW (struct gdbarch_tdep);
  gdbarch = gdbarch_alloc (&info, tdep);

  /* This should be low enough for everything.  */
  tdep->lowest_pc = 0x20;
  tdep->jb_pc = -1;		/* Longjump support not enabled by default.  */
  tdep->jb_elt_size = 8;

  set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
  set_gdbarch_frame_align (gdbarch, aarch64_frame_align);

  /* Frame handling.  */
  set_gdbarch_dummy_id (gdbarch, aarch64_dummy_id);
  set_gdbarch_unwind_pc (gdbarch, aarch64_unwind_pc);
  set_gdbarch_unwind_sp (gdbarch, aarch64_unwind_sp);

  /* Advance PC across function entry code.  */
  set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);

  /* The stack grows downward.  */
  set_gdbarch_inner_than (gdbarch, core_addr_lessthan);

  /* Breakpoint manipulation.  */
  set_gdbarch_breakpoint_from_pc (gdbarch, aarch64_breakpoint_from_pc);
  set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
  set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);

  /* Information about registers, etc.  */
  set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
  set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
  set_gdbarch_num_regs (gdbarch, num_regs);

  set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
  set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
  set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
  set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
  set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
  set_tdesc_pseudo_register_reggroup_p (gdbarch,
					aarch64_pseudo_register_reggroup_p);

  /* ABI */
  set_gdbarch_short_bit (gdbarch, 16);
  set_gdbarch_int_bit (gdbarch, 32);
  set_gdbarch_float_bit (gdbarch, 32);
  set_gdbarch_double_bit (gdbarch, 64);
  set_gdbarch_long_double_bit (gdbarch, 128);
  set_gdbarch_long_bit (gdbarch, 64);
  set_gdbarch_long_long_bit (gdbarch, 64);
  set_gdbarch_ptr_bit (gdbarch, 64);
  set_gdbarch_char_signed (gdbarch, 0);
  set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
  set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
  set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);

  /* Internal <-> external register number maps.  */
  set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);

  /* Returning results.  */
  set_gdbarch_return_value (gdbarch, aarch64_return_value);

  /* Disassembly.  */
  set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);

  /* Virtual tables.  */
  set_gdbarch_vbit_in_delta (gdbarch, 1);

  /* Hook in the ABI-specific overrides, if they have been registered.  */
  info.target_desc = tdesc;
  info.tdep_info = (void *) tdesc_data;
  gdbarch_init_osabi (info, gdbarch);

  dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);

  /* Add some default predicates.  */
  frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
  dwarf2_append_unwinders (gdbarch);
  frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);

  frame_base_set_default (gdbarch, &aarch64_normal_base);

  /* Now we have tuned the configuration, set a few final things,
     based on what the OS ABI has told us.  */

  if (tdep->jb_pc >= 0)
    set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);

  set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);

  tdesc_use_registers (gdbarch, tdesc, tdesc_data);

  /* Add standard register aliases.  */
  for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
    user_reg_add (gdbarch, aarch64_register_aliases[i].name,
		  value_of_aarch64_user_reg,
		  &aarch64_register_aliases[i].regnum);

  return gdbarch;
}
2815
2816static void
2817aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
2818{
2819 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2820
2821 if (tdep == NULL)
2822 return;
2823
2824 fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
2825 paddress (gdbarch, tdep->lowest_pc));
2826}
2827
/* Suppress warning from -Wmissing-prototypes.  */
extern initialize_file_ftype _initialize_aarch64_tdep;

/* Module initializer: register the AArch64 gdbarch constructor, the
   built-in target description, and the "set/show debug aarch64"
   maintenance commands.  */

void
_initialize_aarch64_tdep (void)
{
  gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
		    aarch64_dump_tdep);

  initialize_tdesc_aarch64 ();

  /* Debug this file's internals.  */
  add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
Set AArch64 debugging."), _("\
Show AArch64 debugging."), _("\
When on, AArch64 specific debugging is enabled."),
			    NULL,
			    show_aarch64_debug,
			    &setdebuglist, &showdebuglist);
}
99afc88b
OJ
2848
2849/* AArch64 process record-replay related structures, defines etc. */
2850
99afc88b
OJ
/* Allocate and fill REGS (a uint32_t array of LENGTH register numbers)
   from RECORD_BUF.  No-op when LENGTH is 0.  Caller owns the
   allocation.  */

#define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
        do  \
          { \
            unsigned int reg_len = LENGTH; \
            if (reg_len) \
              { \
                REGS = XNEWVEC (uint32_t, reg_len); \
                memcpy(&REGS[0], &RECORD_BUF[0], sizeof(uint32_t)*LENGTH); \
              } \
          } \
        while (0)

/* Allocate and fill MEMS (an array of LENGTH struct aarch64_mem_r)
   from RECORD_BUF.  No-op when LENGTH is 0.  Caller owns the
   allocation.  NOTE(review): the copy destination is &MEMS->len — the
   address of the first member, equivalent to MEMS itself; presumably
   intentional but worth confirming.  */

#define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
        do  \
          { \
            unsigned int mem_len = LENGTH; \
            if (mem_len) \
            { \
              MEMS =  XNEWVEC (struct aarch64_mem_r, mem_len);  \
              memcpy(&MEMS->len, &RECORD_BUF[0], \
                     sizeof(struct aarch64_mem_r) * LENGTH); \
            } \
          } \
          while (0)
2875
/* AArch64 record/replay structures and enumerations.  */

/* One recorded memory access: ADDR bytes starting at ADDR, LEN long.  */

struct aarch64_mem_r
{
  uint64_t len;    /* Record length.  */
  uint64_t addr;   /* Memory address.  */
};

/* Result codes returned by the per-class record handlers.  */

enum aarch64_record_result
{
  AARCH64_RECORD_SUCCESS,
  AARCH64_RECORD_FAILURE,
  AARCH64_RECORD_UNSUPPORTED,
  AARCH64_RECORD_UNKNOWN
};

/* Working state passed through the record handlers while decoding a
   single instruction.  */

typedef struct insn_decode_record_t
{
  struct gdbarch *gdbarch;
  struct regcache *regcache;
  CORE_ADDR this_addr;                 /* Address of insn to be recorded.  */
  uint32_t aarch64_insn;               /* Insn to be recorded.  */
  uint32_t mem_rec_count;              /* Count of memory records.  */
  uint32_t reg_rec_count;              /* Count of register records.  */
  uint32_t *aarch64_regs;              /* Registers to be recorded.  */
  struct aarch64_mem_r *aarch64_mems;  /* Memory locations to be recorded.  */
} insn_decode_record;
2903
/* Record handler for data processing - register instructions.

   Records the destination register and, where the encoding sets the
   condition flags, CPSR.  Returns AARCH64_RECORD_SUCCESS or
   AARCH64_RECORD_UNKNOWN for encodings not handled here.  */

static unsigned int
aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
{
  uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
  uint32_t record_buf[4];

  reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);

  if (!bit (aarch64_insn_r->aarch64_insn, 28))
    {
      uint8_t setflags;

      /* Logical (shifted register).  */
      if (insn_bits24_27 == 0x0a)
	setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
      /* Add/subtract.  */
      else if (insn_bits24_27 == 0x0b)
	setflags = bit (aarch64_insn_r->aarch64_insn, 29);
      else
	return AARCH64_RECORD_UNKNOWN;

      record_buf[0] = reg_rd;
      aarch64_insn_r->reg_rec_count = 1;
      if (setflags)
	record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
    }
  else
    {
      if (insn_bits24_27 == 0x0b)
	{
	  /* Data-processing (3 source).  */
	  record_buf[0] = reg_rd;
	  aarch64_insn_r->reg_rec_count = 1;
	}
      else if (insn_bits24_27 == 0x0a)
	{
	  if (insn_bits21_23 == 0x00)
	    {
	      /* Add/subtract (with carry).  */
	      record_buf[0] = reg_rd;
	      aarch64_insn_r->reg_rec_count = 1;
	      if (bit (aarch64_insn_r->aarch64_insn, 29))
		{
		  record_buf[1] = AARCH64_CPSR_REGNUM;
		  aarch64_insn_r->reg_rec_count = 2;
		}
	    }
	  else if (insn_bits21_23 == 0x02)
	    {
	      /* Conditional compare (register) and conditional compare
		 (immediate) instructions.  */
	      record_buf[0] = AARCH64_CPSR_REGNUM;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	  else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
	    {
	      /* Conditional select.  */
	      /* Data-processing (2 source).  */
	      /* Data-processing (1 source).  */
	      record_buf[0] = reg_rd;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	  else
	    return AARCH64_RECORD_UNKNOWN;
	}
    }

  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}
2979
/* Record handler for data processing - immediate instructions.

   Records the destination register and, for flag-setting forms, CPSR.
   Returns AARCH64_RECORD_SUCCESS or AARCH64_RECORD_UNKNOWN.  */

static unsigned int
aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
{
  uint8_t reg_rd, insn_bit23, insn_bits24_27, setflags;
  uint32_t record_buf[4];

  reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);

  if (insn_bits24_27 == 0x00                     /* PC rel addressing.  */
     || insn_bits24_27 == 0x03                   /* Bitfield and Extract.  */
     || (insn_bits24_27 == 0x02 && insn_bit23))  /* Move wide (immediate).  */
    {
      record_buf[0] = reg_rd;
      aarch64_insn_r->reg_rec_count = 1;
    }
  else if (insn_bits24_27 == 0x01)
    {
      /* Add/Subtract (immediate).  */
      setflags = bit (aarch64_insn_r->aarch64_insn, 29);
      record_buf[0] = reg_rd;
      aarch64_insn_r->reg_rec_count = 1;
      if (setflags)
	record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
    }
  else if (insn_bits24_27 == 0x02 && !insn_bit23)
    {
      /* Logical (immediate).  */
      setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
      record_buf[0] = reg_rd;
      aarch64_insn_r->reg_rec_count = 1;
      if (setflags)
	record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
    }
  else
    return AARCH64_RECORD_UNKNOWN;

  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}
3024
/* Record handler for branch, exception generation and system instructions.

   Branches record the PC (and LR for branch-and-link forms); SVC is
   delegated to the OS-ABI syscall recorder; system instructions record
   Rt or CPSR depending on the encoding.  Returns an
   aarch64_record_result code.  */

static unsigned int
aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
  uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
  uint32_t record_buf[4];

  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
  insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);

  if (insn_bits28_31 == 0x0d)
    {
      /* Exception generation instructions.  */
      if (insn_bits24_27 == 0x04)
	{
	  /* SVC only; other exception-generating encodings (HVC, SMC,
	     BRK, HLT...) are unsupported.  */
	  if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
	      && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
	      && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
	    {
	      ULONGEST svc_number;

	      /* Register 8 (x8) holds the syscall number per the
		 AArch64 Linux syscall convention.  */
	      regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
					  &svc_number);
	      return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
						   svc_number);
	    }
	  else
	    return AARCH64_RECORD_UNSUPPORTED;
	}
      /* System instructions.  */
      else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
	{
	  uint32_t reg_rt, reg_crn;

	  reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
	  reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);

	  /* Record rt in case of sysl and mrs instructions.  */
	  if (bit (aarch64_insn_r->aarch64_insn, 21))
	    {
	      record_buf[0] = reg_rt;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	  /* Record cpsr for hint and msr(immediate) instructions.  */
	  else if (reg_crn == 0x02 || reg_crn == 0x04)
	    {
	      record_buf[0] = AARCH64_CPSR_REGNUM;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	}
      /* Unconditional branch (register).  */
      else if((insn_bits24_27 & 0x0e) == 0x06)
	{
	  record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
	  if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
	    record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
	}
      else
	return AARCH64_RECORD_UNKNOWN;
    }
  /* Unconditional branch (immediate).  */
  else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
    {
      record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
      /* Bit 31 distinguishes BL (link) from B.  */
      if (bit (aarch64_insn_r->aarch64_insn, 31))
	record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
    }
  else
    /* Compare & branch (immediate), Test & branch (immediate) and
       Conditional branch (immediate).  */
    record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;

  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}
3104
/* Record handler for advanced SIMD load and store instructions.

   Decodes both the "single structure" and "multiple structure" forms.
   For loads, records the vector destination registers; for stores,
   records the memory ranges that will be written (element size and
   address pairs).  Returns AARCH64_RECORD_SUCCESS,
   AARCH64_RECORD_UNSUPPORTED or AARCH64_RECORD_UNKNOWN.  */

static unsigned int
aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
{
  CORE_ADDR address;
  uint64_t addr_offset = 0;
  uint32_t record_buf[24];
  /* Memory records are (length, address) pairs, hence two slots each.  */
  uint64_t record_buf_mem[24];
  uint32_t reg_rn, reg_rt;
  uint32_t reg_index = 0, mem_index = 0;
  uint8_t opcode_bits, size_bits;

  reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
  size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
  opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
  /* Base address comes from Rn.  */
  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);

  if (record_debug)
    debug_printf ("Process record: Advanced SIMD load/store\n");

  /* Load/store single structure. */
  if (bit (aarch64_insn_r->aarch64_insn, 24))
    {
      uint8_t sindex, scale, selem, esize, replicate = 0;
      scale = opcode_bits >> 2;
      /* SELEM is the number of consecutive registers involved (1..4).  */
      selem = ((opcode_bits & 0x02) |
              bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
      /* Validate the scale/size combination; some encodings are
         reserved and treated as unknown.  */
      switch (scale)
        {
        case 1:
          if (size_bits & 0x01)
            return AARCH64_RECORD_UNKNOWN;
          break;
        case 2:
          if ((size_bits >> 1) & 0x01)
            return AARCH64_RECORD_UNKNOWN;
          if (size_bits & 0x01)
            {
              if (!((opcode_bits >> 1) & 0x01))
                scale = 3;
              else
                return AARCH64_RECORD_UNKNOWN;
            }
          break;
        case 3:
          /* Load-and-replicate form (LD1R etc.): bit 22 must be set
             (load) and the opcode low bit clear.  */
          if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
            {
              scale = size_bits;
              replicate = 1;
              break;
            }
          else
            return AARCH64_RECORD_UNKNOWN;
        default:
          break;
        }
      /* Element size in bits.  */
      esize = 8 << scale;
      if (replicate)
        for (sindex = 0; sindex < selem; sindex++)
          {
            record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
            reg_rt = (reg_rt + 1) % 32;
          }
      else
        {
          for (sindex = 0; sindex < selem; sindex++)
            {
              /* Bit 22 set means load: registers change; otherwise a
                 store: memory changes.  */
              if (bit (aarch64_insn_r->aarch64_insn, 22))
                record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
              else
                {
                  record_buf_mem[mem_index++] = esize / 8;
                  record_buf_mem[mem_index++] = address + addr_offset;
                }
              addr_offset = addr_offset + (esize / 8);
              reg_rt = (reg_rt + 1) % 32;
            }
        }
    }
  /* Load/store multiple structure. */
  else
    {
      uint8_t selem, esize, rpt, elements;
      uint8_t eindex, rindex;

      esize = 8 << size_bits;
      /* Bit 30 (Q) selects 128-bit vs 64-bit vector registers.  */
      if (bit (aarch64_insn_r->aarch64_insn, 30))
        elements = 128 / esize;
      else
        elements = 64 / esize;

      /* RPT is the number of register blocks, SELEM the number of
         structure elements per block.  */
      switch (opcode_bits)
        {
        /*LD/ST4 (4 Registers). */
        case 0:
          rpt = 1;
          selem = 4;
          break;
        /*LD/ST1 (4 Registers). */
        case 2:
          rpt = 4;
          selem = 1;
          break;
        /*LD/ST3 (3 Registers). */
        case 4:
          rpt = 1;
          selem = 3;
          break;
        /*LD/ST1 (3 Registers). */
        case 6:
          rpt = 3;
          selem = 1;
          break;
        /*LD/ST1 (1 Register). */
        case 7:
          rpt = 1;
          selem = 1;
          break;
        /*LD/ST2 (2 Registers). */
        case 8:
          rpt = 1;
          selem = 2;
          break;
        /*LD/ST1 (2 Registers). */
        case 10:
          rpt = 2;
          selem = 1;
          break;
        default:
          return AARCH64_RECORD_UNSUPPORTED;
          break;
        }
      for (rindex = 0; rindex < rpt; rindex++)
        for (eindex = 0; eindex < elements; eindex++)
          {
            uint8_t reg_tt, sindex;
            reg_tt = (reg_rt + rindex) % 32;
            for (sindex = 0; sindex < selem; sindex++)
              {
                /* Bit 22: load records registers, store records memory.  */
                if (bit (aarch64_insn_r->aarch64_insn, 22))
                  record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
                else
                  {
                    record_buf_mem[mem_index++] = esize / 8;
                    record_buf_mem[mem_index++] = address + addr_offset;
                  }
                addr_offset = addr_offset + (esize / 8);
                reg_tt = (reg_tt + 1) % 32;
              }
          }
    }

  /* Bit 23 set: writeback form, the base register Rn is updated too.  */
  if (bit (aarch64_insn_r->aarch64_insn, 23))
    record_buf[reg_index++] = reg_rn;

  aarch64_insn_r->reg_rec_count = reg_index;
  aarch64_insn_r->mem_rec_count = mem_index / 2;
  MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
             record_buf_mem);
  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
             record_buf);
  return AARCH64_RECORD_SUCCESS;
}
3270
/* Record handler for load and store instructions.

   Dispatches on the load/store sub-group (exclusive, literal, pair,
   unsigned-immediate, register-offset, immediate/unprivileged) and
   records either the destination registers (loads) or the memory
   ranges written (stores), plus the base register for writeback forms.
   Advanced SIMD load/stores are delegated to
   aarch64_record_asimd_load_store.  Returns AARCH64_RECORD_SUCCESS or
   AARCH64_RECORD_UNKNOWN.  */

static unsigned int
aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
{
  uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
  uint8_t insn_bit23, insn_bit21;
  uint8_t opc, size_bits, ld_flag, vector_flag;
  uint32_t reg_rn, reg_rt, reg_rt2;
  uint64_t datasize, offset;
  uint32_t record_buf[8];
  /* Memory records are (length, address) pairs.  */
  uint64_t record_buf_mem[8];
  CORE_ADDR address;

  insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
  insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
  insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
  /* Bit 22 distinguishes load (set) from store for most sub-groups;
     some sub-groups recompute ld_flag from the opc field below.  */
  ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
  /* Bit 26 set means SIMD/FP register file (V registers).  */
  vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
  reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
  reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
  size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);

  /* Load/store exclusive. */
  if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
    {
      if (record_debug)
        debug_printf ("Process record: load/store exclusive\n");

      if (ld_flag)
        {
          record_buf[0] = reg_rt;
          aarch64_insn_r->reg_rec_count = 1;
          /* Pair form also loads Rt2.  */
          if (insn_bit21)
            {
              record_buf[1] = reg_rt2;
              aarch64_insn_r->reg_rec_count = 2;
            }
        }
      else
        {
          /* Pair form stores twice the data size.  */
          if (insn_bit21)
            datasize = (8 << size_bits) * 2;
          else
            datasize = (8 << size_bits);
          regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
                                      &address);
          record_buf_mem[0] = datasize / 8;
          record_buf_mem[1] = address;
          aarch64_insn_r->mem_rec_count = 1;
          if (!insn_bit23)
            {
              /* Save register rs. */
              record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
              aarch64_insn_r->reg_rec_count = 1;
            }
        }
    }
  /* Load register (literal) instructions decoding. */
  else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
    {
      if (record_debug)
        debug_printf ("Process record: load register (literal)\n");
      if (vector_flag)
        record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
      else
        record_buf[0] = reg_rt;
      aarch64_insn_r->reg_rec_count = 1;
    }
  /* All types of load/store pair instructions decoding. */
  else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
    {
      if (record_debug)
        debug_printf ("Process record: load/store pair\n");

      if (ld_flag)
        {
          if (vector_flag)
            {
              record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
              record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
            }
          else
            {
              record_buf[0] = reg_rt;
              record_buf[1] = reg_rt2;
            }
          aarch64_insn_r->reg_rec_count = 2;
        }
      else
        {
          /* imm7 is a signed, scaled offset; reconstruct its magnitude
             and apply it unless this is the post-index form.  */
          uint16_t imm7_off;
          imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
          if (!vector_flag)
            size_bits = size_bits >> 1;
          datasize = 8 << (2 + size_bits);
          offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
          offset = offset << (2 + size_bits);
          regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
                                      &address);
          if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
            {
              /* Bit 0x40 of imm7 is its sign bit.  */
              if (imm7_off & 0x40)
                address = address - offset;
              else
                address = address + offset;
            }

          /* Two memory ranges, one per register of the pair.  */
          record_buf_mem[0] = datasize / 8;
          record_buf_mem[1] = address;
          record_buf_mem[2] = datasize / 8;
          record_buf_mem[3] = address + (datasize / 8);
          aarch64_insn_r->mem_rec_count = 2;
        }
      /* Writeback forms update the base register as well.  */
      if (bit (aarch64_insn_r->aarch64_insn, 23))
        record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
    }
  /* Load/store register (unsigned immediate) instructions. */
  else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
    {
      /* Re-derive ld_flag from opc: opc 01 is a load, opc 00 a store;
         prefetch-like encodings (opc high bit with size 11) are
         unknown.  */
      opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
      if (!(opc >> 1))
        if (opc & 0x01)
          ld_flag = 0x01;
        else
          ld_flag = 0x0;
      else
        if (size_bits != 0x03)
          ld_flag = 0x01;
        else
          return AARCH64_RECORD_UNKNOWN;

      if (record_debug)
        {
          debug_printf ("Process record: load/store (unsigned immediate):"
                        " size %x V %d opc %x\n", size_bits, vector_flag,
                        opc);
        }

      if (!ld_flag)
        {
          /* Store: imm12 is an unsigned, scaled offset from Rn.  */
          offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
          datasize = 8 << size_bits;
          regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
                                      &address);
          offset = offset << size_bits;
          address = address + offset;

          record_buf_mem[0] = datasize >> 3;
          record_buf_mem[1] = address;
          aarch64_insn_r->mem_rec_count = 1;
        }
      else
        {
          if (vector_flag)
            record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
          else
            record_buf[0] = reg_rt;
          aarch64_insn_r->reg_rec_count = 1;
        }
    }
  /* Load/store register (register offset) instructions. */
  else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
           && insn_bits10_11 == 0x02 && insn_bit21)
    {
      if (record_debug)
        debug_printf ("Process record: load/store (register offset)\n");
      /* Same opc-based load/store discrimination as above.  */
      opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
      if (!(opc >> 1))
        if (opc & 0x01)
          ld_flag = 0x01;
        else
          ld_flag = 0x0;
      else
        if (size_bits != 0x03)
          ld_flag = 0x01;
        else
          return AARCH64_RECORD_UNKNOWN;

      if (!ld_flag)
        {
          ULONGEST reg_rm_val;

          /* Offset comes from register Rm, optionally scaled (bit 12).  */
          regcache_raw_read_unsigned (aarch64_insn_r->regcache,
                     bits (aarch64_insn_r->aarch64_insn, 16, 20), &reg_rm_val);
          if (bit (aarch64_insn_r->aarch64_insn, 12))
            offset = reg_rm_val << size_bits;
          else
            offset = reg_rm_val;
          datasize = 8 << size_bits;
          regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
                                      &address);
          address = address + offset;
          record_buf_mem[0] = datasize >> 3;
          record_buf_mem[1] = address;
          aarch64_insn_r->mem_rec_count = 1;
        }
      else
        {
          if (vector_flag)
            record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
          else
            record_buf[0] = reg_rt;
          aarch64_insn_r->reg_rec_count = 1;
        }
    }
  /* Load/store register (immediate and unprivileged) instructions. */
  else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
           && !insn_bit21)
    {
      if (record_debug)
        {
          debug_printf ("Process record: load/store "
                        "(immediate and unprivileged)\n");
        }
      /* Same opc-based load/store discrimination as above.  */
      opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
      if (!(opc >> 1))
        if (opc & 0x01)
          ld_flag = 0x01;
        else
          ld_flag = 0x0;
      else
        if (size_bits != 0x03)
          ld_flag = 0x01;
        else
          return AARCH64_RECORD_UNKNOWN;

      if (!ld_flag)
        {
          /* imm9 is a signed offset; bit 0x0100 is its sign bit.  The
             post-index form (bits 10-11 == 01) applies the offset after
             the access, so the written address is just Rn.  */
          uint16_t imm9_off;
          imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
          offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
          datasize = 8 << size_bits;
          regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
                                      &address);
          if (insn_bits10_11 != 0x01)
            {
              if (imm9_off & 0x0100)
                address = address - offset;
              else
                address = address + offset;
            }
          record_buf_mem[0] = datasize >> 3;
          record_buf_mem[1] = address;
          aarch64_insn_r->mem_rec_count = 1;
        }
      else
        {
          if (vector_flag)
            record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
          else
            record_buf[0] = reg_rt;
          aarch64_insn_r->reg_rec_count = 1;
        }
      /* Pre/post-index forms also update the base register.  */
      if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
        record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
    }
  /* Advanced SIMD load/store instructions. */
  else
    return aarch64_record_asimd_load_store (aarch64_insn_r);

  MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
             record_buf_mem);
  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
             record_buf);
  return AARCH64_RECORD_SUCCESS;
}
3541
3542/* Record handler for data processing SIMD and floating point instructions. */
3543
3544static unsigned int
3545aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
3546{
3547 uint8_t insn_bit21, opcode, rmode, reg_rd;
3548 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
3549 uint8_t insn_bits11_14;
3550 uint32_t record_buf[2];
3551
3552 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3553 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3554 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3555 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3556 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
3557 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
3558 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
3559 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3560 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3561
3562 if (record_debug)
b277c936 3563 debug_printf ("Process record: data processing SIMD/FP: ");
99afc88b
OJ
3564
3565 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
3566 {
3567 /* Floating point - fixed point conversion instructions. */
3568 if (!insn_bit21)
3569 {
3570 if (record_debug)
b277c936 3571 debug_printf ("FP - fixed point conversion");
99afc88b
OJ
3572
3573 if ((opcode >> 1) == 0x0 && rmode == 0x03)
3574 record_buf[0] = reg_rd;
3575 else
3576 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3577 }
3578 /* Floating point - conditional compare instructions. */
3579 else if (insn_bits10_11 == 0x01)
3580 {
3581 if (record_debug)
b277c936 3582 debug_printf ("FP - conditional compare");
99afc88b
OJ
3583
3584 record_buf[0] = AARCH64_CPSR_REGNUM;
3585 }
3586 /* Floating point - data processing (2-source) and
3587 conditional select instructions. */
3588 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
3589 {
3590 if (record_debug)
b277c936 3591 debug_printf ("FP - DP (2-source)");
99afc88b
OJ
3592
3593 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3594 }
3595 else if (insn_bits10_11 == 0x00)
3596 {
3597 /* Floating point - immediate instructions. */
3598 if ((insn_bits12_15 & 0x01) == 0x01
3599 || (insn_bits12_15 & 0x07) == 0x04)
3600 {
3601 if (record_debug)
b277c936 3602 debug_printf ("FP - immediate");
99afc88b
OJ
3603 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3604 }
3605 /* Floating point - compare instructions. */
3606 else if ((insn_bits12_15 & 0x03) == 0x02)
3607 {
3608 if (record_debug)
b277c936 3609 debug_printf ("FP - immediate");
99afc88b
OJ
3610 record_buf[0] = AARCH64_CPSR_REGNUM;
3611 }
3612 /* Floating point - integer conversions instructions. */
f62fce35 3613 else if (insn_bits12_15 == 0x00)
99afc88b
OJ
3614 {
3615 /* Convert float to integer instruction. */
3616 if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
3617 {
3618 if (record_debug)
b277c936 3619 debug_printf ("float to int conversion");
99afc88b
OJ
3620
3621 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3622 }
3623 /* Convert integer to float instruction. */
3624 else if ((opcode >> 1) == 0x01 && !rmode)
3625 {
3626 if (record_debug)
b277c936 3627 debug_printf ("int to float conversion");
99afc88b
OJ
3628
3629 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3630 }
3631 /* Move float to integer instruction. */
3632 else if ((opcode >> 1) == 0x03)
3633 {
3634 if (record_debug)
b277c936 3635 debug_printf ("move float to int");
99afc88b
OJ
3636
3637 if (!(opcode & 0x01))
3638 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3639 else
3640 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3641 }
f62fce35
YQ
3642 else
3643 return AARCH64_RECORD_UNKNOWN;
99afc88b 3644 }
f62fce35
YQ
3645 else
3646 return AARCH64_RECORD_UNKNOWN;
99afc88b 3647 }
f62fce35
YQ
3648 else
3649 return AARCH64_RECORD_UNKNOWN;
99afc88b
OJ
3650 }
3651 else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
3652 {
3653 if (record_debug)
b277c936 3654 debug_printf ("SIMD copy");
99afc88b
OJ
3655
3656 /* Advanced SIMD copy instructions. */
3657 if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
3658 && !bit (aarch64_insn_r->aarch64_insn, 15)
3659 && bit (aarch64_insn_r->aarch64_insn, 10))
3660 {
3661 if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
3662 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3663 else
3664 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3665 }
3666 else
3667 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3668 }
3669 /* All remaining floating point or advanced SIMD instructions. */
3670 else
3671 {
3672 if (record_debug)
b277c936 3673 debug_printf ("all remain");
99afc88b
OJ
3674
3675 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3676 }
3677
3678 if (record_debug)
b277c936 3679 debug_printf ("\n");
99afc88b
OJ
3680
3681 aarch64_insn_r->reg_rec_count++;
3682 gdb_assert (aarch64_insn_r->reg_rec_count == 1);
3683 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3684 record_buf);
3685 return AARCH64_RECORD_SUCCESS;
3686}
3687
3688/* Decodes insns type and invokes its record handler. */
3689
3690static unsigned int
3691aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
3692{
3693 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
3694
3695 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
3696 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
3697 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
3698 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
3699
3700 /* Data processing - immediate instructions. */
3701 if (!ins_bit26 && !ins_bit27 && ins_bit28)
3702 return aarch64_record_data_proc_imm (aarch64_insn_r);
3703
3704 /* Branch, exception generation and system instructions. */
3705 if (ins_bit26 && !ins_bit27 && ins_bit28)
3706 return aarch64_record_branch_except_sys (aarch64_insn_r);
3707
3708 /* Load and store instructions. */
3709 if (!ins_bit25 && ins_bit27)
3710 return aarch64_record_load_store (aarch64_insn_r);
3711
3712 /* Data processing - register instructions. */
3713 if (ins_bit25 && !ins_bit26 && ins_bit27)
3714 return aarch64_record_data_proc_reg (aarch64_insn_r);
3715
3716 /* Data processing - SIMD and floating point instructions. */
3717 if (ins_bit25 && ins_bit26 && ins_bit27)
3718 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
3719
3720 return AARCH64_RECORD_UNSUPPORTED;
3721}
3722
3723/* Cleans up local record registers and memory allocations. */
3724
3725static void
3726deallocate_reg_mem (insn_decode_record *record)
3727{
3728 xfree (record->aarch64_regs);
3729 xfree (record->aarch64_mems);
3730}
3731
3732/* Parse the current instruction and record the values of the registers and
3733 memory that will be changed in current instruction to record_arch_list
3734 return -1 if something is wrong. */
3735
3736int
3737aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
3738 CORE_ADDR insn_addr)
3739{
3740 uint32_t rec_no = 0;
3741 uint8_t insn_size = 4;
3742 uint32_t ret = 0;
99afc88b
OJ
3743 gdb_byte buf[insn_size];
3744 insn_decode_record aarch64_record;
3745
3746 memset (&buf[0], 0, insn_size);
3747 memset (&aarch64_record, 0, sizeof (insn_decode_record));
3748 target_read_memory (insn_addr, &buf[0], insn_size);
3749 aarch64_record.aarch64_insn
3750 = (uint32_t) extract_unsigned_integer (&buf[0],
3751 insn_size,
3752 gdbarch_byte_order (gdbarch));
3753 aarch64_record.regcache = regcache;
3754 aarch64_record.this_addr = insn_addr;
3755 aarch64_record.gdbarch = gdbarch;
3756
3757 ret = aarch64_record_decode_insn_handler (&aarch64_record);
3758 if (ret == AARCH64_RECORD_UNSUPPORTED)
3759 {
3760 printf_unfiltered (_("Process record does not support instruction "
3761 "0x%0x at address %s.\n"),
3762 aarch64_record.aarch64_insn,
3763 paddress (gdbarch, insn_addr));
3764 ret = -1;
3765 }
3766
3767 if (0 == ret)
3768 {
3769 /* Record registers. */
3770 record_full_arch_list_add_reg (aarch64_record.regcache,
3771 AARCH64_PC_REGNUM);
3772 /* Always record register CPSR. */
3773 record_full_arch_list_add_reg (aarch64_record.regcache,
3774 AARCH64_CPSR_REGNUM);
3775 if (aarch64_record.aarch64_regs)
3776 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
3777 if (record_full_arch_list_add_reg (aarch64_record.regcache,
3778 aarch64_record.aarch64_regs[rec_no]))
3779 ret = -1;
3780
3781 /* Record memories. */
3782 if (aarch64_record.aarch64_mems)
3783 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
3784 if (record_full_arch_list_add_mem
3785 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
3786 aarch64_record.aarch64_mems[rec_no].len))
3787 ret = -1;
3788
3789 if (record_full_arch_list_add_end ())
3790 ret = -1;
3791 }
3792
3793 deallocate_reg_mem (&aarch64_record);
3794 return ret;
3795}
This page took 0.394703 seconds and 4 git commands to generate.