[AArch64] Support gnu vector in inferior call
[deliverable/binutils-gdb.git] / gdb / aarch64-tdep.c
/* Common target dependent code for GDB on AArch64 systems.

   Copyright (C) 2009-2015 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"

#include "frame.h"
#include "inferior.h"
#include "gdbcmd.h"
#include "gdbcore.h"
#include "dis-asm.h"
#include "regcache.h"
#include "reggroups.h"
#include "doublest.h"
#include "value.h"
#include "arch-utils.h"
#include "osabi.h"
#include "frame-unwind.h"
#include "frame-base.h"
#include "trad-frame.h"
#include "objfiles.h"
#include "dwarf2-frame.h"
#include "gdbtypes.h"
#include "prologue-value.h"
#include "target-descriptions.h"
#include "user-regs.h"
#include "language.h"
#include "infcall.h"
#include "ax.h"
#include "ax-gdb.h"

#include "aarch64-tdep.h"

#include "elf-bfd.h"
#include "elf/aarch64.h"

#include "vec.h"

#include "record.h"
#include "record-full.h"

#include "features/aarch64.c"

#include "arch/aarch64-insn.h"

#include "opcode/aarch64.h"

#define submask(x) ((1L << ((x) + 1)) - 1)
#define bit(obj,st) (((obj) >> (st)) & 1)
#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))

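/* Illustrative note, not in the original source: bits (insn, 5, 9)
   extracts the five-bit field insn[9:5], which holds Rn in most
   data-processing encodings.  For insn = 0x910003e1 ("add x1, sp, #0"),
   bits (insn, 5, 9) is 0x1f (the SP/XZR register number) and
   bits (insn, 0, 4) is 1 (Rd = x1).  */
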
/* Pseudo register base numbers.  */
#define AARCH64_Q0_REGNUM 0
#define AARCH64_D0_REGNUM (AARCH64_Q0_REGNUM + 32)
#define AARCH64_S0_REGNUM (AARCH64_D0_REGNUM + 32)
#define AARCH64_H0_REGNUM (AARCH64_S0_REGNUM + 32)
#define AARCH64_B0_REGNUM (AARCH64_H0_REGNUM + 32)

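/* Illustrative note, not in the original source: these bases are
   offsets from gdbarch_num_regs (gdbarch), so the pseudo register
   "d5", for example, has the user-visible register number
   gdbarch_num_regs (gdbarch) + AARCH64_D0_REGNUM + 5; the pseudo
   register methods below subtract gdbarch_num_regs before comparing
   against these bases.  */
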
/* The standard register names, and all the valid aliases for them.  */
static const struct
{
  const char *const name;
  int regnum;
} aarch64_register_aliases[] =
{
  /* 64-bit register names.  */
  {"fp", AARCH64_FP_REGNUM},
  {"lr", AARCH64_LR_REGNUM},
  {"sp", AARCH64_SP_REGNUM},

  /* 32-bit register names.  */
  {"w0", AARCH64_X0_REGNUM + 0},
  {"w1", AARCH64_X0_REGNUM + 1},
  {"w2", AARCH64_X0_REGNUM + 2},
  {"w3", AARCH64_X0_REGNUM + 3},
  {"w4", AARCH64_X0_REGNUM + 4},
  {"w5", AARCH64_X0_REGNUM + 5},
  {"w6", AARCH64_X0_REGNUM + 6},
  {"w7", AARCH64_X0_REGNUM + 7},
  {"w8", AARCH64_X0_REGNUM + 8},
  {"w9", AARCH64_X0_REGNUM + 9},
  {"w10", AARCH64_X0_REGNUM + 10},
  {"w11", AARCH64_X0_REGNUM + 11},
  {"w12", AARCH64_X0_REGNUM + 12},
  {"w13", AARCH64_X0_REGNUM + 13},
  {"w14", AARCH64_X0_REGNUM + 14},
  {"w15", AARCH64_X0_REGNUM + 15},
  {"w16", AARCH64_X0_REGNUM + 16},
  {"w17", AARCH64_X0_REGNUM + 17},
  {"w18", AARCH64_X0_REGNUM + 18},
  {"w19", AARCH64_X0_REGNUM + 19},
  {"w20", AARCH64_X0_REGNUM + 20},
  {"w21", AARCH64_X0_REGNUM + 21},
  {"w22", AARCH64_X0_REGNUM + 22},
  {"w23", AARCH64_X0_REGNUM + 23},
  {"w24", AARCH64_X0_REGNUM + 24},
  {"w25", AARCH64_X0_REGNUM + 25},
  {"w26", AARCH64_X0_REGNUM + 26},
  {"w27", AARCH64_X0_REGNUM + 27},
  {"w28", AARCH64_X0_REGNUM + 28},
  {"w29", AARCH64_X0_REGNUM + 29},
  {"w30", AARCH64_X0_REGNUM + 30},

  /* specials */
  {"ip0", AARCH64_X0_REGNUM + 16},
  {"ip1", AARCH64_X0_REGNUM + 17}
};

/* The required core 'R' registers.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM!  */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  "pc", "cpsr"
};

/* The FP/SIMD 'V' registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM!  */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  "fpsr",
  "fpcr"
};

/* AArch64 prologue cache structure.  */
struct aarch64_prologue_cache
{
  /* The program counter at the start of the function.  It is used to
     identify this frame as a prologue frame.  */
  CORE_ADDR func;

  /* The program counter at the time this frame was created; i.e. where
     this function was called from.  It is used to identify this frame as a
     stub frame.  */
  CORE_ADDR prev_pc;

  /* The stack pointer at the time this frame was created; i.e. the
     caller's stack pointer when this function was called.  It is used
     to identify this frame.  */
  CORE_ADDR prev_sp;

  /* Is the target available to read from?  */
  int available_p;

  /* The frame base for this frame is just prev_sp - frame size.
     FRAMESIZE is the distance from the frame pointer to the
     initial stack pointer.  */
  int framesize;

  /* The register used to hold the frame pointer for this frame.  */
  int framereg;

  /* Saved register offsets.  */
  struct trad_frame_saved_reg *saved_regs;
};

static void
show_aarch64_debug (struct ui_file *file, int from_tty,
                    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
}

/* Analyze a prologue, looking for a recognizable stack frame
   and frame pointer.  Scan until we encounter a store that could
   clobber the stack frame unexpectedly, or an unknown instruction.  */

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
                          CORE_ADDR start, CORE_ADDR limit,
                          struct aarch64_prologue_cache *cache)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  int i;
  pv_t regs[AARCH64_X_REGISTER_COUNT];
  struct pv_area *stack;
  struct cleanup *back_to;

  for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
    regs[i] = pv_register (i, 0);
  stack = make_pv_area (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));
  back_to = make_cleanup_free_pv_area (stack);

  for (; start < limit; start += 4)
    {
      uint32_t insn;
      aarch64_inst inst;

      insn = read_memory_unsigned_integer (start, 4, byte_order_for_code);

      if (aarch64_decode_insn (insn, &inst, 1) != 0)
        break;

      if (inst.opcode->iclass == addsub_imm
          && (inst.opcode->op == OP_ADD
              || strcmp ("sub", inst.opcode->name) == 0))
        {
          unsigned rd = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].reg.regno;

          gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM);

          if (inst.opcode->op == OP_ADD)
            {
              regs[rd] = pv_add_constant (regs[rn],
                                          inst.operands[2].imm.value);
            }
          else
            {
              regs[rd] = pv_add_constant (regs[rn],
                                          -inst.operands[2].imm.value);
            }
        }
      else if (inst.opcode->iclass == pcreladdr
               && inst.operands[1].type == AARCH64_OPND_ADDR_ADRP)
        {
          gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);

          regs[inst.operands[0].reg.regno] = pv_unknown ();
        }
      else if (inst.opcode->iclass == branch_imm)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == condbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == branch_reg)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == compbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->op == OP_MOVZ)
        {
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
          regs[inst.operands[0].reg.regno] = pv_unknown ();
        }
      else if (inst.opcode->iclass == log_shift
               && strcmp (inst.opcode->name, "orr") == 0)
        {
          unsigned rd = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].reg.regno;
          unsigned rm = inst.operands[2].reg.regno;

          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT);

          if (inst.operands[2].shifter.amount == 0
              && rn == AARCH64_SP_REGNUM)
            regs[rd] = regs[rm];
          else
            {
              if (aarch64_debug)
                {
                  debug_printf ("aarch64: prologue analysis gave up "
                                "addr=0x%s opcode=0x%x (orr x register)\n",
                                core_addr_to_string_nz (start), insn);
                }
              break;
            }
        }
      else if (inst.opcode->op == OP_STUR)
        {
          unsigned rt = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].addr.base_regno;
          int is64
            = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);

          gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
          gdb_assert (!inst.operands[1].addr.offset.is_reg);

          pv_area_store (stack,
                         pv_add_constant (regs[rn],
                                          inst.operands[1].addr.offset.imm),
                         is64 ? 8 : 4, regs[rt]);
        }
      else if ((inst.opcode->iclass == ldstpair_off
                || inst.opcode->iclass == ldstpair_indexed)
               && inst.operands[2].addr.preind
               && strcmp ("stp", inst.opcode->name) == 0)
        {
          unsigned rt1 = inst.operands[0].reg.regno;
          unsigned rt2 = inst.operands[1].reg.regno;
          unsigned rn = inst.operands[2].addr.base_regno;
          int32_t imm = inst.operands[2].addr.offset.imm;

          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7);
          gdb_assert (!inst.operands[2].addr.offset.is_reg);

          /* If recording this store would invalidate the store area
             (perhaps because rn is not known) then we should abandon
             further prologue analysis.  */
          if (pv_area_store_would_trash (stack,
                                         pv_add_constant (regs[rn], imm)))
            break;

          if (pv_area_store_would_trash (stack,
                                         pv_add_constant (regs[rn], imm + 8)))
            break;

          pv_area_store (stack, pv_add_constant (regs[rn], imm), 8,
                         regs[rt1]);
          pv_area_store (stack, pv_add_constant (regs[rn], imm + 8), 8,
                         regs[rt2]);

          if (inst.operands[2].addr.writeback)
            regs[rn] = pv_add_constant (regs[rn], imm);
        }
      else if (inst.opcode->iclass == testbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else
        {
          if (aarch64_debug)
            {
              debug_printf ("aarch64: prologue analysis gave up addr=0x%s"
                            " opcode=0x%x\n",
                            core_addr_to_string_nz (start), insn);
            }
          break;
        }
    }

  if (cache == NULL)
    {
      do_cleanups (back_to);
      return start;
    }

  if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Frame pointer is fp.  Frame size is constant.  */
      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = -regs[AARCH64_FP_REGNUM].k;
    }
  else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Try the stack pointer.  */
      cache->framesize = -regs[AARCH64_SP_REGNUM].k;
      cache->framereg = AARCH64_SP_REGNUM;
    }
  else
    {
      /* We're just out of luck.  We don't know where the frame is.  */
      cache->framereg = -1;
      cache->framesize = 0;
    }

  for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
    {
      CORE_ADDR offset;

      if (pv_area_find_reg (stack, gdbarch, i, &offset))
        cache->saved_regs[i].addr = offset;
    }

  do_cleanups (back_to);
  return start;
}

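/* Illustrative example, not in the original source.  A typical
   prologue this analyzer recognizes:

     sub sp, sp, #48
     stp x29, x30, [sp, #32]
     add x29, sp, #32

   After the scan, regs[AARCH64_FP_REGNUM] equals SP - 16, so the
   cache records framereg = AARCH64_FP_REGNUM and framesize = 16,
   and the STP leaves saved_regs entries for x29 and x30 at offsets
   -16 and -8 from the caller's SP (rebased against prev_sp by the
   cache code below).  */
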
/* Implement the "skip_prologue" gdbarch method.  */

static CORE_ADDR
aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  unsigned long inst;
  CORE_ADDR skip_pc;
  CORE_ADDR func_addr, limit_pc;
  struct symtab_and_line sal;

  /* See if we can determine the end of the prologue via the symbol
     table.  If so, then return either PC, or the PC after the
     prologue, whichever is greater.  */
  if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
    {
      CORE_ADDR post_prologue_pc
        = skip_prologue_using_sal (gdbarch, func_addr);

      if (post_prologue_pc != 0)
        return max (pc, post_prologue_pc);
    }

  /* Can't determine prologue from the symbol table, need to examine
     instructions.  */

  /* Find an upper limit on the function prologue using the debug
     information.  If the debug information could not be used to
     provide that bound, then use an arbitrary large number as the
     upper bound.  */
  limit_pc = skip_prologue_using_sal (gdbarch, pc);
  if (limit_pc == 0)
    limit_pc = pc + 128;  /* Magic.  */

  /* Try disassembling prologue.  */
  return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
}

/* Scan the function prologue for THIS_FRAME and populate the prologue
   cache CACHE.  */

static void
aarch64_scan_prologue (struct frame_info *this_frame,
                       struct aarch64_prologue_cache *cache)
{
  CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
  CORE_ADDR prologue_start;
  CORE_ADDR prologue_end;
  CORE_ADDR prev_pc = get_frame_pc (this_frame);
  struct gdbarch *gdbarch = get_frame_arch (this_frame);

  cache->prev_pc = prev_pc;

  /* Assume we do not find a frame.  */
  cache->framereg = -1;
  cache->framesize = 0;

  if (find_pc_partial_function (block_addr, NULL, &prologue_start,
                                &prologue_end))
    {
      struct symtab_and_line sal = find_pc_line (prologue_start, 0);

      if (sal.line == 0)
        {
          /* No line info so use the current PC.  */
          prologue_end = prev_pc;
        }
      else if (sal.end < prologue_end)
        {
          /* The next line begins after the function end.  */
          prologue_end = sal.end;
        }

      prologue_end = min (prologue_end, prev_pc);
      aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
    }
  else
    {
      CORE_ADDR frame_loc;
      LONGEST saved_fp;
      LONGEST saved_lr;
      enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

      frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
      if (frame_loc == 0)
        return;

      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = 16;
      cache->saved_regs[29].addr = 0;
      cache->saved_regs[30].addr = 8;
    }
}

/* Fill in *CACHE with information about the prologue of *THIS_FRAME.  This
   function may throw an exception if the inferior's registers or memory is
   not available.  */

static void
aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
                               struct aarch64_prologue_cache *cache)
{
  CORE_ADDR unwound_fp;
  int reg;

  aarch64_scan_prologue (this_frame, cache);

  if (cache->framereg == -1)
    return;

  unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
  if (unwound_fp == 0)
    return;

  cache->prev_sp = unwound_fp + cache->framesize;

  /* Calculate actual addresses of saved registers using offsets
     determined by aarch64_analyze_prologue.  */
  for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
    if (trad_frame_addr_p (cache->saved_regs, reg))
      cache->saved_regs[reg].addr += cache->prev_sp;

  cache->func = get_frame_func (this_frame);

  cache->available_p = 1;
}

/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  TRY
    {
      aarch64_make_prologue_cache_1 (this_frame, cache);
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
        throw_exception (ex);
    }
  END_CATCH

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
                                           void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  /* Halt the backtrace at "_start".  */
  if (cache->prev_pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
    return UNWIND_OUTERMOST;

  /* We've hit a wall, stop.  */
  if (cache->prev_sp == 0)
    return UNWIND_OUTERMOST;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a normal frame is the current function's starting
   PC and the caller's SP when we were called.  */

static void
aarch64_prologue_this_id (struct frame_info *this_frame,
                          void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    *this_id = frame_id_build_unavailable_stack (cache->func);
  else
    *this_id = frame_id_build (cache->prev_sp, cache->func);
}

/* Implement the "prev_register" frame_unwind method.  */

static struct value *
aarch64_prologue_prev_register (struct frame_info *this_frame,
                                void **this_cache, int prev_regnum)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  /* If we are asked to unwind the PC, then we need to return the LR
     instead.  The prologue may save PC, but it will point into this
     frame's prologue, not the next frame's resume location.  */
  if (prev_regnum == AARCH64_PC_REGNUM)
    {
      CORE_ADDR lr;

      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      return frame_unwind_got_constant (this_frame, prev_regnum, lr);
    }

  /* SP is generally not saved to the stack, but this frame is
     identified by the next frame's stack pointer at the time of the
     call.  The value was already reconstructed into PREV_SP.  */
  /*
         +----------+  ^
         | saved lr |  |
      +->| saved fp |--+
      |  |          |
      |  |          | <- Previous SP
      |  +----------+
      |  | saved lr |
      +--| saved fp |<- FP
         |          |
         |          |<- SP
         +----------+  */
  if (prev_regnum == AARCH64_SP_REGNUM)
    return frame_unwind_got_constant (this_frame, prev_regnum,
                                      cache->prev_sp);

  return trad_frame_get_prev_register (this_frame, cache->saved_regs,
                                       prev_regnum);
}

/* AArch64 prologue unwinder.  */
struct frame_unwind aarch64_prologue_unwind =
{
  NORMAL_FRAME,
  aarch64_prologue_frame_unwind_stop_reason,
  aarch64_prologue_this_id,
  aarch64_prologue_prev_register,
  NULL,
  default_frame_sniffer
};

/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  TRY
    {
      cache->prev_sp = get_frame_register_unsigned (this_frame,
                                                    AARCH64_SP_REGNUM);
      cache->prev_pc = get_frame_pc (this_frame);
      cache->available_p = 1;
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
        throw_exception (ex);
    }
  END_CATCH

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame,
                                       void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a stub frame is the current SP and LR.  */

static void
aarch64_stub_this_id (struct frame_info *this_frame,
                      void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (cache->available_p)
    *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
  else
    *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
}

/* Implement the "sniffer" frame_unwind method.  */

static int
aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
                             struct frame_info *this_frame,
                             void **this_prologue_cache)
{
  CORE_ADDR addr_in_block;
  gdb_byte dummy[4];

  addr_in_block = get_frame_address_in_block (this_frame);
  if (in_plt_section (addr_in_block)
      /* We also use the stub unwinder if the target memory is unreadable
         to avoid having the prologue unwinder trying to read it.  */
      || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
    return 1;

  return 0;
}

/* AArch64 stub unwinder.  */
struct frame_unwind aarch64_stub_unwind =
{
  NORMAL_FRAME,
  aarch64_stub_frame_unwind_stop_reason,
  aarch64_stub_this_id,
  aarch64_prologue_prev_register,
  NULL,
  aarch64_stub_unwind_sniffer
};

/* Return the frame base address of *THIS_FRAME.  */

static CORE_ADDR
aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  return cache->prev_sp - cache->framesize;
}

/* AArch64 default frame base information.  */
struct frame_base aarch64_normal_base =
{
  &aarch64_prologue_unwind,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base
};

/* Assuming THIS_FRAME is a dummy, return the frame ID of that
   dummy frame.  The frame ID's base needs to match the TOS value
   saved by save_dummy_frame_tos () and returned from
   aarch64_push_dummy_call, and the PC needs to match the dummy
   frame's breakpoint.  */

static struct frame_id
aarch64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  return frame_id_build (get_frame_register_unsigned (this_frame,
                                                      AARCH64_SP_REGNUM),
                         get_frame_pc (this_frame));
}

/* Implement the "unwind_pc" gdbarch method.  */

static CORE_ADDR
aarch64_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  CORE_ADDR pc
    = frame_unwind_register_unsigned (this_frame, AARCH64_PC_REGNUM);

  return pc;
}

/* Implement the "unwind_sp" gdbarch method.  */

static CORE_ADDR
aarch64_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  return frame_unwind_register_unsigned (this_frame, AARCH64_SP_REGNUM);
}

/* Return the value of the REGNUM register in the previous frame of
   *THIS_FRAME.  */

static struct value *
aarch64_dwarf2_prev_register (struct frame_info *this_frame,
                              void **this_cache, int regnum)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  CORE_ADDR lr;

  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      return frame_unwind_got_constant (this_frame, regnum, lr);

    default:
      internal_error (__FILE__, __LINE__,
                      _("Unexpected register %d"), regnum);
    }
}

/* Implement the "init_reg" dwarf2_frame_ops method.  */

static void
aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
                               struct dwarf2_frame_state_reg *reg,
                               struct frame_info *this_frame)
{
  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      reg->how = DWARF2_FRAME_REG_FN;
      reg->loc.fn = aarch64_dwarf2_prev_register;
      break;
    case AARCH64_SP_REGNUM:
      reg->how = DWARF2_FRAME_REG_CFA;
      break;
    }
}

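/* Illustrative note, not in the original source: with these defaults,
   DWARF CFI that never mentions PC or SP still unwinds correctly; the
   caller's PC is recomputed from the unwound LR (x30) via the function
   rule above, and the caller's SP is simply the frame's CFA.  */
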
/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

typedef struct
{
  /* Value to pass on stack.  */
  const gdb_byte *data;

  /* Size in bytes of value to pass on stack.  */
  int len;
} stack_item_t;

DEF_VEC_O (stack_item_t);

/* Return the alignment (in bytes) of the given type.  */

static int
aarch64_type_align (struct type *t)
{
  int n;
  int align;
  int falign;

  t = check_typedef (t);
  switch (TYPE_CODE (t))
    {
    default:
      /* Should never happen.  */
      internal_error (__FILE__, __LINE__, _("unknown type alignment"));
      return 4;

    case TYPE_CODE_PTR:
    case TYPE_CODE_ENUM:
    case TYPE_CODE_INT:
    case TYPE_CODE_FLT:
    case TYPE_CODE_SET:
    case TYPE_CODE_RANGE:
    case TYPE_CODE_BITSTRING:
    case TYPE_CODE_REF:
    case TYPE_CODE_CHAR:
    case TYPE_CODE_BOOL:
      return TYPE_LENGTH (t);

    case TYPE_CODE_ARRAY:
      if (TYPE_VECTOR (t))
        {
          /* Use the natural alignment for vector types (the same as for
             scalar types), but cap the maximum alignment at 128 bits.  */
          if (TYPE_LENGTH (t) > 16)
            return 16;
          else
            return TYPE_LENGTH (t);
        }
      else
        return aarch64_type_align (TYPE_TARGET_TYPE (t));
    case TYPE_CODE_COMPLEX:
      return aarch64_type_align (TYPE_TARGET_TYPE (t));

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      align = 1;
      for (n = 0; n < TYPE_NFIELDS (t); n++)
        {
          falign = aarch64_type_align (TYPE_FIELD_TYPE (t, n));
          if (falign > align)
            align = falign;
        }
      return align;
    }
}

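/* Illustrative examples, not in the original source:
     int                                     -> 4 (its own length)
     struct { char c; double d; }            -> 8 (largest field alignment)
     int __attribute__ ((vector_size (8)))   -> 8 (natural vector alignment)
     int __attribute__ ((vector_size (32)))  -> 16 (capped at 128 bits)  */
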
/* Return 1 if *TY is a homogeneous floating-point aggregate as
   defined in the AAPCS64 ABI document; otherwise return 0.  */

static int
is_hfa (struct type *ty)
{
  switch (TYPE_CODE (ty))
    {
    case TYPE_CODE_ARRAY:
      {
        struct type *target_ty = TYPE_TARGET_TYPE (ty);

        if (TYPE_VECTOR (ty))
          return 0;

        if (TYPE_CODE (target_ty) == TYPE_CODE_FLT && TYPE_LENGTH (ty) <= 4)
          return 1;
        break;
      }

    case TYPE_CODE_UNION:
    case TYPE_CODE_STRUCT:
      {
        if (TYPE_NFIELDS (ty) > 0 && TYPE_NFIELDS (ty) <= 4)
          {
            struct type *member0_type;

            member0_type = check_typedef (TYPE_FIELD_TYPE (ty, 0));
            if (TYPE_CODE (member0_type) == TYPE_CODE_FLT)
              {
                int i;

                for (i = 0; i < TYPE_NFIELDS (ty); i++)
                  {
                    struct type *member1_type;

                    member1_type = check_typedef (TYPE_FIELD_TYPE (ty, i));
                    if (TYPE_CODE (member0_type) != TYPE_CODE (member1_type)
                        || (TYPE_LENGTH (member0_type)
                            != TYPE_LENGTH (member1_type)))
                      return 0;
                  }
                return 1;
              }
          }
        return 0;
      }

    default:
      break;
    }

  return 0;
}

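/* Illustrative examples, not in the original source:
     struct { float x, y, z; }        -> HFA; passed in s0, s1, s2
     struct { double r, i; }          -> HFA; passed in d0, d1
     struct { float x; double y; }    -> not an HFA (mixed member sizes)
     struct { double a, b, c, d, e; } -> not an HFA (more than 4 members)  */
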
/* AArch64 function call information structure.  */
struct aarch64_call_info
{
  /* The current argument number.  */
  unsigned argnum;

  /* The next general purpose register number, equivalent to NGRN as
     described in the AArch64 Procedure Call Standard.  */
  unsigned ngrn;

  /* The next SIMD and floating point register number, equivalent to
     NSRN as described in the AArch64 Procedure Call Standard.  */
  unsigned nsrn;

  /* The next stacked argument address, equivalent to NSAA as
     described in the AArch64 Procedure Call Standard.  */
  unsigned nsaa;

  /* Stack item vector.  */
  VEC(stack_item_t) *si;
};

/* Pass a value in a sequence of consecutive X registers.  The caller
   is responsible for ensuring sufficient registers are available.  */

static void
pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
           struct aarch64_call_info *info, struct type *type,
           struct value *arg)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int len = TYPE_LENGTH (type);
  enum type_code typecode = TYPE_CODE (type);
  int regnum = AARCH64_X0_REGNUM + info->ngrn;
  const bfd_byte *buf = value_contents (arg);

  info->argnum++;

  while (len > 0)
    {
      int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
      CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
                                                   byte_order);

      /* Adjust sub-word struct/union args when big-endian.  */
      if (byte_order == BFD_ENDIAN_BIG
          && partial_len < X_REGISTER_SIZE
          && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
        regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);

      if (aarch64_debug)
        {
          debug_printf ("arg %d in %s = 0x%s\n", info->argnum,
                        gdbarch_register_name (gdbarch, regnum),
                        phex (regval, X_REGISTER_SIZE));
        }
      regcache_cooked_write_unsigned (regcache, regnum, regval);
      len -= partial_len;
      buf += partial_len;
      regnum++;
    }
}

/* Attempt to marshall a value in a V register.  Return 1 if
   successful, or 0 if insufficient registers are available.  This
   function, unlike the equivalent pass_in_x() function, does not
   handle arguments spread across multiple registers.  */

static int
pass_in_v (struct gdbarch *gdbarch,
           struct regcache *regcache,
           struct aarch64_call_info *info,
           int len, const bfd_byte *buf)
{
  if (info->nsrn < 8)
    {
      enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
      int regnum = AARCH64_V0_REGNUM + info->nsrn;
      gdb_byte reg[V_REGISTER_SIZE];

      info->argnum++;
      info->nsrn++;

      memset (reg, 0, sizeof (reg));
      /* PCS C.1, the argument is allocated to the least significant
         bits of V register.  */
      memcpy (reg, buf, len);
      regcache_cooked_write (regcache, regnum, reg);

      if (aarch64_debug)
        {
          debug_printf ("arg %d in %s\n", info->argnum,
                        gdbarch_register_name (gdbarch, regnum));
        }
      return 1;
    }
  info->nsrn = 8;
  return 0;
}

/* Marshall an argument onto the stack.  */

static void
pass_on_stack (struct aarch64_call_info *info, struct type *type,
               struct value *arg)
{
  const bfd_byte *buf = value_contents (arg);
  int len = TYPE_LENGTH (type);
  int align;
  stack_item_t item;

  info->argnum++;

  align = aarch64_type_align (type);

  /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
     natural alignment of the argument's type.  */
  align = align_up (align, 8);

  /* The AArch64 PCS requires at most doubleword alignment.  */
  if (align > 16)
    align = 16;

  if (aarch64_debug)
    {
      debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
                    info->nsaa);
    }

  item.len = len;
  item.data = buf;
  VEC_safe_push (stack_item_t, info->si, &item);

  info->nsaa += len;
  if (info->nsaa & (align - 1))
    {
      /* Push stack alignment padding.  */
      int pad = align - (info->nsaa & (align - 1));

      item.len = pad;
      item.data = buf;

      VEC_safe_push (stack_item_t, info->si, &item);
      info->nsaa += pad;
    }
}

/* Marshall an argument into a sequence of one or more consecutive X
   registers or, if insufficient X registers are available, then onto
   the stack.  */

static void
pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
                    struct aarch64_call_info *info, struct type *type,
                    struct value *arg)
{
  int len = TYPE_LENGTH (type);
  int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;

  /* PCS C.13 - Pass in registers if we have enough spare.  */
  if (info->ngrn + nregs <= 8)
    {
      pass_in_x (gdbarch, regcache, info, type, arg);
      info->ngrn += nregs;
    }
  else
    {
      info->ngrn = 8;
      pass_on_stack (info, type, arg);
    }
}

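/* Illustrative note, not in the original source: a 12-byte struct
   needs nregs = 2 X registers.  With info->ngrn == 7 it no longer
   fits in the one remaining register, so NGRN is bumped to 8 and the
   whole value goes onto the stack rather than being split between a
   register and memory.  */
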
/* Pass a value in a V register, or on the stack if insufficient are
   available.  */

static void
pass_in_v_or_stack (struct gdbarch *gdbarch,
                    struct regcache *regcache,
                    struct aarch64_call_info *info,
                    struct type *type,
                    struct value *arg)
{
  if (!pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (type),
                  value_contents (arg)))
    pass_on_stack (info, type, arg);
}

/* Implement the "push_dummy_call" gdbarch method.  */

static CORE_ADDR
aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
                         struct regcache *regcache, CORE_ADDR bp_addr,
                         int nargs,
                         struct value **args, CORE_ADDR sp, int struct_return,
                         CORE_ADDR struct_addr)
{
  int nstack = 0;
  int argnum;
  int x_argreg;
  int v_argreg;
  struct aarch64_call_info info;
  struct type *func_type;
  struct type *return_type;
  int lang_struct_return;

  memset (&info, 0, sizeof (info));

  /* We need to know what the type of the called function is in order
     to determine the number of named/anonymous arguments for the
     actual argument placement, and the return type in order to handle
     return value correctly.

     The generic code above us views the decision of return in memory
     or return in registers as a two-stage process.  The language
     handler is consulted first and may decide to return in memory (e.g.
     a class with a copy constructor returned by value); this will cause
     the generic code to allocate space AND insert an initial leading
     argument.

     If the language code does not decide to pass in memory then the
     target code is consulted.

     If the language code decides to pass in memory we want to move
     the pointer inserted as the initial argument from the argument
     list and into X8, the conventional AArch64 struct return pointer
     register.

     This is slightly awkward, ideally the flag "lang_struct_return"
     would be passed to the target's implementation of push_dummy_call.
     Rather than change the target interface we call the language code
     directly ourselves.  */

  func_type = check_typedef (value_type (function));

  /* Dereference function pointer types.  */
  if (TYPE_CODE (func_type) == TYPE_CODE_PTR)
    func_type = TYPE_TARGET_TYPE (func_type);

  gdb_assert (TYPE_CODE (func_type) == TYPE_CODE_FUNC
              || TYPE_CODE (func_type) == TYPE_CODE_METHOD);

  /* If language_pass_by_reference () returned true we will have been
     given an additional initial argument, a hidden pointer to the
     return slot in memory.  */
  return_type = TYPE_TARGET_TYPE (func_type);
  lang_struct_return = language_pass_by_reference (return_type);

  /* Set the return address.  For the AArch64, the return breakpoint
     is always at BP_ADDR.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);

  /* If we were given an initial argument for the return slot because
     lang_struct_return was true, lose it.  */
  if (lang_struct_return)
    {
      args++;
      nargs--;
    }

  /* The struct_return pointer occupies X8.  */
  if (struct_return || lang_struct_return)
    {
      if (aarch64_debug)
        {
          debug_printf ("struct return in %s = 0x%s\n",
                        gdbarch_register_name (gdbarch,
                                               AARCH64_STRUCT_RETURN_REGNUM),
                        paddress (gdbarch, struct_addr));
        }
      regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
                                      struct_addr);
    }

  for (argnum = 0; argnum < nargs; argnum++)
    {
      struct value *arg = args[argnum];
      struct type *arg_type;
      int len;

      arg_type = check_typedef (value_type (arg));
      len = TYPE_LENGTH (arg_type);

      switch (TYPE_CODE (arg_type))
        {
        case TYPE_CODE_INT:
        case TYPE_CODE_BOOL:
        case TYPE_CODE_CHAR:
        case TYPE_CODE_RANGE:
        case TYPE_CODE_ENUM:
          if (len < 4)
            {
              /* Promote to 32 bit integer.  */
              if (TYPE_UNSIGNED (arg_type))
                arg_type = builtin_type (gdbarch)->builtin_uint32;
              else
                arg_type = builtin_type (gdbarch)->builtin_int32;
              arg = value_cast (arg_type, arg);
            }
          pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;

        case TYPE_CODE_COMPLEX:
          if (info.nsrn <= 6)
            {
              const bfd_byte *buf = value_contents (arg);
              struct type *target_type =
                check_typedef (TYPE_TARGET_TYPE (arg_type));

              pass_in_v (gdbarch, regcache, &info,
                         TYPE_LENGTH (target_type), buf);
              pass_in_v (gdbarch, regcache, &info,
                         TYPE_LENGTH (target_type),
                         buf + TYPE_LENGTH (target_type));
            }
          else
            {
              info.nsrn = 8;
              pass_on_stack (&info, arg_type, arg);
            }
          break;
        case TYPE_CODE_FLT:
          pass_in_v_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;

        case TYPE_CODE_STRUCT:
        case TYPE_CODE_ARRAY:
        case TYPE_CODE_UNION:
          if (is_hfa (arg_type))
            {
              int elements = TYPE_NFIELDS (arg_type);

              /* Homogeneous Aggregates */
              if (info.nsrn + elements < 8)
                {
                  int i;

                  for (i = 0; i < elements; i++)
                    {
                      /* We know that we have sufficient registers
                         available therefore this will never fallback
                         to the stack.  */
                      struct value *field =
                        value_primitive_field (arg, 0, i, arg_type);
                      struct type *field_type =
                        check_typedef (value_type (field));

                      pass_in_v_or_stack (gdbarch, regcache, &info,
                                          field_type, field);
                    }
                }
              else
                {
                  info.nsrn = 8;
                  pass_on_stack (&info, arg_type, arg);
                }
            }
          else if (TYPE_CODE (arg_type) == TYPE_CODE_ARRAY
                   && TYPE_VECTOR (arg_type) && (len == 16 || len == 8))
            {
              /* Short vector types are passed in V registers.  */
              pass_in_v_or_stack (gdbarch, regcache, &info, arg_type, arg);
            }
          else if (len > 16)
            {
              /* PCS B.7 Aggregates larger than 16 bytes are passed by
                 invisible reference.  */

              /* Allocate aligned storage.  */
              sp = align_down (sp - len, 16);

              /* Write the real data into the stack.  */
              write_memory (sp, value_contents (arg), len);

              /* Construct the indirection.  */
              arg_type = lookup_pointer_type (arg_type);
              arg = value_from_pointer (arg_type, sp);
              pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
            }
          else
            /* PCS C.15 / C.18 multiple values pass.  */
            pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;

        default:
          pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;
        }
    }

  /* Make sure stack retains 16 byte alignment.  */
  if (info.nsaa & 15)
    sp -= 16 - (info.nsaa & 15);

  while (!VEC_empty (stack_item_t, info.si))
    {
      stack_item_t *si = VEC_last (stack_item_t, info.si);

      sp -= si->len;
      write_memory (sp, si->data, si->len);
      VEC_pop (stack_item_t, info.si);
    }

  VEC_free (stack_item_t, info.si);

  /* Finally, update the SP register.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);

  return sp;
}

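/* Illustrative example, not in the original source.  For an inferior
   call such as "call f (1, 2.5, vec)" where f takes an int, a double
   and a 16-byte gnu vector, the code above places 1 in x0 (promoted
   to 32 bits and passed via pass_in_x_or_stack), 2.5 in v0, and the
   vector in v1 as a short vector type; LR is pointed at the return
   breakpoint and SP is realigned to 16 bytes before the call.  */
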
/* Implement the "frame_align" gdbarch method.  */

static CORE_ADDR
aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
{
  /* Align the stack to sixteen bytes.  */
  return sp & ~(CORE_ADDR) 15;
}

/* Return the type for an AdvSISD Q register.  */

static struct type *
aarch64_vnq_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnq_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint128;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int128;
      append_composite_type_field (t, "s", elem);

      tdep->vnq_type = t;
    }

  return tdep->vnq_type;
}

/* Return the type for an AdvSISD D register.  */

static struct type *
aarch64_vnd_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnd_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_double;
      append_composite_type_field (t, "f", elem);

      elem = builtin_type (gdbarch)->builtin_uint64;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int64;
      append_composite_type_field (t, "s", elem);

      tdep->vnd_type = t;
    }

  return tdep->vnd_type;
}

/* Return the type for an AdvSISD S register.  */

static struct type *
aarch64_vns_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vns_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_float;
      append_composite_type_field (t, "f", elem);

      elem = builtin_type (gdbarch)->builtin_uint32;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int32;
      append_composite_type_field (t, "s", elem);

      tdep->vns_type = t;
    }

  return tdep->vns_type;
}

/* Return the type for an AdvSISD H register.  */

static struct type *
aarch64_vnh_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnh_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint16;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int16;
      append_composite_type_field (t, "s", elem);

      tdep->vnh_type = t;
    }

  return tdep->vnh_type;
}

/* Return the type for an AdvSISD B register.  */

static struct type *
aarch64_vnb_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnb_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint8;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int8;
      append_composite_type_field (t, "s", elem);

      tdep->vnb_type = t;
    }

  return tdep->vnb_type;
}

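/* Illustrative note, not in the original source: because the pseudo
   registers use these union types, the same bits can be viewed either
   way from the CLI, e.g. "p $d5.f" prints the double view of d5 while
   "p/x $d5.u" prints the same 64 bits as an unsigned integer.  */
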
/* Implement the "dwarf2_reg_to_regnum" gdbarch method.  */

static int
aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
{
  if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
    return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;

  if (reg == AARCH64_DWARF_SP)
    return AARCH64_SP_REGNUM;

  if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
    return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;

  return -1;
}

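/* Illustrative note, not in the original source: under the AArch64
   DWARF numbering, registers 0-30 are x0-x30 (so DWARF 5 maps to
   AARCH64_X0_REGNUM + 5), 31 is SP, and 64 upward are the V
   registers, so DWARF 66 maps to AARCH64_V0_REGNUM + 2 (v2);
   anything else yields -1.  */
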
/* Implement the "print_insn" gdbarch method.  */

static int
aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
{
  info->symbols = NULL;
  return print_insn_aarch64 (memaddr, info);
}

/* AArch64 BRK software debug mode instruction.
   Note that AArch64 code is always little-endian.
   1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000.  */
static const gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};

/* Implement the "breakpoint_from_pc" gdbarch method.  */

static const gdb_byte *
aarch64_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr,
                            int *lenptr)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  *lenptr = sizeof (aarch64_default_breakpoint);
  return aarch64_default_breakpoint;
}

/* Extract from an array REGS containing the (raw) register state a
   function return value of type TYPE, and copy that, in virtual
   format, into VALBUF.  */

static void
aarch64_extract_return_value (struct type *type, struct regcache *regs,
                              gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = get_regcache_arch (regs);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  if (TYPE_CODE (type) == TYPE_CODE_FLT)
    {
      bfd_byte buf[V_REGISTER_SIZE];
      int len = TYPE_LENGTH (type);

      regcache_cooked_read (regs, AARCH64_V0_REGNUM, buf);
      memcpy (valbuf, buf, len);
    }
  else if (TYPE_CODE (type) == TYPE_CODE_INT
           || TYPE_CODE (type) == TYPE_CODE_CHAR
           || TYPE_CODE (type) == TYPE_CODE_BOOL
           || TYPE_CODE (type) == TYPE_CODE_PTR
           || TYPE_CODE (type) == TYPE_CODE_REF
           || TYPE_CODE (type) == TYPE_CODE_ENUM)
    {
      /* If the type is a plain integer, then the access is
         straightforward.  Otherwise we have to play around a bit
         more.  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      ULONGEST tmp;

      while (len > 0)
        {
          /* By using store_unsigned_integer we avoid having to do
             anything special for small big-endian values.  */
          regcache_cooked_read_unsigned (regs, regno++, &tmp);
          store_unsigned_integer (valbuf,
                                  (len > X_REGISTER_SIZE
                                   ? X_REGISTER_SIZE : len), byte_order, tmp);
          len -= X_REGISTER_SIZE;
          valbuf += X_REGISTER_SIZE;
        }
    }
  else if (TYPE_CODE (type) == TYPE_CODE_COMPLEX)
    {
      int regno = AARCH64_V0_REGNUM;
      bfd_byte buf[V_REGISTER_SIZE];
      struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
      int len = TYPE_LENGTH (target_type);

      regcache_cooked_read (regs, regno, buf);
      memcpy (valbuf, buf, len);
      valbuf += len;
      regcache_cooked_read (regs, regno + 1, buf);
      memcpy (valbuf, buf, len);
      valbuf += len;
    }
  else if (is_hfa (type))
    {
      int elements = TYPE_NFIELDS (type);
      struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
      int len = TYPE_LENGTH (member_type);
      int i;

      for (i = 0; i < elements; i++)
        {
          int regno = AARCH64_V0_REGNUM + i;
          bfd_byte buf[V_REGISTER_SIZE];

          if (aarch64_debug)
            {
              debug_printf ("read HFA return value element %d from %s\n",
                            i + 1,
                            gdbarch_register_name (gdbarch, regno));
            }
          regcache_cooked_read (regs, regno, buf);

          memcpy (valbuf, buf, len);
          valbuf += len;
        }
    }
  else if (TYPE_CODE (type) == TYPE_CODE_ARRAY && TYPE_VECTOR (type)
           && (TYPE_LENGTH (type) == 16 || TYPE_LENGTH (type) == 8))
    {
      /* Short vector is returned in V register.  */
      gdb_byte buf[V_REGISTER_SIZE];

      regcache_cooked_read (regs, AARCH64_V0_REGNUM, buf);
      memcpy (valbuf, buf, TYPE_LENGTH (type));
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
         been stored to word-aligned memory and then loaded into
         registers with 64-bit load instruction(s).  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      bfd_byte buf[X_REGISTER_SIZE];

      while (len > 0)
        {
          regcache_cooked_read (regs, regno++, buf);
          memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
          len -= X_REGISTER_SIZE;
          valbuf += X_REGISTER_SIZE;
        }
    }
}

/* Will a function return an aggregate type in memory or in a
   register?  Return 0 if an aggregate type can be returned in a
   register, 1 if it must be returned in memory.  */

static int
aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
{
  int nRc;
  enum type_code code;

  type = check_typedef (type);

  /* In the AArch64 ABI, "integer" like aggregate types are returned
     in registers.  For an aggregate type to be integer like, its size
     must be less than or equal to 4 * X_REGISTER_SIZE.  */

  if (is_hfa (type))
    {
      /* PCS B.5 If the argument is a Named HFA, then the argument is
         used unmodified.  */
      return 0;
    }

  if (TYPE_LENGTH (type) > 16)
    {
      /* PCS B.6 Aggregates larger than 16 bytes are passed by
         invisible reference.  */

      return 1;
    }

  return 0;
}

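/* Illustrative examples, not in the original source:
     struct { long a, b; }            (16 bytes) -> registers (x0/x1)
     struct { long a, b, c; }         (24 bytes) -> memory, address in x8
     struct { double a, b, c, d; }    -> HFA, returned in d0-d3  */
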
/* Write into appropriate registers a function return value of type
   TYPE, given in virtual format.  */

static void
aarch64_store_return_value (struct type *type, struct regcache *regs,
                            const gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = get_regcache_arch (regs);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  if (TYPE_CODE (type) == TYPE_CODE_FLT)
    {
      bfd_byte buf[V_REGISTER_SIZE];
      int len = TYPE_LENGTH (type);

      memcpy (buf, valbuf, len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
      regcache_cooked_write (regs, AARCH64_V0_REGNUM, buf);
    }
  else if (TYPE_CODE (type) == TYPE_CODE_INT
           || TYPE_CODE (type) == TYPE_CODE_CHAR
           || TYPE_CODE (type) == TYPE_CODE_BOOL
           || TYPE_CODE (type) == TYPE_CODE_PTR
           || TYPE_CODE (type) == TYPE_CODE_REF
           || TYPE_CODE (type) == TYPE_CODE_ENUM)
    {
      if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
        {
          /* Values of one word or less are zero/sign-extended and
             returned in r0.  */
          bfd_byte tmpbuf[X_REGISTER_SIZE];
          LONGEST val = unpack_long (type, valbuf);

          store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
          regcache_cooked_write (regs, AARCH64_X0_REGNUM, tmpbuf);
        }
      else
        {
          /* Integral values greater than one word are stored in
             consecutive registers starting with r0.  This will always
             be a multiple of the register size.  */
          int len = TYPE_LENGTH (type);
          int regno = AARCH64_X0_REGNUM;

          while (len > 0)
            {
              regcache_cooked_write (regs, regno++, valbuf);
              len -= X_REGISTER_SIZE;
              valbuf += X_REGISTER_SIZE;
            }
        }
    }
  else if (is_hfa (type))
    {
      int elements = TYPE_NFIELDS (type);
      struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
      int len = TYPE_LENGTH (member_type);
      int i;

      for (i = 0; i < elements; i++)
        {
          int regno = AARCH64_V0_REGNUM + i;
          bfd_byte tmpbuf[MAX_REGISTER_SIZE];

          if (aarch64_debug)
            {
              debug_printf ("write HFA return value element %d to %s\n",
                            i + 1,
                            gdbarch_register_name (gdbarch, regno));
            }

          memcpy (tmpbuf, valbuf, len);
          regcache_cooked_write (regs, regno, tmpbuf);
          valbuf += len;
        }
    }
  else if (TYPE_CODE (type) == TYPE_CODE_ARRAY && TYPE_VECTOR (type)
           && (TYPE_LENGTH (type) == 8 || TYPE_LENGTH (type) == 16))
    {
      /* Short vector.  */
      gdb_byte buf[V_REGISTER_SIZE];

      memcpy (buf, valbuf, TYPE_LENGTH (type));
      regcache_cooked_write (regs, AARCH64_V0_REGNUM, buf);
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
         been stored to word-aligned memory and then loaded into
         registers with 64-bit load instruction(s).  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      bfd_byte tmpbuf[X_REGISTER_SIZE];

      while (len > 0)
        {
          memcpy (tmpbuf, valbuf,
                  len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
          regcache_cooked_write (regs, regno++, tmpbuf);
          len -= X_REGISTER_SIZE;
          valbuf += X_REGISTER_SIZE;
        }
    }
}

/* Implement the "return_value" gdbarch method.  */

static enum return_value_convention
aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
                      struct type *valtype, struct regcache *regcache,
                      gdb_byte *readbuf, const gdb_byte *writebuf)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
      || TYPE_CODE (valtype) == TYPE_CODE_UNION
      || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
    {
      if (aarch64_return_in_memory (gdbarch, valtype))
        {
          if (aarch64_debug)
            debug_printf ("return value in memory\n");
          return RETURN_VALUE_STRUCT_CONVENTION;
        }
    }

  if (writebuf)
    aarch64_store_return_value (valtype, regcache, writebuf);

  if (readbuf)
    aarch64_extract_return_value (valtype, regcache, readbuf);

  if (aarch64_debug)
    debug_printf ("return value in registers\n");

  return RETURN_VALUE_REGISTER_CONVENTION;
}

/* Implement the "get_longjmp_target" gdbarch method.  */

static int
aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
{
  CORE_ADDR jb_addr;
  gdb_byte buf[X_REGISTER_SIZE];
  struct gdbarch *gdbarch = get_frame_arch (frame);
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);

  if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
                          X_REGISTER_SIZE))
    return 0;

  *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
  return 1;
}
1887
1888/* Implement the "gen_return_address" gdbarch method. */
1889
1890static void
1891aarch64_gen_return_address (struct gdbarch *gdbarch,
1892 struct agent_expr *ax, struct axs_value *value,
1893 CORE_ADDR scope)
1894{
1895 value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
1896 value->kind = axs_lvalue_register;
1897 value->u.reg = AARCH64_LR_REGNUM;
1898}
1899\f
1900
1901/* Return the pseudo register name corresponding to register regnum. */
1902
1903static const char *
1904aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
1905{
1906 static const char *const q_name[] =
1907 {
1908 "q0", "q1", "q2", "q3",
1909 "q4", "q5", "q6", "q7",
1910 "q8", "q9", "q10", "q11",
1911 "q12", "q13", "q14", "q15",
1912 "q16", "q17", "q18", "q19",
1913 "q20", "q21", "q22", "q23",
1914 "q24", "q25", "q26", "q27",
1915 "q28", "q29", "q30", "q31",
1916 };
1917
1918 static const char *const d_name[] =
1919 {
1920 "d0", "d1", "d2", "d3",
1921 "d4", "d5", "d6", "d7",
1922 "d8", "d9", "d10", "d11",
1923 "d12", "d13", "d14", "d15",
1924 "d16", "d17", "d18", "d19",
1925 "d20", "d21", "d22", "d23",
1926 "d24", "d25", "d26", "d27",
1927 "d28", "d29", "d30", "d31",
1928 };
1929
1930 static const char *const s_name[] =
1931 {
1932 "s0", "s1", "s2", "s3",
1933 "s4", "s5", "s6", "s7",
1934 "s8", "s9", "s10", "s11",
1935 "s12", "s13", "s14", "s15",
1936 "s16", "s17", "s18", "s19",
1937 "s20", "s21", "s22", "s23",
1938 "s24", "s25", "s26", "s27",
1939 "s28", "s29", "s30", "s31",
1940 };
1941
1942 static const char *const h_name[] =
1943 {
1944 "h0", "h1", "h2", "h3",
1945 "h4", "h5", "h6", "h7",
1946 "h8", "h9", "h10", "h11",
1947 "h12", "h13", "h14", "h15",
1948 "h16", "h17", "h18", "h19",
1949 "h20", "h21", "h22", "h23",
1950 "h24", "h25", "h26", "h27",
1951 "h28", "h29", "h30", "h31",
1952 };
1953
1954 static const char *const b_name[] =
1955 {
1956 "b0", "b1", "b2", "b3",
1957 "b4", "b5", "b6", "b7",
1958 "b8", "b9", "b10", "b11",
1959 "b12", "b13", "b14", "b15",
1960 "b16", "b17", "b18", "b19",
1961 "b20", "b21", "b22", "b23",
1962 "b24", "b25", "b26", "b27",
1963 "b28", "b29", "b30", "b31",
1964 };
1965
1966 regnum -= gdbarch_num_regs (gdbarch);
1967
1968 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
1969 return q_name[regnum - AARCH64_Q0_REGNUM];
1970
1971 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
1972 return d_name[regnum - AARCH64_D0_REGNUM];
1973
1974 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
1975 return s_name[regnum - AARCH64_S0_REGNUM];
1976
1977 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
1978 return h_name[regnum - AARCH64_H0_REGNUM];
1979
1980 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
1981 return b_name[regnum - AARCH64_B0_REGNUM];
1982
1983 internal_error (__FILE__, __LINE__,
1984 _("aarch64_pseudo_register_name: bad register number %d"),
1985 regnum);
1986}
1987
1988/* Implement the "pseudo_register_type" tdesc_arch_data method. */
1989
1990static struct type *
1991aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
1992{
1993 regnum -= gdbarch_num_regs (gdbarch);
1994
1995 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
1996 return aarch64_vnq_type (gdbarch);
1997
1998 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
1999 return aarch64_vnd_type (gdbarch);
2000
2001 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2002 return aarch64_vns_type (gdbarch);
2003
2004 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2005 return aarch64_vnh_type (gdbarch);
2006
2007 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2008 return aarch64_vnb_type (gdbarch);
2009
2010 internal_error (__FILE__, __LINE__,
2011 _("aarch64_pseudo_register_type: bad register number %d"),
2012 regnum);
2013}
2014
2015/* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2016
2017static int
2018aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
2019 struct reggroup *group)
2020{
2021 regnum -= gdbarch_num_regs (gdbarch);
2022
2023 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2024 return group == all_reggroup || group == vector_reggroup;
2025 else if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2026 return (group == all_reggroup || group == vector_reggroup
2027 || group == float_reggroup);
2028 else if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2029 return (group == all_reggroup || group == vector_reggroup
2030 || group == float_reggroup);
2031 else if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2032 return group == all_reggroup || group == vector_reggroup;
2033 else if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2034 return group == all_reggroup || group == vector_reggroup;
2035
2036 return group == all_reggroup;
2037}
2038
2039/* Implement the "pseudo_register_read_value" gdbarch method. */
2040
2041static struct value *
2042aarch64_pseudo_read_value (struct gdbarch *gdbarch,
2043 struct regcache *regcache,
2044 int regnum)
2045{
2046 gdb_byte reg_buf[MAX_REGISTER_SIZE];
2047 struct value *result_value;
2048 gdb_byte *buf;
2049
2050 result_value = allocate_value (register_type (gdbarch, regnum));
2051 VALUE_LVAL (result_value) = lval_register;
2052 VALUE_REGNUM (result_value) = regnum;
2053 buf = value_contents_raw (result_value);
2054
2055 regnum -= gdbarch_num_regs (gdbarch);
2056
2057 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2058 {
2059 enum register_status status;
2060 unsigned v_regnum;
2061
2062 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2063 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2064 if (status != REG_VALID)
2065 mark_value_bytes_unavailable (result_value, 0,
2066 TYPE_LENGTH (value_type (result_value)));
2067 else
2068 memcpy (buf, reg_buf, Q_REGISTER_SIZE);
2069 return result_value;
2070 }
2071
2072 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2073 {
2074 enum register_status status;
2075 unsigned v_regnum;
2076
2077 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2078 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2079 if (status != REG_VALID)
2080 mark_value_bytes_unavailable (result_value, 0,
2081 TYPE_LENGTH (value_type (result_value)));
2082 else
2083 memcpy (buf, reg_buf, D_REGISTER_SIZE);
2084 return result_value;
2085 }
2086
2087 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2088 {
2089 enum register_status status;
2090 unsigned v_regnum;
2091
2092 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2093 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2094 if (status != REG_VALID)
2095 mark_value_bytes_unavailable (result_value, 0,
2096 TYPE_LENGTH (value_type (result_value)));
2097 else
2098 memcpy (buf, reg_buf, S_REGISTER_SIZE);
2099 return result_value;
2100 }
2101
2102 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2103 {
2104 enum register_status status;
2105 unsigned v_regnum;
2106
2107 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2108 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2109 if (status != REG_VALID)
2110 mark_value_bytes_unavailable (result_value, 0,
2111 TYPE_LENGTH (value_type (result_value)));
2112 else
2113 memcpy (buf, reg_buf, H_REGISTER_SIZE);
2114 return result_value;
2115 }
2116
2117 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2118 {
2119 enum register_status status;
2120 unsigned v_regnum;
2121
2122 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2123 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2124 if (status != REG_VALID)
2125 mark_value_bytes_unavailable (result_value, 0,
2126 TYPE_LENGTH (value_type (result_value)));
2127 else
2128 memcpy (buf, reg_buf, B_REGISTER_SIZE);
2129 return result_value;
2130 }
2131
2132 gdb_assert_not_reached ("regnum out of bounds");
2133}
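/* Note that each B/H/S/D/Q pseudo register aliases the low-order
   bytes of the corresponding V register, so reading, say, S5 above
   copies the low S_REGISTER_SIZE (4) bytes of V5, while Q5 copies the
   full Q_REGISTER_SIZE (16) bytes.  */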
2134
2135/* Implement the "pseudo_register_write" gdbarch method. */
2136
2137static void
2138aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2139 int regnum, const gdb_byte *buf)
2140{
2141 gdb_byte reg_buf[MAX_REGISTER_SIZE];
2142
2143 /* Ensure the register buffer is zero. We want gdb writes of the
2144 various 'scalar' pseudo registers to behave like architectural
2145 writes: register width bytes are written and the remainder is set
2146 to zero. */
2147 memset (reg_buf, 0, sizeof (reg_buf));
2148
2149 regnum -= gdbarch_num_regs (gdbarch);
2150
2151 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2152 {
2153 /* pseudo Q registers */
2154 unsigned v_regnum;
2155
2156 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2157 memcpy (reg_buf, buf, Q_REGISTER_SIZE);
2158 regcache_raw_write (regcache, v_regnum, reg_buf);
2159 return;
2160 }
2161
2162 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2163 {
2164 /* pseudo D registers */
2165 unsigned v_regnum;
2166
2167 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2168 memcpy (reg_buf, buf, D_REGISTER_SIZE);
2169 regcache_raw_write (regcache, v_regnum, reg_buf);
2170 return;
2171 }
2172
2173 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2174 {
2175 unsigned v_regnum;
2176
2177 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2178 memcpy (reg_buf, buf, S_REGISTER_SIZE);
2179 regcache_raw_write (regcache, v_regnum, reg_buf);
2180 return;
2181 }
2182
2183 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2184 {
2185 /* pseudo H registers */
2186 unsigned v_regnum;
2187
2188 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2189 memcpy (reg_buf, buf, H_REGISTER_SIZE);
2190 regcache_raw_write (regcache, v_regnum, reg_buf);
2191 return;
2192 }
2193
2194 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2195 {
2196 /* pseudo B registers */
2197 unsigned v_regnum;
2198
2199 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2200 memcpy (reg_buf, buf, B_REGISTER_SIZE);
2201 regcache_raw_write (regcache, v_regnum, reg_buf);
2202 return;
2203 }
2204
2205 gdb_assert_not_reached ("regnum out of bounds");
2206}
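/* As a consequence of the zero-filled REG_BUF above, a user command
   such as "set $s0 = 1.5" writes four bytes to the low end of V0 and
   clears the remaining twelve, mirroring what an architectural write
   to the scalar S0 register does to the full vector register.  */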
2207
2208/* Callback function for user_reg_add. */
2209
2210static struct value *
2211value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
2212{
2213 const int *reg_p = (const int *) baton;
2214
2215 return value_of_register (*reg_p, frame);
2216}
2217\f
2218
2219/* Implement the "software_single_step" gdbarch method, needed to
2220 single step through atomic sequences on AArch64. */
2221
2222static int
2223aarch64_software_single_step (struct frame_info *frame)
2224{
2225 struct gdbarch *gdbarch = get_frame_arch (frame);
2226 struct address_space *aspace = get_frame_address_space (frame);
2227 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2228 const int insn_size = 4;
2229 const int atomic_sequence_length = 16; /* Instruction sequence length. */
2230 CORE_ADDR pc = get_frame_pc (frame);
2231 CORE_ADDR breaks[2] = { -1, -1 };
2232 CORE_ADDR loc = pc;
2233 CORE_ADDR closing_insn = 0;
2234 uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
2235 byte_order_for_code);
2236 int index;
2237 int insn_count;
2238 int bc_insn_count = 0; /* Conditional branch instruction count. */
2239 int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed). */
2240 aarch64_inst inst;
2241
2242 if (aarch64_decode_insn (insn, &inst, 1) != 0)
2243 return 0;
2244
2245 /* Look for a Load Exclusive instruction which begins the sequence. */
2246 if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
2247 return 0;
2248
2249 for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
2250 {
2251 loc += insn_size;
2252 insn = read_memory_unsigned_integer (loc, insn_size,
2253 byte_order_for_code);
2254
2255 if (aarch64_decode_insn (insn, &inst, 1) != 0)
2256 return 0;
2257 /* Check if the instruction is a conditional branch. */
2258 if (inst.opcode->iclass == condbranch)
2259 {
2260 gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);
2261
2262 if (bc_insn_count >= 1)
2263 return 0;
2264
2265 /* It is, so we'll try to set a breakpoint at the destination. */
2266 breaks[1] = loc + inst.operands[0].imm.value;
2267
2268 bc_insn_count++;
2269 last_breakpoint++;
2270 }
2271
2272 /* Look for the Store Exclusive which closes the atomic sequence. */
2273 if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
2274 {
2275 closing_insn = loc;
2276 break;
2277 }
2278 }
2279
2280 /* We didn't find a closing Store Exclusive instruction; fall back. */
2281 if (!closing_insn)
2282 return 0;
2283
2284 /* Insert breakpoint after the end of the atomic sequence. */
2285 breaks[0] = loc + insn_size;
2286
2287 /* Check for duplicated breakpoints, and also check that the second
2288 breakpoint is not within the atomic sequence. */
2289 if (last_breakpoint
2290 && (breaks[1] == breaks[0]
2291 || (breaks[1] >= pc && breaks[1] <= closing_insn)))
2292 last_breakpoint = 0;
2293
2294 /* Insert the breakpoint at the end of the sequence, and one at the
2295 destination of the conditional branch, if it exists. */
2296 for (index = 0; index <= last_breakpoint; index++)
2297 insert_single_step_breakpoint (gdbarch, aspace, breaks[index]);
2298
2299 return 1;
2300}
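/* An illustrative example of the kind of sequence handled above:

     again:
       ldaxr  w1, [x0]      ; load exclusive opens the sequence
       add    w1, w1, #1
       stlxr  w2, w1, [x0]  ; store exclusive closes it
       cbnz   w2, again     ; breaks[0] lands here

   A trap taken inside the sequence would clear the exclusive monitor
   and make the store exclusive fail forever, so the breakpoint is
   placed after the closing store exclusive instead.  */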
2301
2302struct displaced_step_closure
2303{
2304 /* Non-zero when a conditional instruction, such as B.COND or TBZ,
2305 is being displaced stepped. */
2306 int cond;
2307
2308 /* PC adjustment offset after displaced stepping. */
2309 int32_t pc_adjust;
2310};
2311
2312/* Data when visiting instructions for displaced stepping. */
2313
2314struct aarch64_displaced_step_data
2315{
2316 struct aarch64_insn_data base;
2317
2318 /* The address at which the instruction will be executed. */
2319 CORE_ADDR new_addr;
2320 /* Buffer of instructions to be copied to NEW_ADDR to execute. */
2321 uint32_t insn_buf[DISPLACED_MODIFIED_INSNS];
2322 /* Number of instructions in INSN_BUF. */
2323 unsigned insn_count;
2324 /* Registers when doing displaced stepping. */
2325 struct regcache *regs;
2326
2327 struct displaced_step_closure *dsc;
2328};
2329
2330/* Implementation of aarch64_insn_visitor method "b". */
2331
2332static void
2333aarch64_displaced_step_b (const int is_bl, const int32_t offset,
2334 struct aarch64_insn_data *data)
2335{
2336 struct aarch64_displaced_step_data *dsd
2337 = (struct aarch64_displaced_step_data *) data;
2338 int32_t new_offset = data->insn_addr - dsd->new_addr + offset;
2339
2340 if (can_encode_int32 (new_offset, 28))
2341 {
2342 /* Emit B rather than BL, because executing BL on a new address
2343 will get the wrong address into LR. In order to avoid this,
2344 we emit B, and update LR if the instruction is BL. */
2345 emit_b (dsd->insn_buf, 0, new_offset);
2346 dsd->insn_count++;
2347 }
2348 else
2349 {
2350 /* Write NOP. */
2351 emit_nop (dsd->insn_buf);
2352 dsd->insn_count++;
2353 dsd->dsc->pc_adjust = offset;
2354 }
2355
2356 if (is_bl)
2357 {
2358 /* Update LR. */
2359 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
2360 data->insn_addr + 4);
2361 }
2362}
2363
2364/* Implementation of aarch64_insn_visitor method "b_cond". */
2365
2366static void
2367aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
2368 struct aarch64_insn_data *data)
2369{
2370 struct aarch64_displaced_step_data *dsd
2371 = (struct aarch64_displaced_step_data *) data;
2372 int32_t new_offset = data->insn_addr - dsd->new_addr + offset;
2373
2374 /* GDB has to fix up the PC after displaced stepping this instruction
2375 differently, depending on whether the condition is true or false.
2376 Instead of checking COND against the condition flags here, we emit
2377 the following instructions and let GDB tell the two cases apart
2378 from the resulting PC value.
2379
2380 B.COND TAKEN ; If cond is true, then jump to TAKEN.
2381 INSN1 ;
2382 TAKEN:
2383 INSN2
2384 */
2385
2386 emit_bcond (dsd->insn_buf, cond, 8);
2387 dsd->dsc->cond = 1;
2388 dsd->dsc->pc_adjust = offset;
2389 dsd->insn_count = 1;
2390}
2391
2392/* Dynamically allocate a new register. If we know the register
2393 statically, we should make it a global as above instead of using this
2394 helper function. */
2395
2396static struct aarch64_register
2397aarch64_register (unsigned num, int is64)
2398{
2399 return (struct aarch64_register) { num, is64 };
2400}
2401
2402/* Implementation of aarch64_insn_visitor method "cb". */
2403
2404static void
2405aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
2406 const unsigned rn, int is64,
2407 struct aarch64_insn_data *data)
2408{
2409 struct aarch64_displaced_step_data *dsd
2410 = (struct aarch64_displaced_step_data *) data;
2411 int32_t new_offset = data->insn_addr - dsd->new_addr + offset;
2412
2413 /* The offset is out of range for a compare and branch
2414 instruction. We can use the following instructions instead:
2415
2416 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
2417 INSN1 ;
2418 TAKEN:
2419 INSN2
2420 */
2421 emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
2422 dsd->insn_count = 1;
2423 dsd->dsc->cond = 1;
2424 dsd->dsc->pc_adjust = offset;
2425}
2426
2427/* Implementation of aarch64_insn_visitor method "tb". */
2428
2429static void
2430aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
2431 const unsigned rt, unsigned bit,
2432 struct aarch64_insn_data *data)
2433{
2434 struct aarch64_displaced_step_data *dsd
2435 = (struct aarch64_displaced_step_data *) data;
2436 int32_t new_offset = data->insn_addr - dsd->new_addr + offset;
2437
2438 /* The offset is out of range for a test bit and branch
2439 instruction. We can use the following instructions instead:
2440
2441 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
2442 INSN1 ;
2443 TAKEN:
2444 INSN2
2445
2446 */
2447 emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
2448 dsd->insn_count = 1;
2449 dsd->dsc->cond = 1;
2450 dsd->dsc->pc_adjust = offset;
2451}
2452
2453/* Implementation of aarch64_insn_visitor method "adr". */
2454
2455static void
2456aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
2457 const int is_adrp, struct aarch64_insn_data *data)
2458{
2459 struct aarch64_displaced_step_data *dsd
2460 = (struct aarch64_displaced_step_data *) data;
2461 /* We know exactly the address the ADR{P,} instruction will compute.
2462 We can just write it to the destination register. */
2463 CORE_ADDR address = data->insn_addr + offset;
2464
2465 if (is_adrp)
2466 {
2467 /* Clear the lower 12 bits of the offset to get the 4K page. */
2468 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2469 address & ~0xfff);
2470 }
2471 else
2472 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2473 address);
2474
2475 dsd->dsc->pc_adjust = 4;
2476 emit_nop (dsd->insn_buf);
2477 dsd->insn_count = 1;
2478}
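/* For example, with INSN_ADDR 0x400abc and OFFSET 0x1000, an ADRP
   writes (0x400abc + 0x1000) & ~0xfff == 0x401000 to the destination
   register, while a plain ADR would write 0x401abc.  */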
2479
2480/* Implementation of aarch64_insn_visitor method "ldr_literal". */
2481
2482static void
2483aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
2484 const unsigned rt, const int is64,
2485 struct aarch64_insn_data *data)
2486{
2487 struct aarch64_displaced_step_data *dsd
2488 = (struct aarch64_displaced_step_data *) data;
2489 CORE_ADDR address = data->insn_addr + offset;
2490 struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };
2491
2492 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
2493 address);
2494
2495 if (is_sw)
2496 dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
2497 aarch64_register (rt, 1), zero);
2498 else
2499 dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
2500 aarch64_register (rt, 1), zero);
2501
2502 dsd->dsc->pc_adjust = 4;
2503}
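/* In other words, a PC-relative "ldr x2, <literal>" is rewritten in
   the scratch pad as: load the literal's absolute address into X2,
   then execute "ldr x2, [x2, #0]", reusing the destination register
   as the base register.  */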
2504
2505/* Implementation of aarch64_insn_visitor method "others". */
2506
2507static void
2508aarch64_displaced_step_others (const uint32_t insn,
2509 struct aarch64_insn_data *data)
2510{
2511 struct aarch64_displaced_step_data *dsd
2512 = (struct aarch64_displaced_step_data *) data;
2513
2514 aarch64_emit_insn (dsd->insn_buf, insn);
2515 dsd->insn_count = 1;
2516
2517 if ((insn & 0xfffffc1f) == 0xd65f0000)
2518 {
2519 /* RET */
2520 dsd->dsc->pc_adjust = 0;
2521 }
2522 else
2523 dsd->dsc->pc_adjust = 4;
2524}
2525
2526static const struct aarch64_insn_visitor visitor =
2527{
2528 aarch64_displaced_step_b,
2529 aarch64_displaced_step_b_cond,
2530 aarch64_displaced_step_cb,
2531 aarch64_displaced_step_tb,
2532 aarch64_displaced_step_adr,
2533 aarch64_displaced_step_ldr_literal,
2534 aarch64_displaced_step_others,
2535};
2536
2537/* Implement the "displaced_step_copy_insn" gdbarch method. */
2538
2539struct displaced_step_closure *
2540aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
2541 CORE_ADDR from, CORE_ADDR to,
2542 struct regcache *regs)
2543{
2544 struct displaced_step_closure *dsc = NULL;
2545 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2546 uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
2547 struct aarch64_displaced_step_data dsd;
2548 aarch64_inst inst;
2549
2550 if (aarch64_decode_insn (insn, &inst, 1) != 0)
2551 return NULL;
2552
2553 /* Look for a Load Exclusive instruction which begins the sequence. */
2554 if (inst.opcode->iclass == ldstexcl && bit (insn, 22))
2555 {
2556 /* We can't displaced step atomic sequences. */
2557 return NULL;
2558 }
2559
2560 dsc = XCNEW (struct displaced_step_closure);
2561 dsd.base.insn_addr = from;
2562 dsd.new_addr = to;
2563 dsd.regs = regs;
2564 dsd.dsc = dsc;
2565 dsd.insn_count = 0;
2566 aarch64_relocate_instruction (insn, &visitor,
2567 (struct aarch64_insn_data *) &dsd);
2568 gdb_assert (dsd.insn_count <= DISPLACED_MODIFIED_INSNS);
2569
2570 if (dsd.insn_count != 0)
2571 {
2572 int i;
2573
2574 /* Instruction can be relocated to scratch pad. Copy
2575 relocated instruction(s) there. */
2576 for (i = 0; i < dsd.insn_count; i++)
2577 {
2578 if (debug_displaced)
2579 {
2580 debug_printf ("displaced: writing insn ");
2581 debug_printf ("%.8x", dsd.insn_buf[i]);
2582 debug_printf (" at %s\n", paddress (gdbarch, to + i * 4));
2583 }
2584 write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
2585 (ULONGEST) dsd.insn_buf[i]);
2586 }
2587 }
2588 else
2589 {
2590 xfree (dsc);
2591 dsc = NULL;
2592 }
2593
2594 return dsc;
2595}
2596
2597/* Implement the "displaced_step_fixup" gdbarch method. */
2598
2599void
2600aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
2601 struct displaced_step_closure *dsc,
2602 CORE_ADDR from, CORE_ADDR to,
2603 struct regcache *regs)
2604{
2605 if (dsc->cond)
2606 {
2607 ULONGEST pc;
2608
2609 regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc);
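/* The relocated code was laid out by the visitor methods as

     TO + 0: B.COND TAKEN (or the CBZ/TBZ equivalent)
     TO + 4: <not-taken continuation>
     TO + 8: TAKEN

   so the distance of PC from TO tells us whether the condition
   held.  */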
2610 if (pc - to == 8)
2611 {
2612 /* Condition is true. */
2613 }
2614 else if (pc - to == 4)
2615 {
2616 /* Condition is false. */
2617 dsc->pc_adjust = 4;
2618 }
2619 else
2620 gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
2621 }
2622
2623 if (dsc->pc_adjust != 0)
2624 {
2625 if (debug_displaced)
2626 {
2627 debug_printf ("displaced: fixup: set PC to %s:%d\n",
2628 paddress (gdbarch, from), dsc->pc_adjust);
2629 }
2630 regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
2631 from + dsc->pc_adjust);
2632 }
2633}
2634
2635/* Implement the "displaced_step_hw_singlestep" gdbarch method. */
2636
2637int
2638aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch,
2639 struct displaced_step_closure *closure)
2640{
2641 return 1;
2642}
2643
2644/* Initialize the current architecture based on INFO. If possible,
2645 re-use an architecture from ARCHES, which is a list of
2646 architectures already created during this debugging session.
2647
2648 Called e.g. at program startup, when reading a core file, and when
2649 reading a binary file. */
2650
2651static struct gdbarch *
2652aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
2653{
2654 struct gdbarch_tdep *tdep;
2655 struct gdbarch *gdbarch;
2656 struct gdbarch_list *best_arch;
2657 struct tdesc_arch_data *tdesc_data = NULL;
2658 const struct target_desc *tdesc = info.target_desc;
2659 int i;
2660 int have_fpa_registers = 1;
2661 int valid_p = 1;
2662 const struct tdesc_feature *feature;
2663 int num_regs = 0;
2664 int num_pseudo_regs = 0;
2665
2666 /* Ensure we always have a target descriptor. */
2667 if (!tdesc_has_registers (tdesc))
2668 tdesc = tdesc_aarch64;
2669
2670 gdb_assert (tdesc);
2671
2672 feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");
2673
2674 if (feature == NULL)
2675 return NULL;
2676
2677 tdesc_data = tdesc_data_alloc ();
2678
2679 /* Validate the descriptor provides the mandatory core R registers
2680 and allocate their numbers. */
2681 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
2682 valid_p &=
2683 tdesc_numbered_register (feature, tdesc_data, AARCH64_X0_REGNUM + i,
2684 aarch64_r_register_names[i]);
2685
2686 num_regs = AARCH64_X0_REGNUM + i;
2687
2688 /* Look for the V registers. */
2689 feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
2690 if (feature)
2691 {
2692 /* Validate the descriptor provides the mandatory V registers
2693 and allocate their numbers. */
2694 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
2695 valid_p &=
2696 tdesc_numbered_register (feature, tdesc_data, AARCH64_V0_REGNUM + i,
2697 aarch64_v_register_names[i]);
2698
2699 num_regs = AARCH64_V0_REGNUM + i;
2700
2701 num_pseudo_regs += 32; /* add the Qn scalar register pseudos */
2702 num_pseudo_regs += 32; /* add the Dn scalar register pseudos */
2703 num_pseudo_regs += 32; /* add the Sn scalar register pseudos */
2704 num_pseudo_regs += 32; /* add the Hn scalar register pseudos */
2705 num_pseudo_regs += 32; /* add the Bn scalar register pseudos */
2706 }
2707
2708 if (!valid_p)
2709 {
2710 tdesc_data_cleanup (tdesc_data);
2711 return NULL;
2712 }
2713
2714 /* AArch64 code is always little-endian. */
2715 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
2716
2717 /* If there is already a candidate, use it. */
2718 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
2719 best_arch != NULL;
2720 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
2721 {
2722 /* Found a match. */
2723 break;
2724 }
2725
2726 if (best_arch != NULL)
2727 {
2728 if (tdesc_data != NULL)
2729 tdesc_data_cleanup (tdesc_data);
2730 return best_arch->gdbarch;
2731 }
2732
2733 tdep = XCNEW (struct gdbarch_tdep);
2734 gdbarch = gdbarch_alloc (&info, tdep);
2735
2736 /* This should be low enough for everything. */
2737 tdep->lowest_pc = 0x20;
2738 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
2739 tdep->jb_elt_size = 8;
2740
2741 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
2742 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
2743
2744 /* Frame handling. */
2745 set_gdbarch_dummy_id (gdbarch, aarch64_dummy_id);
2746 set_gdbarch_unwind_pc (gdbarch, aarch64_unwind_pc);
2747 set_gdbarch_unwind_sp (gdbarch, aarch64_unwind_sp);
2748
2749 /* Advance PC across function entry code. */
2750 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
2751
2752 /* The stack grows downward. */
2753 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
2754
2755 /* Breakpoint manipulation. */
2756 set_gdbarch_breakpoint_from_pc (gdbarch, aarch64_breakpoint_from_pc);
2757 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
2758 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
2759
2760 /* Information about registers, etc. */
2761 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
2762 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
2763 set_gdbarch_num_regs (gdbarch, num_regs);
2764
2765 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
2766 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
2767 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
2768 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
2769 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
2770 set_tdesc_pseudo_register_reggroup_p (gdbarch,
2771 aarch64_pseudo_register_reggroup_p);
2772
2773 /* ABI */
2774 set_gdbarch_short_bit (gdbarch, 16);
2775 set_gdbarch_int_bit (gdbarch, 32);
2776 set_gdbarch_float_bit (gdbarch, 32);
2777 set_gdbarch_double_bit (gdbarch, 64);
2778 set_gdbarch_long_double_bit (gdbarch, 128);
2779 set_gdbarch_long_bit (gdbarch, 64);
2780 set_gdbarch_long_long_bit (gdbarch, 64);
2781 set_gdbarch_ptr_bit (gdbarch, 64);
2782 set_gdbarch_char_signed (gdbarch, 0);
2783 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
2784 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
2785 set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);
2786
2787 /* Internal <-> external register number maps. */
2788 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
2789
2790 /* Returning results. */
2791 set_gdbarch_return_value (gdbarch, aarch64_return_value);
2792
2793 /* Disassembly. */
2794 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
2795
2796 /* Virtual tables. */
2797 set_gdbarch_vbit_in_delta (gdbarch, 1);
2798
2799 /* Hook in the ABI-specific overrides, if they have been registered. */
2800 info.target_desc = tdesc;
2801 info.tdep_info = (void *) tdesc_data;
2802 gdbarch_init_osabi (info, gdbarch);
2803
2804 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
2805
2806 /* Add some default predicates. */
2807 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
2808 dwarf2_append_unwinders (gdbarch);
2809 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
2810
2811 frame_base_set_default (gdbarch, &aarch64_normal_base);
2812
2813 /* Now we have tuned the configuration, set a few final things,
2814 based on what the OS ABI has told us. */
2815
2816 if (tdep->jb_pc >= 0)
2817 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
2818
2819 set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);
2820
2821 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
2822
2823 /* Add standard register aliases. */
2824 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
2825 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
2826 value_of_aarch64_user_reg,
2827 &aarch64_register_aliases[i].regnum);
2828
2829 return gdbarch;
2830}
2831
2832static void
2833aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
2834{
2835 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2836
2837 if (tdep == NULL)
2838 return;
2839
2840 fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
2841 paddress (gdbarch, tdep->lowest_pc));
2842}
2843
2844/* Suppress warning from -Wmissing-prototypes. */
2845extern initialize_file_ftype _initialize_aarch64_tdep;
2846
2847void
2848_initialize_aarch64_tdep (void)
2849{
2850 gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
2851 aarch64_dump_tdep);
2852
2853 initialize_tdesc_aarch64 ();
2854
2855 /* Debug this file's internals. */
2856 add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
2857Set AArch64 debugging."), _("\
2858Show AArch64 debugging."), _("\
2859When on, AArch64 specific debugging is enabled."),
2860 NULL,
2861 show_aarch64_debug,
2862 &setdebuglist, &showdebuglist);
2863}
2864
2865/* AArch64 process record-replay related structures, defines etc. */
2866
2867#define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
2868 do \
2869 { \
2870 unsigned int reg_len = LENGTH; \
2871 if (reg_len) \
2872 { \
2873 REGS = XNEWVEC (uint32_t, reg_len); \
2874 memcpy(&REGS[0], &RECORD_BUF[0], sizeof(uint32_t)*LENGTH); \
2875 } \
2876 } \
2877 while (0)
2878
2879#define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
2880 do \
2881 { \
2882 unsigned int mem_len = LENGTH; \
2883 if (mem_len) \
2884 { \
2885 MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
2886 memcpy(&MEMS->len, &RECORD_BUF[0], \
2887 sizeof(struct aarch64_mem_r) * LENGTH); \
2888 } \
2889 } \
2890 while (0)
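/* Typical use, as in the record handlers below (sketch): a handler
   collects the numbers of the registers an instruction will modify,

     uint32_t record_buf[4];
     record_buf[0] = reg_rd;
     aarch64_insn_r->reg_rec_count = 1;
     REG_ALLOC (aarch64_insn_r->aarch64_regs,
                aarch64_insn_r->reg_rec_count, record_buf);

   while MEM_ALLOC snapshots (length, address) pairs describing the
   memory bytes the instruction will overwrite.  */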
2891
2892/* AArch64 record/replay structures and enumerations. */
2893
2894struct aarch64_mem_r
2895{
2896 uint64_t len; /* Record length. */
2897 uint64_t addr; /* Memory address. */
2898};
2899
2900enum aarch64_record_result
2901{
2902 AARCH64_RECORD_SUCCESS,
2903 AARCH64_RECORD_FAILURE,
2904 AARCH64_RECORD_UNSUPPORTED,
2905 AARCH64_RECORD_UNKNOWN
2906};
2907
2908typedef struct insn_decode_record_t
2909{
2910 struct gdbarch *gdbarch;
2911 struct regcache *regcache;
2912 CORE_ADDR this_addr; /* Address of insn to be recorded. */
2913 uint32_t aarch64_insn; /* Insn to be recorded. */
2914 uint32_t mem_rec_count; /* Count of memory records. */
2915 uint32_t reg_rec_count; /* Count of register records. */
2916 uint32_t *aarch64_regs; /* Registers to be recorded. */
2917 struct aarch64_mem_r *aarch64_mems; /* Memory locations to be recorded. */
2918} insn_decode_record;
2919
2920/* Record handler for data processing - register instructions. */
2921
2922static unsigned int
2923aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
2924{
2925 uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
2926 uint32_t record_buf[4];
2927
2928 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
2929 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
2930 insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);
2931
2932 if (!bit (aarch64_insn_r->aarch64_insn, 28))
2933 {
2934 uint8_t setflags;
2935
2936 /* Logical (shifted register). */
2937 if (insn_bits24_27 == 0x0a)
2938 setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
2939 /* Add/subtract. */
2940 else if (insn_bits24_27 == 0x0b)
2941 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
2942 else
2943 return AARCH64_RECORD_UNKNOWN;
2944
2945 record_buf[0] = reg_rd;
2946 aarch64_insn_r->reg_rec_count = 1;
2947 if (setflags)
2948 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
2949 }
2950 else
2951 {
2952 if (insn_bits24_27 == 0x0b)
2953 {
2954 /* Data-processing (3 source). */
2955 record_buf[0] = reg_rd;
2956 aarch64_insn_r->reg_rec_count = 1;
2957 }
2958 else if (insn_bits24_27 == 0x0a)
2959 {
2960 if (insn_bits21_23 == 0x00)
2961 {
2962 /* Add/subtract (with carry). */
2963 record_buf[0] = reg_rd;
2964 aarch64_insn_r->reg_rec_count = 1;
2965 if (bit (aarch64_insn_r->aarch64_insn, 29))
2966 {
2967 record_buf[1] = AARCH64_CPSR_REGNUM;
2968 aarch64_insn_r->reg_rec_count = 2;
2969 }
2970 }
2971 else if (insn_bits21_23 == 0x02)
2972 {
2973 /* Conditional compare (register) and conditional compare
2974 (immediate) instructions. */
2975 record_buf[0] = AARCH64_CPSR_REGNUM;
2976 aarch64_insn_r->reg_rec_count = 1;
2977 }
2978 else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
2979 {
2980 /* Conditional select. */
2981 /* Data-processing (2 source). */
2982 /* Data-processing (1 source). */
2983 record_buf[0] = reg_rd;
2984 aarch64_insn_r->reg_rec_count = 1;
2985 }
2986 else
2987 return AARCH64_RECORD_UNKNOWN;
2988 }
2989 }
2990
2991 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
2992 record_buf);
2993 return AARCH64_RECORD_SUCCESS;
2994}
2995
2996/* Record handler for data processing - immediate instructions. */
2997
2998static unsigned int
2999aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
3000{
3001 uint8_t reg_rd, insn_bit28, insn_bit23, insn_bits24_27, setflags;
3002 uint32_t record_buf[4];
3003
3004 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3005 insn_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
3006 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3007 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3008
3009 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
3010 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
3011 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
3012 {
3013 record_buf[0] = reg_rd;
3014 aarch64_insn_r->reg_rec_count = 1;
3015 }
3016 else if (insn_bits24_27 == 0x01)
3017 {
3018 /* Add/Subtract (immediate). */
3019 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3020 record_buf[0] = reg_rd;
3021 aarch64_insn_r->reg_rec_count = 1;
3022 if (setflags)
3023 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3024 }
3025 else if (insn_bits24_27 == 0x02 && !insn_bit23)
3026 {
3027 /* Logical (immediate). */
3028 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
3029 record_buf[0] = reg_rd;
3030 aarch64_insn_r->reg_rec_count = 1;
3031 if (setflags)
3032 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3033 }
3034 else
3035 return AARCH64_RECORD_UNKNOWN;
3036
3037 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3038 record_buf);
3039 return AARCH64_RECORD_SUCCESS;
3040}
3041
3042/* Record handler for branch, exception generation and system instructions. */
3043
3044static unsigned int
3045aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
3046{
3047 struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
3048 uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
3049 uint32_t record_buf[4];
3050
3051 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3052 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3053 insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3054
3055 if (insn_bits28_31 == 0x0d)
3056 {
3057 /* Exception generation instructions. */
3058 if (insn_bits24_27 == 0x04)
3059 {
3060 if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
3061 && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
3062 && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
3063 {
3064 ULONGEST svc_number;
3065
3066 regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
3067 &svc_number);
3068 return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
3069 svc_number);
3070 }
3071 else
3072 return AARCH64_RECORD_UNSUPPORTED;
3073 }
3074 /* System instructions. */
3075 else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
3076 {
3077 uint32_t reg_rt, reg_crn;
3078
3079 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3080 reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3081
3082 /* Record rt in case of sysl and mrs instructions. */
3083 if (bit (aarch64_insn_r->aarch64_insn, 21))
3084 {
3085 record_buf[0] = reg_rt;
3086 aarch64_insn_r->reg_rec_count = 1;
3087 }
3088 /* Record cpsr for hint and msr(immediate) instructions. */
3089 else if (reg_crn == 0x02 || reg_crn == 0x04)
3090 {
3091 record_buf[0] = AARCH64_CPSR_REGNUM;
3092 aarch64_insn_r->reg_rec_count = 1;
3093 }
3094 }
3095 /* Unconditional branch (register). */
3096 else if ((insn_bits24_27 & 0x0e) == 0x06)
3097 {
3098 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3099 if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
3100 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3101 }
3102 else
3103 return AARCH64_RECORD_UNKNOWN;
3104 }
3105 /* Unconditional branch (immediate). */
3106 else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
3107 {
3108 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3109 if (bit (aarch64_insn_r->aarch64_insn, 31))
3110 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3111 }
3112 else
3113 /* Compare & branch (immediate), Test & branch (immediate) and
3114 Conditional branch (immediate). */
3115 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3116
3117 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3118 record_buf);
3119 return AARCH64_RECORD_SUCCESS;
3120}
3121
3122/* Record handler for advanced SIMD load and store instructions. */
3123
3124static unsigned int
3125aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
3126{
3127 CORE_ADDR address;
3128 uint64_t addr_offset = 0;
3129 uint32_t record_buf[24];
3130 uint64_t record_buf_mem[24];
3131 uint32_t reg_rn, reg_rt;
3132 uint32_t reg_index = 0, mem_index = 0;
3133 uint8_t opcode_bits, size_bits;
3134
3135 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3136 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3137 size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3138 opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3139 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);
3140
3141 if (record_debug)
3142 debug_printf ("Process record: Advanced SIMD load/store\n");
3143
3144 /* Load/store single structure. */
3145 if (bit (aarch64_insn_r->aarch64_insn, 24))
3146 {
3147 uint8_t sindex, scale, selem, esize, replicate = 0;
3148 scale = opcode_bits >> 2;
3149 selem = ((opcode_bits & 0x02) |
3150 bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
3151 switch (scale)
3152 {
3153 case 1:
3154 if (size_bits & 0x01)
3155 return AARCH64_RECORD_UNKNOWN;
3156 break;
3157 case 2:
3158 if ((size_bits >> 1) & 0x01)
3159 return AARCH64_RECORD_UNKNOWN;
3160 if (size_bits & 0x01)
3161 {
3162 if (!((opcode_bits >> 1) & 0x01))
3163 scale = 3;
3164 else
3165 return AARCH64_RECORD_UNKNOWN;
3166 }
3167 break;
3168 case 3:
3169 if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
3170 {
3171 scale = size_bits;
3172 replicate = 1;
3173 break;
3174 }
3175 else
3176 return AARCH64_RECORD_UNKNOWN;
3177 default:
3178 break;
3179 }
3180 esize = 8 << scale;
3181 if (replicate)
3182 for (sindex = 0; sindex < selem; sindex++)
3183 {
3184 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3185 reg_rt = (reg_rt + 1) % 32;
3186 }
3187 else
3188 {
3189 for (sindex = 0; sindex < selem; sindex++)
3190 if (bit (aarch64_insn_r->aarch64_insn, 22))
3191 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3192 else
3193 {
3194 record_buf_mem[mem_index++] = esize / 8;
3195 record_buf_mem[mem_index++] = address + addr_offset;
3196 }
3197 addr_offset = addr_offset + (esize / 8);
3198 reg_rt = (reg_rt + 1) % 32;
3199 }
3200 }
3201 /* Load/store multiple structure. */
3202 else
3203 {
3204 uint8_t selem, esize, rpt, elements;
3205 uint8_t eindex, rindex;
3206
3207 esize = 8 << size_bits;
3208 if (bit (aarch64_insn_r->aarch64_insn, 30))
3209 elements = 128 / esize;
3210 else
3211 elements = 64 / esize;
3212
3213 switch (opcode_bits)
3214 {
3215 /* LD/ST4 (4 Registers). */
3216 case 0:
3217 rpt = 1;
3218 selem = 4;
3219 break;
3220 /* LD/ST1 (4 Registers). */
3221 case 2:
3222 rpt = 4;
3223 selem = 1;
3224 break;
3225 /* LD/ST3 (3 Registers). */
3226 case 4:
3227 rpt = 1;
3228 selem = 3;
3229 break;
3230 /* LD/ST1 (3 Registers). */
3231 case 6:
3232 rpt = 3;
3233 selem = 1;
3234 break;
3235 /* LD/ST1 (1 Register). */
3236 case 7:
3237 rpt = 1;
3238 selem = 1;
3239 break;
3240 /* LD/ST2 (2 Registers). */
3241 case 8:
3242 rpt = 1;
3243 selem = 2;
3244 break;
3245 /* LD/ST1 (2 Registers). */
3246 case 10:
3247 rpt = 2;
3248 selem = 1;
3249 break;
3250 default:
3251 return AARCH64_RECORD_UNSUPPORTED;
3252 break;
3253 }
3254 for (rindex = 0; rindex < rpt; rindex++)
3255 for (eindex = 0; eindex < elements; eindex++)
3256 {
3257 uint8_t reg_tt, sindex;
3258 reg_tt = (reg_rt + rindex) % 32;
3259 for (sindex = 0; sindex < selem; sindex++)
3260 {
3261 if (bit (aarch64_insn_r->aarch64_insn, 22))
3262 record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
3263 else
3264 {
3265 record_buf_mem[mem_index++] = esize / 8;
3266 record_buf_mem[mem_index++] = address + addr_offset;
3267 }
3268 addr_offset = addr_offset + (esize / 8);
3269 reg_tt = (reg_tt + 1) % 32;
3270 }
3271 }
3272 }
3273
3274 if (bit (aarch64_insn_r->aarch64_insn, 23))
3275 record_buf[reg_index++] = reg_rn;
3276
3277 aarch64_insn_r->reg_rec_count = reg_index;
3278 aarch64_insn_r->mem_rec_count = mem_index / 2;
3279 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3280 record_buf_mem);
3281 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3282 record_buf);
3283 return AARCH64_RECORD_SUCCESS;
3284}
3285
3286/* Record handler for load and store instructions. */
3287
3288static unsigned int
3289aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
3290{
3291 uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
3292 uint8_t insn_bit23, insn_bit21;
3293 uint8_t opc, size_bits, ld_flag, vector_flag;
3294 uint32_t reg_rn, reg_rt, reg_rt2;
3295 uint64_t datasize, offset;
3296 uint32_t record_buf[8];
3297 uint64_t record_buf_mem[8];
3298 CORE_ADDR address;
3299
3300 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3301 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3302 insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
3303 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3304 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3305 ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
3306 vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
3307 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3308 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3309 reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
3310 size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);
3311
3312 /* Load/store exclusive. */
3313 if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
3314 {
3315 if (record_debug)
3316 debug_printf ("Process record: load/store exclusive\n");
3317
3318 if (ld_flag)
3319 {
3320 record_buf[0] = reg_rt;
3321 aarch64_insn_r->reg_rec_count = 1;
3322 if (insn_bit21)
3323 {
3324 record_buf[1] = reg_rt2;
3325 aarch64_insn_r->reg_rec_count = 2;
3326 }
3327 }
3328 else
3329 {
3330 if (insn_bit21)
3331 datasize = (8 << size_bits) * 2;
3332 else
3333 datasize = (8 << size_bits);
3334 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3335 &address);
3336 record_buf_mem[0] = datasize / 8;
3337 record_buf_mem[1] = address;
3338 aarch64_insn_r->mem_rec_count = 1;
3339 if (!insn_bit23)
3340 {
3341 /* Save register rs. */
3342 record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
3343 aarch64_insn_r->reg_rec_count = 1;
3344 }
3345 }
3346 }
3347 /* Load register (literal) instructions decoding. */
3348 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
3349 {
3350 if (record_debug)
3351 debug_printf ("Process record: load register (literal)\n");
3352 if (vector_flag)
3353 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3354 else
3355 record_buf[0] = reg_rt;
3356 aarch64_insn_r->reg_rec_count = 1;
3357 }
3358 /* All types of load/store pair instructions decoding. */
3359 else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
3360 {
3361 if (record_debug)
3362 debug_printf ("Process record: load/store pair\n");
3363
3364 if (ld_flag)
3365 {
3366 if (vector_flag)
3367 {
3368 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3369 record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
3370 }
3371 else
3372 {
3373 record_buf[0] = reg_rt;
3374 record_buf[1] = reg_rt2;
3375 }
3376 aarch64_insn_r->reg_rec_count = 2;
3377 }
3378 else
3379 {
3380 uint16_t imm7_off;
3381 imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
3382 if (!vector_flag)
3383 size_bits = size_bits >> 1;
3384 datasize = 8 << (2 + size_bits);
3385 offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
3386 offset = offset << (2 + size_bits);
3387 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3388 &address);
3389 if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
3390 {
3391 if (imm7_off & 0x40)
3392 address = address - offset;
3393 else
3394 address = address + offset;
3395 }
3396
3397 record_buf_mem[0] = datasize / 8;
3398 record_buf_mem[1] = address;
3399 record_buf_mem[2] = datasize / 8;
3400 record_buf_mem[3] = address + (datasize / 8);
3401 aarch64_insn_r->mem_rec_count = 2;
3402 }
3403 if (bit (aarch64_insn_r->aarch64_insn, 23))
3404 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3405 }
3406 /* Load/store register (unsigned immediate) instructions. */
3407 else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
3408 {
3409 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3410 if (!(opc >> 1))
3411 if (opc & 0x01)
3412 ld_flag = 0x01;
3413 else
3414 ld_flag = 0x0;
3415 else
3416 if (size_bits != 0x03)
3417 ld_flag = 0x01;
3418 else
3419 return AARCH64_RECORD_UNKNOWN;
3420
3421 if (record_debug)
3422 {
3423 debug_printf ("Process record: load/store (unsigned immediate):"
3424 " size %x V %d opc %x\n", size_bits, vector_flag,
3425 opc);
3426 }
3427
3428 if (!ld_flag)
3429 {
3430 offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
3431 datasize = 8 << size_bits;
3432 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3433 &address);
3434 offset = offset << size_bits;
3435 address = address + offset;
3436
3437 record_buf_mem[0] = datasize >> 3;
3438 record_buf_mem[1] = address;
3439 aarch64_insn_r->mem_rec_count = 1;
3440 }
3441 else
3442 {
3443 if (vector_flag)
3444 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3445 else
3446 record_buf[0] = reg_rt;
3447 aarch64_insn_r->reg_rec_count = 1;
3448 }
3449 }
3450 /* Load/store register (register offset) instructions. */
3451 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3452 && insn_bits10_11 == 0x02 && insn_bit21)
3453 {
3454 if (record_debug)
3455 debug_printf ("Process record: load/store (register offset)\n");
3456 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3457 if (!(opc >> 1))
3458 if (opc & 0x01)
3459 ld_flag = 0x01;
3460 else
3461 ld_flag = 0x0;
3462 else
3463 if (size_bits != 0x03)
3464 ld_flag = 0x01;
3465 else
3466 return AARCH64_RECORD_UNKNOWN;
3467
3468 if (!ld_flag)
3469 {
3470 uint64_t reg_rm_val;
3471 regcache_raw_read_unsigned (aarch64_insn_r->regcache,
3472 bits (aarch64_insn_r->aarch64_insn, 16, 20), &reg_rm_val);
3473 if (bit (aarch64_insn_r->aarch64_insn, 12))
3474 offset = reg_rm_val << size_bits;
3475 else
3476 offset = reg_rm_val;
3477 datasize = 8 << size_bits;
3478 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3479 &address);
3480 address = address + offset;
3481 record_buf_mem[0] = datasize >> 3;
3482 record_buf_mem[1] = address;
3483 aarch64_insn_r->mem_rec_count = 1;
3484 }
3485 else
3486 {
3487 if (vector_flag)
3488 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3489 else
3490 record_buf[0] = reg_rt;
3491 aarch64_insn_r->reg_rec_count = 1;
3492 }
3493 }
3494 /* Load/store register (immediate and unprivileged) instructions. */
3495 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3496 && !insn_bit21)
3497 {
3498 if (record_debug)
3499 {
3500 debug_printf ("Process record: load/store "
3501 "(immediate and unprivileged)\n");
3502 }
3503 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3504 if (!(opc >> 1))
3505 if (opc & 0x01)
3506 ld_flag = 0x01;
3507 else
3508 ld_flag = 0x0;
3509 else
3510 if (size_bits != 0x03)
3511 ld_flag = 0x01;
3512 else
3513 return AARCH64_RECORD_UNKNOWN;
3514
3515 if (!ld_flag)
3516 {
3517 uint16_t imm9_off;
3518 imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
3519 offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
3520 datasize = 8 << size_bits;
3521 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3522 &address);
3523 if (insn_bits10_11 != 0x01)
3524 {
3525 if (imm9_off & 0x0100)
3526 address = address - offset;
3527 else
3528 address = address + offset;
3529 }
3530 record_buf_mem[0] = datasize >> 3;
3531 record_buf_mem[1] = address;
3532 aarch64_insn_r->mem_rec_count = 1;
3533 }
3534 else
3535 {
3536 if (vector_flag)
3537 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3538 else
3539 record_buf[0] = reg_rt;
3540 aarch64_insn_r->reg_rec_count = 1;
3541 }
3542 if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
3543 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3544 }
3545 /* Advanced SIMD load/store instructions. */
3546 else
3547 return aarch64_record_asimd_load_store (aarch64_insn_r);
3548
3549 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3550 record_buf_mem);
3551 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3552 record_buf);
3553 return AARCH64_RECORD_SUCCESS;
3554}
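/* For example, "str x1, [x2, #8]" takes the unsigned-immediate path
   above: SIZE_BITS is 3, so one memory record of datasize / 8 == 8
   bytes at address X2 + 8 is emitted, letting reverse execution
   restore those bytes later.  */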
3555
3556/* Record handler for data processing SIMD and floating point instructions. */
3557
3558static unsigned int
3559aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
3560{
3561 uint8_t insn_bit21, opcode, rmode, reg_rd;
3562 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
3563 uint8_t insn_bits11_14;
3564 uint32_t record_buf[2];
3565
3566 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3567 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3568 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3569 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3570 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
3571 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
3572 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
3573 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3574 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3575
3576 if (record_debug)
3577 debug_printf ("Process record: data processing SIMD/FP: ");
3578
3579 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
3580 {
3581 /* Floating point - fixed point conversion instructions. */
3582 if (!insn_bit21)
3583 {
3584 if (record_debug)
3585 debug_printf ("FP - fixed point conversion");
3586
3587 if ((opcode >> 1) == 0x0 && rmode == 0x03)
3588 record_buf[0] = reg_rd;
3589 else
3590 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3591 }
3592 /* Floating point - conditional compare instructions. */
3593 else if (insn_bits10_11 == 0x01)
3594 {
3595 if (record_debug)
3596 debug_printf ("FP - conditional compare");
3597
3598 record_buf[0] = AARCH64_CPSR_REGNUM;
3599 }
3600 /* Floating point - data processing (2-source) and
3601 conditional select instructions. */
3602 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
3603 {
3604 if (record_debug)
3605 debug_printf ("FP - DP (2-source)");
3606
3607 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3608 }
3609 else if (insn_bits10_11 == 0x00)
3610 {
3611 /* Floating point - immediate instructions. */
3612 if ((insn_bits12_15 & 0x01) == 0x01
3613 || (insn_bits12_15 & 0x07) == 0x04)
3614 {
3615 if (record_debug)
3616 debug_printf ("FP - immediate");
3617 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3618 }
3619 /* Floating point - compare instructions. */
3620 else if ((insn_bits12_15 & 0x03) == 0x02)
3621 {
3622 if (record_debug)
3623 debug_printf ("FP - immediate");
3624 record_buf[0] = AARCH64_CPSR_REGNUM;
3625 }
3626 /* Floating point - integer conversions instructions. */
3627 else if (insn_bits12_15 == 0x00)
3628 {
3629 /* Convert float to integer instruction. */
3630 if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
3631 {
3632 if (record_debug)
3633 debug_printf ("float to int conversion");
3634
3635 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3636 }
3637 /* Convert integer to float instruction. */
3638 else if ((opcode >> 1) == 0x01 && !rmode)
3639 {
3640 if (record_debug)
3641 debug_printf ("int to float conversion");
3642
3643 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3644 }
3645 /* Move float to integer instruction. */
3646 else if ((opcode >> 1) == 0x03)
3647 {
3648 if (record_debug)
3649 debug_printf ("move float to int");
3650
3651 if (!(opcode & 0x01))
3652 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3653 else
3654 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3655 }
3656 else
3657 return AARCH64_RECORD_UNKNOWN;
3658 }
3659 else
3660 return AARCH64_RECORD_UNKNOWN;
3661 }
3662 else
3663 return AARCH64_RECORD_UNKNOWN;
3664 }
3665 else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
3666 {
3667 if (record_debug)
 3668	debug_printf ("SIMD copy");
3669
3670 /* Advanced SIMD copy instructions. */
3671 if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
3672 && !bit (aarch64_insn_r->aarch64_insn, 15)
3673 && bit (aarch64_insn_r->aarch64_insn, 10))
3674 {
3675 if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
3676 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3677 else
3678 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3679 }
3680 else
3681 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3682 }
3683 /* All remaining floating point or advanced SIMD instructions. */
3684 else
3685 {
3686 if (record_debug)
 3687	debug_printf ("all remaining");
3688
3689 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3690 }
3691
3692 if (record_debug)
 3693     debug_printf ("\n");
3694
3695 aarch64_insn_r->reg_rec_count++;
3696 gdb_assert (aarch64_insn_r->reg_rec_count == 1);
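   /* Editorial note: REG_ALLOC is a helper macro defined earlier in this
      file; it heap-allocates aarch64_insn_r->aarch64_regs and copies
      record_buf into it.  The buffer is freed by deallocate_reg_mem once
      aarch64_process_record has copied the entries to the record list.  */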
3697 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3698 record_buf);
3699 return AARCH64_RECORD_SUCCESS;
3700}
3701
 3702/* Decodes the instruction type and invokes the matching record handler. */
3703
3704static unsigned int
3705aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
3706{
3707 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
3708
3709 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
3710 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
3711 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
3712 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
3713
3714 /* Data processing - immediate instructions. */
3715 if (!ins_bit26 && !ins_bit27 && ins_bit28)
3716 return aarch64_record_data_proc_imm (aarch64_insn_r);
3717
3718 /* Branch, exception generation and system instructions. */
3719 if (ins_bit26 && !ins_bit27 && ins_bit28)
3720 return aarch64_record_branch_except_sys (aarch64_insn_r);
3721
3722 /* Load and store instructions. */
3723 if (!ins_bit25 && ins_bit27)
3724 return aarch64_record_load_store (aarch64_insn_r);
3725
3726 /* Data processing - register instructions. */
3727 if (ins_bit25 && !ins_bit26 && ins_bit27)
3728 return aarch64_record_data_proc_reg (aarch64_insn_r);
3729
3730 /* Data processing - SIMD and floating point instructions. */
3731 if (ins_bit25 && ins_bit26 && ins_bit27)
3732 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
3733
3734 return AARCH64_RECORD_UNSUPPORTED;
3735}
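/* Dispatch example (an editorial sketch assuming the standard A64 encoding):
   for 0x9e620020 ("scvtf d0, x1") ins_bit25, ins_bit26 and ins_bit27 are all
   set, so the final test above routes the instruction to
   aarch64_record_data_proc_simd_fp.  */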
3736
 3737/* Cleans up the locally allocated register and memory record buffers. */
3738
3739static void
3740deallocate_reg_mem (insn_decode_record *record)
3741{
3742 xfree (record->aarch64_regs);
3743 xfree (record->aarch64_mems);
3744}
3745
 3746/* Parse the current instruction and record the values of the registers and
 3747   memory that will be changed by this instruction to record_arch_list.
 3748   Return -1 if something goes wrong. */
3749
3750int
3751aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
3752 CORE_ADDR insn_addr)
3753{
3754 uint32_t rec_no = 0;
3755 uint8_t insn_size = 4;
3756 uint32_t ret = 0;
3757 ULONGEST t_bit = 0, insn_id = 0;
3758 gdb_byte buf[insn_size];
3759 insn_decode_record aarch64_record;
3760
3761 memset (&buf[0], 0, insn_size);
3762 memset (&aarch64_record, 0, sizeof (insn_decode_record));
3763 target_read_memory (insn_addr, &buf[0], insn_size);
3764 aarch64_record.aarch64_insn
3765 = (uint32_t) extract_unsigned_integer (&buf[0],
3766 insn_size,
3767 gdbarch_byte_order (gdbarch));
3768 aarch64_record.regcache = regcache;
3769 aarch64_record.this_addr = insn_addr;
3770 aarch64_record.gdbarch = gdbarch;
3771
3772 ret = aarch64_record_decode_insn_handler (&aarch64_record);
3773 if (ret == AARCH64_RECORD_UNSUPPORTED)
3774 {
3775 printf_unfiltered (_("Process record does not support instruction "
3776 "0x%0x at address %s.\n"),
3777 aarch64_record.aarch64_insn,
3778 paddress (gdbarch, insn_addr));
3779 ret = -1;
3780 }
3781
3782 if (0 == ret)
3783 {
3784 /* Record registers. */
3785 record_full_arch_list_add_reg (aarch64_record.regcache,
3786 AARCH64_PC_REGNUM);
3787 /* Always record register CPSR. */
3788 record_full_arch_list_add_reg (aarch64_record.regcache,
3789 AARCH64_CPSR_REGNUM);
3790 if (aarch64_record.aarch64_regs)
3791 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
3792 if (record_full_arch_list_add_reg (aarch64_record.regcache,
3793 aarch64_record.aarch64_regs[rec_no]))
3794 ret = -1;
3795
3796 /* Record memories. */
3797 if (aarch64_record.aarch64_mems)
3798 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
3799 if (record_full_arch_list_add_mem
3800 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
3801 aarch64_record.aarch64_mems[rec_no].len))
3802 ret = -1;
3803
3804 if (record_full_arch_list_add_end ())
3805 ret = -1;
3806 }
3807
3808 deallocate_reg_mem (&aarch64_record);
3809 return ret;
3810}
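/* Registration sketch (editorial, not part of the original source): the
   entry point above is installed as the gdbarch process-record hook from
   aarch64_gdbarch_init elsewhere in this file:

     set_gdbarch_process_record (gdbarch, aarch64_process_record);  */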