/* Common target dependent code for GDB on AArch64 systems.

   Copyright (C) 2009-2015 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"

#include "frame.h"
#include "inferior.h"
#include "gdbcmd.h"
#include "gdbcore.h"
#include "dis-asm.h"
#include "regcache.h"
#include "reggroups.h"
#include "doublest.h"
#include "value.h"
#include "arch-utils.h"
#include "osabi.h"
#include "frame-unwind.h"
#include "frame-base.h"
#include "trad-frame.h"
#include "objfiles.h"
#include "dwarf2-frame.h"
#include "gdbtypes.h"
#include "prologue-value.h"
#include "target-descriptions.h"
#include "user-regs.h"
#include "language.h"
#include "infcall.h"
#include "ax.h"
#include "ax-gdb.h"

#include "aarch64-tdep.h"

#include "elf-bfd.h"
#include "elf/aarch64.h"

#include "vec.h"

#include "record.h"
#include "record-full.h"

#include "features/aarch64.c"

#include "arch/aarch64-insn.h"

#include "opcode/aarch64.h"

#define submask(x) ((1L << ((x) + 1)) - 1)
#define bit(obj,st) (((obj) >> (st)) & 1)
#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
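/* For example, bits (insn, 21, 31) extracts the eleven-bit field
   insn[31:21]: submask (31 - 21) is 0x7ff, so for the BRK encoding
   0xd4200000 the result is 0x6a1.  */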
/* Pseudo register base numbers.  */
#define AARCH64_Q0_REGNUM 0
#define AARCH64_D0_REGNUM (AARCH64_Q0_REGNUM + 32)
#define AARCH64_S0_REGNUM (AARCH64_D0_REGNUM + 32)
#define AARCH64_H0_REGNUM (AARCH64_S0_REGNUM + 32)
#define AARCH64_B0_REGNUM (AARCH64_H0_REGNUM + 32)

/* The standard register names, and all the valid aliases for them.  */
static const struct
{
  const char *const name;
  int regnum;
} aarch64_register_aliases[] =
{
  /* 64-bit register names.  */
  {"fp", AARCH64_FP_REGNUM},
  {"lr", AARCH64_LR_REGNUM},
  {"sp", AARCH64_SP_REGNUM},

  /* 32-bit register names.  */
  {"w0", AARCH64_X0_REGNUM + 0},
  {"w1", AARCH64_X0_REGNUM + 1},
  {"w2", AARCH64_X0_REGNUM + 2},
  {"w3", AARCH64_X0_REGNUM + 3},
  {"w4", AARCH64_X0_REGNUM + 4},
  {"w5", AARCH64_X0_REGNUM + 5},
  {"w6", AARCH64_X0_REGNUM + 6},
  {"w7", AARCH64_X0_REGNUM + 7},
  {"w8", AARCH64_X0_REGNUM + 8},
  {"w9", AARCH64_X0_REGNUM + 9},
  {"w10", AARCH64_X0_REGNUM + 10},
  {"w11", AARCH64_X0_REGNUM + 11},
  {"w12", AARCH64_X0_REGNUM + 12},
  {"w13", AARCH64_X0_REGNUM + 13},
  {"w14", AARCH64_X0_REGNUM + 14},
  {"w15", AARCH64_X0_REGNUM + 15},
  {"w16", AARCH64_X0_REGNUM + 16},
  {"w17", AARCH64_X0_REGNUM + 17},
  {"w18", AARCH64_X0_REGNUM + 18},
  {"w19", AARCH64_X0_REGNUM + 19},
  {"w20", AARCH64_X0_REGNUM + 20},
  {"w21", AARCH64_X0_REGNUM + 21},
  {"w22", AARCH64_X0_REGNUM + 22},
  {"w23", AARCH64_X0_REGNUM + 23},
  {"w24", AARCH64_X0_REGNUM + 24},
  {"w25", AARCH64_X0_REGNUM + 25},
  {"w26", AARCH64_X0_REGNUM + 26},
  {"w27", AARCH64_X0_REGNUM + 27},
  {"w28", AARCH64_X0_REGNUM + 28},
  {"w29", AARCH64_X0_REGNUM + 29},
  {"w30", AARCH64_X0_REGNUM + 30},

  /* specials */
  {"ip0", AARCH64_X0_REGNUM + 16},
  {"ip1", AARCH64_X0_REGNUM + 17}
};

/* The required core 'R' registers.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM!  */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  "pc", "cpsr"
};

/* The FP/SIMD 'V' registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM!  */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  "fpsr",
  "fpcr"
};

/* AArch64 prologue cache structure.  */
struct aarch64_prologue_cache
{
  /* The program counter at the start of the function.  It is used to
     identify this frame as a prologue frame.  */
  CORE_ADDR func;

  /* The program counter at the time this frame was created; i.e. where
     this function was called from.  It is used to identify this frame as a
     stub frame.  */
  CORE_ADDR prev_pc;

  /* The stack pointer at the time this frame was created; i.e. the
     caller's stack pointer when this function was called.  It is used
     to identify this frame.  */
  CORE_ADDR prev_sp;

  /* Is the target available to read from?  */
  int available_p;

  /* The frame base for this frame is just prev_sp - frame size.
     FRAMESIZE is the distance from the frame pointer to the
     initial stack pointer.  */
  int framesize;

  /* The register used to hold the frame pointer for this frame.  */
  int framereg;

  /* Saved register offsets.  */
  struct trad_frame_saved_reg *saved_regs;
};

static void
show_aarch64_debug (struct ui_file *file, int from_tty,
                    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
}

/* Analyze a prologue, looking for a recognizable stack frame
   and frame pointer.  Scan until we encounter a store that could
   clobber the stack frame unexpectedly, or an unknown instruction.  */

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
                          CORE_ADDR start, CORE_ADDR limit,
                          struct aarch64_prologue_cache *cache)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  int i;
  pv_t regs[AARCH64_X_REGISTER_COUNT];
  struct pv_area *stack;
  struct cleanup *back_to;

  for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
    regs[i] = pv_register (i, 0);
  stack = make_pv_area (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));
  back_to = make_cleanup_free_pv_area (stack);

  for (; start < limit; start += 4)
    {
      uint32_t insn;
      aarch64_inst inst;

      insn = read_memory_unsigned_integer (start, 4, byte_order_for_code);

      if (aarch64_decode_insn (insn, &inst, 1) != 0)
        break;

      if (inst.opcode->iclass == addsub_imm
          && (inst.opcode->op == OP_ADD
              || strcmp ("sub", inst.opcode->name) == 0))
        {
          unsigned rd = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].reg.regno;

          gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM);

          if (inst.opcode->op == OP_ADD)
            {
              regs[rd] = pv_add_constant (regs[rn],
                                          inst.operands[2].imm.value);
            }
          else
            {
              regs[rd] = pv_add_constant (regs[rn],
                                          -inst.operands[2].imm.value);
            }
        }
      else if (inst.opcode->iclass == pcreladdr
               && inst.operands[1].type == AARCH64_OPND_ADDR_ADRP)
        {
          gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);

          regs[inst.operands[0].reg.regno] = pv_unknown ();
        }
      else if (inst.opcode->iclass == branch_imm)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == condbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == branch_reg)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == compbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->op == OP_MOVZ)
        {
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
          regs[inst.operands[0].reg.regno] = pv_unknown ();
        }
      else if (inst.opcode->iclass == log_shift
               && strcmp (inst.opcode->name, "orr") == 0)
        {
          unsigned rd = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].reg.regno;
          unsigned rm = inst.operands[2].reg.regno;

          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT);

          if (inst.operands[2].shifter.amount == 0
              && rn == AARCH64_SP_REGNUM)
            regs[rd] = regs[rm];
          else
            {
              if (aarch64_debug)
                {
                  debug_printf ("aarch64: prologue analysis gave up "
                                "addr=0x%s opcode=0x%x (orr x register)\n",
                                core_addr_to_string_nz (start), insn);
                }
              break;
            }
        }
      else if (inst.opcode->op == OP_STUR)
        {
          unsigned rt = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].addr.base_regno;
          int is64
            = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);

          gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
          gdb_assert (!inst.operands[1].addr.offset.is_reg);

          pv_area_store (stack,
                         pv_add_constant (regs[rn],
                                          inst.operands[1].addr.offset.imm),
                         is64 ? 8 : 4, regs[rt]);
        }
      else if ((inst.opcode->iclass == ldstpair_off
                || inst.opcode->iclass == ldstpair_indexed)
               && inst.operands[2].addr.preind
               && strcmp ("stp", inst.opcode->name) == 0)
        {
          unsigned rt1 = inst.operands[0].reg.regno;
          unsigned rt2 = inst.operands[1].reg.regno;
          unsigned rn = inst.operands[2].addr.base_regno;
          int32_t imm = inst.operands[2].addr.offset.imm;

          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7);
          gdb_assert (!inst.operands[2].addr.offset.is_reg);

          /* If recording this store would invalidate the store area
             (perhaps because rn is not known) then we should abandon
             further prologue analysis.  */
          if (pv_area_store_would_trash (stack,
                                         pv_add_constant (regs[rn], imm)))
            break;

          if (pv_area_store_would_trash (stack,
                                         pv_add_constant (regs[rn], imm + 8)))
            break;

          pv_area_store (stack, pv_add_constant (regs[rn], imm), 8,
                         regs[rt1]);
          pv_area_store (stack, pv_add_constant (regs[rn], imm + 8), 8,
                         regs[rt2]);

          if (inst.operands[2].addr.writeback)
            regs[rn] = pv_add_constant (regs[rn], imm);
        }
      else if (inst.opcode->iclass == testbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else
        {
          if (aarch64_debug)
            {
              debug_printf ("aarch64: prologue analysis gave up addr=0x%s"
                            " opcode=0x%x\n",
                            core_addr_to_string_nz (start), insn);
            }
          break;
        }
    }

  if (cache == NULL)
    {
      do_cleanups (back_to);
      return start;
    }

  if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Frame pointer is fp.  Frame size is constant.  */
      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = -regs[AARCH64_FP_REGNUM].k;
    }
  else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Try the stack pointer.  */
      cache->framesize = -regs[AARCH64_SP_REGNUM].k;
      cache->framereg = AARCH64_SP_REGNUM;
    }
  else
    {
      /* We're just out of luck.  We don't know where the frame is.  */
      cache->framereg = -1;
      cache->framesize = 0;
    }

  for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
    {
      CORE_ADDR offset;

      if (pv_area_find_reg (stack, gdbarch, i, &offset))
        cache->saved_regs[i].addr = offset;
    }

  do_cleanups (back_to);
  return start;
}
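/* For example, a typical prologue such as

     sub  sp, sp, #0x30
     stp  x29, x30, [sp, #0x20]
     add  x29, sp, #0x20

   is tracked as follows: sp becomes SP-0x30, the stp records x29/x30
   at SP-0x10 and SP-0x8 in the pv area, and x29 becomes SP-0x10, so
   the scan above selects x29 as the frame register with a framesize
   of 16.  */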
/* Implement the "skip_prologue" gdbarch method.  */

static CORE_ADDR
aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  unsigned long inst;
  CORE_ADDR skip_pc;
  CORE_ADDR func_addr, limit_pc;
  struct symtab_and_line sal;

  /* See if we can determine the end of the prologue via the symbol
     table.  If so, then return either PC, or the PC after the
     prologue, whichever is greater.  */
  if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
    {
      CORE_ADDR post_prologue_pc
        = skip_prologue_using_sal (gdbarch, func_addr);

      if (post_prologue_pc != 0)
        return max (pc, post_prologue_pc);
    }

  /* Can't determine prologue from the symbol table, need to examine
     instructions.  */

  /* Find an upper limit on the function prologue using the debug
     information.  If the debug information could not be used to
     provide that bound, then use an arbitrary large number as the
     upper bound.  */
  limit_pc = skip_prologue_using_sal (gdbarch, pc);
  if (limit_pc == 0)
    limit_pc = pc + 128;        /* Magic.  */

  /* Try disassembling prologue.  */
  return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
}
/* Scan the function prologue for THIS_FRAME and populate the prologue
   cache CACHE.  */

static void
aarch64_scan_prologue (struct frame_info *this_frame,
                       struct aarch64_prologue_cache *cache)
{
  CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
  CORE_ADDR prologue_start;
  CORE_ADDR prologue_end;
  CORE_ADDR prev_pc = get_frame_pc (this_frame);
  struct gdbarch *gdbarch = get_frame_arch (this_frame);

  cache->prev_pc = prev_pc;

  /* Assume we do not find a frame.  */
  cache->framereg = -1;
  cache->framesize = 0;

  if (find_pc_partial_function (block_addr, NULL, &prologue_start,
                                &prologue_end))
    {
      struct symtab_and_line sal = find_pc_line (prologue_start, 0);

      if (sal.line == 0)
        {
          /* No line info so use the current PC.  */
          prologue_end = prev_pc;
        }
      else if (sal.end < prologue_end)
        {
          /* The next line begins after the function end.  */
          prologue_end = sal.end;
        }

      prologue_end = min (prologue_end, prev_pc);
      aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
    }
  else
    {
      CORE_ADDR frame_loc;
      LONGEST saved_fp;
      LONGEST saved_lr;
      enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

      frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
      if (frame_loc == 0)
        return;

      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = 16;
      cache->saved_regs[29].addr = 0;
      cache->saved_regs[30].addr = 8;
    }
}
/* Fill in *CACHE with information about the prologue of *THIS_FRAME.  This
   function may throw an exception if the inferior's registers or memory is
   not available.  */

static void
aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
                               struct aarch64_prologue_cache *cache)
{
  CORE_ADDR unwound_fp;
  int reg;

  aarch64_scan_prologue (this_frame, cache);

  if (cache->framereg == -1)
    return;

  unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
  if (unwound_fp == 0)
    return;

  cache->prev_sp = unwound_fp + cache->framesize;

  /* Calculate actual addresses of saved registers using offsets
     determined by aarch64_analyze_prologue.  */
  for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
    if (trad_frame_addr_p (cache->saved_regs, reg))
      cache->saved_regs[reg].addr += cache->prev_sp;

  cache->func = get_frame_func (this_frame);

  cache->available_p = 1;
}
/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  TRY
    {
      aarch64_make_prologue_cache_1 (this_frame, cache);
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
        throw_exception (ex);
    }
  END_CATCH

  return cache;
}
/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
                                           void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  /* Halt the backtrace at "_start".  */
  if (cache->prev_pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
    return UNWIND_OUTERMOST;

  /* We've hit a wall, stop.  */
  if (cache->prev_sp == 0)
    return UNWIND_OUTERMOST;

  return UNWIND_NO_REASON;
}
/* Our frame ID for a normal frame is the current function's starting
   PC and the caller's SP when we were called.  */

static void
aarch64_prologue_this_id (struct frame_info *this_frame,
                          void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    *this_id = frame_id_build_unavailable_stack (cache->func);
  else
    *this_id = frame_id_build (cache->prev_sp, cache->func);
}
/* Implement the "prev_register" frame_unwind method.  */

static struct value *
aarch64_prologue_prev_register (struct frame_info *this_frame,
                                void **this_cache, int prev_regnum)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  /* If we are asked to unwind the PC, then we need to return the LR
     instead.  The prologue may save PC, but it will point into this
     frame's prologue, not the next frame's resume location.  */
  if (prev_regnum == AARCH64_PC_REGNUM)
    {
      CORE_ADDR lr;

      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      return frame_unwind_got_constant (this_frame, prev_regnum, lr);
    }

  /* SP is generally not saved to the stack, but this frame is
     identified by the next frame's stack pointer at the time of the
     call.  The value was already reconstructed into PREV_SP.  */
  /*
         +----------+  ^
         | saved lr |  |
      +->| saved fp |--+
      |  |          |
      |  |          | <- Previous SP
      |  +----------+
      |  | saved lr |
      +--| saved fp |<- FP
         |          |
         |          |<- SP
         +----------+  */
  if (prev_regnum == AARCH64_SP_REGNUM)
    return frame_unwind_got_constant (this_frame, prev_regnum,
                                      cache->prev_sp);

  return trad_frame_get_prev_register (this_frame, cache->saved_regs,
                                       prev_regnum);
}
/* AArch64 prologue unwinder.  */
struct frame_unwind aarch64_prologue_unwind =
{
  NORMAL_FRAME,
  aarch64_prologue_frame_unwind_stop_reason,
  aarch64_prologue_this_id,
  aarch64_prologue_prev_register,
  NULL,
  default_frame_sniffer
};
/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  TRY
    {
      cache->prev_sp = get_frame_register_unsigned (this_frame,
                                                    AARCH64_SP_REGNUM);
      cache->prev_pc = get_frame_pc (this_frame);
      cache->available_p = 1;
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
        throw_exception (ex);
    }
  END_CATCH

  return cache;
}
/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame,
                                       void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}
/* Our frame ID for a stub frame is the current SP and LR.  */

static void
aarch64_stub_this_id (struct frame_info *this_frame,
                      void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (cache->available_p)
    *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
  else
    *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
}
/* Implement the "sniffer" frame_unwind method.  */

static int
aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
                             struct frame_info *this_frame,
                             void **this_prologue_cache)
{
  CORE_ADDR addr_in_block;
  gdb_byte dummy[4];

  addr_in_block = get_frame_address_in_block (this_frame);
  if (in_plt_section (addr_in_block)
      /* We also use the stub unwinder if the target memory is unreadable
         to avoid having the prologue unwinder trying to read it.  */
      || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
    return 1;

  return 0;
}
/* AArch64 stub unwinder.  */
struct frame_unwind aarch64_stub_unwind =
{
  NORMAL_FRAME,
  aarch64_stub_frame_unwind_stop_reason,
  aarch64_stub_this_id,
  aarch64_prologue_prev_register,
  NULL,
  aarch64_stub_unwind_sniffer
};
/* Return the frame base address of *THIS_FRAME.  */

static CORE_ADDR
aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  return cache->prev_sp - cache->framesize;
}
/* AArch64 default frame base information.  */
struct frame_base aarch64_normal_base =
{
  &aarch64_prologue_unwind,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base
};
/* Assuming THIS_FRAME is a dummy, return the frame ID of that
   dummy frame.  The frame ID's base needs to match the TOS value
   saved by save_dummy_frame_tos () and returned from
   aarch64_push_dummy_call, and the PC needs to match the dummy
   frame's breakpoint.  */

static struct frame_id
aarch64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  return frame_id_build (get_frame_register_unsigned (this_frame,
                                                      AARCH64_SP_REGNUM),
                         get_frame_pc (this_frame));
}
/* Implement the "unwind_pc" gdbarch method.  */

static CORE_ADDR
aarch64_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  CORE_ADDR pc
    = frame_unwind_register_unsigned (this_frame, AARCH64_PC_REGNUM);

  return pc;
}
/* Implement the "unwind_sp" gdbarch method.  */

static CORE_ADDR
aarch64_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  return frame_unwind_register_unsigned (this_frame, AARCH64_SP_REGNUM);
}
/* Return the value of the REGNUM register in the previous frame of
   *THIS_FRAME.  */

static struct value *
aarch64_dwarf2_prev_register (struct frame_info *this_frame,
                              void **this_cache, int regnum)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  CORE_ADDR lr;

  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      return frame_unwind_got_constant (this_frame, regnum, lr);

    default:
      internal_error (__FILE__, __LINE__,
                      _("Unexpected register %d"), regnum);
    }
}
/* Implement the "init_reg" dwarf2_frame_ops method.  */

static void
aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
                               struct dwarf2_frame_state_reg *reg,
                               struct frame_info *this_frame)
{
  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      reg->how = DWARF2_FRAME_REG_FN;
      reg->loc.fn = aarch64_dwarf2_prev_register;
      break;
    case AARCH64_SP_REGNUM:
      reg->how = DWARF2_FRAME_REG_CFA;
      break;
    }
}
/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

typedef struct
{
  /* Value to pass on stack.  */
  const gdb_byte *data;

  /* Size in bytes of value to pass on stack.  */
  int len;
} stack_item_t;

DEF_VEC_O (stack_item_t);
/* Return the alignment (in bytes) of the given type.  */

static int
aarch64_type_align (struct type *t)
{
  int n;
  int align;
  int falign;

  t = check_typedef (t);
  switch (TYPE_CODE (t))
    {
    default:
      /* Should never happen.  */
      internal_error (__FILE__, __LINE__, _("unknown type alignment"));
      return 4;

    case TYPE_CODE_PTR:
    case TYPE_CODE_ENUM:
    case TYPE_CODE_INT:
    case TYPE_CODE_FLT:
    case TYPE_CODE_SET:
    case TYPE_CODE_RANGE:
    case TYPE_CODE_BITSTRING:
    case TYPE_CODE_REF:
    case TYPE_CODE_CHAR:
    case TYPE_CODE_BOOL:
      return TYPE_LENGTH (t);

    case TYPE_CODE_ARRAY:
    case TYPE_CODE_COMPLEX:
      return aarch64_type_align (TYPE_TARGET_TYPE (t));

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      align = 1;
      for (n = 0; n < TYPE_NFIELDS (t); n++)
        {
          falign = aarch64_type_align (TYPE_FIELD_TYPE (t, n));
          if (falign > align)
            align = falign;
        }
      return align;
    }
}
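/* For example, "struct { char c; int i; }" yields 4 (the largest
   field alignment), while "double d[4]" yields 8 (the element
   type's alignment).  */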
/* Return 1 if *TY is a homogeneous floating-point aggregate as
   defined in the AAPCS64 ABI document; otherwise return 0.  */

static int
is_hfa (struct type *ty)
{
  switch (TYPE_CODE (ty))
    {
    case TYPE_CODE_ARRAY:
      {
        struct type *target_ty = TYPE_TARGET_TYPE (ty);
        if (TYPE_CODE (target_ty) == TYPE_CODE_FLT && TYPE_LENGTH (ty) <= 4)
          return 1;
        break;
      }

    case TYPE_CODE_UNION:
    case TYPE_CODE_STRUCT:
      {
        if (TYPE_NFIELDS (ty) > 0 && TYPE_NFIELDS (ty) <= 4)
          {
            struct type *member0_type;

            member0_type = check_typedef (TYPE_FIELD_TYPE (ty, 0));
            if (TYPE_CODE (member0_type) == TYPE_CODE_FLT)
              {
                int i;

                for (i = 0; i < TYPE_NFIELDS (ty); i++)
                  {
                    struct type *member1_type;

                    member1_type = check_typedef (TYPE_FIELD_TYPE (ty, i));
                    if (TYPE_CODE (member0_type) != TYPE_CODE (member1_type)
                        || (TYPE_LENGTH (member0_type)
                            != TYPE_LENGTH (member1_type)))
                      return 0;
                  }
                return 1;
              }
          }
        return 0;
      }

    default:
      break;
    }

  return 0;
}
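/* For example, "struct { float x, y, z; }" qualifies: it has at most
   four members, all TYPE_CODE_FLT of the same length.  A struct
   mixing float and double members does not, since the member
   lengths differ.  */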
/* AArch64 function call information structure.  */
struct aarch64_call_info
{
  /* The current argument number.  */
  unsigned argnum;

  /* The next general purpose register number, equivalent to NGRN as
     described in the AArch64 Procedure Call Standard.  */
  unsigned ngrn;

  /* The next SIMD and floating point register number, equivalent to
     NSRN as described in the AArch64 Procedure Call Standard.  */
  unsigned nsrn;

  /* The next stacked argument address, equivalent to NSAA as
     described in the AArch64 Procedure Call Standard.  */
  unsigned nsaa;

  /* Stack item vector.  */
  VEC(stack_item_t) *si;
};
/* Pass a value in a sequence of consecutive X registers.  The caller
   is responsible for ensuring sufficient registers are available.  */

static void
pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
           struct aarch64_call_info *info, struct type *type,
           struct value *arg)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int len = TYPE_LENGTH (type);
  enum type_code typecode = TYPE_CODE (type);
  int regnum = AARCH64_X0_REGNUM + info->ngrn;
  const bfd_byte *buf = value_contents (arg);

  info->argnum++;

  while (len > 0)
    {
      int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
      CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
                                                   byte_order);

      /* Adjust sub-word struct/union args when big-endian.  */
      if (byte_order == BFD_ENDIAN_BIG
          && partial_len < X_REGISTER_SIZE
          && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
        regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);

      if (aarch64_debug)
        {
          debug_printf ("arg %d in %s = 0x%s\n", info->argnum,
                        gdbarch_register_name (gdbarch, regnum),
                        phex (regval, X_REGISTER_SIZE));
        }
      regcache_cooked_write_unsigned (regcache, regnum, regval);
      len -= partial_len;
      buf += partial_len;
      regnum++;
    }
}
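/* For example, a 16-byte struct passed with info->ngrn == 0 is
   written to two registers: x0 receives bytes 0-7 and x1 receives
   bytes 8-15.  Note that the caller (pass_in_x_or_stack) is the one
   that advances info->ngrn.  */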
/* Attempt to marshall a value in a V register.  Return 1 if
   successful, or 0 if insufficient registers are available.  This
   function, unlike the equivalent pass_in_x() function, does not
   handle arguments spread across multiple registers.  */

static int
pass_in_v (struct gdbarch *gdbarch,
           struct regcache *regcache,
           struct aarch64_call_info *info,
           const bfd_byte *buf)
{
  if (info->nsrn < 8)
    {
      enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
      int regnum = AARCH64_V0_REGNUM + info->nsrn;

      info->argnum++;
      info->nsrn++;

      regcache_cooked_write (regcache, regnum, buf);
      if (aarch64_debug)
        {
          debug_printf ("arg %d in %s\n", info->argnum,
                        gdbarch_register_name (gdbarch, regnum));
        }
      return 1;
    }
  info->nsrn = 8;
  return 0;
}
/* Marshall an argument onto the stack.  */

static void
pass_on_stack (struct aarch64_call_info *info, struct type *type,
               struct value *arg)
{
  const bfd_byte *buf = value_contents (arg);
  int len = TYPE_LENGTH (type);
  int align;
  stack_item_t item;

  info->argnum++;

  align = aarch64_type_align (type);

  /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
     natural alignment of the argument's type.  */
  align = align_up (align, 8);

  /* The AArch64 PCS requires at most doubleword alignment.  */
  if (align > 16)
    align = 16;

  if (aarch64_debug)
    {
      debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
                    info->nsaa);
    }

  item.len = len;
  item.data = buf;
  VEC_safe_push (stack_item_t, info->si, &item);

  info->nsaa += len;
  if (info->nsaa & (align - 1))
    {
      /* Push stack alignment padding.  */
      int pad = align - (info->nsaa & (align - 1));

      item.len = pad;
      item.data = buf;

      VEC_safe_push (stack_item_t, info->si, &item);
      info->nsaa += pad;
    }
}
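/* For example, a 12-byte struct consumes 16 bytes of stack here: its
   alignment rounds up to the 8-byte PCS minimum, so the 12 data bytes
   are followed by a 4-byte padding item that keeps the next NSAA
   8-byte aligned.  */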
/* Marshall an argument into a sequence of one or more consecutive X
   registers or, if insufficient X registers are available, then onto
   the stack.  */

static void
pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
                    struct aarch64_call_info *info, struct type *type,
                    struct value *arg)
{
  int len = TYPE_LENGTH (type);
  int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;

  /* PCS C.13 - Pass in registers if we have enough spare.  */
  if (info->ngrn + nregs <= 8)
    {
      pass_in_x (gdbarch, regcache, info, type, arg);
      info->ngrn += nregs;
    }
  else
    {
      info->ngrn = 8;
      pass_on_stack (info, type, arg);
    }
}
/* Pass a value in a V register, or on the stack if insufficient are
   available.  */

static void
pass_in_v_or_stack (struct gdbarch *gdbarch,
                    struct regcache *regcache,
                    struct aarch64_call_info *info,
                    struct type *type,
                    struct value *arg)
{
  if (!pass_in_v (gdbarch, regcache, info, value_contents (arg)))
    pass_on_stack (info, type, arg);
}
/* Implement the "push_dummy_call" gdbarch method.  */

static CORE_ADDR
aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
                         struct regcache *regcache, CORE_ADDR bp_addr,
                         int nargs,
                         struct value **args, CORE_ADDR sp, int struct_return,
                         CORE_ADDR struct_addr)
{
  int nstack = 0;
  int argnum;
  int x_argreg;
  int v_argreg;
  struct aarch64_call_info info;
  struct type *func_type;
  struct type *return_type;
  int lang_struct_return;

  memset (&info, 0, sizeof (info));

  /* We need to know what the type of the called function is in order
     to determine the number of named/anonymous arguments for the
     actual argument placement, and the return type in order to handle
     return value correctly.

     The generic code above us views the decision of return in memory
     or return in registers as a two stage process.  The language
     handler is consulted first and may decide to return in memory (eg
     class with copy constructor returned by value), this will cause
     the generic code to allocate space AND insert an initial leading
     argument.

     If the language code does not decide to pass in memory then the
     target code is consulted.

     If the language code decides to pass in memory we want to move
     the pointer inserted as the initial argument from the argument
     list and into X8, the conventional AArch64 struct return pointer
     register.

     This is slightly awkward, ideally the flag "lang_struct_return"
     would be passed to the targets implementation of push_dummy_call.
     Rather than change the target interface we call the language code
     directly ourselves.  */

  func_type = check_typedef (value_type (function));

  /* Dereference function pointer types.  */
  if (TYPE_CODE (func_type) == TYPE_CODE_PTR)
    func_type = TYPE_TARGET_TYPE (func_type);

  gdb_assert (TYPE_CODE (func_type) == TYPE_CODE_FUNC
              || TYPE_CODE (func_type) == TYPE_CODE_METHOD);

  /* If language_pass_by_reference () returned true we will have been
     given an additional initial argument, a hidden pointer to the
     return slot in memory.  */
  return_type = TYPE_TARGET_TYPE (func_type);
  lang_struct_return = language_pass_by_reference (return_type);

  /* Set the return address.  For the AArch64, the return breakpoint
     is always at BP_ADDR.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);

  /* If we were given an initial argument for the return slot because
     lang_struct_return was true, lose it.  */
  if (lang_struct_return)
    {
      args++;
      nargs--;
    }

  /* The struct_return pointer occupies X8.  */
  if (struct_return || lang_struct_return)
    {
      if (aarch64_debug)
        {
          debug_printf ("struct return in %s = 0x%s\n",
                        gdbarch_register_name (gdbarch,
                                               AARCH64_STRUCT_RETURN_REGNUM),
                        paddress (gdbarch, struct_addr));
        }
      regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
                                      struct_addr);
    }

  for (argnum = 0; argnum < nargs; argnum++)
    {
      struct value *arg = args[argnum];
      struct type *arg_type;
      int len;

      arg_type = check_typedef (value_type (arg));
      len = TYPE_LENGTH (arg_type);

      switch (TYPE_CODE (arg_type))
        {
        case TYPE_CODE_INT:
        case TYPE_CODE_BOOL:
        case TYPE_CODE_CHAR:
        case TYPE_CODE_RANGE:
        case TYPE_CODE_ENUM:
          if (len < 4)
            {
              /* Promote to 32 bit integer.  */
              if (TYPE_UNSIGNED (arg_type))
                arg_type = builtin_type (gdbarch)->builtin_uint32;
              else
                arg_type = builtin_type (gdbarch)->builtin_int32;
              arg = value_cast (arg_type, arg);
            }
          pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;

        case TYPE_CODE_COMPLEX:
          if (info.nsrn <= 6)
            {
              const bfd_byte *buf = value_contents (arg);
              struct type *target_type =
                check_typedef (TYPE_TARGET_TYPE (arg_type));

              pass_in_v (gdbarch, regcache, &info, buf);
              pass_in_v (gdbarch, regcache, &info,
                         buf + TYPE_LENGTH (target_type));
            }
          else
            {
              info.nsrn = 8;
              pass_on_stack (&info, arg_type, arg);
            }
          break;
        case TYPE_CODE_FLT:
          pass_in_v_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;

        case TYPE_CODE_STRUCT:
        case TYPE_CODE_ARRAY:
        case TYPE_CODE_UNION:
          if (is_hfa (arg_type))
            {
              int elements = TYPE_NFIELDS (arg_type);

              /* Homogeneous Aggregates */
              if (info.nsrn + elements < 8)
                {
                  int i;

                  for (i = 0; i < elements; i++)
                    {
                      /* We know that we have sufficient registers
                         available therefore this will never fallback
                         to the stack.  */
                      struct value *field =
                        value_primitive_field (arg, 0, i, arg_type);
                      struct type *field_type =
                        check_typedef (value_type (field));

                      pass_in_v_or_stack (gdbarch, regcache, &info,
                                          field_type, field);
                    }
                }
              else
                {
                  info.nsrn = 8;
                  pass_on_stack (&info, arg_type, arg);
                }
            }
          else if (len > 16)
            {
              /* PCS B.7 Aggregates larger than 16 bytes are passed by
                 invisible reference.  */

              /* Allocate aligned storage.  */
              sp = align_down (sp - len, 16);

              /* Write the real data into the stack.  */
              write_memory (sp, value_contents (arg), len);

              /* Construct the indirection.  */
              arg_type = lookup_pointer_type (arg_type);
              arg = value_from_pointer (arg_type, sp);
              pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
            }
          else
            /* PCS C.15 / C.18 multiple values pass.  */
            pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;

        default:
          pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;
        }
    }

  /* Make sure stack retains 16 byte alignment.  */
  if (info.nsaa & 15)
    sp -= 16 - (info.nsaa & 15);

  while (!VEC_empty (stack_item_t, info.si))
    {
      stack_item_t *si = VEC_last (stack_item_t, info.si);

      sp -= si->len;
      write_memory (sp, si->data, si->len);
      VEC_pop (stack_item_t, info.si);
    }

  VEC_free (stack_item_t, info.si);

  /* Finally, update the SP register.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);

  return sp;
}
/* Implement the "frame_align" gdbarch method.  */

static CORE_ADDR
aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
{
  /* Align the stack to sixteen bytes.  */
  return sp & ~(CORE_ADDR) 15;
}
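/* For example, a candidate SP of 0x7ffffffc3a is aligned down to
   0x7ffffffc30.  */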
/* Return the type for an AdvSIMD Q register.  */

static struct type *
aarch64_vnq_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnq_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint128;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int128;
      append_composite_type_field (t, "s", elem);

      tdep->vnq_type = t;
    }

  return tdep->vnq_type;
}
/* Return the type for an AdvSIMD D register.  */

static struct type *
aarch64_vnd_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnd_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_double;
      append_composite_type_field (t, "f", elem);

      elem = builtin_type (gdbarch)->builtin_uint64;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int64;
      append_composite_type_field (t, "s", elem);

      tdep->vnd_type = t;
    }

  return tdep->vnd_type;
}
/* Return the type for an AdvSIMD S register.  */

static struct type *
aarch64_vns_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vns_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_float;
      append_composite_type_field (t, "f", elem);

      elem = builtin_type (gdbarch)->builtin_uint32;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int32;
      append_composite_type_field (t, "s", elem);

      tdep->vns_type = t;
    }

  return tdep->vns_type;
}
/* Return the type for an AdvSIMD H register.  */

static struct type *
aarch64_vnh_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnh_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint16;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int16;
      append_composite_type_field (t, "s", elem);

      tdep->vnh_type = t;
    }

  return tdep->vnh_type;
}
/* Return the type for an AdvSIMD B register.  */

static struct type *
aarch64_vnb_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnb_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint8;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int8;
      append_composite_type_field (t, "s", elem);

      tdep->vnb_type = t;
    }

  return tdep->vnb_type;
}
/* Implement the "dwarf2_reg_to_regnum" gdbarch method.  */

static int
aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
{
  if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
    return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;

  if (reg == AARCH64_DWARF_SP)
    return AARCH64_SP_REGNUM;

  if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
    return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;

  return -1;
}
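/* In the AArch64 DWARF numbering, 0-30 map to x0-x30, 31 to sp, and
   64-95 to v0-v31; e.g. DWARF register 65 resolves to v1.  Anything
   else yields -1.  */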

/* Implement the "print_insn" gdbarch method.  */

static int
aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
{
  info->symbols = NULL;
  return print_insn_aarch64 (memaddr, info);
}
/* AArch64 BRK software debug mode instruction.
   Note that AArch64 code is always little-endian.
   1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000.  */
static const gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};

/* Implement the "breakpoint_from_pc" gdbarch method.  */

static const gdb_byte *
aarch64_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr,
                            int *lenptr)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  *lenptr = sizeof (aarch64_default_breakpoint);
  return aarch64_default_breakpoint;
}
/* Extract from an array REGS containing the (raw) register state a
   function return value of type TYPE, and copy that, in virtual
   format, into VALBUF.  */

static void
aarch64_extract_return_value (struct type *type, struct regcache *regs,
                              gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = get_regcache_arch (regs);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  if (TYPE_CODE (type) == TYPE_CODE_FLT)
    {
      bfd_byte buf[V_REGISTER_SIZE];
      int len = TYPE_LENGTH (type);

      regcache_cooked_read (regs, AARCH64_V0_REGNUM, buf);
      memcpy (valbuf, buf, len);
    }
  else if (TYPE_CODE (type) == TYPE_CODE_INT
           || TYPE_CODE (type) == TYPE_CODE_CHAR
           || TYPE_CODE (type) == TYPE_CODE_BOOL
           || TYPE_CODE (type) == TYPE_CODE_PTR
           || TYPE_CODE (type) == TYPE_CODE_REF
           || TYPE_CODE (type) == TYPE_CODE_ENUM)
    {
      /* If the type is a plain integer, then the access is
         straight-forward.  Otherwise we have to play around a bit
         more.  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      ULONGEST tmp;

      while (len > 0)
        {
          /* By using store_unsigned_integer we avoid having to do
             anything special for small big-endian values.  */
          regcache_cooked_read_unsigned (regs, regno++, &tmp);
          store_unsigned_integer (valbuf,
                                  (len > X_REGISTER_SIZE
                                   ? X_REGISTER_SIZE : len), byte_order, tmp);
          len -= X_REGISTER_SIZE;
          valbuf += X_REGISTER_SIZE;
        }
    }
  else if (TYPE_CODE (type) == TYPE_CODE_COMPLEX)
    {
      int regno = AARCH64_V0_REGNUM;
      bfd_byte buf[V_REGISTER_SIZE];
      struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
      int len = TYPE_LENGTH (target_type);

      regcache_cooked_read (regs, regno, buf);
      memcpy (valbuf, buf, len);
      valbuf += len;
      regcache_cooked_read (regs, regno + 1, buf);
      memcpy (valbuf, buf, len);
      valbuf += len;
    }
  else if (is_hfa (type))
    {
      int elements = TYPE_NFIELDS (type);
      struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
      int len = TYPE_LENGTH (member_type);
      int i;

      for (i = 0; i < elements; i++)
        {
          int regno = AARCH64_V0_REGNUM + i;
          bfd_byte buf[V_REGISTER_SIZE];

          if (aarch64_debug)
            {
              debug_printf ("read HFA return value element %d from %s\n",
                            i + 1,
                            gdbarch_register_name (gdbarch, regno));
            }
          regcache_cooked_read (regs, regno, buf);

          memcpy (valbuf, buf, len);
          valbuf += len;
        }
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
         been stored to word-aligned memory and then loaded into
         registers with 64-bit load instruction(s).  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      bfd_byte buf[X_REGISTER_SIZE];

      while (len > 0)
        {
          regcache_cooked_read (regs, regno++, buf);
          memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
          len -= X_REGISTER_SIZE;
          valbuf += X_REGISTER_SIZE;
        }
    }
}

/* Will a function return an aggregate type in memory or in a
   register?  Return 0 if an aggregate type can be returned in a
   register, 1 if it must be returned in memory.  */

static int
aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
{
  int nRc;
  enum type_code code;

  type = check_typedef (type);

  /* In the AArch64 ABI, "integer" like aggregate types are returned
     in registers.  For an aggregate type to be integer like, its size
     must be less than or equal to 4 * X_REGISTER_SIZE.  */

  if (is_hfa (type))
    {
      /* PCS B.5 If the argument is a Named HFA, then the argument is
         used unmodified.  */
      return 0;
    }

  if (TYPE_LENGTH (type) > 16)
    {
      /* PCS B.6 Aggregates larger than 16 bytes are passed by
         invisible reference.  */

      return 1;
    }

  return 0;
}
/* Write into appropriate registers a function return value of type
   TYPE, given in virtual format.  */

static void
aarch64_store_return_value (struct type *type, struct regcache *regs,
                            const gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = get_regcache_arch (regs);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  if (TYPE_CODE (type) == TYPE_CODE_FLT)
    {
      bfd_byte buf[V_REGISTER_SIZE];
      int len = TYPE_LENGTH (type);

      memcpy (buf, valbuf, len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
      regcache_cooked_write (regs, AARCH64_V0_REGNUM, buf);
    }
  else if (TYPE_CODE (type) == TYPE_CODE_INT
           || TYPE_CODE (type) == TYPE_CODE_CHAR
           || TYPE_CODE (type) == TYPE_CODE_BOOL
           || TYPE_CODE (type) == TYPE_CODE_PTR
           || TYPE_CODE (type) == TYPE_CODE_REF
           || TYPE_CODE (type) == TYPE_CODE_ENUM)
    {
      if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
        {
          /* Values of one word or less are zero/sign-extended and
             returned in x0.  */
          bfd_byte tmpbuf[X_REGISTER_SIZE];
          LONGEST val = unpack_long (type, valbuf);

          store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
          regcache_cooked_write (regs, AARCH64_X0_REGNUM, tmpbuf);
        }
      else
        {
          /* Integral values greater than one word are stored in
             consecutive registers starting with x0.  This will always
             be a multiple of the register size.  */
          int len = TYPE_LENGTH (type);
          int regno = AARCH64_X0_REGNUM;

          while (len > 0)
            {
              regcache_cooked_write (regs, regno++, valbuf);
              len -= X_REGISTER_SIZE;
              valbuf += X_REGISTER_SIZE;
            }
        }
    }
  else if (is_hfa (type))
    {
      int elements = TYPE_NFIELDS (type);
      struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
      int len = TYPE_LENGTH (member_type);
      int i;

      for (i = 0; i < elements; i++)
        {
          int regno = AARCH64_V0_REGNUM + i;
          bfd_byte tmpbuf[MAX_REGISTER_SIZE];

          if (aarch64_debug)
            {
              debug_printf ("write HFA return value element %d to %s\n",
                            i + 1,
                            gdbarch_register_name (gdbarch, regno));
            }

          memcpy (tmpbuf, valbuf, len);
          regcache_cooked_write (regs, regno, tmpbuf);
          valbuf += len;
        }
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
         been stored to word-aligned memory and then loaded into
         registers with 64-bit load instruction(s).  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      bfd_byte tmpbuf[X_REGISTER_SIZE];

      while (len > 0)
        {
          memcpy (tmpbuf, valbuf,
                  len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
          regcache_cooked_write (regs, regno++, tmpbuf);
          len -= X_REGISTER_SIZE;
          valbuf += X_REGISTER_SIZE;
        }
    }
}
/* Implement the "return_value" gdbarch method.  */

static enum return_value_convention
aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
                      struct type *valtype, struct regcache *regcache,
                      gdb_byte *readbuf, const gdb_byte *writebuf)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
      || TYPE_CODE (valtype) == TYPE_CODE_UNION
      || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
    {
      if (aarch64_return_in_memory (gdbarch, valtype))
        {
          if (aarch64_debug)
            debug_printf ("return value in memory\n");
          return RETURN_VALUE_STRUCT_CONVENTION;
        }
    }

  if (writebuf)
    aarch64_store_return_value (valtype, regcache, writebuf);

  if (readbuf)
    aarch64_extract_return_value (valtype, regcache, readbuf);

  if (aarch64_debug)
    debug_printf ("return value in registers\n");

  return RETURN_VALUE_REGISTER_CONVENTION;
}
/* Implement the "get_longjmp_target" gdbarch method.  */

static int
aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
{
  CORE_ADDR jb_addr;
  gdb_byte buf[X_REGISTER_SIZE];
  struct gdbarch *gdbarch = get_frame_arch (frame);
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);

  if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
                          X_REGISTER_SIZE))
    return 0;

  *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
  return 1;
}
/* Implement the "gen_return_address" gdbarch method.  */

static void
aarch64_gen_return_address (struct gdbarch *gdbarch,
                            struct agent_expr *ax, struct axs_value *value,
                            CORE_ADDR scope)
{
  value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
  value->kind = axs_lvalue_register;
  value->u.reg = AARCH64_LR_REGNUM;
}

/* Return the pseudo register name corresponding to register regnum.  */

static const char *
aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
{
  static const char *const q_name[] =
    {
      "q0", "q1", "q2", "q3",
      "q4", "q5", "q6", "q7",
      "q8", "q9", "q10", "q11",
      "q12", "q13", "q14", "q15",
      "q16", "q17", "q18", "q19",
      "q20", "q21", "q22", "q23",
      "q24", "q25", "q26", "q27",
      "q28", "q29", "q30", "q31",
    };

  static const char *const d_name[] =
    {
      "d0", "d1", "d2", "d3",
      "d4", "d5", "d6", "d7",
      "d8", "d9", "d10", "d11",
      "d12", "d13", "d14", "d15",
      "d16", "d17", "d18", "d19",
      "d20", "d21", "d22", "d23",
      "d24", "d25", "d26", "d27",
      "d28", "d29", "d30", "d31",
    };

  static const char *const s_name[] =
    {
      "s0", "s1", "s2", "s3",
      "s4", "s5", "s6", "s7",
      "s8", "s9", "s10", "s11",
      "s12", "s13", "s14", "s15",
      "s16", "s17", "s18", "s19",
      "s20", "s21", "s22", "s23",
      "s24", "s25", "s26", "s27",
      "s28", "s29", "s30", "s31",
    };

  static const char *const h_name[] =
    {
      "h0", "h1", "h2", "h3",
      "h4", "h5", "h6", "h7",
      "h8", "h9", "h10", "h11",
      "h12", "h13", "h14", "h15",
      "h16", "h17", "h18", "h19",
      "h20", "h21", "h22", "h23",
      "h24", "h25", "h26", "h27",
      "h28", "h29", "h30", "h31",
    };

  static const char *const b_name[] =
    {
      "b0", "b1", "b2", "b3",
      "b4", "b5", "b6", "b7",
      "b8", "b9", "b10", "b11",
      "b12", "b13", "b14", "b15",
      "b16", "b17", "b18", "b19",
      "b20", "b21", "b22", "b23",
      "b24", "b25", "b26", "b27",
      "b28", "b29", "b30", "b31",
    };

  regnum -= gdbarch_num_regs (gdbarch);

  if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
    return q_name[regnum - AARCH64_Q0_REGNUM];

  if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
    return d_name[regnum - AARCH64_D0_REGNUM];

  if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
    return s_name[regnum - AARCH64_S0_REGNUM];

  if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
    return h_name[regnum - AARCH64_H0_REGNUM];

  if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
    return b_name[regnum - AARCH64_B0_REGNUM];

  internal_error (__FILE__, __LINE__,
                  _("aarch64_pseudo_register_name: bad register number %d"),
                  regnum);
}
1940/* Implement the "pseudo_register_type" tdesc_arch_data method. */
1941
1942static struct type *
1943aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
1944{
1945 regnum -= gdbarch_num_regs (gdbarch);
1946
1947 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
1948 return aarch64_vnq_type (gdbarch);
1949
1950 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
1951 return aarch64_vnd_type (gdbarch);
1952
1953 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
1954 return aarch64_vns_type (gdbarch);
1955
1956 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
1957 return aarch64_vnh_type (gdbarch);
1958
1959 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
1960 return aarch64_vnb_type (gdbarch);
1961
1962 internal_error (__FILE__, __LINE__,
1963 _("aarch64_pseudo_register_type: bad register number %d"),
1964 regnum);
1965}
1966
1967/* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
1968
1969static int
1970aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
1971 struct reggroup *group)
1972{
1973 regnum -= gdbarch_num_regs (gdbarch);
1974
1975 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
1976 return group == all_reggroup || group == vector_reggroup;
1977 else if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
1978 return (group == all_reggroup || group == vector_reggroup
1979 || group == float_reggroup);
1980 else if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
1981 return (group == all_reggroup || group == vector_reggroup
1982 || group == float_reggroup);
1983 else if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
1984 return group == all_reggroup || group == vector_reggroup;
1985 else if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
1986 return group == all_reggroup || group == vector_reggroup;
1987
1988 return group == all_reggroup;
1989}
1990
1991/* Implement the "pseudo_register_read_value" gdbarch method. */
1992
1993static struct value *
1994aarch64_pseudo_read_value (struct gdbarch *gdbarch,
1995 struct regcache *regcache,
1996 int regnum)
1997{
1998 gdb_byte reg_buf[MAX_REGISTER_SIZE];
1999 struct value *result_value;
2000 gdb_byte *buf;
2001
2002 result_value = allocate_value (register_type (gdbarch, regnum));
2003 VALUE_LVAL (result_value) = lval_register;
2004 VALUE_REGNUM (result_value) = regnum;
2005 buf = value_contents_raw (result_value);
2006
2007 regnum -= gdbarch_num_regs (gdbarch);
2008
2009 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2010 {
2011 enum register_status status;
2012 unsigned v_regnum;
2013
2014 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2015 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2016 if (status != REG_VALID)
2017 mark_value_bytes_unavailable (result_value, 0,
2018 TYPE_LENGTH (value_type (result_value)));
2019 else
2020 memcpy (buf, reg_buf, Q_REGISTER_SIZE);
2021 return result_value;
2022 }
2023
2024 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2025 {
2026 enum register_status status;
2027 unsigned v_regnum;
2028
2029 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2030 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2031 if (status != REG_VALID)
2032 mark_value_bytes_unavailable (result_value, 0,
2033 TYPE_LENGTH (value_type (result_value)));
2034 else
2035 memcpy (buf, reg_buf, D_REGISTER_SIZE);
2036 return result_value;
2037 }
2038
2039 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2040 {
2041 enum register_status status;
2042 unsigned v_regnum;
2043
2044 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2045 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2046 if (status != REG_VALID)
2047 mark_value_bytes_unavailable (result_value, 0,
2048 TYPE_LENGTH (value_type (result_value)));
2049 else
2050 memcpy (buf, reg_buf, S_REGISTER_SIZE);
2051 return result_value;
2052 }
2053
2054 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2055 {
2056 enum register_status status;
2057 unsigned v_regnum;
2058
2059 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2060 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2061 if (status != REG_VALID)
2062 mark_value_bytes_unavailable (result_value, 0,
2063 TYPE_LENGTH (value_type (result_value)));
2064 else
2065 memcpy (buf, reg_buf, H_REGISTER_SIZE);
2066 return result_value;
2067 }
2068
2069 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2070 {
2071 enum register_status status;
2072 unsigned v_regnum;
2073
2074 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2075 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2076 if (status != REG_VALID)
2077 mark_value_bytes_unavailable (result_value, 0,
2078 TYPE_LENGTH (value_type (result_value)));
2079 else
2080 memcpy (buf, reg_buf, B_REGISTER_SIZE);
2081 return result_value;
2082 }
2083
2084 gdb_assert_not_reached ("regnum out of bounds");
2085}
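/* Each pseudo read above is a view of the low bytes of the same raw
   V register: reading s5, for example, fetches raw v5 and copies its
   low S_REGISTER_SIZE bytes into the result value.  */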
2086
2087/* Implement the "pseudo_register_write" gdbarch method. */
2088
2089static void
2090aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2091 int regnum, const gdb_byte *buf)
2092{
2093 gdb_byte reg_buf[MAX_REGISTER_SIZE];
2094
2095 /* Ensure the register buffer is zero; we want GDB writes of the
2096 various 'scalar' pseudo registers to behave like architectural
2097 writes: register-width bytes are written and the remainder is
2098 set to zero. */
2099 memset (reg_buf, 0, sizeof (reg_buf));
2100
2101 regnum -= gdbarch_num_regs (gdbarch);
2102
2103 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2104 {
2105 /* pseudo Q registers */
2106 unsigned v_regnum;
2107
2108 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2109 memcpy (reg_buf, buf, Q_REGISTER_SIZE);
2110 regcache_raw_write (regcache, v_regnum, reg_buf);
2111 return;
2112 }
2113
2114 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2115 {
2116 /* pseudo D registers */
2117 unsigned v_regnum;
2118
2119 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2120 memcpy (reg_buf, buf, D_REGISTER_SIZE);
2121 regcache_raw_write (regcache, v_regnum, reg_buf);
2122 return;
2123 }
2124
2125 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2126 {
2127 unsigned v_regnum;
2128
2129 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2130 memcpy (reg_buf, buf, S_REGISTER_SIZE);
2131 regcache_raw_write (regcache, v_regnum, reg_buf);
2132 return;
2133 }
2134
2135 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2136 {
2137 /* pseudo H registers */
2138 unsigned v_regnum;
2139
2140 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2141 memcpy (reg_buf, buf, H_REGISTER_SIZE);
2142 regcache_raw_write (regcache, v_regnum, reg_buf);
2143 return;
2144 }
2145
2146 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2147 {
2148 /* pseudo B registers */
2149 unsigned v_regnum;
2150
2151 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2152 memcpy (reg_buf, buf, B_REGISTER_SIZE);
2153 regcache_raw_write (regcache, v_regnum, reg_buf);
2154 return;
2155 }
2156
2157 gdb_assert_not_reached ("regnum out of bounds");
2158}
2159
2160/* Callback function for user_reg_add. */
2161
2162static struct value *
2163value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
2164{
2165 const int *reg_p = (const int *) baton;
2166
2167 return value_of_register (*reg_p, frame);
2168}
2169\f
2170
2171/* Implement the "software_single_step" gdbarch method, needed to
2172 single step through atomic sequences on AArch64. */
2173
2174static int
2175aarch64_software_single_step (struct frame_info *frame)
2176{
2177 struct gdbarch *gdbarch = get_frame_arch (frame);
2178 struct address_space *aspace = get_frame_address_space (frame);
2179 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2180 const int insn_size = 4;
2181 const int atomic_sequence_length = 16; /* Instruction sequence length. */
2182 CORE_ADDR pc = get_frame_pc (frame);
2183 CORE_ADDR breaks[2] = { -1, -1 };
2184 CORE_ADDR loc = pc;
2185 CORE_ADDR closing_insn = 0;
2186 uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
2187 byte_order_for_code);
2188 int index;
2189 int insn_count;
2190 int bc_insn_count = 0; /* Conditional branch instruction count. */
2191 int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed). */
2192 aarch64_inst inst;
2193
2194 if (aarch64_decode_insn (insn, &inst, 1) != 0)
2195 return 0;
2196
2197 /* Look for a Load Exclusive instruction which begins the sequence. */
2198 if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
2199 return 0;
2200
2201 for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
2202 {
2203 loc += insn_size;
2204 insn = read_memory_unsigned_integer (loc, insn_size,
2205 byte_order_for_code);
2206
2207 if (aarch64_decode_insn (insn, &inst, 1) != 0)
2208 return 0;
2209 /* Check if the instruction is a conditional branch. */
2210 if (inst.opcode->iclass == condbranch)
2211 {
2212 gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);
2213
2214 if (bc_insn_count >= 1)
2215 return 0;
2216
2217 /* It is, so we'll try to set a breakpoint at the destination. */
2218 breaks[1] = loc + inst.operands[0].imm.value;
2219
2220 bc_insn_count++;
2221 last_breakpoint++;
2222 }
2223
2224 /* Look for the Store Exclusive which closes the atomic sequence. */
2225 if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
2226 {
2227 closing_insn = loc;
2228 break;
2229 }
2230 }
2231
2232 /* We didn't find a closing Store Exclusive instruction, fall back. */
2233 if (!closing_insn)
2234 return 0;
2235
2236 /* Insert breakpoint after the end of the atomic sequence. */
2237 breaks[0] = loc + insn_size;
2238
2239 /* Check for duplicated breakpoints, and also check that the second
2240 breakpoint is not within the atomic sequence. */
2241 if (last_breakpoint
2242 && (breaks[1] == breaks[0]
2243 || (breaks[1] >= pc && breaks[1] <= closing_insn)))
2244 last_breakpoint = 0;
2245
2246 /* Insert the breakpoint at the end of the sequence, and one at the
2247 destination of the conditional branch, if it exists. */
2248 for (index = 0; index <= last_breakpoint; index++)
2249 insert_single_step_breakpoint (gdbarch, aspace, breaks[index]);
2250
2251 return 1;
2252}
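/* Illustrative atomic sequence (not from this file) showing where
   the step breakpoints land:

     loop: ldaxr w1, [x0]      ; load exclusive opens the sequence
           add   w1, w1, #1
           stlxr w2, w1, [x0]  ; store exclusive closes it
           cbnz  w2, loop

   breaks[0] is placed after the STLXR; stepping through the body
   instruction by instruction could clear the exclusive monitor and
   make the sequence livelock.  */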
2253
2254struct displaced_step_closure
2255{
2256 /* Non-zero when a conditional instruction, such as B.COND, TBZ, etc.,
2257 is being displaced stepped. */
2258 int cond;
2259
2260 /* PC adjustment offset after displaced stepping. */
2261 int32_t pc_adjust;
2262};
2263
2264/* Data when visiting instructions for displaced stepping. */
2265
2266struct aarch64_displaced_step_data
2267{
2268 struct aarch64_insn_data base;
2269
2270 /* The address at which the instruction will be executed. */
2271 CORE_ADDR new_addr;
2272 /* Buffer of instructions to be copied to NEW_ADDR to execute. */
2273 uint32_t insn_buf[DISPLACED_MODIFIED_INSNS];
2274 /* Number of instructions in INSN_BUF. */
2275 unsigned insn_count;
2276 /* Registers when doing displaced stepping. */
2277 struct regcache *regs;
2278
2279 struct displaced_step_closure *dsc;
2280};
2281
2282/* Implementation of aarch64_insn_visitor method "b". */
2283
2284static void
2285aarch64_displaced_step_b (const int is_bl, const int32_t offset,
2286 struct aarch64_insn_data *data)
2287{
2288 struct aarch64_displaced_step_data *dsd
2289 = (struct aarch64_displaced_step_data *) data;
2290 int32_t new_offset = data->insn_addr - dsd->new_addr + offset;
2291
2292 if (can_encode_int32 (new_offset, 28))
2293 {
2294 /* Emit B rather than BL, because executing BL on a new address
2295 will get the wrong address into LR. In order to avoid this,
2296 we emit B, and update LR if the instruction is BL. */
2297 emit_b (dsd->insn_buf, 0, new_offset);
2298 dsd->insn_count++;
2299 }
2300 else
2301 {
2302 /* Write NOP. */
2303 emit_nop (dsd->insn_buf);
2304 dsd->insn_count++;
2305 dsd->dsc->pc_adjust = offset;
2306 }
2307
2308 if (is_bl)
2309 {
2310 /* Update LR. */
2311 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
2312 data->insn_addr + 4);
2313 }
2314}
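/* Offset arithmetic sketch (addresses are made up): for a BL at
   FROM == 0x400000 displaced to TO == 0x7ff0000000, NEW_OFFSET ==
   FROM - TO + OFFSET no longer fits in 28 signed bits, so the else
   branch above emits a NOP and lets PC_ADJUST replay the branch,
   with LR still updated by hand for the BL case.  */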
2315
2316/* Implementation of aarch64_insn_visitor method "b_cond". */
2317
2318static void
2319aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
2320 struct aarch64_insn_data *data)
2321{
2322 struct aarch64_displaced_step_data *dsd
2323 = (struct aarch64_displaced_step_data *) data;
2324 int32_t new_offset = data->insn_addr - dsd->new_addr + offset;
2325
2326 /* GDB has to fix up the PC after displaced stepping this instruction
2327 differently according to whether the condition is true or false.
2328 Instead of checking COND against the condition flags, we can emit
2329 the following instruction sequence, and GDB can tell how to fix up
2330 the PC from the resulting PC value.
2331
2332 B.COND TAKEN ; If cond is true, then jump to TAKEN.
2333 INSN1 ;
2334 TAKEN:
2335 INSN2
2336 */
2337
2338 emit_bcond (dsd->insn_buf, cond, 8);
2339 dsd->dsc->cond = 1;
2340 dsd->dsc->pc_adjust = offset;
2341 dsd->insn_count = 1;
2342}
2343
2344 /* Build an aarch64_register operand dynamically. If the register is
2345 known statically, it is preferable to define it as a global instead
2346 of using this helper function. */
2347
2348static struct aarch64_register
2349aarch64_register (unsigned num, int is64)
2350{
2351 return (struct aarch64_register) { num, is64 };
2352}
2353
2354/* Implementation of aarch64_insn_visitor method "cb". */
2355
2356static void
2357aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
2358 const unsigned rn, int is64,
2359 struct aarch64_insn_data *data)
2360{
2361 struct aarch64_displaced_step_data *dsd
2362 = (struct aarch64_displaced_step_data *) data;
2363 int32_t new_offset = data->insn_addr - dsd->new_addr + offset;
2364
2365 /* The offset is out of range for a compare and branch
2366 instruction. We can use the following instructions instead:
2367
2368 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
2369 INSN1 ;
2370 TAKEN:
2371 INSN2
2372 */
2373 emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
2374 dsd->insn_count = 1;
2375 dsd->dsc->cond = 1;
2376 dsd->dsc->pc_adjust = offset;
2377}
2378
2379/* Implementation of aarch64_insn_visitor method "tb". */
2380
2381static void
2382aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
2383 const unsigned rt, unsigned bit,
2384 struct aarch64_insn_data *data)
2385{
2386 struct aarch64_displaced_step_data *dsd
2387 = (struct aarch64_displaced_step_data *) data;
2388 int32_t new_offset = data->insn_addr - dsd->new_addr + offset;
2389
2390 /* The offset is out of range for a test bit and branch
2391 instruction. We can use the following instructions instead:
2392
2393 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
2394 INSN1 ;
2395 TAKEN:
2396 INSN2
2397
2398 */
2399 emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
2400 dsd->insn_count = 1;
2401 dsd->dsc->cond = 1;
2402 dsd->dsc->pc_adjust = offset;
2403}
2404
2405/* Implementation of aarch64_insn_visitor method "adr". */
2406
2407static void
2408aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
2409 const int is_adrp, struct aarch64_insn_data *data)
2410{
2411 struct aarch64_displaced_step_data *dsd
2412 = (struct aarch64_displaced_step_data *) data;
2413 /* We know exactly the address the ADR{P,} instruction will compute.
2414 We can just write it to the destination register. */
2415 CORE_ADDR address = data->insn_addr + offset;
2416
2417 if (is_adrp)
2418 {
2419 /* Clear the lower 12 bits of the offset to get the 4K page. */
2420 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2421 address & ~0xfff);
2422 }
2423 else
2424 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2425 address);
2426
2427 dsd->dsc->pc_adjust = 4;
2428 emit_nop (dsd->insn_buf);
2429 dsd->insn_count = 1;
2430}
2431
2432/* Implementation of aarch64_insn_visitor method "ldr_literal". */
2433
2434static void
2435aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
2436 const unsigned rt, const int is64,
2437 struct aarch64_insn_data *data)
2438{
2439 struct aarch64_displaced_step_data *dsd
2440 = (struct aarch64_displaced_step_data *) data;
2441 CORE_ADDR address = data->insn_addr + offset;
2442 struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };
2443
2444 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
2445 address);
2446
2447 if (is_sw)
2448 dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
2449 aarch64_register (rt, 1), zero);
2450 else
2451 dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
2452 aarch64_register (rt, 1), zero);
2453
2454 dsd->dsc->pc_adjust = 4;
2455}
2456
2457/* Implementation of aarch64_insn_visitor method "others". */
2458
2459static void
2460aarch64_displaced_step_others (const uint32_t insn,
2461 struct aarch64_insn_data *data)
2462{
2463 struct aarch64_displaced_step_data *dsd
2464 = (struct aarch64_displaced_step_data *) data;
2465
2466 aarch64_emit_insn (dsd->insn_buf, insn);
2467 dsd->insn_count = 1;
2468
2469 if ((insn & 0xfffffc1f) == 0xd65f0000)
2470 {
2471 /* RET */
2472 dsd->dsc->pc_adjust = 0;
2473 }
2474 else
2475 dsd->dsc->pc_adjust = 4;
2476}
2477
2478static const struct aarch64_insn_visitor visitor =
2479{
2480 aarch64_displaced_step_b,
2481 aarch64_displaced_step_b_cond,
2482 aarch64_displaced_step_cb,
2483 aarch64_displaced_step_tb,
2484 aarch64_displaced_step_adr,
2485 aarch64_displaced_step_ldr_literal,
2486 aarch64_displaced_step_others,
2487};
2488
2489/* Implement the "displaced_step_copy_insn" gdbarch method. */
2490
2491struct displaced_step_closure *
2492aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
2493 CORE_ADDR from, CORE_ADDR to,
2494 struct regcache *regs)
2495{
2496 struct displaced_step_closure *dsc = NULL;
2497 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2498 uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
2499 struct aarch64_displaced_step_data dsd;
2500 aarch64_inst inst;
2501
2502 if (aarch64_decode_insn (insn, &inst, 1) != 0)
2503 return NULL;
2504
2505 /* Look for a Load Exclusive instruction which begins the sequence. */
2506 if (inst.opcode->iclass == ldstexcl && bit (insn, 22))
2507 {
2508 /* We can't displaced step atomic sequences. */
2509 return NULL;
2510 }
2511
2512 dsc = XCNEW (struct displaced_step_closure);
2513 dsd.base.insn_addr = from;
2514 dsd.new_addr = to;
2515 dsd.regs = regs;
2516 dsd.dsc = dsc;
2517 dsd.insn_count = 0;
2518 aarch64_relocate_instruction (insn, &visitor,
2519 (struct aarch64_insn_data *) &dsd);
2520 gdb_assert (dsd.insn_count <= DISPLACED_MODIFIED_INSNS);
2521
2522 if (dsd.insn_count != 0)
2523 {
2524 int i;
2525
2526 /* Instruction can be relocated to scratch pad. Copy
2527 relocated instruction(s) there. */
2528 for (i = 0; i < dsd.insn_count; i++)
2529 {
2530 if (debug_displaced)
2531 {
2532 debug_printf ("displaced: writing insn ");
2533 debug_printf ("%.8x", dsd.insn_buf[i]);
2534 debug_printf (" at %s\n", paddress (gdbarch, to + i * 4));
2535 }
2536 write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
2537 (ULONGEST) dsd.insn_buf[i]);
2538 }
2539 }
2540 else
2541 {
2542 xfree (dsc);
2543 dsc = NULL;
2544 }
2545
2546 return dsc;
2547}
2548
2549/* Implement the "displaced_step_fixup" gdbarch method. */
2550
2551void
2552aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
2553 struct displaced_step_closure *dsc,
2554 CORE_ADDR from, CORE_ADDR to,
2555 struct regcache *regs)
2556{
2557 if (dsc->cond)
2558 {
2559 ULONGEST pc;
2560
2561 regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc);
2562 if (pc - to == 8)
2563 {
2564 /* Condition is true. */
2565 }
2566 else if (pc - to == 4)
2567 {
2568 /* Condition is false. */
2569 dsc->pc_adjust = 4;
2570 }
2571 else
2572 gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
2573 }
2574
2575 if (dsc->pc_adjust != 0)
2576 {
2577 if (debug_displaced)
2578 {
2579 debug_printf ("displaced: fixup: set PC to %s:%d\n",
2580 paddress (gdbarch, from), dsc->pc_adjust);
2581 }
2582 regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
2583 from + dsc->pc_adjust);
2584 }
2585}
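/* Worked example: displaced-stepping a B.EQ copied to scratch
   address TO.  aarch64_displaced_step_b_cond emitted "B.EQ TO + 8",
   so if the condition held the PC stops at TO + 8 and PC_ADJUST
   keeps the original branch offset, resuming at FROM + offset;
   otherwise the PC stops at TO + 4 and PC_ADJUST is forced to 4,
   resuming at FROM + 4.  */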
2586
2587/* Implement the "displaced_step_hw_singlestep" gdbarch method. */
2588
2589int
2590aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch,
2591 struct displaced_step_closure *closure)
2592{
2593 return 1;
2594}
2595
2596/* Initialize the current architecture based on INFO. If possible,
2597 re-use an architecture from ARCHES, which is a list of
2598 architectures already created during this debugging session.
2599
2600 Called e.g. at program startup, when reading a core file, and when
2601 reading a binary file. */
2602
2603static struct gdbarch *
2604aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
2605{
2606 struct gdbarch_tdep *tdep;
2607 struct gdbarch *gdbarch;
2608 struct gdbarch_list *best_arch;
2609 struct tdesc_arch_data *tdesc_data = NULL;
2610 const struct target_desc *tdesc = info.target_desc;
2611 int i;
2612 int have_fpa_registers = 1;
2613 int valid_p = 1;
2614 const struct tdesc_feature *feature;
2615 int num_regs = 0;
2616 int num_pseudo_regs = 0;
2617
2618 /* Ensure we always have a target descriptor. */
2619 if (!tdesc_has_registers (tdesc))
2620 tdesc = tdesc_aarch64;
2621
2622 gdb_assert (tdesc);
2623
2624 feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");
2625
2626 if (feature == NULL)
2627 return NULL;
2628
2629 tdesc_data = tdesc_data_alloc ();
2630
2631 /* Validate the descriptor provides the mandatory core R registers
2632 and allocate their numbers. */
2633 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
2634 valid_p &=
2635 tdesc_numbered_register (feature, tdesc_data, AARCH64_X0_REGNUM + i,
2636 aarch64_r_register_names[i]);
2637
2638 num_regs = AARCH64_X0_REGNUM + i;
2639
2640 /* Look for the V registers. */
2641 feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
2642 if (feature)
2643 {
2644 /* Validate the descriptor provides the mandatory V registers
2645 and allocate their numbers. */
2646 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
2647 valid_p &=
2648 tdesc_numbered_register (feature, tdesc_data, AARCH64_V0_REGNUM + i,
2649 aarch64_v_register_names[i]);
2650
2651 num_regs = AARCH64_V0_REGNUM + i;
2652
2653 num_pseudo_regs += 32; /* add the Qn scalar register pseudos */
2654 num_pseudo_regs += 32; /* add the Dn scalar register pseudos */
2655 num_pseudo_regs += 32; /* add the Sn scalar register pseudos */
2656 num_pseudo_regs += 32; /* add the Hn scalar register pseudos */
2657 num_pseudo_regs += 32; /* add the Bn scalar register pseudos */
2658 }
2659
2660 if (!valid_p)
2661 {
2662 tdesc_data_cleanup (tdesc_data);
2663 return NULL;
2664 }
2665
2666 /* AArch64 code is always little-endian. */
2667 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
2668
2669 /* If there is already a candidate, use it. */
2670 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
2671 best_arch != NULL;
2672 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
2673 {
2674 /* Found a match. */
2675 break;
2676 }
2677
2678 if (best_arch != NULL)
2679 {
2680 if (tdesc_data != NULL)
2681 tdesc_data_cleanup (tdesc_data);
2682 return best_arch->gdbarch;
2683 }
2684
2685 tdep = XCNEW (struct gdbarch_tdep);
2686 gdbarch = gdbarch_alloc (&info, tdep);
2687
2688 /* This should be low enough for everything. */
2689 tdep->lowest_pc = 0x20;
2690 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
2691 tdep->jb_elt_size = 8;
2692
2693 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
2694 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
2695
2696 /* Frame handling. */
2697 set_gdbarch_dummy_id (gdbarch, aarch64_dummy_id);
2698 set_gdbarch_unwind_pc (gdbarch, aarch64_unwind_pc);
2699 set_gdbarch_unwind_sp (gdbarch, aarch64_unwind_sp);
2700
2701 /* Advance PC across function entry code. */
2702 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
2703
2704 /* The stack grows downward. */
2705 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
2706
2707 /* Breakpoint manipulation. */
2708 set_gdbarch_breakpoint_from_pc (gdbarch, aarch64_breakpoint_from_pc);
2709 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
2710 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
2711
2712 /* Information about registers, etc. */
2713 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
2714 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
2715 set_gdbarch_num_regs (gdbarch, num_regs);
2716
2717 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
2718 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
2719 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
2720 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
2721 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
2722 set_tdesc_pseudo_register_reggroup_p (gdbarch,
2723 aarch64_pseudo_register_reggroup_p);
2724
2725 /* ABI */
2726 set_gdbarch_short_bit (gdbarch, 16);
2727 set_gdbarch_int_bit (gdbarch, 32);
2728 set_gdbarch_float_bit (gdbarch, 32);
2729 set_gdbarch_double_bit (gdbarch, 64);
2730 set_gdbarch_long_double_bit (gdbarch, 128);
2731 set_gdbarch_long_bit (gdbarch, 64);
2732 set_gdbarch_long_long_bit (gdbarch, 64);
2733 set_gdbarch_ptr_bit (gdbarch, 64);
2734 set_gdbarch_char_signed (gdbarch, 0);
2735 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
2736 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
2737 set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);
2738
2739 /* Internal <-> external register number maps. */
2740 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
2741
2742 /* Returning results. */
2743 set_gdbarch_return_value (gdbarch, aarch64_return_value);
2744
2745 /* Disassembly. */
2746 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
2747
2748 /* Virtual tables. */
2749 set_gdbarch_vbit_in_delta (gdbarch, 1);
2750
2751 /* Hook in the ABI-specific overrides, if they have been registered. */
2752 info.target_desc = tdesc;
2753 info.tdep_info = (void *) tdesc_data;
2754 gdbarch_init_osabi (info, gdbarch);
2755
2756 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
2757
2758 /* Add some default predicates. */
2759 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
2760 dwarf2_append_unwinders (gdbarch);
2761 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
2762
2763 frame_base_set_default (gdbarch, &aarch64_normal_base);
2764
2765 /* Now we have tuned the configuration, set a few final things,
2766 based on what the OS ABI has told us. */
2767
2768 if (tdep->jb_pc >= 0)
2769 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
2770
2771 set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);
2772
2773 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
2774
2775 /* Add standard register aliases. */
2776 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
2777 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
2778 value_of_aarch64_user_reg,
2779 &aarch64_register_aliases[i].regnum);
2780
2781 return gdbarch;
2782}
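/* Sketch of what the validation above expects from a target
   description (assuming the standard AArch64 feature layout): the
   "org.gnu.gdb.aarch64.core" feature must supply all of the core R
   registers, and the optional "org.gnu.gdb.aarch64.fpu" feature
   supplies the V registers and enables the 160 Q/D/S/H/B pseudo
   registers counted into num_pseudo_regs.  */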
2783
2784static void
2785aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
2786{
2787 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2788
2789 if (tdep == NULL)
2790 return;
2791
2792 fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
2793 paddress (gdbarch, tdep->lowest_pc));
2794}
2795
2796/* Suppress warning from -Wmissing-prototypes. */
2797extern initialize_file_ftype _initialize_aarch64_tdep;
2798
2799void
2800_initialize_aarch64_tdep (void)
2801{
2802 gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
2803 aarch64_dump_tdep);
2804
2805 initialize_tdesc_aarch64 ();
2806
2807 /* Debug this file's internals. */
2808 add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
2809Set AArch64 debugging."), _("\
2810Show AArch64 debugging."), _("\
2811When on, AArch64 specific debugging is enabled."),
2812 NULL,
2813 show_aarch64_debug,
2814 &setdebuglist, &showdebuglist);
2815}
2816
2817/* AArch64 process record-replay related structures, defines etc. */
2818
2819#define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
2820 do \
2821 { \
2822 unsigned int reg_len = LENGTH; \
2823 if (reg_len) \
2824 { \
2825 REGS = XNEWVEC (uint32_t, reg_len); \
2826 memcpy(&REGS[0], &RECORD_BUF[0], sizeof(uint32_t)*LENGTH); \
2827 } \
2828 } \
2829 while (0)
2830
2831#define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
2832 do \
2833 { \
2834 unsigned int mem_len = LENGTH; \
2835 if (mem_len) \
2836 { \
2837 MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
2838 memcpy(&MEMS->len, &RECORD_BUF[0], \
2839 sizeof(struct aarch64_mem_r) * LENGTH); \
2840 } \
2841 } \
2842 while (0)
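/* Note on MEM_ALLOC: RECORD_BUF is laid out as len/addr pairs of
   uint64_t, matching struct aarch64_mem_r below, and the memcpy
   destination &MEMS->len is equivalent to MEMS itself because len is
   the struct's first member.  */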
2843
2844/* AArch64 record/replay structures and enumerations. */
2845
2846struct aarch64_mem_r
2847{
2848 uint64_t len; /* Record length. */
2849 uint64_t addr; /* Memory address. */
2850};
2851
2852enum aarch64_record_result
2853{
2854 AARCH64_RECORD_SUCCESS,
2855 AARCH64_RECORD_FAILURE,
2856 AARCH64_RECORD_UNSUPPORTED,
2857 AARCH64_RECORD_UNKNOWN
2858};
2859
2860typedef struct insn_decode_record_t
2861{
2862 struct gdbarch *gdbarch;
2863 struct regcache *regcache;
2864 CORE_ADDR this_addr; /* Address of insn to be recorded. */
2865 uint32_t aarch64_insn; /* Insn to be recorded. */
2866 uint32_t mem_rec_count; /* Count of memory records. */
2867 uint32_t reg_rec_count; /* Count of register records. */
2868 uint32_t *aarch64_regs; /* Registers to be recorded. */
2869 struct aarch64_mem_r *aarch64_mems; /* Memory locations to be recorded. */
2870} insn_decode_record;
2871
2872/* Record handler for data processing - register instructions. */
2873
2874static unsigned int
2875aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
2876{
2877 uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
2878 uint32_t record_buf[4];
2879
2880 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
2881 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
2882 insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);
2883
2884 if (!bit (aarch64_insn_r->aarch64_insn, 28))
2885 {
2886 uint8_t setflags;
2887
2888 /* Logical (shifted register). */
2889 if (insn_bits24_27 == 0x0a)
2890 setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
2891 /* Add/subtract. */
2892 else if (insn_bits24_27 == 0x0b)
2893 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
2894 else
2895 return AARCH64_RECORD_UNKNOWN;
2896
2897 record_buf[0] = reg_rd;
2898 aarch64_insn_r->reg_rec_count = 1;
2899 if (setflags)
2900 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
2901 }
2902 else
2903 {
2904 if (insn_bits24_27 == 0x0b)
2905 {
2906 /* Data-processing (3 source). */
2907 record_buf[0] = reg_rd;
2908 aarch64_insn_r->reg_rec_count = 1;
2909 }
2910 else if (insn_bits24_27 == 0x0a)
2911 {
2912 if (insn_bits21_23 == 0x00)
2913 {
2914 /* Add/subtract (with carry). */
2915 record_buf[0] = reg_rd;
2916 aarch64_insn_r->reg_rec_count = 1;
2917 if (bit (aarch64_insn_r->aarch64_insn, 29))
2918 {
2919 record_buf[1] = AARCH64_CPSR_REGNUM;
2920 aarch64_insn_r->reg_rec_count = 2;
2921 }
2922 }
2923 else if (insn_bits21_23 == 0x02)
2924 {
2925 /* Conditional compare (register) and conditional compare
2926 (immediate) instructions. */
2927 record_buf[0] = AARCH64_CPSR_REGNUM;
2928 aarch64_insn_r->reg_rec_count = 1;
2929 }
2930 else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
2931 {
2932 /* Conditional select. */
2933 /* Data-processing (2 source). */
2934 /* Data-processing (1 source). */
2935 record_buf[0] = reg_rd;
2936 aarch64_insn_r->reg_rec_count = 1;
2937 }
2938 else
2939 return AARCH64_RECORD_UNKNOWN;
2940 }
2941 }
2942
2943 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
2944 record_buf);
2945 return AARCH64_RECORD_SUCCESS;
2946}
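/* Example derived from the decode above: "adds x0, x1, x2" has the
   S bit set, so both x0 and CPSR are recorded; plain
   "and x0, x1, x2" records only x0.  */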
2947
2948/* Record handler for data processing - immediate instructions. */
2949
2950static unsigned int
2951aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
2952{
2953 uint8_t reg_rd, insn_bit28, insn_bit23, insn_bits24_27, setflags;
2954 uint32_t record_buf[4];
2955
2956 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
2957 insn_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
2958 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
2959 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
2960
2961 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
2962 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
2963 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
2964 {
2965 record_buf[0] = reg_rd;
2966 aarch64_insn_r->reg_rec_count = 1;
2967 }
2968 else if (insn_bits24_27 == 0x01)
2969 {
2970 /* Add/Subtract (immediate). */
2971 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
2972 record_buf[0] = reg_rd;
2973 aarch64_insn_r->reg_rec_count = 1;
2974 if (setflags)
2975 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
2976 }
2977 else if (insn_bits24_27 == 0x02 && !insn_bit23)
2978 {
2979 /* Logical (immediate). */
2980 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
2981 record_buf[0] = reg_rd;
2982 aarch64_insn_r->reg_rec_count = 1;
2983 if (setflags)
2984 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
2985 }
2986 else
2987 return AARCH64_RECORD_UNKNOWN;
2988
2989 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
2990 record_buf);
2991 return AARCH64_RECORD_SUCCESS;
2992}
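/* Example: "movz x5, #1" (move wide immediate) records only x5,
   while "subs x0, x1, #4" also records CPSR because the S bit is
   set.  */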
2993
2994/* Record handler for branch, exception generation and system instructions. */
2995
2996static unsigned int
2997aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
2998{
2999 struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
3000 uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
3001 uint32_t record_buf[4];
3002
3003 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3004 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3005 insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3006
3007 if (insn_bits28_31 == 0x0d)
3008 {
3009 /* Exception generation instructions. */
3010 if (insn_bits24_27 == 0x04)
3011 {
3012 if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
3013 && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
3014 && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
3015 {
3016 ULONGEST svc_number;
3017
3018 regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
3019 &svc_number);
3020 return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
3021 svc_number);
3022 }
3023 else
3024 return AARCH64_RECORD_UNSUPPORTED;
3025 }
3026 /* System instructions. */
3027 else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
3028 {
3029 uint32_t reg_rt, reg_crn;
3030
3031 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3032 reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3033
3034 /* Record rt in case of sysl and mrs instructions. */
3035 if (bit (aarch64_insn_r->aarch64_insn, 21))
3036 {
3037 record_buf[0] = reg_rt;
3038 aarch64_insn_r->reg_rec_count = 1;
3039 }
3040 /* Record cpsr for hint and msr(immediate) instructions. */
3041 else if (reg_crn == 0x02 || reg_crn == 0x04)
3042 {
3043 record_buf[0] = AARCH64_CPSR_REGNUM;
3044 aarch64_insn_r->reg_rec_count = 1;
3045 }
3046 }
3047 /* Unconditional branch (register). */
3048 else if ((insn_bits24_27 & 0x0e) == 0x06)
3049 {
3050 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3051 if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
3052 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3053 }
3054 else
3055 return AARCH64_RECORD_UNKNOWN;
3056 }
3057 /* Unconditional branch (immediate). */
3058 else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
3059 {
3060 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3061 if (bit (aarch64_insn_r->aarch64_insn, 31))
3062 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3063 }
3064 else
3065 /* Compare & branch (immediate), Test & branch (immediate) and
3066 Conditional branch (immediate). */
3067 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3068
3069 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3070 record_buf);
3071 return AARCH64_RECORD_SUCCESS;
3072}
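/* Examples: "bl func" records PC and LR, "b.ne label" records only
   PC, and "svc #0" is delegated to the OS-specific hook installed in
   tdep->aarch64_syscall_record, with the syscall number read from
   register 8 (x8).  */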
3073
3074/* Record handler for advanced SIMD load and store instructions. */
3075
3076static unsigned int
3077aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
3078{
3079 CORE_ADDR address;
3080 uint64_t addr_offset = 0;
3081 uint32_t record_buf[24];
3082 uint64_t record_buf_mem[24];
3083 uint32_t reg_rn, reg_rt;
3084 uint32_t reg_index = 0, mem_index = 0;
3085 uint8_t opcode_bits, size_bits;
3086
3087 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3088 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3089 size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3090 opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3091 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);
3092
3093 if (record_debug)
3094 debug_printf ("Process record: Advanced SIMD load/store\n");
3095
3096 /* Load/store single structure. */
3097 if (bit (aarch64_insn_r->aarch64_insn, 24))
3098 {
3099 uint8_t sindex, scale, selem, esize, replicate = 0;
3100 scale = opcode_bits >> 2;
3101 selem = ((opcode_bits & 0x02) |
3102 bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
3103 switch (scale)
3104 {
3105 case 1:
3106 if (size_bits & 0x01)
3107 return AARCH64_RECORD_UNKNOWN;
3108 break;
3109 case 2:
3110 if ((size_bits >> 1) & 0x01)
3111 return AARCH64_RECORD_UNKNOWN;
3112 if (size_bits & 0x01)
3113 {
3114 if (!((opcode_bits >> 1) & 0x01))
3115 scale = 3;
3116 else
3117 return AARCH64_RECORD_UNKNOWN;
3118 }
3119 break;
3120 case 3:
3121 if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
3122 {
3123 scale = size_bits;
3124 replicate = 1;
3125 break;
3126 }
3127 else
3128 return AARCH64_RECORD_UNKNOWN;
3129 default:
3130 break;
3131 }
3132 esize = 8 << scale;
3133 if (replicate)
3134 for (sindex = 0; sindex < selem; sindex++)
3135 {
3136 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3137 reg_rt = (reg_rt + 1) % 32;
3138 }
3139 else
3140 {
3141 for (sindex = 0; sindex < selem; sindex++)
3142 if (bit (aarch64_insn_r->aarch64_insn, 22))
3143 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3144 else
3145 {
3146 record_buf_mem[mem_index++] = esize / 8;
3147 record_buf_mem[mem_index++] = address + addr_offset;
3148 }
3149 addr_offset = addr_offset + (esize / 8);
3150 reg_rt = (reg_rt + 1) % 32;
3151 }
3152 }
3153 /* Load/store multiple structure. */
3154 else
3155 {
3156 uint8_t selem, esize, rpt, elements;
3157 uint8_t eindex, rindex;
3158
3159 esize = 8 << size_bits;
3160 if (bit (aarch64_insn_r->aarch64_insn, 30))
3161 elements = 128 / esize;
3162 else
3163 elements = 64 / esize;
3164
3165 switch (opcode_bits)
3166 {
3167 /*LD/ST4 (4 Registers). */
3168 case 0:
3169 rpt = 1;
3170 selem = 4;
3171 break;
3172 /*LD/ST1 (4 Registers). */
3173 case 2:
3174 rpt = 4;
3175 selem = 1;
3176 break;
3177 /*LD/ST3 (3 Registers). */
3178 case 4:
3179 rpt = 1;
3180 selem = 3;
3181 break;
3182 /*LD/ST1 (3 Registers). */
3183 case 6:
3184 rpt = 3;
3185 selem = 1;
3186 break;
3187 /*LD/ST1 (1 Register). */
3188 case 7:
3189 rpt = 1;
3190 selem = 1;
3191 break;
3192 /*LD/ST2 (2 Registers). */
3193 case 8:
3194 rpt = 1;
3195 selem = 2;
3196 break;
3197 /*LD/ST1 (2 Registers). */
3198 case 10:
3199 rpt = 2;
3200 selem = 1;
3201 break;
3202 default:
3203 return AARCH64_RECORD_UNSUPPORTED;
3204 break;
3205 }
3206 for (rindex = 0; rindex < rpt; rindex++)
3207 for (eindex = 0; eindex < elements; eindex++)
3208 {
3209 uint8_t reg_tt, sindex;
3210 reg_tt = (reg_rt + rindex) % 32;
3211 for (sindex = 0; sindex < selem; sindex++)
3212 {
3213 if (bit (aarch64_insn_r->aarch64_insn, 22))
3214 record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
3215 else
3216 {
3217 record_buf_mem[mem_index++] = esize / 8;
3218 record_buf_mem[mem_index++] = address + addr_offset;
3219 }
3220 addr_offset = addr_offset + (esize / 8);
3221 reg_tt = (reg_tt + 1) % 32;
3222 }
3223 }
3224 }
3225
3226 if (bit (aarch64_insn_r->aarch64_insn, 23))
3227 record_buf[reg_index++] = reg_rn;
3228
3229 aarch64_insn_r->reg_rec_count = reg_index;
3230 aarch64_insn_r->mem_rec_count = mem_index / 2;
3231 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3232 record_buf_mem);
3233 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3234 record_buf);
3235 return AARCH64_RECORD_SUCCESS;
3236}
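/* Example for one of the simpler encodings: "st1 {v0.2d}, [x0]"
   stores one register of two 64-bit elements, producing two 8-byte
   memory records at [x0] and [x0 + 8]; the post-index forms (bit 23
   set) additionally record the base register rn.  */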
3237
3238/* Record handler for load and store instructions. */
3239
3240static unsigned int
3241aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
3242{
3243 uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
3244 uint8_t insn_bit23, insn_bit21;
3245 uint8_t opc, size_bits, ld_flag, vector_flag;
3246 uint32_t reg_rn, reg_rt, reg_rt2;
3247 uint64_t datasize, offset;
3248 uint32_t record_buf[8];
3249 uint64_t record_buf_mem[8];
3250 CORE_ADDR address;
3251
3252 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3253 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3254 insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
3255 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3256 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3257 ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
3258 vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
3259 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3260 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3261 reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
3262 size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);
3263
3264 /* Load/store exclusive. */
3265 if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
3266 {
3267 if (record_debug)
3268 debug_printf ("Process record: load/store exclusive\n");
3269
3270 if (ld_flag)
3271 {
3272 record_buf[0] = reg_rt;
3273 aarch64_insn_r->reg_rec_count = 1;
3274 if (insn_bit21)
3275 {
3276 record_buf[1] = reg_rt2;
3277 aarch64_insn_r->reg_rec_count = 2;
3278 }
3279 }
3280 else
3281 {
3282 if (insn_bit21)
3283 datasize = (8 << size_bits) * 2;
3284 else
3285 datasize = (8 << size_bits);
3286 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3287 &address);
3288 record_buf_mem[0] = datasize / 8;
3289 record_buf_mem[1] = address;
3290 aarch64_insn_r->mem_rec_count = 1;
3291 if (!insn_bit23)
3292 {
3293 /* Save register rs. */
3294 record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
3295 aarch64_insn_r->reg_rec_count = 1;
3296 }
3297 }
3298 }
3299 /* Load register (literal) instructions decoding. */
3300 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
3301 {
3302 if (record_debug)
3303 debug_printf ("Process record: load register (literal)\n");
3304 if (vector_flag)
3305 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3306 else
3307 record_buf[0] = reg_rt;
3308 aarch64_insn_r->reg_rec_count = 1;
3309 }
3310 /* All types of load/store pair instructions decoding. */
3311 else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
3312 {
3313 if (record_debug)
3314 debug_printf ("Process record: load/store pair\n");
3315
3316 if (ld_flag)
3317 {
3318 if (vector_flag)
3319 {
3320 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3321 record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
3322 }
3323 else
3324 {
3325 record_buf[0] = reg_rt;
3326 record_buf[1] = reg_rt2;
3327 }
3328 aarch64_insn_r->reg_rec_count = 2;
3329 }
3330 else
3331 {
3332 uint16_t imm7_off;
3333 imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
3334 if (!vector_flag)
3335 size_bits = size_bits >> 1;
3336 datasize = 8 << (2 + size_bits);
3337 offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
3338 offset = offset << (2 + size_bits);
3339 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3340 &address);
3341 if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
3342 {
3343 if (imm7_off & 0x40)
3344 address = address - offset;
3345 else
3346 address = address + offset;
3347 }
3348
3349 record_buf_mem[0] = datasize / 8;
3350 record_buf_mem[1] = address;
3351 record_buf_mem[2] = datasize / 8;
3352 record_buf_mem[3] = address + (datasize / 8);
3353 aarch64_insn_r->mem_rec_count = 2;
3354 }
3355 if (bit (aarch64_insn_r->aarch64_insn, 23))
3356 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3357 }
3358 /* Load/store register (unsigned immediate) instructions. */
3359 else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
3360 {
3361 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3362 if (!(opc >> 1))
3363 if (opc & 0x01)
3364 ld_flag = 0x01;
3365 else
3366 ld_flag = 0x0;
3367 else
3368 if (size_bits != 0x03)
3369 ld_flag = 0x01;
3370 else
3371 return AARCH64_RECORD_UNKNOWN;
3372
3373 if (record_debug)
3374 {
3375 debug_printf ("Process record: load/store (unsigned immediate):"
3376 " size %x V %d opc %x\n", size_bits, vector_flag,
3377 opc);
3378 }
3379
3380 if (!ld_flag)
3381 {
3382 offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
3383 datasize = 8 << size_bits;
3384 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3385 &address);
3386 offset = offset << size_bits;
3387 address = address + offset;
3388
3389 record_buf_mem[0] = datasize >> 3;
3390 record_buf_mem[1] = address;
3391 aarch64_insn_r->mem_rec_count = 1;
3392 }
3393 else
3394 {
3395 if (vector_flag)
3396 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3397 else
3398 record_buf[0] = reg_rt;
3399 aarch64_insn_r->reg_rec_count = 1;
3400 }
3401 }
3402 /* Load/store register (register offset) instructions. */
3403 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3404 && insn_bits10_11 == 0x02 && insn_bit21)
3405 {
3406 if (record_debug)
3407 debug_printf ("Process record: load/store (register offset)\n");
3408 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3409 if (!(opc >> 1))
3410 if (opc & 0x01)
3411 ld_flag = 0x01;
3412 else
3413 ld_flag = 0x0;
3414 else
3415 if (size_bits != 0x03)
3416 ld_flag = 0x01;
3417 else
3418 return AARCH64_RECORD_UNKNOWN;
3419
3420 if (!ld_flag)
3421 {
3422 uint64_t reg_rm_val;
3423 regcache_raw_read_unsigned (aarch64_insn_r->regcache,
3424 bits (aarch64_insn_r->aarch64_insn, 16, 20), &reg_rm_val);
3425 if (bit (aarch64_insn_r->aarch64_insn, 12))
3426 offset = reg_rm_val << size_bits;
3427 else
3428 offset = reg_rm_val;
3429 datasize = 8 << size_bits;
3430 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3431 &address);
3432 address = address + offset;
3433 record_buf_mem[0] = datasize >> 3;
3434 record_buf_mem[1] = address;
3435 aarch64_insn_r->mem_rec_count = 1;
3436 }
3437 else
3438 {
3439 if (vector_flag)
3440 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3441 else
3442 record_buf[0] = reg_rt;
3443 aarch64_insn_r->reg_rec_count = 1;
3444 }
3445 }
3446 /* Load/store register (immediate and unprivileged) instructions. */
3447 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3448 && !insn_bit21)
3449 {
3450 if (record_debug)
3451 {
3452 debug_printf ("Process record: load/store "
3453 "(immediate and unprivileged)\n");
3454 }
3455 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3456 if (!(opc >> 1))
3457 if (opc & 0x01)
3458 ld_flag = 0x01;
3459 else
3460 ld_flag = 0x0;
3461 else
3462 if (size_bits != 0x03)
3463 ld_flag = 0x01;
3464 else
3465 return AARCH64_RECORD_UNKNOWN;
3466
3467 if (!ld_flag)
3468 {
3469 uint16_t imm9_off;
3470 imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
3471 offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
3472 datasize = 8 << size_bits;
3473 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3474 &address);
3475 if (insn_bits10_11 != 0x01)
3476 {
3477 if (imm9_off & 0x0100)
3478 address = address - offset;
3479 else
3480 address = address + offset;
3481 }
3482 record_buf_mem[0] = datasize >> 3;
3483 record_buf_mem[1] = address;
3484 aarch64_insn_r->mem_rec_count = 1;
3485 }
3486 else
3487 {
3488 if (vector_flag)
3489 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3490 else
3491 record_buf[0] = reg_rt;
3492 aarch64_insn_r->reg_rec_count = 1;
3493 }
3494 if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
3495 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3496 }
3497 /* Advanced SIMD load/store instructions. */
3498 else
3499 return aarch64_record_asimd_load_store (aarch64_insn_r);
3500
3501 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3502 record_buf_mem);
3503 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3504 record_buf);
3505 return AARCH64_RECORD_SUCCESS;
3506}
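/* Examples for the unsigned immediate form: "str x1, [sp, #16]"
   yields one 8-byte memory record at sp + 16, while "ldr x1, [sp]"
   records the destination register x1 instead.  */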
3507
3508/* Record handler for data processing SIMD and floating point instructions. */
3509
3510static unsigned int
3511aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
3512{
3513 uint8_t insn_bit21, opcode, rmode, reg_rd;
3514 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
3515 uint8_t insn_bits11_14;
3516 uint32_t record_buf[2];
3517
3518 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3519 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3520 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3521 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3522 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
3523 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
3524 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
3525 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3526 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3527
3528 if (record_debug)
3529 debug_printf ("Process record: data processing SIMD/FP: ");
3530
3531 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
3532 {
3533 /* Floating point - fixed point conversion instructions. */
3534 if (!insn_bit21)
3535 {
3536 if (record_debug)
3537 debug_printf ("FP - fixed point conversion");
3538
3539 if ((opcode >> 1) == 0x0 && rmode == 0x03)
3540 record_buf[0] = reg_rd;
3541 else
3542 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3543 }
3544 /* Floating point - conditional compare instructions. */
3545 else if (insn_bits10_11 == 0x01)
3546 {
3547 if (record_debug)
3548 debug_printf ("FP - conditional compare");
3549
3550 record_buf[0] = AARCH64_CPSR_REGNUM;
3551 }
3552 /* Floating point - data processing (2-source) and
3553 conditional select instructions. */
3554 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
3555 {
3556 if (record_debug)
3557 debug_printf ("FP - DP (2-source)");
3558
3559 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3560 }
3561 else if (insn_bits10_11 == 0x00)
3562 {
3563 /* Floating point - immediate instructions. */
3564 if ((insn_bits12_15 & 0x01) == 0x01
3565 || (insn_bits12_15 & 0x07) == 0x04)
3566 {
3567 if (record_debug)
3568 debug_printf ("FP - immediate");
3569 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3570 }
3571 /* Floating point - compare instructions. */
3572 else if ((insn_bits12_15 & 0x03) == 0x02)
3573 {
3574 if (record_debug)
3575 debug_printf ("FP - compare");
3576 record_buf[0] = AARCH64_CPSR_REGNUM;
3577 }
3578 /* Floating point - integer conversions instructions. */
3579 else if (insn_bits12_15 == 0x00)
3580 {
3581 /* Convert float to integer instruction. */
3582 if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
3583 {
3584 if (record_debug)
3585 debug_printf ("float to int conversion");
3586
3587 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3588 }
3589 /* Convert integer to float instruction. */
3590 else if ((opcode >> 1) == 0x01 && !rmode)
3591 {
3592 if (record_debug)
3593 debug_printf ("int to float conversion");
3594
3595 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3596 }
3597 /* Move float to integer instruction. */
3598 else if ((opcode >> 1) == 0x03)
3599 {
3600 if (record_debug)
3601 debug_printf ("move float to int");
3602
3603 if (!(opcode & 0x01))
3604 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3605 else
3606 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3607 }
3608 else
3609 return AARCH64_RECORD_UNKNOWN;
3610 }
3611 else
3612 return AARCH64_RECORD_UNKNOWN;
3613 }
3614 else
3615 return AARCH64_RECORD_UNKNOWN;
3616 }
3617 else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
3618 {
3619 if (record_debug)
3620 debug_printf ("SIMD copy");
3621
3622 /* Advanced SIMD copy instructions. */
3623 if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
3624 && !bit (aarch64_insn_r->aarch64_insn, 15)
3625 && bit (aarch64_insn_r->aarch64_insn, 10))
3626 {
3627 if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
3628 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3629 else
3630 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3631 }
3632 else
3633 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3634 }
3635 /* All remaining floating point or advanced SIMD instructions. */
3636 else
3637 {
3638 if (record_debug)
3639 debug_printf ("all remain");
3640
3641 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3642 }
3643
3644 if (record_debug)
3645 debug_printf ("\n");
3646
3647 aarch64_insn_r->reg_rec_count++;
3648 gdb_assert (aarch64_insn_r->reg_rec_count == 1);
3649 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3650 record_buf);
3651 return AARCH64_RECORD_SUCCESS;
3652}
3653
3654 /* Decode the instruction type and invoke its record handler. */
3655
3656static unsigned int
3657aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
3658{
3659 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
3660
3661 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
3662 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
3663 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
3664 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
3665
3666 /* Data processing - immediate instructions. */
3667 if (!ins_bit26 && !ins_bit27 && ins_bit28)
3668 return aarch64_record_data_proc_imm (aarch64_insn_r);
3669
3670 /* Branch, exception generation and system instructions. */
3671 if (ins_bit26 && !ins_bit27 && ins_bit28)
3672 return aarch64_record_branch_except_sys (aarch64_insn_r);
3673
3674 /* Load and store instructions. */
3675 if (!ins_bit25 && ins_bit27)
3676 return aarch64_record_load_store (aarch64_insn_r);
3677
3678 /* Data processing - register instructions. */
3679 if (ins_bit25 && !ins_bit26 && ins_bit27)
3680 return aarch64_record_data_proc_reg (aarch64_insn_r);
3681
3682 /* Data processing - SIMD and floating point instructions. */
3683 if (ins_bit25 && ins_bit26 && ins_bit27)
3684 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
3685
3686 return AARCH64_RECORD_UNSUPPORTED;
3687}
3688
3689 /* Clean up the record's register and memory allocations. */
3690
3691static void
3692deallocate_reg_mem (insn_decode_record *record)
3693{
3694 xfree (record->aarch64_regs);
3695 xfree (record->aarch64_mems);
3696}
3697
3698 /* Parse the current instruction and record the values of the registers
3699 and memory that will be changed by it to record_arch_list.
3700 Return -1 if something is wrong. */
3701
3702int
3703aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
3704 CORE_ADDR insn_addr)
3705{
3706 uint32_t rec_no = 0;
3707 uint8_t insn_size = 4;
3708 uint32_t ret = 0;
3709 ULONGEST t_bit = 0, insn_id = 0;
3710 gdb_byte buf[insn_size];
3711 insn_decode_record aarch64_record;
3712
3713 memset (&buf[0], 0, insn_size);
3714 memset (&aarch64_record, 0, sizeof (insn_decode_record));
3715 target_read_memory (insn_addr, &buf[0], insn_size);
3716 aarch64_record.aarch64_insn
3717 = (uint32_t) extract_unsigned_integer (&buf[0],
3718 insn_size,
3719 gdbarch_byte_order (gdbarch));
3720 aarch64_record.regcache = regcache;
3721 aarch64_record.this_addr = insn_addr;
3722 aarch64_record.gdbarch = gdbarch;
3723
3724 ret = aarch64_record_decode_insn_handler (&aarch64_record);
3725 if (ret == AARCH64_RECORD_UNSUPPORTED)
3726 {
3727 printf_unfiltered (_("Process record does not support instruction "
3728 "0x%0x at address %s.\n"),
3729 aarch64_record.aarch64_insn,
3730 paddress (gdbarch, insn_addr));
3731 ret = -1;
3732 }
3733
3734 if (0 == ret)
3735 {
3736 /* Record registers. */
3737 record_full_arch_list_add_reg (aarch64_record.regcache,
3738 AARCH64_PC_REGNUM);
3739 /* Always record register CPSR. */
3740 record_full_arch_list_add_reg (aarch64_record.regcache,
3741 AARCH64_CPSR_REGNUM);
3742 if (aarch64_record.aarch64_regs)
3743 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
3744 if (record_full_arch_list_add_reg (aarch64_record.regcache,
3745 aarch64_record.aarch64_regs[rec_no]))
3746 ret = -1;
3747
3748 /* Record memories. */
3749 if (aarch64_record.aarch64_mems)
3750 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
3751 if (record_full_arch_list_add_mem
3752 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
3753 aarch64_record.aarch64_mems[rec_no].len))
3754 ret = -1;
3755
3756 if (record_full_arch_list_add_end ())
3757 ret = -1;
3758 }
3759
3760 deallocate_reg_mem (&aarch64_record);
3761 return ret;
3762}