/* GNU/Linux/AArch64 specific low level interface, for the remote server for
   GDB.

   Copyright (C) 2009-2015 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/aarch64-linux.h"
#include "nat/aarch64-linux-hw-point.h"
#include "arch/aarch64-insn.h"
#include "linux-aarch32-low.h"
#include "elf/common.h"

#include <signal.h>
#include <sys/user.h>
#include "nat/gdb_ptrace.h"
#include <asm/ptrace.h>
#include <inttypes.h>
#include <endian.h>
#include <sys/uio.h>

#include "gdb_proc_service.h"

/* Defined in auto-generated files.  */
void init_registers_aarch64 (void);
extern const struct target_desc *tdesc_aarch64;

#ifdef HAVE_SYS_REG_H
#include <sys/reg.h>
#endif

#define AARCH64_X_REGS_NUM 31
#define AARCH64_V_REGS_NUM 32
#define AARCH64_X0_REGNO 0
#define AARCH64_SP_REGNO 31
#define AARCH64_PC_REGNO 32
#define AARCH64_CPSR_REGNO 33
#define AARCH64_V0_REGNO 34
#define AARCH64_FPSR_REGNO (AARCH64_V0_REGNO + AARCH64_V_REGS_NUM)
#define AARCH64_FPCR_REGNO (AARCH64_V0_REGNO + AARCH64_V_REGS_NUM + 1)

#define AARCH64_NUM_REGS (AARCH64_V0_REGNO + AARCH64_V_REGS_NUM + 2)

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  /* Hardware breakpoint/watchpoint data.
     The reason for them to be per-process rather than per-thread is
     due to the lack of information in the gdbserver environment;
     gdbserver is not told whether a requested hardware
     breakpoint/watchpoint is thread specific or not, so it has to set
     each hw bp/wp for every thread in the current process.  The
     higher level bp/wp management in gdb will resume a thread if a hw
     bp/wp trap is not expected for it.  Since the hw bp/wp setting is
     the same for each thread, it is reasonable for the data to live here.
  */
  struct aarch64_debug_reg_state debug_reg_state;
};

/* Return true if the size of register 0 is 8 bytes.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

/* Implementation of linux_target_ops method "cannot_store_register".  */

static int
aarch64_cannot_store_register (int regno)
{
  return regno >= AARCH64_NUM_REGS;
}

/* Implementation of linux_target_ops method "cannot_fetch_register".  */

static int
aarch64_cannot_fetch_register (int regno)
{
  return regno >= AARCH64_NUM_REGS;
}

static void
aarch64_fill_gregset (struct regcache *regcache, void *buf)
{
  struct user_pt_regs *regset = buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    collect_register (regcache, AARCH64_X0_REGNO + i, &regset->regs[i]);
  collect_register (regcache, AARCH64_SP_REGNO, &regset->sp);
  collect_register (regcache, AARCH64_PC_REGNO, &regset->pc);
  collect_register (regcache, AARCH64_CPSR_REGNO, &regset->pstate);
}

static void
aarch64_store_gregset (struct regcache *regcache, const void *buf)
{
  const struct user_pt_regs *regset = buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    supply_register (regcache, AARCH64_X0_REGNO + i, &regset->regs[i]);
  supply_register (regcache, AARCH64_SP_REGNO, &regset->sp);
  supply_register (regcache, AARCH64_PC_REGNO, &regset->pc);
  supply_register (regcache, AARCH64_CPSR_REGNO, &regset->pstate);
}

static void
aarch64_fill_fpregset (struct regcache *regcache, void *buf)
{
  struct user_fpsimd_state *regset = buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    collect_register (regcache, AARCH64_V0_REGNO + i, &regset->vregs[i]);
  collect_register (regcache, AARCH64_FPSR_REGNO, &regset->fpsr);
  collect_register (regcache, AARCH64_FPCR_REGNO, &regset->fpcr);
}

static void
aarch64_store_fpregset (struct regcache *regcache, const void *buf)
{
  const struct user_fpsimd_state *regset = buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    supply_register (regcache, AARCH64_V0_REGNO + i, &regset->vregs[i]);
  supply_register (regcache, AARCH64_FPSR_REGNO, &regset->fpsr);
  supply_register (regcache, AARCH64_FPCR_REGNO, &regset->fpcr);
}

/* Enable miscellaneous debugging output.  The name is historical - it
   was originally used to debug LinuxThreads support.  */
extern int debug_threads;

/* Implementation of linux_target_ops method "get_pc".  */

static CORE_ADDR
aarch64_get_pc (struct regcache *regcache)
{
  if (register_size (regcache->tdesc, 0) == 8)
    {
      unsigned long pc;

      collect_register_by_name (regcache, "pc", &pc);
      if (debug_threads)
	debug_printf ("stop pc is %08lx\n", pc);
      return pc;
    }
  else
    {
      unsigned int pc;

      collect_register_by_name (regcache, "pc", &pc);
      if (debug_threads)
	debug_printf ("stop pc is %04x\n", pc);
      return pc;
    }
}

/* Implementation of linux_target_ops method "set_pc".  */

static void
aarch64_set_pc (struct regcache *regcache, CORE_ADDR pc)
{
  if (register_size (regcache->tdesc, 0) == 8)
    {
      unsigned long newpc = pc;
      supply_register_by_name (regcache, "pc", &newpc);
    }
  else
    {
      unsigned int newpc = pc;
      supply_register_by_name (regcache, "pc", &newpc);
    }
}

#define aarch64_breakpoint_len 4

/* AArch64 BRK software debug mode instruction.
   This instruction needs to match gdb/aarch64-tdep.c
   (aarch64_default_breakpoint).  */
static const gdb_byte aarch64_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
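
/* The byte sequence above encodes "brk #0" (opcode 0xd4200000) in
   little-endian order, which is the byte order AArch64 instructions are
   always stored in.  */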

/* Implementation of linux_target_ops method "breakpoint_at".  */

static int
aarch64_breakpoint_at (CORE_ADDR where)
{
  gdb_byte insn[aarch64_breakpoint_len];

  (*the_target->read_memory) (where, (unsigned char *) &insn,
			      aarch64_breakpoint_len);
  if (memcmp (insn, aarch64_breakpoint, aarch64_breakpoint_len) == 0)
    return 1;

  return 0;
}

static void
aarch64_init_debug_reg_state (struct aarch64_debug_reg_state *state)
{
  int i;

  for (i = 0; i < AARCH64_HBP_MAX_NUM; ++i)
    {
      state->dr_addr_bp[i] = 0;
      state->dr_ctrl_bp[i] = 0;
      state->dr_ref_count_bp[i] = 0;
    }

  for (i = 0; i < AARCH64_HWP_MAX_NUM; ++i)
    {
      state->dr_addr_wp[i] = 0;
      state->dr_ctrl_wp[i] = 0;
      state->dr_ref_count_wp[i] = 0;
    }
}

/* Return the pointer to the debug register state structure in the
   current process' arch-specific data area.  */

struct aarch64_debug_reg_state *
aarch64_get_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}

/* Implementation of linux_target_ops method "supports_z_point_type".  */

static int
aarch64_supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
      {
	if (!extended_protocol && is_64bit_tdesc ())
	  {
	    /* Only enable the Z0 packet in non-multi-arch debugging.  If
	       the extended protocol is used, don't enable the Z0 packet
	       because GDBserver may attach to a 32-bit process.  */
	    return 1;
	  }
	else
	  {
	    /* Disable the Z0 packet so that GDBserver doesn't have to
	       handle different breakpoint instructions (aarch64, arm,
	       thumb etc) in multi-arch debugging.  */
	    return 0;
	  }
      }
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_READ_WP:
    case Z_PACKET_ACCESS_WP:
      return 1;
    default:
      return 0;
    }
}

/* Implementation of linux_target_ops method "insert_point".

   It actually only records the info of the to-be-inserted bp/wp;
   the actual insertion will happen when threads are resumed.  */

static int
aarch64_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
		      int len, struct raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "insert_point on entry (addr=0x%08lx, len=%d)\n",
	     (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  if (targ_type != hw_execute)
    {
      if (aarch64_linux_region_ok_for_watchpoint (addr, len))
	ret = aarch64_handle_watchpoint (targ_type, addr, len,
					 1 /* is_insert */, state);
      else
	ret = -1;
    }
  else
    ret =
      aarch64_handle_breakpoint (targ_type, addr, len, 1 /* is_insert */,
				 state);

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "insert_point", addr, len,
				  targ_type);

  return ret;
}

/* Implementation of linux_target_ops method "remove_point".

   It actually only records the info of the to-be-removed bp/wp;
   the actual removal will be done when threads are resumed.  */

static int
aarch64_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
		      int len, struct raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "remove_point on entry (addr=0x%08lx, len=%d)\n",
	     (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  /* Set up state pointers.  */
  if (targ_type != hw_execute)
    ret =
      aarch64_handle_watchpoint (targ_type, addr, len, 0 /* is_insert */,
				 state);
  else
    ret =
      aarch64_handle_breakpoint (targ_type, addr, len, 0 /* is_insert */,
				 state);

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "remove_point", addr, len,
				  targ_type);

  return ret;
}

/* Implementation of linux_target_ops method "stopped_data_address".  */

static CORE_ADDR
aarch64_stopped_data_address (void)
{
  siginfo_t siginfo;
  int pid, i;
  struct aarch64_debug_reg_state *state;

  pid = lwpid_of (current_thread);

  /* Get the siginfo.  */
  if (ptrace (PTRACE_GETSIGINFO, pid, NULL, &siginfo) != 0)
    return (CORE_ADDR) 0;

  /* Need to be a hardware breakpoint/watchpoint trap.  */
  if (siginfo.si_signo != SIGTRAP
      || (siginfo.si_code & 0xffff) != 0x0004 /* TRAP_HWBKPT */)
    return (CORE_ADDR) 0;

  /* Check if the address matches any watched address.  */
  state = aarch64_get_debug_reg_state (pid_of (current_thread));
  for (i = aarch64_num_wp_regs - 1; i >= 0; --i)
    {
      const unsigned int len = aarch64_watchpoint_length (state->dr_ctrl_wp[i]);
      const CORE_ADDR addr_trap = (CORE_ADDR) siginfo.si_addr;
      const CORE_ADDR addr_watch = state->dr_addr_wp[i];
      if (state->dr_ref_count_wp[i]
	  && DR_CONTROL_ENABLED (state->dr_ctrl_wp[i])
	  && addr_trap >= addr_watch
	  && addr_trap < addr_watch + len)
	return addr_trap;
    }

  return (CORE_ADDR) 0;
}

/* Implementation of linux_target_ops method "stopped_by_watchpoint".  */

static int
aarch64_stopped_by_watchpoint (void)
{
  if (aarch64_stopped_data_address () != 0)
    return 1;
  else
    return 0;
}

/* Fetch the thread-local storage pointer for libthread_db.  */

ps_err_e
ps_get_thread_area (const struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
  return aarch64_ps_get_thread_area (ph, lwpid, idx, base,
				     is_64bit_tdesc ());
}

/* Implementation of linux_target_ops method "siginfo_fixup".  */

static int
aarch64_linux_siginfo_fixup (siginfo_t *native, void *inf, int direction)
{
  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    {
      if (direction == 0)
	aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo *) inf,
					     native);
      else
	aarch64_siginfo_from_compat_siginfo (native,
					     (struct compat_siginfo *) inf);

      return 1;
    }

  return 0;
}

/* Implementation of linux_target_ops method "linux_new_process".  */

static struct arch_process_info *
aarch64_linux_new_process (void)
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  aarch64_init_debug_reg_state (&info->debug_reg_state);

  return info;
}

/* Implementation of linux_target_ops method "linux_new_fork".  */

static void
aarch64_linux_new_fork (struct process_info *parent,
			struct process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
	      && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
	      && child->priv->arch_private != NULL);

  /* Linux kernels before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     will inherit hardware debug registers from the parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug register mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug register mirrors will become zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}

/* Return the right target description according to the ELF file of
   the current thread.  */

static const struct target_desc *
aarch64_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int tid;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (is_elf64)
    return tdesc_aarch64;
  else
    return tdesc_arm_with_neon;
}

/* Implementation of linux_target_ops method "arch_setup".  */

static void
aarch64_arch_setup (void)
{
  current_process ()->tdesc = aarch64_linux_read_description ();

  aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread));
}

static struct regset_info aarch64_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_FPREGSET,
    sizeof (struct user_fpsimd_state), FP_REGS,
    aarch64_fill_fpregset, aarch64_store_fpregset },
  { 0, 0, 0, -1, -1, NULL, NULL }
};

static struct regsets_info aarch64_regsets_info =
  {
    aarch64_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

static struct regs_info regs_info_aarch64 =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs */
    &aarch64_regsets_info,
  };

/* Implementation of linux_target_ops method "regs_info".  */

static const struct regs_info *
aarch64_regs_info (void)
{
  if (is_64bit_tdesc ())
    return &regs_info_aarch64;
  else
    return &regs_info_aarch32;
}

/* Implementation of linux_target_ops method "supports_tracepoints".  */

static int
aarch64_supports_tracepoints (void)
{
  if (current_thread == NULL)
    return 1;
  else
    {
      /* We don't support tracepoints on aarch32 now.  */
      return is_64bit_tdesc ();
    }
}

/* Implementation of linux_target_ops method "get_thread_area".  */

static int
aarch64_get_thread_area (int lwpid, CORE_ADDR *addrp)
{
  struct iovec iovec;
  uint64_t reg;

  iovec.iov_base = &reg;
  iovec.iov_len = sizeof (reg);

  if (ptrace (PTRACE_GETREGSET, lwpid, NT_ARM_TLS, &iovec) != 0)
    return -1;

  *addrp = reg;

  return 0;
}
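
/* The NT_ARM_TLS register set read above exposes the tpidr_el0 system
   register, the software thread ID register; its value is used as the
   base of the thread area.  The jump pad code below reads the same
   register directly with an MRS instruction.  */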

/* Extract a signed value from a bit field within an instruction
   encoding.

   INSN is the instruction opcode.

   WIDTH specifies the width of the bit field to extract (in bits).

   OFFSET specifies the least significant bit of the field where bits
   are numbered zero counting from least to most significant.  */

static int32_t
extract_signed_bitfield (uint32_t insn, unsigned width, unsigned offset)
{
  unsigned shift_l = sizeof (int32_t) * 8 - (offset + width);
  unsigned shift_r = sizeof (int32_t) * 8 - width;

  return ((int32_t) insn << shift_l) >> shift_r;
}
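
/* For example, the 19-bit literal offset of an LDR (literal) instruction
   occupies bits [23:5] of the opcode, so

     extract_signed_bitfield (insn, 19, 5)

   shifts the field up to the top of the 32-bit word and arithmetically
   back down, replicating bit 23 of INSN into the sign bits of the
   result.  */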

/* Decode an opcode if it represents an LDR or LDRSW instruction taking a
   literal offset from the current PC.

   ADDR specifies the address of the opcode.
   INSN specifies the opcode to test.
   IS_W is set if the instruction is LDRSW.
   IS64 receives the size field from the decoded instruction.
   RT receives the 'rt' field from the decoded instruction.
   OFFSET receives the 'imm' field from the decoded instruction.

   Return 1 if the opcode matches and is decoded, otherwise 0.  */

int
aarch64_decode_ldr_literal (CORE_ADDR addr, uint32_t insn, int *is_w,
			    int *is64, unsigned *rt, int32_t *offset)
{
  /* LDR    0T01 1000 iiii iiii iiii iiii iiir rrrr */
  /* LDRSW  1001 1000 iiii iiii iiii iiii iiir rrrr */
  if ((insn & 0x3f000000) == 0x18000000)
    {
      *is_w = (insn >> 31) & 0x1;

      if (*is_w)
	{
	  /* LDRSW always takes a 64-bit destination register.  */
	  *is64 = 1;
	}
      else
	*is64 = (insn >> 30) & 0x1;

      *rt = (insn >> 0) & 0x1f;
      *offset = extract_signed_bitfield (insn, 19, 5) << 2;

      if (aarch64_debug)
	debug_printf ("decode: %s 0x%x %s %s%u, #?\n",
		      core_addr_to_string_nz (addr), insn,
		      *is_w ? "ldrsw" : "ldr",
		      *is64 ? "x" : "w", *rt);

      return 1;
    }

  return 0;
}
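
/* For example, the opcode 0x18000040 decodes as "ldr w0, .+8": IS_W is
   0, IS64 is 0, RT is 0 and OFFSET is 8 (an imm19 field of 2, scaled
   by 4).  */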

/* List of opcodes that we need for building the jump pad and relocating
   an instruction.  */

enum aarch64_opcodes
{
  /* B              0001 01ii iiii iiii iiii iiii iiii iiii */
  /* BL             1001 01ii iiii iiii iiii iiii iiii iiii */
  /* B.COND         0101 0100 iiii iiii iiii iiii iii0 cccc */
  /* CBZ            s011 0100 iiii iiii iiii iiii iiir rrrr */
  /* CBNZ           s011 0101 iiii iiii iiii iiii iiir rrrr */
  /* TBZ            b011 0110 bbbb biii iiii iiii iiir rrrr */
  /* TBNZ           b011 0111 bbbb biii iiii iiii iiir rrrr */
  B               = 0x14000000,
  BL              = 0x80000000 | B,
  BCOND           = 0x40000000 | B,
  CBZ             = 0x20000000 | B,
  CBNZ            = 0x21000000 | B,
  TBZ             = 0x36000000 | B,
  TBNZ            = 0x37000000 | B,
  /* BLR            1101 0110 0011 1111 0000 00rr rrr0 0000 */
  BLR             = 0xd63f0000,
  /* STP            s010 100o o0ii iiii irrr rrrr rrrr rrrr */
  /* LDP            s010 100o o1ii iiii irrr rrrr rrrr rrrr */
  /* STP (SIMD&VFP) ss10 110o o0ii iiii irrr rrrr rrrr rrrr */
  /* LDP (SIMD&VFP) ss10 110o o1ii iiii irrr rrrr rrrr rrrr */
  STP             = 0x28000000,
  LDP             = 0x28400000,
  STP_SIMD_VFP    = 0x04000000 | STP,
  LDP_SIMD_VFP    = 0x04000000 | LDP,
  /* STR            ss11 100o 00xi iiii iiii xxrr rrrr rrrr */
  /* LDR            ss11 100o 01xi iiii iiii xxrr rrrr rrrr */
  /* LDRSW          1011 100o 10xi iiii iiii xxrr rrrr rrrr */
  STR             = 0x38000000,
  LDR             = 0x00400000 | STR,
  LDRSW           = 0x80800000 | STR,
  /* LDAXR          ss00 1000 0101 1111 1111 11rr rrrr rrrr */
  LDAXR           = 0x085ffc00,
  /* STXR           ss00 1000 000r rrrr 0111 11rr rrrr rrrr */
  STXR            = 0x08007c00,
  /* STLR           ss00 1000 1001 1111 1111 11rr rrrr rrrr */
  STLR            = 0x089ffc00,
  /* MOV            s101 0010 1xxi iiii iiii iiii iiir rrrr */
  /* MOVK           s111 0010 1xxi iiii iiii iiii iiir rrrr */
  MOV             = 0x52800000,
  MOVK            = 0x20000000 | MOV,
  /* ADD            s00o ooo1 xxxx xxxx xxxx xxxx xxxx xxxx */
  /* SUB            s10o ooo1 xxxx xxxx xxxx xxxx xxxx xxxx */
  /* SUBS           s11o ooo1 xxxx xxxx xxxx xxxx xxxx xxxx */
  ADD             = 0x01000000,
  SUB             = 0x40000000 | ADD,
  /* MSR (register) 1101 0101 0001 oooo oooo oooo ooor rrrr */
  /* MRS            1101 0101 0011 oooo oooo oooo ooor rrrr */
  MSR             = 0xd5100000,
  MRS             = 0x00200000 | MSR,
  /* HINT           1101 0101 0000 0011 0010 oooo ooo1 1111 */
  HINT            = 0xd503201f,
  SEVL            = (5 << 5) | HINT,
  WFE             = (2 << 5) | HINT,
};
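
/* Note how related opcodes are composed from a base encoding: BL is B
   with bit 31 set, LDP is STP with the load bit (bit 22) set, and so
   on.  This makes the values above easy to check against the bit
   patterns in the comments.  */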

/* Representation of a general purpose register of the form xN or wN.

   This type is used by emitting functions that take registers as
   operands.  */

struct aarch64_register
{
  unsigned num;
  int is64;
};

/* Representation of an operand.  At this time, it only supports register
   and immediate types.  */

struct aarch64_operand
{
  /* Type of the operand.  */
  enum
    {
      OPERAND_IMMEDIATE,
      OPERAND_REGISTER,
    } type;
  /* Value of the operand according to the type.  */
  union
    {
      uint32_t imm;
      struct aarch64_register reg;
    };
};

/* List of registers that we are currently using; we can add more here
   as we need to use them.  */

/* General purpose scratch registers (64 bit).  */
static const struct aarch64_register x0 = { 0, 1 };
static const struct aarch64_register x1 = { 1, 1 };
static const struct aarch64_register x2 = { 2, 1 };
static const struct aarch64_register x3 = { 3, 1 };
static const struct aarch64_register x4 = { 4, 1 };

/* General purpose scratch registers (32 bit).  */
static const struct aarch64_register w2 = { 2, 0 };

/* Intra-procedure scratch registers.  */
static const struct aarch64_register ip0 = { 16, 1 };

/* Special purpose registers.  */
static const struct aarch64_register sp = { 31, 1 };
static const struct aarch64_register xzr = { 31, 1 };

/* Dynamically allocate a new register.  If we know the register
   statically, we should make it a global as above instead of using this
   helper function.  */

static struct aarch64_register
aarch64_register (unsigned num, int is64)
{
  return (struct aarch64_register) { num, is64 };
}

/* Helper function to create a register operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, register_operand (x1));  */

static struct aarch64_operand
register_operand (struct aarch64_register reg)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_REGISTER;
  operand.reg = reg;

  return operand;
}

/* Helper function to create an immediate operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, immediate_operand (12));  */

static struct aarch64_operand
immediate_operand (uint32_t imm)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_IMMEDIATE;
  operand.imm = imm;

  return operand;
}

/* Representation of a memory operand, used for load and store
   instructions.

   The types correspond to the following variants:

   MEMORY_OPERAND_OFFSET:   LDR rt, [rn, #offset]
   MEMORY_OPERAND_PREINDEX: LDR rt, [rn, #index]!  */

struct aarch64_memory_operand
{
  /* Type of the operand.  */
  enum
    {
      MEMORY_OPERAND_OFFSET,
      MEMORY_OPERAND_PREINDEX,
    } type;
  /* Index from the base register.  */
  int32_t index;
};

/* Helper function to create an offset memory operand.

   For example:
   p += emit_ldr (p, x0, sp, offset_memory_operand (16));  */

static struct aarch64_memory_operand
offset_memory_operand (int32_t offset)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_OFFSET, offset };
}

/* Helper function to create a pre-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, preindex_memory_operand (16));  */

static struct aarch64_memory_operand
preindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_PREINDEX, index };
}

/* System control registers.  These special registers can be written and
   read with the MRS and MSR instructions.

   - NZCV: Condition flags.  GDB refers to this register under the CPSR
     name.
   - FPSR: Floating-point status register.
   - FPCR: Floating-point control register.
   - TPIDR_EL0: Software thread ID register.  */

enum aarch64_system_control_registers
{
  /*          op0           op1           crn          crm          op2 */
  NZCV =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
  FPSR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
  FPCR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
  TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
};

/* Helper macro to mask and shift a value into a bitfield.  */

#define ENCODE(val, size, offset) \
  ((uint32_t) ((val & ((1ULL << size) - 1)) << offset))
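
/* For example, ENCODE (2, 2, 30) yields 0x80000000, the "opc" field
   value used below for 64-bit STP/LDP instructions.  */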

/* Write a 32-bit unsigned integer INSN into *BUF.  Return the number of
   instructions written (always 1).  */

static int
emit_insn (uint32_t *buf, uint32_t insn)
{
  *buf = insn;
  return 1;
}

/* Write a B or BL instruction into *BUF.

     B  #offset
     BL #offset

   IS_BL specifies if the link register should be updated.
   OFFSET is the immediate offset from the current PC.  It is
   byte-addressed but should be 4 bytes aligned.  It has a limited range
   of +/- 128MB (26 bits << 2).  */

static int
emit_b (uint32_t *buf, int is_bl, int32_t offset)
{
  uint32_t imm26 = ENCODE (offset >> 2, 26, 0);

  if (is_bl)
    return emit_insn (buf, BL | imm26);
  else
    return emit_insn (buf, B | imm26);
}
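
/* For example, emit_b (p, 0, 8) writes a "B .+8" instruction, skipping
   over the next instruction; the relocation code below uses this to
   jump over a branch island.  */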

/* Write a BCOND instruction into *BUF.

     B.COND #offset

   COND specifies the condition field.
   OFFSET is the immediate offset from the current PC.  It is
   byte-addressed but should be 4 bytes aligned.  It has a limited range
   of +/- 1MB (19 bits << 2).  */

static int
emit_bcond (uint32_t *buf, unsigned cond, int32_t offset)
{
  return emit_insn (buf, BCOND | ENCODE (offset >> 2, 19, 5)
		    | ENCODE (cond, 4, 0));
}

/* Write a CBZ or CBNZ instruction into *BUF.

     CBZ  rt, #offset
     CBNZ rt, #offset

   IS_CBNZ distinguishes between CBZ and CBNZ instructions.
   RT is the register to test.
   OFFSET is the immediate offset from the current PC.  It is
   byte-addressed but should be 4 bytes aligned.  It has a limited range
   of +/- 1MB (19 bits << 2).  */

static int
emit_cb (uint32_t *buf, int is_cbnz, struct aarch64_register rt,
	 int32_t offset)
{
  uint32_t imm19 = ENCODE (offset >> 2, 19, 5);
  uint32_t sf = ENCODE (rt.is64, 1, 31);

  if (is_cbnz)
    return emit_insn (buf, CBNZ | sf | imm19 | ENCODE (rt.num, 5, 0));
  else
    return emit_insn (buf, CBZ | sf | imm19 | ENCODE (rt.num, 5, 0));
}

/* Write a TBZ or TBNZ instruction into *BUF.

     TBZ  rt, #bit, #offset
     TBNZ rt, #bit, #offset

   IS_TBNZ distinguishes between TBZ and TBNZ instructions.
   RT is the register to test.
   BIT is the index of the bit to test in register RT.
   OFFSET is the immediate offset from the current PC.  It is
   byte-addressed but should be 4 bytes aligned.  It has a limited range
   of +/- 32KB (14 bits << 2).  */

static int
emit_tb (uint32_t *buf, int is_tbnz, unsigned bit,
	 struct aarch64_register rt, int32_t offset)
{
  uint32_t imm14 = ENCODE (offset >> 2, 14, 5);
  uint32_t b40 = ENCODE (bit, 5, 19);
  uint32_t b5 = ENCODE (bit >> 5, 1, 31);

  if (is_tbnz)
    return emit_insn (buf, TBNZ | b5 | b40 | imm14 | ENCODE (rt.num, 5, 0));
  else
    return emit_insn (buf, TBZ | b5 | b40 | imm14 | ENCODE (rt.num, 5, 0));
}

/* Write a BLR instruction into *BUF.

     BLR rn

   RN is the register to branch to.  */

static int
emit_blr (uint32_t *buf, struct aarch64_register rn)
{
  return emit_insn (buf, BLR | ENCODE (rn.num, 5, 5));
}

/* Write a STP instruction into *BUF.

     STP rt, rt2, [rn, #offset]
     STP rt, rt2, [rn, #index]!

   RT and RT2 are the registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_stp (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rt2, struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  uint32_t opc;
  uint32_t pre_index;
  uint32_t write_back;

  if (rt.is64)
    opc = ENCODE (2, 2, 30);
  else
    opc = ENCODE (0, 2, 30);

  switch (operand.type)
    {
    case MEMORY_OPERAND_OFFSET:
      {
	pre_index = ENCODE (1, 1, 24);
	write_back = ENCODE (0, 1, 23);
	break;
      }
    case MEMORY_OPERAND_PREINDEX:
      {
	pre_index = ENCODE (1, 1, 24);
	write_back = ENCODE (1, 1, 23);
	break;
      }
    default:
      return 0;
    }

  return emit_insn (buf, STP | opc | pre_index | write_back
		    | ENCODE (operand.index >> 3, 7, 15)
		    | ENCODE (rt2.num, 5, 10)
		    | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}

/* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.

     LDP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_ldp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
		   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return emit_insn (buf, LDP_SIMD_VFP | opc | pre_index
		    | ENCODE (offset >> 4, 7, 15) | ENCODE (rt2, 5, 10)
		    | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

/* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.

     STP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_stp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
		   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return emit_insn (buf, STP_SIMD_VFP | opc | pre_index
		    | ENCODE (offset >> 4, 7, 15) | ENCODE (rt2, 5, 10)
		    | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

/* Helper function emitting a load or store instruction.  */

static int
emit_load_store (uint32_t *buf, uint32_t size, enum aarch64_opcodes opcode,
		 struct aarch64_register rt, struct aarch64_register rn,
		 struct aarch64_memory_operand operand)
{
  uint32_t op;

  switch (operand.type)
    {
    case MEMORY_OPERAND_OFFSET:
      {
	op = ENCODE (1, 1, 24);

	return emit_insn (buf, opcode | ENCODE (size, 2, 30) | op
			  | ENCODE (operand.index >> 3, 12, 10)
			  | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
      }
    case MEMORY_OPERAND_PREINDEX:
      {
	uint32_t pre_index = ENCODE (3, 2, 10);

	op = ENCODE (0, 1, 24);

	return emit_insn (buf, opcode | ENCODE (size, 2, 30) | op
			  | pre_index | ENCODE (operand.index, 9, 12)
			  | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
      }
    default:
      return 0;
    }
}
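
/* Note that the unsigned-offset form above scales the immediate by 8,
   which matches 64-bit transfers; within this file, the callers that
   use a different transfer size always pass an offset of zero.  */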

/* Write a LDR instruction into *BUF.

     LDR rt, [rn, #offset]
     LDR rt, [rn, #index]!

   RT is the register to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   0 .. 32760 range (12 bits << 3).  */

static int
emit_ldr (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rn, struct aarch64_memory_operand operand)
{
  return emit_load_store (buf, rt.is64 ? 3 : 2, LDR, rt, rn, operand);
}

/* Write a LDRSW instruction into *BUF.  The register size is 64-bit.

     LDRSW xt, [rn, #offset]
     LDRSW xt, [rn, #index]!

   RT is the register to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   0 .. 16380 range (12 bits << 2).  */

static int
emit_ldrsw (uint32_t *buf, struct aarch64_register rt,
	    struct aarch64_register rn,
	    struct aarch64_memory_operand operand)
{
  return emit_load_store (buf, 3, LDRSW, rt, rn, operand);
}

/* Write a STR instruction into *BUF.

     STR rt, [rn, #offset]
     STR rt, [rn, #index]!

   RT is the register to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   0 .. 32760 range (12 bits << 3).  */

static int
emit_str (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return emit_load_store (buf, rt.is64 ? 3 : 2, STR, rt, rn, operand);
}

/* Helper function emitting an exclusive load or store instruction.  */

static int
emit_load_store_exclusive (uint32_t *buf, uint32_t size,
			   enum aarch64_opcodes opcode,
			   struct aarch64_register rs,
			   struct aarch64_register rt,
			   struct aarch64_register rt2,
			   struct aarch64_register rn)
{
  return emit_insn (buf, opcode | ENCODE (size, 2, 30)
		    | ENCODE (rs.num, 5, 16) | ENCODE (rt2.num, 5, 10)
		    | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}

/* Write a LDAXR instruction into *BUF.

     LDAXR rt, [xn]

   RT is the destination register.
   RN is the base address register.  */

static int
emit_ldaxr (uint32_t *buf, struct aarch64_register rt,
	    struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, LDAXR, xzr, rt,
				    xzr, rn);
}

/* Write a STXR instruction into *BUF.

     STXR ws, rt, [xn]

   RS is the result register; it indicates whether the store succeeded
   or not.
   RT is the register to store.
   RN is the base address register.  */

static int
emit_stxr (uint32_t *buf, struct aarch64_register rs,
	   struct aarch64_register rt, struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STXR, rs, rt,
				    xzr, rn);
}

/* Write a STLR instruction into *BUF.

     STLR rt, [xn]

   RT is the register to store.
   RN is the base address register.  */

static int
emit_stlr (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STLR, xzr, rt,
				    xzr, rn);
}

/* Helper function for data processing instructions with register
   sources.  */

static int
emit_data_processing_reg (uint32_t *buf, enum aarch64_opcodes opcode,
			  struct aarch64_register rd,
			  struct aarch64_register rn,
			  struct aarch64_register rm)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return emit_insn (buf, opcode | size | ENCODE (rm.num, 5, 16)
		    | ENCODE (rn.num, 5, 5) | ENCODE (rd.num, 5, 0));
}

/* Helper function for data processing instructions taking either a
   register or an immediate.  */

static int
emit_data_processing (uint32_t *buf, enum aarch64_opcodes opcode,
		      struct aarch64_register rd,
		      struct aarch64_register rn,
		      struct aarch64_operand operand)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  /* The opcode is different for register and immediate source operands.  */
  uint32_t operand_opcode;

  if (operand.type == OPERAND_IMMEDIATE)
    {
      /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (8, 4, 25);

      return emit_insn (buf, opcode | operand_opcode | size
			| ENCODE (operand.imm, 12, 10)
			| ENCODE (rn.num, 5, 5) | ENCODE (rd.num, 5, 0));
    }
  else
    {
      /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (5, 4, 25);

      return emit_data_processing_reg (buf, opcode | operand_opcode, rd,
				       rn, operand.reg);
    }
}

/* Write an ADD instruction into *BUF.

     ADD rd, rn, #imm
     ADD rd, rn, rm

   This function handles both an immediate and register add.

   RD is the destination register.
   RN is the input register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_add (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, ADD, rd, rn, operand);
}

/* Write a SUB instruction into *BUF.

     SUB rd, rn, #imm
     SUB rd, rn, rm

   This function handles both an immediate and register sub.

   RD is the destination register.
   RN is the input register.
   OPERAND is the source operand to subtract from RN, either of type
   OPERAND_IMMEDIATE or OPERAND_REGISTER.  */

static int
emit_sub (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUB, rd, rn, operand);
}

/* Write a MOV instruction into *BUF.

     MOV rd, #imm
     MOV rd, rm

   This function handles both a wide immediate move and a register move,
   with the condition that the source register is not xzr.  xzr and the
   stack pointer share the same encoding and this function only supports
   the stack pointer.

   RD is the destination register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_mov (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_operand operand)
{
  if (operand.type == OPERAND_IMMEDIATE)
    {
      uint32_t size = ENCODE (rd.is64, 1, 31);
      /* Do not shift the immediate.  */
      uint32_t shift = ENCODE (0, 2, 21);

      return emit_insn (buf, MOV | size | shift
			| ENCODE (operand.imm, 16, 5)
			| ENCODE (rd.num, 5, 0));
    }
  else
    return emit_add (buf, rd, operand.reg, immediate_operand (0));
}

/* Write a MOVK instruction into *BUF.

     MOVK rd, #imm, lsl #shift

   RD is the destination register.
   IMM is the immediate.
   SHIFT is the shift to apply to IMM, in units of 16 bits (so a SHIFT
   of 1 yields "lsl #16").  */

static int
emit_movk (uint32_t *buf, struct aarch64_register rd, uint32_t imm,
	   unsigned shift)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return emit_insn (buf, MOVK | size | ENCODE (shift, 2, 21)
		    | ENCODE (imm, 16, 5) | ENCODE (rd.num, 5, 0));
}

/* Write instructions into *BUF in order to move ADDR into a register.
   ADDR can be a 64-bit value.

   This function will emit a series of MOV and MOVK instructions, such as:

     MOV  xd, #(addr)
     MOVK xd, #(addr >> 16), lsl #16
     MOVK xd, #(addr >> 32), lsl #32
     MOVK xd, #(addr >> 48), lsl #48  */

static int
emit_mov_addr (uint32_t *buf, struct aarch64_register rd, CORE_ADDR addr)
{
  uint32_t *p = buf;

  /* The MOV (wide immediate) instruction clears the top bits of the
     register.  */
  p += emit_mov (p, rd, immediate_operand (addr & 0xffff));

  if ((addr >> 16) != 0)
    p += emit_movk (p, rd, (addr >> 16) & 0xffff, 1);
  else
    return p - buf;

  if ((addr >> 32) != 0)
    p += emit_movk (p, rd, (addr >> 32) & 0xffff, 2);
  else
    return p - buf;

  if ((addr >> 48) != 0)
    p += emit_movk (p, rd, (addr >> 48) & 0xffff, 3);

  return p - buf;
}
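
/* For example, emit_mov_addr for the address 0x12345678 emits just two
   instructions, because the two top 16-bit chunks are zero:

     MOV  xd, #0x5678
     MOVK xd, #0x1234, lsl #16  */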

/* Write a MRS instruction into *BUF.  The register size is 64-bit.

     MRS xt, system_reg

   RT is the destination register.
   SYSTEM_REG is the special purpose register to read.  */

static int
emit_mrs (uint32_t *buf, struct aarch64_register rt,
	  enum aarch64_system_control_registers system_reg)
{
  return emit_insn (buf, MRS | ENCODE (system_reg, 15, 5)
		    | ENCODE (rt.num, 5, 0));
}

/* Write a MSR instruction into *BUF.  The register size is 64-bit.

     MSR system_reg, xt

   SYSTEM_REG is the special purpose register to write.
   RT is the input register.  */

static int
emit_msr (uint32_t *buf, enum aarch64_system_control_registers system_reg,
	  struct aarch64_register rt)
{
  return emit_insn (buf, MSR | ENCODE (system_reg, 15, 5)
		    | ENCODE (rt.num, 5, 0));
}

/* Write a SEVL instruction into *BUF.

   This is a hint instruction telling the hardware to trigger an event.  */

static int
emit_sevl (uint32_t *buf)
{
  return emit_insn (buf, SEVL);
}

/* Write a WFE instruction into *BUF.

   This is a hint instruction telling the hardware to wait for an event.  */

static int
emit_wfe (uint32_t *buf)
{
  return emit_insn (buf, WFE);
}

/* Write LEN instructions from BUF into the inferior memory at *TO.

   Note instructions are always little endian on AArch64, unlike data.  */

static void
append_insns (CORE_ADDR *to, size_t len, const uint32_t *buf)
{
  size_t byte_len = len * sizeof (uint32_t);
#if (__BYTE_ORDER == __BIG_ENDIAN)
  uint32_t *le_buf = xmalloc (byte_len);
  size_t i;

  for (i = 0; i < len; i++)
    le_buf[i] = htole32 (buf[i]);

  write_inferior_memory (*to, (const unsigned char *) le_buf, byte_len);

  xfree (le_buf);
#else
  write_inferior_memory (*to, (const unsigned char *) buf, byte_len);
#endif

  *to += byte_len;
}

/* Helper function.  Return 1 if VAL can be encoded in BITS bits.  */

static int
can_encode_int32 (int32_t val, unsigned bits)
{
  /* This must be an arithmetic shift.  */
  int32_t rest = val >> bits;

  return rest == 0 || rest == -1;
}
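
/* For example, can_encode_int32 (-4, 28) is 1 because shifting -4 right
   by 28 bits leaves only sign bits, while can_encode_int32 (1 << 28, 28)
   is 0.  The relocation code below uses this to check whether a branch
   offset fits in the immediate field of the instruction being emitted.  */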

/* Relocate an instruction from OLDLOC to *TO.  This function will also
   increment TO by the number of bytes the new instruction(s) take(s).

   PC relative instructions need to be handled specifically:

   - B/BL
   - B.COND
   - CBZ/CBNZ
   - TBZ/TBNZ
   - ADR/ADRP
   - LDR/LDRSW (literal)  */

static void
aarch64_relocate_instruction (CORE_ADDR *to, CORE_ADDR oldloc)
{
  uint32_t buf[32];
  uint32_t *p = buf;
  uint32_t insn;

  int is_bl;
  int is64;
  int is_sw;
  int is_cbnz;
  int is_tbnz;
  int is_adrp;
  unsigned rn;
  unsigned rt;
  unsigned rd;
  unsigned cond;
  unsigned bit;
  int32_t offset;

  target_read_uint32 (oldloc, &insn);

  if (aarch64_decode_b (oldloc, insn, &is_bl, &offset))
    {
      offset = (oldloc - *to + offset);

      if (can_encode_int32 (offset, 28))
	p += emit_b (p, is_bl, offset);
      else
	return;
    }
  else if (aarch64_decode_bcond (oldloc, insn, &cond, &offset))
    {
      offset = (oldloc - *to + offset);

      if (can_encode_int32 (offset, 21))
	p += emit_bcond (p, cond, offset);
      else if (can_encode_int32 (offset, 28))
	{
	  /* The offset is out of range for a conditional branch
	     instruction but not for an unconditional branch.  We can use
	     the following instructions instead:

	       B.COND TAKEN    ; If cond is true, then jump to TAKEN.
	       B NOT_TAKEN     ; Else jump over TAKEN and continue.
	     TAKEN:
	       B #(offset - 8)
	     NOT_TAKEN:

	  */

	  p += emit_bcond (p, cond, 8);
	  p += emit_b (p, 0, 8);
	  p += emit_b (p, 0, offset - 8);
	}
      else
	return;
    }
  else if (aarch64_decode_cb (oldloc, insn, &is64, &is_cbnz, &rn, &offset))
    {
      offset = (oldloc - *to + offset);

      if (can_encode_int32 (offset, 21))
	p += emit_cb (p, is_cbnz, aarch64_register (rn, is64), offset);
      else if (can_encode_int32 (offset, 28))
	{
	  /* The offset is out of range for a compare and branch
	     instruction but not for an unconditional branch.  We can use
	     the following instructions instead:

	       CBZ xn, TAKEN   ; xn == 0, then jump to TAKEN.
	       B NOT_TAKEN     ; Else jump over TAKEN and continue.
	     TAKEN:
	       B #(offset - 8)
	     NOT_TAKEN:

	  */
	  p += emit_cb (p, is_cbnz, aarch64_register (rn, is64), 8);
	  p += emit_b (p, 0, 8);
	  p += emit_b (p, 0, offset - 8);
	}
      else
	return;
    }
  else if (aarch64_decode_tb (oldloc, insn, &is_tbnz, &bit, &rt, &offset))
    {
      offset = (oldloc - *to + offset);

      if (can_encode_int32 (offset, 16))
	p += emit_tb (p, is_tbnz, bit, aarch64_register (rt, 1), offset);
      else if (can_encode_int32 (offset, 28))
	{
	  /* The offset is out of range for a test bit and branch
	     instruction but not for an unconditional branch.  We can use
	     the following instructions instead:

	       TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
	       B NOT_TAKEN     ; Else jump over TAKEN and continue.
	     TAKEN:
	       B #(offset - 8)
	     NOT_TAKEN:

	  */
	  p += emit_tb (p, is_tbnz, bit, aarch64_register (rt, 1), 8);
	  p += emit_b (p, 0, 8);
	  p += emit_b (p, 0, offset - 8);
	}
      else
	return;
    }
  else if (aarch64_decode_adr (oldloc, insn, &is_adrp, &rd, &offset))
    {
      /* We know exactly the address the ADR{P,} instruction will compute.
	 We can just write it to the destination register.  */
      CORE_ADDR address = oldloc + offset;

      if (is_adrp)
	{
	  /* Clear the lower 12 bits of the offset to get the 4K page.  */
	  p += emit_mov_addr (p, aarch64_register (rd, 1),
			      address & ~0xfff);
	}
      else
	p += emit_mov_addr (p, aarch64_register (rd, 1), address);
    }
  else if (aarch64_decode_ldr_literal (oldloc, insn, &is_sw, &is64, &rt,
				       &offset))
    {
      /* We know exactly what address to load from, and what register we
	 can use:

	   MOV xd, #(oldloc + offset)
	   MOVK xd, #((oldloc + offset) >> 16), lsl #16
	   ...

	   LDR xd, [xd] ; or LDRSW xd, [xd]

      */
      CORE_ADDR address = oldloc + offset;

      p += emit_mov_addr (p, aarch64_register (rt, 1), address);

      if (is_sw)
	p += emit_ldrsw (p, aarch64_register (rt, 1),
			 aarch64_register (rt, 1),
			 offset_memory_operand (0));
      else
	p += emit_ldr (p, aarch64_register (rt, is64),
		       aarch64_register (rt, 1),
		       offset_memory_operand (0));
    }
  else
    {
      /* The instruction is not PC relative.  Just re-emit it at the new
	 location.  */
      p += emit_insn (p, insn);
    }

  append_insns (to, p - buf, buf);
}

/* Implementation of linux_target_ops method
   "install_fast_tracepoint_jump_pad".  */

static int
aarch64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
					  CORE_ADDR tpaddr,
					  CORE_ADDR collector,
					  CORE_ADDR lockaddr,
					  ULONGEST orig_size,
					  CORE_ADDR *jump_entry,
					  CORE_ADDR *trampoline,
					  ULONGEST *trampoline_size,
					  unsigned char *jjump_pad_insn,
					  ULONGEST *jjump_pad_insn_size,
					  CORE_ADDR *adjusted_insn_addr,
					  CORE_ADDR *adjusted_insn_addr_end,
					  char *err)
{
  uint32_t buf[256];
  uint32_t *p = buf;
  int32_t offset;
  int i;
  CORE_ADDR buildaddr = *jump_entry;

  /* We need to save the current state on the stack both to restore it
     later and to collect register values when the tracepoint is hit.

     The saved registers are pushed in a layout that needs to be in sync
     with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c).  Later on
     the supply_fast_tracepoint_registers function will fill in the
     register cache from a pointer to saved registers on the stack we build
     here.

     For simplicity, we set the size of each cell on the stack to 16 bytes.
     This way one cell can hold any register type, from system registers
     to the 128 bit SIMD&FP registers.  Furthermore, the stack pointer
     has to be 16 bytes aligned anyway.

     Note that the CPSR register does not exist on AArch64.  Instead we
     can access system bits describing the process state with the
     MRS/MSR instructions, namely the condition flags.  We save them as
     if they are part of a CPSR register because that's how GDB
     interprets these system bits.  At the moment, only the condition
     flags are saved in CPSR (NZCV).

     Stack layout, each cell is 16 bytes (descending):

     High *-------- SIMD&FP registers from 31 down to 0. --------*
	  | q31                                                  |
	  .                                                      .
	  .                                                      . 32 cells
	  .                                                      .
	  | q0                                                   |
	  *---- General purpose registers from 30 down to 0. ----*
	  | x30                                                  |
	  .                                                      .
	  .                                                      . 31 cells
	  .                                                      .
	  | x0                                                   |
	  *------------- Special purpose registers. -------------*
	  | SP                                                   |
	  | PC                                                   |
	  | CPSR (NZCV)                                          | 5 cells
	  | FPSR                                                 |
	  | FPCR                                                 | <- SP + 16
	  *------------- collecting_t object --------------------*
	  | TPIDR_EL0               | struct tracepoint *        |
     Low  *------------------------------------------------------*

     After this stack is set up, we issue a call to the collector, passing
     it the saved registers at (SP + 16).  */

  /* Push SIMD&FP registers on the stack:

       SUB sp, sp, #(32 * 16)

       STP q30, q31, [sp, #(30 * 16)]
       ...
       STP q0, q1, [sp]

  */
  p += emit_sub (p, sp, sp, immediate_operand (32 * 16));
  for (i = 30; i >= 0; i -= 2)
    p += emit_stp_q_offset (p, i, i + 1, sp, i * 16);

  /* Push general purpose registers on the stack.  Note that we do not
     need to push x31 as it represents the xzr register and not the stack
     pointer in a STR instruction.

       SUB sp, sp, #(31 * 16)

       STR x30, [sp, #(30 * 16)]
       ...
       STR x0, [sp]

  */
  p += emit_sub (p, sp, sp, immediate_operand (31 * 16));
  for (i = 30; i >= 0; i -= 1)
    p += emit_str (p, aarch64_register (i, 1), sp,
		   offset_memory_operand (i * 16));

  /* Make space for 5 more cells.

       SUB sp, sp, #(5 * 16)

  */
  p += emit_sub (p, sp, sp, immediate_operand (5 * 16));

  /* Save SP:

       ADD x4, sp, #((32 + 31 + 5) * 16)
       STR x4, [sp, #(4 * 16)]

  */
  p += emit_add (p, x4, sp, immediate_operand ((32 + 31 + 5) * 16));
  p += emit_str (p, x4, sp, offset_memory_operand (4 * 16));

  /* Save PC (tracepoint address):

       MOV x3, #(tpaddr)
       ...

       STR x3, [sp, #(3 * 16)]

  */

  p += emit_mov_addr (p, x3, tpaddr);
  p += emit_str (p, x3, sp, offset_memory_operand (3 * 16));

  /* Save CPSR (NZCV), FPSR and FPCR:

       MRS x2, nzcv
       MRS x1, fpsr
       MRS x0, fpcr

       STR x2, [sp, #(2 * 16)]
       STR x1, [sp, #(1 * 16)]
       STR x0, [sp, #(0 * 16)]

  */
  p += emit_mrs (p, x2, NZCV);
  p += emit_mrs (p, x1, FPSR);
  p += emit_mrs (p, x0, FPCR);
  p += emit_str (p, x2, sp, offset_memory_operand (2 * 16));
  p += emit_str (p, x1, sp, offset_memory_operand (1 * 16));
  p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));

  /* Push the collecting_t object.  It consists of the address of the
     tracepoint and an ID for the current thread.  We get the latter by
     reading the tpidr_el0 system register.  It corresponds to the
     NT_ARM_TLS register accessible with ptrace.

       MOV x0, #(tpoint)
       ...

       MRS x1, tpidr_el0

       STP x0, x1, [sp, #-16]!

  */

  p += emit_mov_addr (p, x0, tpoint);
  p += emit_mrs (p, x1, TPIDR_EL0);
  p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-16));

  /* Spin-lock:

     The shared memory for the lock is at lockaddr.  It will hold zero
     if no-one is holding the lock, otherwise it contains the address of
     the collecting_t object on the stack of the thread which acquired it.

     At this stage, the stack pointer points to this thread's collecting_t
     object.

     We use the following registers:
     - x0: Address of the lock.
     - x1: Pointer to collecting_t object.
     - x2: Scratch register.

       MOV x0, #(lockaddr)
       ...
       MOV x1, sp

       ; Trigger an event local to this core.  So the following WFE
       ; instruction is ignored.
       SEVL
     again:
       ; Wait for an event.  The event is triggered by either the SEVL
       ; or STLR instructions (store release).
       WFE

       ; Atomically read at lockaddr.  This marks the memory location as
       ; exclusive.  This instruction also has memory constraints which
       ; make sure all previous data reads and writes are done before
       ; executing it.
       LDAXR x2, [x0]

       ; Try again if another thread holds the lock.
       CBNZ x2, again

       ; We can lock it!  Write the address of the collecting_t object.
       ; This instruction will fail if the memory location is not marked
       ; as exclusive anymore.  If it succeeds, it will remove the
       ; exclusive mark on the memory location.  This way, if another
       ; thread executes this instruction before us, we will fail and try
       ; all over again.
       STXR w2, x1, [x0]
       CBNZ w2, again

  */

  p += emit_mov_addr (p, x0, lockaddr);
  p += emit_mov (p, x1, register_operand (sp));

  p += emit_sevl (p);
  p += emit_wfe (p);
  p += emit_ldaxr (p, x2, x0);
  p += emit_cb (p, 1, w2, -2 * 4);
  p += emit_stxr (p, w2, x1, x0);
  p += emit_cb (p, 1, x2, -4 * 4);
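
  /* Note the branch offsets passed to emit_cb above are byte offsets
     relative to the CBNZ instruction itself: -8 and -16 both land on
     the WFE instruction, i.e. the "again" label in the pseudo-assembly
     above.  */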

  /* Call collector (struct tracepoint *, unsigned char *):

       MOV x0, #(tpoint)
       ...

       ; Saved registers start after the collecting_t object.
       ADD x1, sp, #16

       ; We use an intra-procedure-call scratch register.
       MOV ip0, #(collector)
       ...

       ; And call back to C!
       BLR ip0

  */

  p += emit_mov_addr (p, x0, tpoint);
  p += emit_add (p, x1, sp, immediate_operand (16));

  p += emit_mov_addr (p, ip0, collector);
  p += emit_blr (p, ip0);

  /* Release the lock.

       MOV x0, #(lockaddr)
       ...

       ; This instruction is a normal store with memory ordering
       ; constraints.  Thanks to this we do not have to put a data
       ; barrier instruction to make sure all data reads and writes are
       ; done before this instruction is executed.  Furthermore, this
       ; instruction will trigger an event, letting other threads know
       ; they can grab the lock.
       STLR xzr, [x0]

  */
  p += emit_mov_addr (p, x0, lockaddr);
  p += emit_stlr (p, xzr, x0);

  /* Free collecting_t object:

       ADD sp, sp, #16

  */
  p += emit_add (p, sp, sp, immediate_operand (16));

  /* Restore CPSR (NZCV), FPSR and FPCR.  And free all special purpose
     registers from the stack.

       LDR x2, [sp, #(2 * 16)]
       LDR x1, [sp, #(1 * 16)]
       LDR x0, [sp, #(0 * 16)]

       MSR NZCV, x2
       MSR FPSR, x1
       MSR FPCR, x0

       ADD sp, sp, #(5 * 16)

  */
  p += emit_ldr (p, x2, sp, offset_memory_operand (2 * 16));
  p += emit_ldr (p, x1, sp, offset_memory_operand (1 * 16));
  p += emit_ldr (p, x0, sp, offset_memory_operand (0 * 16));
  p += emit_msr (p, NZCV, x2);
  p += emit_msr (p, FPSR, x1);
  p += emit_msr (p, FPCR, x0);

  p += emit_add (p, sp, sp, immediate_operand (5 * 16));

  /* Pop general purpose registers:

       LDR x0, [sp]
       ...
       LDR x30, [sp, #(30 * 16)]

       ADD sp, sp, #(31 * 16)

  */
  for (i = 0; i <= 30; i += 1)
    p += emit_ldr (p, aarch64_register (i, 1), sp,
		   offset_memory_operand (i * 16));
  p += emit_add (p, sp, sp, immediate_operand (31 * 16));

  /* Pop SIMD&FP registers:

       LDP q0, q1, [sp]
       ...
       LDP q30, q31, [sp, #(30 * 16)]

       ADD sp, sp, #(32 * 16)

  */
  for (i = 0; i <= 30; i += 2)
    p += emit_ldp_q_offset (p, i, i + 1, sp, i * 16);
  p += emit_add (p, sp, sp, immediate_operand (32 * 16));

  /* Write the code into the inferior memory.  */
  append_insns (&buildaddr, p - buf, buf);

  /* Now emit the relocated instruction.  */
  *adjusted_insn_addr = buildaddr;
  aarch64_relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* We may not have been able to relocate the instruction.  */
  if (*adjusted_insn_addr == *adjusted_insn_addr_end)
    {
      sprintf (err,
	       "E.Could not relocate instruction from %s to %s.",
	       core_addr_to_string_nz (tpaddr),
	       core_addr_to_string_nz (buildaddr));
      return 1;
    }

  /* Go back to the start of the buffer.  */
  p = buf;

  /* Emit a branch back from the jump pad.  */
  offset = (tpaddr + orig_size - buildaddr);
  if (!can_encode_int32 (offset, 28))
    {
      sprintf (err,
	       "E.Jump back from jump pad too far from tracepoint "
	       "(offset 0x%" PRIx32 " cannot be encoded in 28 bits).",
	       offset);
      return 1;
    }

  p += emit_b (p, 0, offset);
  append_insns (&buildaddr, p - buf, buf);

  /* Give the caller a branch instruction into the jump pad.  */
  offset = (*jump_entry - tpaddr);
  if (!can_encode_int32 (offset, 28))
    {
      sprintf (err,
	       "E.Jump pad too far from tracepoint "
	       "(offset 0x%" PRIx32 " cannot be encoded in 28 bits).",
	       offset);
      return 1;
    }

  emit_b ((uint32_t *) jjump_pad_insn, 0, offset);
  *jjump_pad_insn_size = 4;

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

/* Implementation of linux_target_ops method
   "get_min_fast_tracepoint_insn_len".  */

static int
aarch64_get_min_fast_tracepoint_insn_len (void)
{
  return 4;
}

/* Implementation of linux_target_ops method "supports_range_stepping".  */

static int
aarch64_supports_range_stepping (void)
{
  return 1;
}

struct linux_target_ops the_low_target =
{
  aarch64_arch_setup,
  aarch64_regs_info,
  aarch64_cannot_fetch_register,
  aarch64_cannot_store_register,
  NULL, /* fetch_register */
  aarch64_get_pc,
  aarch64_set_pc,
  (const unsigned char *) &aarch64_breakpoint,
  aarch64_breakpoint_len,
  NULL, /* breakpoint_reinsert_addr */
  0,    /* decr_pc_after_break */
  aarch64_breakpoint_at,
  aarch64_supports_z_point_type,
  aarch64_insert_point,
  aarch64_remove_point,
  aarch64_stopped_by_watchpoint,
  aarch64_stopped_data_address,
  NULL, /* collect_ptrace_register */
  NULL, /* supply_ptrace_register */
  aarch64_linux_siginfo_fixup,
  aarch64_linux_new_process,
  aarch64_linux_new_thread,
  aarch64_linux_new_fork,
  aarch64_linux_prepare_to_resume,
  NULL, /* process_qsupported */
  aarch64_supports_tracepoints,
  aarch64_get_thread_area,
  aarch64_install_fast_tracepoint_jump_pad,
  NULL, /* emit_ops */
  aarch64_get_min_fast_tracepoint_insn_len,
  aarch64_supports_range_stepping,
};

void
initialize_low_arch (void)
{
  init_registers_aarch64 ();

  initialize_low_arch_aarch32 ();

  initialize_regsets_info (&aarch64_regsets_info);
}