1 /* GNU/Linux/AArch64 specific low level interface, for the remote server for
4 Copyright (C) 2009-2019 Free Software Foundation, Inc.
5 Contributed by ARM Ltd.
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
23 #include "linux-low.h"
24 #include "nat/aarch64-linux.h"
25 #include "nat/aarch64-linux-hw-point.h"
26 #include "arch/aarch64-insn.h"
27 #include "linux-aarch32-low.h"
28 #include "elf/common.h"
30 #include "tracepoint.h"
34 #include "nat/gdb_ptrace.h"
35 #include <asm/ptrace.h>
40 #include "gdb_proc_service.h"
41 #include "arch/aarch64.h"
42 #include "linux-aarch64-tdesc.h"
43 #include "nat/aarch64-sve-linux-ptrace.h"
/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  /* Hardware breakpoint/watchpoint data.
     The reason for them to be per-process rather than per-thread is
     due to the lack of information in the gdbserver environment;
     gdbserver is not told that whether a requested hardware
     breakpoint/watchpoint is thread specific or not, so it has to set
     each hw bp/wp for every thread in the current process.  The
     higher level bp/wp management in gdb will resume a thread if a hw
     bp/wp trap is not expected for it.  Since the hw bp/wp setting is
     same for each thread, it is reasonable for the data to live here.  */
  struct aarch64_debug_reg_state debug_reg_state;
};
67 /* Return true if the size of register 0 is 8 byte. */
72 struct regcache
*regcache
= get_thread_regcache (current_thread
, 0);
74 return register_size (regcache
->tdesc
, 0) == 8;
77 /* Return true if the regcache contains the number of SVE registers. */
82 struct regcache
*regcache
= get_thread_regcache (current_thread
, 0);
84 return regcache
->tdesc
->reg_defs
.size () == AARCH64_SVE_NUM_REGS
;
88 aarch64_fill_gregset (struct regcache
*regcache
, void *buf
)
90 struct user_pt_regs
*regset
= (struct user_pt_regs
*) buf
;
93 for (i
= 0; i
< AARCH64_X_REGS_NUM
; i
++)
94 collect_register (regcache
, AARCH64_X0_REGNUM
+ i
, ®set
->regs
[i
]);
95 collect_register (regcache
, AARCH64_SP_REGNUM
, ®set
->sp
);
96 collect_register (regcache
, AARCH64_PC_REGNUM
, ®set
->pc
);
97 collect_register (regcache
, AARCH64_CPSR_REGNUM
, ®set
->pstate
);
101 aarch64_store_gregset (struct regcache
*regcache
, const void *buf
)
103 const struct user_pt_regs
*regset
= (const struct user_pt_regs
*) buf
;
106 for (i
= 0; i
< AARCH64_X_REGS_NUM
; i
++)
107 supply_register (regcache
, AARCH64_X0_REGNUM
+ i
, ®set
->regs
[i
]);
108 supply_register (regcache
, AARCH64_SP_REGNUM
, ®set
->sp
);
109 supply_register (regcache
, AARCH64_PC_REGNUM
, ®set
->pc
);
110 supply_register (regcache
, AARCH64_CPSR_REGNUM
, ®set
->pstate
);
114 aarch64_fill_fpregset (struct regcache
*regcache
, void *buf
)
116 struct user_fpsimd_state
*regset
= (struct user_fpsimd_state
*) buf
;
119 for (i
= 0; i
< AARCH64_V_REGS_NUM
; i
++)
120 collect_register (regcache
, AARCH64_V0_REGNUM
+ i
, ®set
->vregs
[i
]);
121 collect_register (regcache
, AARCH64_FPSR_REGNUM
, ®set
->fpsr
);
122 collect_register (regcache
, AARCH64_FPCR_REGNUM
, ®set
->fpcr
);
126 aarch64_store_fpregset (struct regcache
*regcache
, const void *buf
)
128 const struct user_fpsimd_state
*regset
129 = (const struct user_fpsimd_state
*) buf
;
132 for (i
= 0; i
< AARCH64_V_REGS_NUM
; i
++)
133 supply_register (regcache
, AARCH64_V0_REGNUM
+ i
, ®set
->vregs
[i
]);
134 supply_register (regcache
, AARCH64_FPSR_REGNUM
, ®set
->fpsr
);
135 supply_register (regcache
, AARCH64_FPCR_REGNUM
, ®set
->fpcr
);
138 /* Store the pauth registers to regcache. */
141 aarch64_store_pauthregset (struct regcache
*regcache
, const void *buf
)
143 uint64_t *pauth_regset
= (uint64_t *) buf
;
144 int pauth_base
= find_regno (regcache
->tdesc
, "pauth_dmask");
149 supply_register (regcache
, AARCH64_PAUTH_DMASK_REGNUM (pauth_base
),
151 supply_register (regcache
, AARCH64_PAUTH_CMASK_REGNUM (pauth_base
),
155 /* Enable miscellaneous debugging output. The name is historical - it
156 was originally used to debug LinuxThreads support. */
157 extern int debug_threads
;
159 /* Implementation of linux_target_ops method "get_pc". */
162 aarch64_get_pc (struct regcache
*regcache
)
164 if (register_size (regcache
->tdesc
, 0) == 8)
165 return linux_get_pc_64bit (regcache
);
167 return linux_get_pc_32bit (regcache
);
170 /* Implementation of linux_target_ops method "set_pc". */
173 aarch64_set_pc (struct regcache
*regcache
, CORE_ADDR pc
)
175 if (register_size (regcache
->tdesc
, 0) == 8)
176 linux_set_pc_64bit (regcache
, pc
);
178 linux_set_pc_32bit (regcache
, pc
);
#define aarch64_breakpoint_len 4

/* AArch64 BRK software debug mode instruction.
   This instruction needs to match gdb/aarch64-tdep.c
   (aarch64_default_breakpoint).  */
static const gdb_byte aarch64_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
188 /* Implementation of linux_target_ops method "breakpoint_at". */
191 aarch64_breakpoint_at (CORE_ADDR where
)
193 if (is_64bit_tdesc ())
195 gdb_byte insn
[aarch64_breakpoint_len
];
197 (*the_target
->read_memory
) (where
, (unsigned char *) &insn
,
198 aarch64_breakpoint_len
);
199 if (memcmp (insn
, aarch64_breakpoint
, aarch64_breakpoint_len
) == 0)
205 return arm_breakpoint_at (where
);
209 aarch64_init_debug_reg_state (struct aarch64_debug_reg_state
*state
)
213 for (i
= 0; i
< AARCH64_HBP_MAX_NUM
; ++i
)
215 state
->dr_addr_bp
[i
] = 0;
216 state
->dr_ctrl_bp
[i
] = 0;
217 state
->dr_ref_count_bp
[i
] = 0;
220 for (i
= 0; i
< AARCH64_HWP_MAX_NUM
; ++i
)
222 state
->dr_addr_wp
[i
] = 0;
223 state
->dr_ctrl_wp
[i
] = 0;
224 state
->dr_ref_count_wp
[i
] = 0;
228 /* Return the pointer to the debug register state structure in the
229 current process' arch-specific data area. */
231 struct aarch64_debug_reg_state
*
232 aarch64_get_debug_reg_state (pid_t pid
)
234 struct process_info
*proc
= find_process_pid (pid
);
236 return &proc
->priv
->arch_private
->debug_reg_state
;
239 /* Implementation of linux_target_ops method "supports_z_point_type". */
242 aarch64_supports_z_point_type (char z_type
)
248 case Z_PACKET_WRITE_WP
:
249 case Z_PACKET_READ_WP
:
250 case Z_PACKET_ACCESS_WP
:
257 /* Implementation of linux_target_ops method "insert_point".
259 It actually only records the info of the to-be-inserted bp/wp;
260 the actual insertion will happen when threads are resumed. */
263 aarch64_insert_point (enum raw_bkpt_type type
, CORE_ADDR addr
,
264 int len
, struct raw_breakpoint
*bp
)
267 enum target_hw_bp_type targ_type
;
268 struct aarch64_debug_reg_state
*state
269 = aarch64_get_debug_reg_state (pid_of (current_thread
));
272 fprintf (stderr
, "insert_point on entry (addr=0x%08lx, len=%d)\n",
273 (unsigned long) addr
, len
);
275 /* Determine the type from the raw breakpoint type. */
276 targ_type
= raw_bkpt_type_to_target_hw_bp_type (type
);
278 if (targ_type
!= hw_execute
)
280 if (aarch64_linux_region_ok_for_watchpoint (addr
, len
))
281 ret
= aarch64_handle_watchpoint (targ_type
, addr
, len
,
282 1 /* is_insert */, state
);
290 /* LEN is 3 means the breakpoint is set on a 32-bit thumb
291 instruction. Set it to 2 to correctly encode length bit
292 mask in hardware/watchpoint control register. */
295 ret
= aarch64_handle_breakpoint (targ_type
, addr
, len
,
296 1 /* is_insert */, state
);
300 aarch64_show_debug_reg_state (state
, "insert_point", addr
, len
,
306 /* Implementation of linux_target_ops method "remove_point".
308 It actually only records the info of the to-be-removed bp/wp,
309 the actual removal will be done when threads are resumed. */
312 aarch64_remove_point (enum raw_bkpt_type type
, CORE_ADDR addr
,
313 int len
, struct raw_breakpoint
*bp
)
316 enum target_hw_bp_type targ_type
;
317 struct aarch64_debug_reg_state
*state
318 = aarch64_get_debug_reg_state (pid_of (current_thread
));
321 fprintf (stderr
, "remove_point on entry (addr=0x%08lx, len=%d)\n",
322 (unsigned long) addr
, len
);
324 /* Determine the type from the raw breakpoint type. */
325 targ_type
= raw_bkpt_type_to_target_hw_bp_type (type
);
327 /* Set up state pointers. */
328 if (targ_type
!= hw_execute
)
330 aarch64_handle_watchpoint (targ_type
, addr
, len
, 0 /* is_insert */,
336 /* LEN is 3 means the breakpoint is set on a 32-bit thumb
337 instruction. Set it to 2 to correctly encode length bit
338 mask in hardware/watchpoint control register. */
341 ret
= aarch64_handle_breakpoint (targ_type
, addr
, len
,
342 0 /* is_insert */, state
);
346 aarch64_show_debug_reg_state (state
, "remove_point", addr
, len
,
352 /* Implementation of linux_target_ops method "stopped_data_address". */
355 aarch64_stopped_data_address (void)
359 struct aarch64_debug_reg_state
*state
;
361 pid
= lwpid_of (current_thread
);
363 /* Get the siginfo. */
364 if (ptrace (PTRACE_GETSIGINFO
, pid
, NULL
, &siginfo
) != 0)
365 return (CORE_ADDR
) 0;
367 /* Need to be a hardware breakpoint/watchpoint trap. */
368 if (siginfo
.si_signo
!= SIGTRAP
369 || (siginfo
.si_code
& 0xffff) != 0x0004 /* TRAP_HWBKPT */)
370 return (CORE_ADDR
) 0;
372 /* Check if the address matches any watched address. */
373 state
= aarch64_get_debug_reg_state (pid_of (current_thread
));
374 for (i
= aarch64_num_wp_regs
- 1; i
>= 0; --i
)
376 const unsigned int offset
377 = aarch64_watchpoint_offset (state
->dr_ctrl_wp
[i
]);
378 const unsigned int len
= aarch64_watchpoint_length (state
->dr_ctrl_wp
[i
]);
379 const CORE_ADDR addr_trap
= (CORE_ADDR
) siginfo
.si_addr
;
380 const CORE_ADDR addr_watch
= state
->dr_addr_wp
[i
] + offset
;
381 const CORE_ADDR addr_watch_aligned
= align_down (state
->dr_addr_wp
[i
], 8);
382 const CORE_ADDR addr_orig
= state
->dr_addr_orig_wp
[i
];
384 if (state
->dr_ref_count_wp
[i
]
385 && DR_CONTROL_ENABLED (state
->dr_ctrl_wp
[i
])
386 && addr_trap
>= addr_watch_aligned
387 && addr_trap
< addr_watch
+ len
)
389 /* ADDR_TRAP reports the first address of the memory range
390 accessed by the CPU, regardless of what was the memory
391 range watched. Thus, a large CPU access that straddles
392 the ADDR_WATCH..ADDR_WATCH+LEN range may result in an
393 ADDR_TRAP that is lower than the
394 ADDR_WATCH..ADDR_WATCH+LEN range. E.g.:
396 addr: | 4 | 5 | 6 | 7 | 8 |
397 |---- range watched ----|
398 |----------- range accessed ------------|
400 In this case, ADDR_TRAP will be 4.
402 To match a watchpoint known to GDB core, we must never
403 report *ADDR_P outside of any ADDR_WATCH..ADDR_WATCH+LEN
404 range. ADDR_WATCH <= ADDR_TRAP < ADDR_ORIG is a false
405 positive on kernels older than 4.10. See PR
411 return (CORE_ADDR
) 0;
414 /* Implementation of linux_target_ops method "stopped_by_watchpoint". */
417 aarch64_stopped_by_watchpoint (void)
419 if (aarch64_stopped_data_address () != 0)
425 /* Fetch the thread-local storage pointer for libthread_db. */
428 ps_get_thread_area (struct ps_prochandle
*ph
,
429 lwpid_t lwpid
, int idx
, void **base
)
431 return aarch64_ps_get_thread_area (ph
, lwpid
, idx
, base
,
435 /* Implementation of linux_target_ops method "siginfo_fixup". */
438 aarch64_linux_siginfo_fixup (siginfo_t
*native
, gdb_byte
*inf
, int direction
)
440 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
441 if (!is_64bit_tdesc ())
444 aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo
*) inf
,
447 aarch64_siginfo_from_compat_siginfo (native
,
448 (struct compat_siginfo
*) inf
);
456 /* Implementation of linux_target_ops method "new_process". */
458 static struct arch_process_info
*
459 aarch64_linux_new_process (void)
461 struct arch_process_info
*info
= XCNEW (struct arch_process_info
);
463 aarch64_init_debug_reg_state (&info
->debug_reg_state
);
468 /* Implementation of linux_target_ops method "delete_process". */
471 aarch64_linux_delete_process (struct arch_process_info
*info
)
476 /* Implementation of linux_target_ops method "linux_new_fork". */
479 aarch64_linux_new_fork (struct process_info
*parent
,
480 struct process_info
*child
)
482 /* These are allocated by linux_add_process. */
483 gdb_assert (parent
->priv
!= NULL
484 && parent
->priv
->arch_private
!= NULL
);
485 gdb_assert (child
->priv
!= NULL
486 && child
->priv
->arch_private
!= NULL
);
488 /* Linux kernel before 2.6.33 commit
489 72f674d203cd230426437cdcf7dd6f681dad8b0d
490 will inherit hardware debug registers from parent
491 on fork/vfork/clone. Newer Linux kernels create such tasks with
492 zeroed debug registers.
494 GDB core assumes the child inherits the watchpoints/hw
495 breakpoints of the parent, and will remove them all from the
496 forked off process. Copy the debug registers mirrors into the
497 new process so that all breakpoints and watchpoints can be
498 removed together. The debug registers mirror will become zeroed
499 in the end before detaching the forked off process, thus making
500 this compatible with older Linux kernels too. */
502 *child
->priv
->arch_private
= *parent
->priv
->arch_private
;
505 /* Matches HWCAP_PACA in kernel header arch/arm64/include/uapi/asm/hwcap.h. */
506 #define AARCH64_HWCAP_PACA (1 << 30)
508 /* Fetch the AT_HWCAP entry from the auxv vector. */
511 aarch64_get_hwcap (unsigned long *valp
)
513 unsigned char *data
= (unsigned char *) alloca (16);
516 while ((*the_target
->read_auxv
) (offset
, data
, 16) == 16)
518 unsigned long *data_p
= (unsigned long *)data
;
519 if (data_p
[0] == AT_HWCAP
)
532 /* Implementation of linux_target_ops method "arch_setup". */
535 aarch64_arch_setup (void)
537 unsigned int machine
;
541 tid
= lwpid_of (current_thread
);
543 is_elf64
= linux_pid_exe_is_elf_64_file (tid
, &machine
);
547 uint64_t vq
= aarch64_sve_get_vq (tid
);
548 unsigned long hwcap
= 0;
549 bool pauth_p
= aarch64_get_hwcap (&hwcap
) && (hwcap
& AARCH64_HWCAP_PACA
);
551 current_process ()->tdesc
= aarch64_linux_read_description (vq
, pauth_p
);
554 current_process ()->tdesc
= tdesc_arm_with_neon
;
556 aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread
));
559 /* Wrapper for aarch64_sve_regs_copy_to_reg_buf. */
562 aarch64_sve_regs_copy_to_regcache (struct regcache
*regcache
, const void *buf
)
564 return aarch64_sve_regs_copy_to_reg_buf (regcache
, buf
);
567 /* Wrapper for aarch64_sve_regs_copy_from_reg_buf. */
570 aarch64_sve_regs_copy_from_regcache (struct regcache
*regcache
, void *buf
)
572 return aarch64_sve_regs_copy_from_reg_buf (regcache
, buf
);
/* Regsets for a 64-bit (non-SVE) inferior: general registers, FP/SIMD
   registers, and the optional pointer-authentication code masks
   (read-only, hence a NULL fill function).  */

static struct regset_info aarch64_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_FPREGSET,
    sizeof (struct user_fpsimd_state), FP_REGS,
    aarch64_fill_fpregset, aarch64_store_fpregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
    AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
    NULL, aarch64_store_pauthregset },
  NULL_REGSET
};
static struct regsets_info aarch64_regsets_info =
  {
    aarch64_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

static struct regs_info regs_info_aarch64 =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs */
    &aarch64_regsets_info,
  };
/* Regsets for an SVE-capable inferior: the FP/SIMD regset is replaced
   by the variable-length NT_ARM_SVE regset (sized for the maximum
   vector quotient).  */

static struct regset_info aarch64_sve_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_SVE,
    SVE_PT_SIZE (AARCH64_MAX_SVE_VQ, SVE_PT_REGS_SVE), EXTENDED_REGS,
    aarch64_sve_regs_copy_from_regcache, aarch64_sve_regs_copy_to_regcache },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
    AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
    NULL, aarch64_store_pauthregset },
  NULL_REGSET
};
static struct regsets_info aarch64_sve_regsets_info =
  {
    aarch64_sve_regsets, /* regsets.  */
    0, /* num_regsets.  */
    NULL, /* disabled_regsets.  */
  };

static struct regs_info regs_info_aarch64_sve =
  {
    NULL, /* regset_bitmap.  */
    NULL, /* usrregs.  */
    &aarch64_sve_regsets_info,
  };
633 /* Implementation of linux_target_ops method "regs_info". */
635 static const struct regs_info
*
636 aarch64_regs_info (void)
638 if (!is_64bit_tdesc ())
639 return ®s_info_aarch32
;
642 return ®s_info_aarch64_sve
;
644 return ®s_info_aarch64
;
647 /* Implementation of linux_target_ops method "supports_tracepoints". */
650 aarch64_supports_tracepoints (void)
652 if (current_thread
== NULL
)
656 /* We don't support tracepoints on aarch32 now. */
657 return is_64bit_tdesc ();
661 /* Implementation of linux_target_ops method "get_thread_area". */
664 aarch64_get_thread_area (int lwpid
, CORE_ADDR
*addrp
)
669 iovec
.iov_base
= ®
;
670 iovec
.iov_len
= sizeof (reg
);
672 if (ptrace (PTRACE_GETREGSET
, lwpid
, NT_ARM_TLS
, &iovec
) != 0)
680 /* Implementation of linux_target_ops method "get_syscall_trapinfo". */
683 aarch64_get_syscall_trapinfo (struct regcache
*regcache
, int *sysno
)
685 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
691 collect_register_by_name (regcache
, "x8", &l_sysno
);
692 *sysno
= (int) l_sysno
;
695 collect_register_by_name (regcache
, "r7", sysno
);
/* List of condition codes that we need.
   NOTE(review): the enumerator lists below were lost in extraction and
   are reconstructed from the upstream file — confirm against the
   repository copy.  */

enum aarch64_condition_codes
{
  EQ = 0x0,
  NE = 0x1,
  LO = 0x3,
  GE = 0xa,
  LT = 0xb,
  GT = 0xc,
  LE = 0xd,
};

enum aarch64_operand_type
{
  OPERAND_IMMEDIATE,
  OPERAND_REGISTER,
};

/* Representation of an operand.  At this time, it only supports register
   and immediate types.  */

struct aarch64_operand
{
  /* Type of the operand.  */
  enum aarch64_operand_type type;

  /* Value of the operand according to the type.  */
  union
    {
      uint32_t imm;
      struct aarch64_register reg;
    };
};
/* List of registers that we are currently using, we can add more here as
   we need to use them.  */

/* General purpose scratch registers (64 bit).  */
static const struct aarch64_register x0 = { 0, 1 };
static const struct aarch64_register x1 = { 1, 1 };
static const struct aarch64_register x2 = { 2, 1 };
static const struct aarch64_register x3 = { 3, 1 };
static const struct aarch64_register x4 = { 4, 1 };

/* General purpose scratch registers (32 bit).  */
static const struct aarch64_register w0 = { 0, 0 };
static const struct aarch64_register w2 = { 2, 0 };

/* Intra-procedure scratch registers.  */
static const struct aarch64_register ip0 = { 16, 1 };

/* Special purpose registers.  */
static const struct aarch64_register fp = { 29, 1 };
static const struct aarch64_register lr = { 30, 1 };
static const struct aarch64_register sp = { 31, 1 };
/* Register 31 encodes both SP and XZR; context selects which.  */
static const struct aarch64_register xzr = { 31, 1 };
756 /* Dynamically allocate a new register. If we know the register
757 statically, we should make it a global as above instead of using this
760 static struct aarch64_register
761 aarch64_register (unsigned num
, int is64
)
763 return (struct aarch64_register
) { num
, is64
};
766 /* Helper function to create a register operand, for instructions with
767 different types of operands.
770 p += emit_mov (p, x0, register_operand (x1)); */
772 static struct aarch64_operand
773 register_operand (struct aarch64_register reg
)
775 struct aarch64_operand operand
;
777 operand
.type
= OPERAND_REGISTER
;
783 /* Helper function to create an immediate operand, for instructions with
784 different types of operands.
787 p += emit_mov (p, x0, immediate_operand (12)); */
789 static struct aarch64_operand
790 immediate_operand (uint32_t imm
)
792 struct aarch64_operand operand
;
794 operand
.type
= OPERAND_IMMEDIATE
;
800 /* Helper function to create an offset memory operand.
803 p += emit_ldr (p, x0, sp, offset_memory_operand (16)); */
805 static struct aarch64_memory_operand
806 offset_memory_operand (int32_t offset
)
808 return (struct aarch64_memory_operand
) { MEMORY_OPERAND_OFFSET
, offset
};
811 /* Helper function to create a pre-index memory operand.
814 p += emit_ldr (p, x0, sp, preindex_memory_operand (16)); */
816 static struct aarch64_memory_operand
817 preindex_memory_operand (int32_t index
)
819 return (struct aarch64_memory_operand
) { MEMORY_OPERAND_PREINDEX
, index
};
822 /* Helper function to create a post-index memory operand.
825 p += emit_ldr (p, x0, sp, postindex_memory_operand (16)); */
827 static struct aarch64_memory_operand
828 postindex_memory_operand (int32_t index
)
830 return (struct aarch64_memory_operand
) { MEMORY_OPERAND_POSTINDEX
, index
};
/* System control registers.  These special registers can be written and
   read with the MRS and MSR instructions.

   - NZCV: Condition flags.  GDB refers to this register under the CPSR
     name.
   - FPSR: Floating-point status register.
   - FPCR: Floating-point control registers.
   - TPIDR_EL0: Software thread ID register.  */

enum aarch64_system_control_registers
{
  /*          op0           op1           crn          crm          op2  */
  NZCV =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
  FPSR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
  FPCR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
  TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
};
851 /* Write a BLR instruction into *BUF.
855 RN is the register to branch to. */
858 emit_blr (uint32_t *buf
, struct aarch64_register rn
)
860 return aarch64_emit_insn (buf
, BLR
| ENCODE (rn
.num
, 5, 5));
863 /* Write a RET instruction into *BUF.
867 RN is the register to branch to. */
870 emit_ret (uint32_t *buf
, struct aarch64_register rn
)
872 return aarch64_emit_insn (buf
, RET
| ENCODE (rn
.num
, 5, 5));
876 emit_load_store_pair (uint32_t *buf
, enum aarch64_opcodes opcode
,
877 struct aarch64_register rt
,
878 struct aarch64_register rt2
,
879 struct aarch64_register rn
,
880 struct aarch64_memory_operand operand
)
887 opc
= ENCODE (2, 2, 30);
889 opc
= ENCODE (0, 2, 30);
891 switch (operand
.type
)
893 case MEMORY_OPERAND_OFFSET
:
895 pre_index
= ENCODE (1, 1, 24);
896 write_back
= ENCODE (0, 1, 23);
899 case MEMORY_OPERAND_POSTINDEX
:
901 pre_index
= ENCODE (0, 1, 24);
902 write_back
= ENCODE (1, 1, 23);
905 case MEMORY_OPERAND_PREINDEX
:
907 pre_index
= ENCODE (1, 1, 24);
908 write_back
= ENCODE (1, 1, 23);
915 return aarch64_emit_insn (buf
, opcode
| opc
| pre_index
| write_back
916 | ENCODE (operand
.index
>> 3, 7, 15)
917 | ENCODE (rt2
.num
, 5, 10)
918 | ENCODE (rn
.num
, 5, 5) | ENCODE (rt
.num
, 5, 0));
921 /* Write a STP instruction into *BUF.
923 STP rt, rt2, [rn, #offset]
924 STP rt, rt2, [rn, #index]!
925 STP rt, rt2, [rn], #index
927 RT and RT2 are the registers to store.
928 RN is the base address register.
929 OFFSET is the immediate to add to the base address. It is limited to a
930 -512 .. 504 range (7 bits << 3). */
933 emit_stp (uint32_t *buf
, struct aarch64_register rt
,
934 struct aarch64_register rt2
, struct aarch64_register rn
,
935 struct aarch64_memory_operand operand
)
937 return emit_load_store_pair (buf
, STP
, rt
, rt2
, rn
, operand
);
940 /* Write a LDP instruction into *BUF.
942 LDP rt, rt2, [rn, #offset]
943 LDP rt, rt2, [rn, #index]!
944 LDP rt, rt2, [rn], #index
946 RT and RT2 are the registers to store.
947 RN is the base address register.
948 OFFSET is the immediate to add to the base address. It is limited to a
949 -512 .. 504 range (7 bits << 3). */
952 emit_ldp (uint32_t *buf
, struct aarch64_register rt
,
953 struct aarch64_register rt2
, struct aarch64_register rn
,
954 struct aarch64_memory_operand operand
)
956 return emit_load_store_pair (buf
, LDP
, rt
, rt2
, rn
, operand
);
959 /* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.
961 LDP qt, qt2, [rn, #offset]
963 RT and RT2 are the Q registers to store.
964 RN is the base address register.
965 OFFSET is the immediate to add to the base address. It is limited to
966 -1024 .. 1008 range (7 bits << 4). */
969 emit_ldp_q_offset (uint32_t *buf
, unsigned rt
, unsigned rt2
,
970 struct aarch64_register rn
, int32_t offset
)
972 uint32_t opc
= ENCODE (2, 2, 30);
973 uint32_t pre_index
= ENCODE (1, 1, 24);
975 return aarch64_emit_insn (buf
, LDP_SIMD_VFP
| opc
| pre_index
976 | ENCODE (offset
>> 4, 7, 15)
977 | ENCODE (rt2
, 5, 10)
978 | ENCODE (rn
.num
, 5, 5) | ENCODE (rt
, 5, 0));
981 /* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.
983 STP qt, qt2, [rn, #offset]
985 RT and RT2 are the Q registers to store.
986 RN is the base address register.
987 OFFSET is the immediate to add to the base address. It is limited to
988 -1024 .. 1008 range (7 bits << 4). */
991 emit_stp_q_offset (uint32_t *buf
, unsigned rt
, unsigned rt2
,
992 struct aarch64_register rn
, int32_t offset
)
994 uint32_t opc
= ENCODE (2, 2, 30);
995 uint32_t pre_index
= ENCODE (1, 1, 24);
997 return aarch64_emit_insn (buf
, STP_SIMD_VFP
| opc
| pre_index
998 | ENCODE (offset
>> 4, 7, 15)
999 | ENCODE (rt2
, 5, 10)
1000 | ENCODE (rn
.num
, 5, 5) | ENCODE (rt
, 5, 0));
1003 /* Write a LDRH instruction into *BUF.
1005 LDRH wt, [xn, #offset]
1006 LDRH wt, [xn, #index]!
1007 LDRH wt, [xn], #index
1009 RT is the register to store.
1010 RN is the base address register.
1011 OFFSET is the immediate to add to the base address. It is limited to
1012 0 .. 32760 range (12 bits << 3). */
1015 emit_ldrh (uint32_t *buf
, struct aarch64_register rt
,
1016 struct aarch64_register rn
,
1017 struct aarch64_memory_operand operand
)
1019 return aarch64_emit_load_store (buf
, 1, LDR
, rt
, rn
, operand
);
1022 /* Write a LDRB instruction into *BUF.
1024 LDRB wt, [xn, #offset]
1025 LDRB wt, [xn, #index]!
1026 LDRB wt, [xn], #index
1028 RT is the register to store.
1029 RN is the base address register.
1030 OFFSET is the immediate to add to the base address. It is limited to
1031 0 .. 32760 range (12 bits << 3). */
1034 emit_ldrb (uint32_t *buf
, struct aarch64_register rt
,
1035 struct aarch64_register rn
,
1036 struct aarch64_memory_operand operand
)
1038 return aarch64_emit_load_store (buf
, 0, LDR
, rt
, rn
, operand
);
1043 /* Write a STR instruction into *BUF.
1045 STR rt, [rn, #offset]
1046 STR rt, [rn, #index]!
1047 STR rt, [rn], #index
1049 RT is the register to store.
1050 RN is the base address register.
1051 OFFSET is the immediate to add to the base address. It is limited to
1052 0 .. 32760 range (12 bits << 3). */
1055 emit_str (uint32_t *buf
, struct aarch64_register rt
,
1056 struct aarch64_register rn
,
1057 struct aarch64_memory_operand operand
)
1059 return aarch64_emit_load_store (buf
, rt
.is64
? 3 : 2, STR
, rt
, rn
, operand
);
1062 /* Helper function emitting an exclusive load or store instruction. */
1065 emit_load_store_exclusive (uint32_t *buf
, uint32_t size
,
1066 enum aarch64_opcodes opcode
,
1067 struct aarch64_register rs
,
1068 struct aarch64_register rt
,
1069 struct aarch64_register rt2
,
1070 struct aarch64_register rn
)
1072 return aarch64_emit_insn (buf
, opcode
| ENCODE (size
, 2, 30)
1073 | ENCODE (rs
.num
, 5, 16) | ENCODE (rt2
.num
, 5, 10)
1074 | ENCODE (rn
.num
, 5, 5) | ENCODE (rt
.num
, 5, 0));
1077 /* Write a LAXR instruction into *BUF.
1081 RT is the destination register.
1082 RN is the base address register. */
1085 emit_ldaxr (uint32_t *buf
, struct aarch64_register rt
,
1086 struct aarch64_register rn
)
1088 return emit_load_store_exclusive (buf
, rt
.is64
? 3 : 2, LDAXR
, xzr
, rt
,
1092 /* Write a STXR instruction into *BUF.
1096 RS is the result register, it indicates if the store succeeded or not.
1097 RT is the destination register.
1098 RN is the base address register. */
1101 emit_stxr (uint32_t *buf
, struct aarch64_register rs
,
1102 struct aarch64_register rt
, struct aarch64_register rn
)
1104 return emit_load_store_exclusive (buf
, rt
.is64
? 3 : 2, STXR
, rs
, rt
,
1108 /* Write a STLR instruction into *BUF.
1112 RT is the register to store.
1113 RN is the base address register. */
1116 emit_stlr (uint32_t *buf
, struct aarch64_register rt
,
1117 struct aarch64_register rn
)
1119 return emit_load_store_exclusive (buf
, rt
.is64
? 3 : 2, STLR
, xzr
, rt
,
1123 /* Helper function for data processing instructions with register sources. */
1126 emit_data_processing_reg (uint32_t *buf
, uint32_t opcode
,
1127 struct aarch64_register rd
,
1128 struct aarch64_register rn
,
1129 struct aarch64_register rm
)
1131 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1133 return aarch64_emit_insn (buf
, opcode
| size
| ENCODE (rm
.num
, 5, 16)
1134 | ENCODE (rn
.num
, 5, 5) | ENCODE (rd
.num
, 5, 0));
1137 /* Helper function for data processing instructions taking either a register
1141 emit_data_processing (uint32_t *buf
, enum aarch64_opcodes opcode
,
1142 struct aarch64_register rd
,
1143 struct aarch64_register rn
,
1144 struct aarch64_operand operand
)
1146 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1147 /* The opcode is different for register and immediate source operands. */
1148 uint32_t operand_opcode
;
1150 if (operand
.type
== OPERAND_IMMEDIATE
)
1152 /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
1153 operand_opcode
= ENCODE (8, 4, 25);
1155 return aarch64_emit_insn (buf
, opcode
| operand_opcode
| size
1156 | ENCODE (operand
.imm
, 12, 10)
1157 | ENCODE (rn
.num
, 5, 5)
1158 | ENCODE (rd
.num
, 5, 0));
1162 /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
1163 operand_opcode
= ENCODE (5, 4, 25);
1165 return emit_data_processing_reg (buf
, opcode
| operand_opcode
, rd
,
1170 /* Write an ADD instruction into *BUF.
1175 This function handles both an immediate and register add.
1177 RD is the destination register.
1178 RN is the input register.
1179 OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
1180 OPERAND_REGISTER. */
1183 emit_add (uint32_t *buf
, struct aarch64_register rd
,
1184 struct aarch64_register rn
, struct aarch64_operand operand
)
1186 return emit_data_processing (buf
, ADD
, rd
, rn
, operand
);
1189 /* Write a SUB instruction into *BUF.
1194 This function handles both an immediate and register sub.
1196 RD is the destination register.
1197 RN is the input register.
1198 IMM is the immediate to substract to RN. */
1201 emit_sub (uint32_t *buf
, struct aarch64_register rd
,
1202 struct aarch64_register rn
, struct aarch64_operand operand
)
1204 return emit_data_processing (buf
, SUB
, rd
, rn
, operand
);
1207 /* Write a MOV instruction into *BUF.
1212 This function handles both a wide immediate move and a register move,
1213 with the condition that the source register is not xzr. xzr and the
1214 stack pointer share the same encoding and this function only supports
1217 RD is the destination register.
1218 OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
1219 OPERAND_REGISTER. */
1222 emit_mov (uint32_t *buf
, struct aarch64_register rd
,
1223 struct aarch64_operand operand
)
1225 if (operand
.type
== OPERAND_IMMEDIATE
)
1227 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1228 /* Do not shift the immediate. */
1229 uint32_t shift
= ENCODE (0, 2, 21);
1231 return aarch64_emit_insn (buf
, MOV
| size
| shift
1232 | ENCODE (operand
.imm
, 16, 5)
1233 | ENCODE (rd
.num
, 5, 0));
1236 return emit_add (buf
, rd
, operand
.reg
, immediate_operand (0));
1239 /* Write a MOVK instruction into *BUF.
1241 MOVK rd, #imm, lsl #shift
1243 RD is the destination register.
1244 IMM is the immediate.
1245 SHIFT is the logical shift left to apply to IMM. */
1248 emit_movk (uint32_t *buf
, struct aarch64_register rd
, uint32_t imm
,
1251 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1253 return aarch64_emit_insn (buf
, MOVK
| size
| ENCODE (shift
, 2, 21) |
1254 ENCODE (imm
, 16, 5) | ENCODE (rd
.num
, 5, 0));
1257 /* Write instructions into *BUF in order to move ADDR into a register.
1258 ADDR can be a 64-bit value.
1260 This function will emit a series of MOV and MOVK instructions, such as:
1263 MOVK xd, #(addr >> 16), lsl #16
1264 MOVK xd, #(addr >> 32), lsl #32
1265 MOVK xd, #(addr >> 48), lsl #48 */
1268 emit_mov_addr (uint32_t *buf
, struct aarch64_register rd
, CORE_ADDR addr
)
1272 /* The MOV (wide immediate) instruction clears to top bits of the
1274 p
+= emit_mov (p
, rd
, immediate_operand (addr
& 0xffff));
1276 if ((addr
>> 16) != 0)
1277 p
+= emit_movk (p
, rd
, (addr
>> 16) & 0xffff, 1);
1281 if ((addr
>> 32) != 0)
1282 p
+= emit_movk (p
, rd
, (addr
>> 32) & 0xffff, 2);
1286 if ((addr
>> 48) != 0)
1287 p
+= emit_movk (p
, rd
, (addr
>> 48) & 0xffff, 3);
1292 /* Write a SUBS instruction into *BUF.
1296 This instruction update the condition flags.
1298 RD is the destination register.
1299 RN and RM are the source registers. */
1302 emit_subs (uint32_t *buf
, struct aarch64_register rd
,
1303 struct aarch64_register rn
, struct aarch64_operand operand
)
1305 return emit_data_processing (buf
, SUBS
, rd
, rn
, operand
);
1308 /* Write a CMP instruction into *BUF.
1312 This instruction is an alias of SUBS xzr, rn, rm.
1314 RN and RM are the registers to compare. */
1317 emit_cmp (uint32_t *buf
, struct aarch64_register rn
,
1318 struct aarch64_operand operand
)
1320 return emit_subs (buf
, xzr
, rn
, operand
);
1323 /* Write a AND instruction into *BUF.
1327 RD is the destination register.
1328 RN and RM are the source registers. */
1331 emit_and (uint32_t *buf
, struct aarch64_register rd
,
1332 struct aarch64_register rn
, struct aarch64_register rm
)
1334 return emit_data_processing_reg (buf
, AND
, rd
, rn
, rm
);
1337 /* Write a ORR instruction into *BUF.
1341 RD is the destination register.
1342 RN and RM are the source registers. */
1345 emit_orr (uint32_t *buf
, struct aarch64_register rd
,
1346 struct aarch64_register rn
, struct aarch64_register rm
)
1348 return emit_data_processing_reg (buf
, ORR
, rd
, rn
, rm
);
1351 /* Write a ORN instruction into *BUF.
1355 RD is the destination register.
1356 RN and RM are the source registers. */
1359 emit_orn (uint32_t *buf
, struct aarch64_register rd
,
1360 struct aarch64_register rn
, struct aarch64_register rm
)
1362 return emit_data_processing_reg (buf
, ORN
, rd
, rn
, rm
);
1365 /* Write a EOR instruction into *BUF.
1369 RD is the destination register.
1370 RN and RM are the source registers. */
1373 emit_eor (uint32_t *buf
, struct aarch64_register rd
,
1374 struct aarch64_register rn
, struct aarch64_register rm
)
1376 return emit_data_processing_reg (buf
, EOR
, rd
, rn
, rm
);
1379 /* Write a MVN instruction into *BUF.
1383 This is an alias for ORN rd, xzr, rm.
1385 RD is the destination register.
1386 RM is the source register. */
1389 emit_mvn (uint32_t *buf
, struct aarch64_register rd
,
1390 struct aarch64_register rm
)
1392 return emit_orn (buf
, rd
, xzr
, rm
);
1395 /* Write a LSLV instruction into *BUF.
1399 RD is the destination register.
1400 RN and RM are the source registers. */
1403 emit_lslv (uint32_t *buf
, struct aarch64_register rd
,
1404 struct aarch64_register rn
, struct aarch64_register rm
)
1406 return emit_data_processing_reg (buf
, LSLV
, rd
, rn
, rm
);
1409 /* Write a LSRV instruction into *BUF.
1413 RD is the destination register.
1414 RN and RM are the source registers. */
1417 emit_lsrv (uint32_t *buf
, struct aarch64_register rd
,
1418 struct aarch64_register rn
, struct aarch64_register rm
)
1420 return emit_data_processing_reg (buf
, LSRV
, rd
, rn
, rm
);
1423 /* Write a ASRV instruction into *BUF.
1427 RD is the destination register.
1428 RN and RM are the source registers. */
1431 emit_asrv (uint32_t *buf
, struct aarch64_register rd
,
1432 struct aarch64_register rn
, struct aarch64_register rm
)
1434 return emit_data_processing_reg (buf
, ASRV
, rd
, rn
, rm
);
1437 /* Write a MUL instruction into *BUF.
1441 RD is the destination register.
1442 RN and RM are the source registers. */
1445 emit_mul (uint32_t *buf
, struct aarch64_register rd
,
1446 struct aarch64_register rn
, struct aarch64_register rm
)
1448 return emit_data_processing_reg (buf
, MUL
, rd
, rn
, rm
);
1451 /* Write a MRS instruction into *BUF. The register size is 64-bit.
1455 RT is the destination register.
1456 SYSTEM_REG is special purpose register to read. */
1459 emit_mrs (uint32_t *buf
, struct aarch64_register rt
,
1460 enum aarch64_system_control_registers system_reg
)
1462 return aarch64_emit_insn (buf
, MRS
| ENCODE (system_reg
, 15, 5)
1463 | ENCODE (rt
.num
, 5, 0));
1466 /* Write a MSR instruction into *BUF. The register size is 64-bit.
1470 SYSTEM_REG is special purpose register to write.
1471 RT is the input register. */
1474 emit_msr (uint32_t *buf
, enum aarch64_system_control_registers system_reg
,
1475 struct aarch64_register rt
)
1477 return aarch64_emit_insn (buf
, MSR
| ENCODE (system_reg
, 15, 5)
1478 | ENCODE (rt
.num
, 5, 0));
1481 /* Write a SEVL instruction into *BUF.
1483 This is a hint instruction telling the hardware to trigger an event. */
1486 emit_sevl (uint32_t *buf
)
1488 return aarch64_emit_insn (buf
, SEVL
);
1491 /* Write a WFE instruction into *BUF.
1493 This is a hint instruction telling the hardware to wait for an event. */
1496 emit_wfe (uint32_t *buf
)
1498 return aarch64_emit_insn (buf
, WFE
);
1501 /* Write a SBFM instruction into *BUF.
1503 SBFM rd, rn, #immr, #imms
1505 This instruction moves the bits from #immr to #imms into the
1506 destination, sign extending the result.
1508 RD is the destination register.
1509 RN is the source register.
1510 IMMR is the bit number to start at (least significant bit).
1511 IMMS is the bit number to stop at (most significant bit). */
1514 emit_sbfm (uint32_t *buf
, struct aarch64_register rd
,
1515 struct aarch64_register rn
, uint32_t immr
, uint32_t imms
)
1517 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1518 uint32_t n
= ENCODE (rd
.is64
, 1, 22);
1520 return aarch64_emit_insn (buf
, SBFM
| size
| n
| ENCODE (immr
, 6, 16)
1521 | ENCODE (imms
, 6, 10) | ENCODE (rn
.num
, 5, 5)
1522 | ENCODE (rd
.num
, 5, 0));
1525 /* Write a SBFX instruction into *BUF.
1527 SBFX rd, rn, #lsb, #width
1529 This instruction moves #width bits from #lsb into the destination, sign
1530 extending the result. This is an alias for:
1532 SBFM rd, rn, #lsb, #(lsb + width - 1)
1534 RD is the destination register.
1535 RN is the source register.
1536 LSB is the bit number to start at (least significant bit).
1537 WIDTH is the number of bits to move. */
1540 emit_sbfx (uint32_t *buf
, struct aarch64_register rd
,
1541 struct aarch64_register rn
, uint32_t lsb
, uint32_t width
)
1543 return emit_sbfm (buf
, rd
, rn
, lsb
, lsb
+ width
- 1);
1546 /* Write a UBFM instruction into *BUF.
1548 UBFM rd, rn, #immr, #imms
1550 This instruction moves the bits from #immr to #imms into the
1551 destination, extending the result with zeros.
1553 RD is the destination register.
1554 RN is the source register.
1555 IMMR is the bit number to start at (least significant bit).
1556 IMMS is the bit number to stop at (most significant bit). */
1559 emit_ubfm (uint32_t *buf
, struct aarch64_register rd
,
1560 struct aarch64_register rn
, uint32_t immr
, uint32_t imms
)
1562 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1563 uint32_t n
= ENCODE (rd
.is64
, 1, 22);
1565 return aarch64_emit_insn (buf
, UBFM
| size
| n
| ENCODE (immr
, 6, 16)
1566 | ENCODE (imms
, 6, 10) | ENCODE (rn
.num
, 5, 5)
1567 | ENCODE (rd
.num
, 5, 0));
1570 /* Write a UBFX instruction into *BUF.
1572 UBFX rd, rn, #lsb, #width
1574 This instruction moves #width bits from #lsb into the destination,
1575 extending the result with zeros. This is an alias for:
1577 UBFM rd, rn, #lsb, #(lsb + width - 1)
1579 RD is the destination register.
1580 RN is the source register.
1581 LSB is the bit number to start at (least significant bit).
1582 WIDTH is the number of bits to move. */
1585 emit_ubfx (uint32_t *buf
, struct aarch64_register rd
,
1586 struct aarch64_register rn
, uint32_t lsb
, uint32_t width
)
1588 return emit_ubfm (buf
, rd
, rn
, lsb
, lsb
+ width
- 1);
1591 /* Write a CSINC instruction into *BUF.
1593 CSINC rd, rn, rm, cond
1595 This instruction conditionally increments rn or rm and places the result
1596 in rd. rn is chosen is the condition is true.
1598 RD is the destination register.
1599 RN and RM are the source registers.
1600 COND is the encoded condition. */
1603 emit_csinc (uint32_t *buf
, struct aarch64_register rd
,
1604 struct aarch64_register rn
, struct aarch64_register rm
,
1607 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1609 return aarch64_emit_insn (buf
, CSINC
| size
| ENCODE (rm
.num
, 5, 16)
1610 | ENCODE (cond
, 4, 12) | ENCODE (rn
.num
, 5, 5)
1611 | ENCODE (rd
.num
, 5, 0));
1614 /* Write a CSET instruction into *BUF.
1618 This instruction conditionally write 1 or 0 in the destination register.
1619 1 is written if the condition is true. This is an alias for:
1621 CSINC rd, xzr, xzr, !cond
1623 Note that the condition needs to be inverted.
1625 RD is the destination register.
1626 RN and RM are the source registers.
1627 COND is the encoded condition. */
1630 emit_cset (uint32_t *buf
, struct aarch64_register rd
, unsigned cond
)
1632 /* The least significant bit of the condition needs toggling in order to
1634 return emit_csinc (buf
, rd
, xzr
, xzr
, cond
^ 0x1);
1637 /* Write LEN instructions from BUF into the inferior memory at *TO.
1639 Note instructions are always little endian on AArch64, unlike data. */
1642 append_insns (CORE_ADDR
*to
, size_t len
, const uint32_t *buf
)
1644 size_t byte_len
= len
* sizeof (uint32_t);
1645 #if (__BYTE_ORDER == __BIG_ENDIAN)
1646 uint32_t *le_buf
= (uint32_t *) xmalloc (byte_len
);
1649 for (i
= 0; i
< len
; i
++)
1650 le_buf
[i
] = htole32 (buf
[i
]);
1652 write_inferior_memory (*to
, (const unsigned char *) le_buf
, byte_len
);
1656 write_inferior_memory (*to
, (const unsigned char *) buf
, byte_len
);
1662 /* Sub-class of struct aarch64_insn_data, store information of
1663 instruction relocation for fast tracepoint. Visitor can
1664 relocate an instruction from BASE.INSN_ADDR to NEW_ADDR and save
1665 the relocated instructions in buffer pointed by INSN_PTR. */
1667 struct aarch64_insn_relocation_data
1669 struct aarch64_insn_data base
;
1671 /* The new address the instruction is relocated to. */
1673 /* Pointer to the buffer of relocated instruction(s). */
1677 /* Implementation of aarch64_insn_visitor method "b". */
1680 aarch64_ftrace_insn_reloc_b (const int is_bl
, const int32_t offset
,
1681 struct aarch64_insn_data
*data
)
1683 struct aarch64_insn_relocation_data
*insn_reloc
1684 = (struct aarch64_insn_relocation_data
*) data
;
1686 = insn_reloc
->base
.insn_addr
- insn_reloc
->new_addr
+ offset
;
1688 if (can_encode_int32 (new_offset
, 28))
1689 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, is_bl
, new_offset
);
1692 /* Implementation of aarch64_insn_visitor method "b_cond". */
1695 aarch64_ftrace_insn_reloc_b_cond (const unsigned cond
, const int32_t offset
,
1696 struct aarch64_insn_data
*data
)
1698 struct aarch64_insn_relocation_data
*insn_reloc
1699 = (struct aarch64_insn_relocation_data
*) data
;
1701 = insn_reloc
->base
.insn_addr
- insn_reloc
->new_addr
+ offset
;
1703 if (can_encode_int32 (new_offset
, 21))
1705 insn_reloc
->insn_ptr
+= emit_bcond (insn_reloc
->insn_ptr
, cond
,
1708 else if (can_encode_int32 (new_offset
, 28))
1710 /* The offset is out of range for a conditional branch
1711 instruction but not for a unconditional branch. We can use
1712 the following instructions instead:
1714 B.COND TAKEN ; If cond is true, then jump to TAKEN.
1715 B NOT_TAKEN ; Else jump over TAKEN and continue.
1722 insn_reloc
->insn_ptr
+= emit_bcond (insn_reloc
->insn_ptr
, cond
, 8);
1723 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, 0, 8);
1724 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, 0, new_offset
- 8);
1728 /* Implementation of aarch64_insn_visitor method "cb". */
1731 aarch64_ftrace_insn_reloc_cb (const int32_t offset
, const int is_cbnz
,
1732 const unsigned rn
, int is64
,
1733 struct aarch64_insn_data
*data
)
1735 struct aarch64_insn_relocation_data
*insn_reloc
1736 = (struct aarch64_insn_relocation_data
*) data
;
1738 = insn_reloc
->base
.insn_addr
- insn_reloc
->new_addr
+ offset
;
1740 if (can_encode_int32 (new_offset
, 21))
1742 insn_reloc
->insn_ptr
+= emit_cb (insn_reloc
->insn_ptr
, is_cbnz
,
1743 aarch64_register (rn
, is64
), new_offset
);
1745 else if (can_encode_int32 (new_offset
, 28))
1747 /* The offset is out of range for a compare and branch
1748 instruction but not for a unconditional branch. We can use
1749 the following instructions instead:
1751 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
1752 B NOT_TAKEN ; Else jump over TAKEN and continue.
1758 insn_reloc
->insn_ptr
+= emit_cb (insn_reloc
->insn_ptr
, is_cbnz
,
1759 aarch64_register (rn
, is64
), 8);
1760 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, 0, 8);
1761 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, 0, new_offset
- 8);
1765 /* Implementation of aarch64_insn_visitor method "tb". */
1768 aarch64_ftrace_insn_reloc_tb (const int32_t offset
, int is_tbnz
,
1769 const unsigned rt
, unsigned bit
,
1770 struct aarch64_insn_data
*data
)
1772 struct aarch64_insn_relocation_data
*insn_reloc
1773 = (struct aarch64_insn_relocation_data
*) data
;
1775 = insn_reloc
->base
.insn_addr
- insn_reloc
->new_addr
+ offset
;
1777 if (can_encode_int32 (new_offset
, 16))
1779 insn_reloc
->insn_ptr
+= emit_tb (insn_reloc
->insn_ptr
, is_tbnz
, bit
,
1780 aarch64_register (rt
, 1), new_offset
);
1782 else if (can_encode_int32 (new_offset
, 28))
1784 /* The offset is out of range for a test bit and branch
1785 instruction but not for a unconditional branch. We can use
1786 the following instructions instead:
1788 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
1789 B NOT_TAKEN ; Else jump over TAKEN and continue.
1795 insn_reloc
->insn_ptr
+= emit_tb (insn_reloc
->insn_ptr
, is_tbnz
, bit
,
1796 aarch64_register (rt
, 1), 8);
1797 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, 0, 8);
1798 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, 0,
1803 /* Implementation of aarch64_insn_visitor method "adr". */
1806 aarch64_ftrace_insn_reloc_adr (const int32_t offset
, const unsigned rd
,
1808 struct aarch64_insn_data
*data
)
1810 struct aarch64_insn_relocation_data
*insn_reloc
1811 = (struct aarch64_insn_relocation_data
*) data
;
1812 /* We know exactly the address the ADR{P,} instruction will compute.
1813 We can just write it to the destination register. */
1814 CORE_ADDR address
= data
->insn_addr
+ offset
;
1818 /* Clear the lower 12 bits of the offset to get the 4K page. */
1819 insn_reloc
->insn_ptr
+= emit_mov_addr (insn_reloc
->insn_ptr
,
1820 aarch64_register (rd
, 1),
1824 insn_reloc
->insn_ptr
+= emit_mov_addr (insn_reloc
->insn_ptr
,
1825 aarch64_register (rd
, 1), address
);
1828 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
1831 aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset
, const int is_sw
,
1832 const unsigned rt
, const int is64
,
1833 struct aarch64_insn_data
*data
)
1835 struct aarch64_insn_relocation_data
*insn_reloc
1836 = (struct aarch64_insn_relocation_data
*) data
;
1837 CORE_ADDR address
= data
->insn_addr
+ offset
;
1839 insn_reloc
->insn_ptr
+= emit_mov_addr (insn_reloc
->insn_ptr
,
1840 aarch64_register (rt
, 1), address
);
1842 /* We know exactly what address to load from, and what register we
1845 MOV xd, #(oldloc + offset)
1846 MOVK xd, #((oldloc + offset) >> 16), lsl #16
1849 LDR xd, [xd] ; or LDRSW xd, [xd]
1854 insn_reloc
->insn_ptr
+= emit_ldrsw (insn_reloc
->insn_ptr
,
1855 aarch64_register (rt
, 1),
1856 aarch64_register (rt
, 1),
1857 offset_memory_operand (0));
1859 insn_reloc
->insn_ptr
+= emit_ldr (insn_reloc
->insn_ptr
,
1860 aarch64_register (rt
, is64
),
1861 aarch64_register (rt
, 1),
1862 offset_memory_operand (0));
1865 /* Implementation of aarch64_insn_visitor method "others". */
1868 aarch64_ftrace_insn_reloc_others (const uint32_t insn
,
1869 struct aarch64_insn_data
*data
)
1871 struct aarch64_insn_relocation_data
*insn_reloc
1872 = (struct aarch64_insn_relocation_data
*) data
;
1874 /* The instruction is not PC relative. Just re-emit it at the new
1876 insn_reloc
->insn_ptr
+= aarch64_emit_insn (insn_reloc
->insn_ptr
, insn
);
1879 static const struct aarch64_insn_visitor visitor
=
1881 aarch64_ftrace_insn_reloc_b
,
1882 aarch64_ftrace_insn_reloc_b_cond
,
1883 aarch64_ftrace_insn_reloc_cb
,
1884 aarch64_ftrace_insn_reloc_tb
,
1885 aarch64_ftrace_insn_reloc_adr
,
1886 aarch64_ftrace_insn_reloc_ldr_literal
,
1887 aarch64_ftrace_insn_reloc_others
,
1890 /* Implementation of linux_target_ops method
1891 "install_fast_tracepoint_jump_pad". */
1894 aarch64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
,
1896 CORE_ADDR collector
,
1899 CORE_ADDR
*jump_entry
,
1900 CORE_ADDR
*trampoline
,
1901 ULONGEST
*trampoline_size
,
1902 unsigned char *jjump_pad_insn
,
1903 ULONGEST
*jjump_pad_insn_size
,
1904 CORE_ADDR
*adjusted_insn_addr
,
1905 CORE_ADDR
*adjusted_insn_addr_end
,
1913 CORE_ADDR buildaddr
= *jump_entry
;
1914 struct aarch64_insn_relocation_data insn_data
;
1916 /* We need to save the current state on the stack both to restore it
1917 later and to collect register values when the tracepoint is hit.
1919 The saved registers are pushed in a layout that needs to be in sync
1920 with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c). Later on
1921 the supply_fast_tracepoint_registers function will fill in the
1922 register cache from a pointer to saved registers on the stack we build
1925 For simplicity, we set the size of each cell on the stack to 16 bytes.
1926 This way one cell can hold any register type, from system registers
1927 to the 128 bit SIMD&FP registers. Furthermore, the stack pointer
1928 has to be 16 bytes aligned anyway.
1930 Note that the CPSR register does not exist on AArch64. Instead we
1931 can access system bits describing the process state with the
1932 MRS/MSR instructions, namely the condition flags. We save them as
1933 if they are part of a CPSR register because that's how GDB
1934 interprets these system bits. At the moment, only the condition
1935 flags are saved in CPSR (NZCV).
1937 Stack layout, each cell is 16 bytes (descending):
1939 High *-------- SIMD&FP registers from 31 down to 0. --------*
1945 *---- General purpose registers from 30 down to 0. ----*
1951 *------------- Special purpose registers. -------------*
1954 | CPSR (NZCV) | 5 cells
1957 *------------- collecting_t object --------------------*
1958 | TPIDR_EL0 | struct tracepoint * |
1959 Low *------------------------------------------------------*
1961 After this stack is set up, we issue a call to the collector, passing
1962 it the saved registers at (SP + 16). */
1964 /* Push SIMD&FP registers on the stack:
1966 SUB sp, sp, #(32 * 16)
1968 STP q30, q31, [sp, #(30 * 16)]
1973 p
+= emit_sub (p
, sp
, sp
, immediate_operand (32 * 16));
1974 for (i
= 30; i
>= 0; i
-= 2)
1975 p
+= emit_stp_q_offset (p
, i
, i
+ 1, sp
, i
* 16);
1977 /* Push general puspose registers on the stack. Note that we do not need
1978 to push x31 as it represents the xzr register and not the stack
1979 pointer in a STR instruction.
1981 SUB sp, sp, #(31 * 16)
1983 STR x30, [sp, #(30 * 16)]
1988 p
+= emit_sub (p
, sp
, sp
, immediate_operand (31 * 16));
1989 for (i
= 30; i
>= 0; i
-= 1)
1990 p
+= emit_str (p
, aarch64_register (i
, 1), sp
,
1991 offset_memory_operand (i
* 16));
1993 /* Make space for 5 more cells.
1995 SUB sp, sp, #(5 * 16)
1998 p
+= emit_sub (p
, sp
, sp
, immediate_operand (5 * 16));
2003 ADD x4, sp, #((32 + 31 + 5) * 16)
2004 STR x4, [sp, #(4 * 16)]
2007 p
+= emit_add (p
, x4
, sp
, immediate_operand ((32 + 31 + 5) * 16));
2008 p
+= emit_str (p
, x4
, sp
, offset_memory_operand (4 * 16));
2010 /* Save PC (tracepoint address):
2015 STR x3, [sp, #(3 * 16)]
2019 p
+= emit_mov_addr (p
, x3
, tpaddr
);
2020 p
+= emit_str (p
, x3
, sp
, offset_memory_operand (3 * 16));
2022 /* Save CPSR (NZCV), FPSR and FPCR:
2028 STR x2, [sp, #(2 * 16)]
2029 STR x1, [sp, #(1 * 16)]
2030 STR x0, [sp, #(0 * 16)]
2033 p
+= emit_mrs (p
, x2
, NZCV
);
2034 p
+= emit_mrs (p
, x1
, FPSR
);
2035 p
+= emit_mrs (p
, x0
, FPCR
);
2036 p
+= emit_str (p
, x2
, sp
, offset_memory_operand (2 * 16));
2037 p
+= emit_str (p
, x1
, sp
, offset_memory_operand (1 * 16));
2038 p
+= emit_str (p
, x0
, sp
, offset_memory_operand (0 * 16));
2040 /* Push the collecting_t object. It consist of the address of the
2041 tracepoint and an ID for the current thread. We get the latter by
2042 reading the tpidr_el0 system register. It corresponds to the
2043 NT_ARM_TLS register accessible with ptrace.
2050 STP x0, x1, [sp, #-16]!
2054 p
+= emit_mov_addr (p
, x0
, tpoint
);
2055 p
+= emit_mrs (p
, x1
, TPIDR_EL0
);
2056 p
+= emit_stp (p
, x0
, x1
, sp
, preindex_memory_operand (-16));
2060 The shared memory for the lock is at lockaddr. It will hold zero
2061 if no-one is holding the lock, otherwise it contains the address of
2062 the collecting_t object on the stack of the thread which acquired it.
2064 At this stage, the stack pointer points to this thread's collecting_t
2067 We use the following registers:
2068 - x0: Address of the lock.
2069 - x1: Pointer to collecting_t object.
2070 - x2: Scratch register.
2076 ; Trigger an event local to this core. So the following WFE
2077 ; instruction is ignored.
2080 ; Wait for an event. The event is triggered by either the SEVL
2081 ; or STLR instructions (store release).
2084 ; Atomically read at lockaddr. This marks the memory location as
2085 ; exclusive. This instruction also has memory constraints which
2086 ; make sure all previous data reads and writes are done before
2090 ; Try again if another thread holds the lock.
2093 ; We can lock it! Write the address of the collecting_t object.
2094 ; This instruction will fail if the memory location is not marked
2095 ; as exclusive anymore. If it succeeds, it will remove the
2096 ; exclusive mark on the memory location. This way, if another
2097 ; thread executes this instruction before us, we will fail and try
2104 p
+= emit_mov_addr (p
, x0
, lockaddr
);
2105 p
+= emit_mov (p
, x1
, register_operand (sp
));
2109 p
+= emit_ldaxr (p
, x2
, x0
);
2110 p
+= emit_cb (p
, 1, w2
, -2 * 4);
2111 p
+= emit_stxr (p
, w2
, x1
, x0
);
2112 p
+= emit_cb (p
, 1, x2
, -4 * 4);
2114 /* Call collector (struct tracepoint *, unsigned char *):
2119 ; Saved registers start after the collecting_t object.
2122 ; We use an intra-procedure-call scratch register.
2123 MOV ip0, #(collector)
2126 ; And call back to C!
2131 p
+= emit_mov_addr (p
, x0
, tpoint
);
2132 p
+= emit_add (p
, x1
, sp
, immediate_operand (16));
2134 p
+= emit_mov_addr (p
, ip0
, collector
);
2135 p
+= emit_blr (p
, ip0
);
2137 /* Release the lock.
2142 ; This instruction is a normal store with memory ordering
2143 ; constraints. Thanks to this we do not have to put a data
2144 ; barrier instruction to make sure all data read and writes are done
2145 ; before this instruction is executed. Furthermore, this instrucion
2146 ; will trigger an event, letting other threads know they can grab
2151 p
+= emit_mov_addr (p
, x0
, lockaddr
);
2152 p
+= emit_stlr (p
, xzr
, x0
);
2154 /* Free collecting_t object:
2159 p
+= emit_add (p
, sp
, sp
, immediate_operand (16));
2161 /* Restore CPSR (NZCV), FPSR and FPCR. And free all special purpose
2162 registers from the stack.
2164 LDR x2, [sp, #(2 * 16)]
2165 LDR x1, [sp, #(1 * 16)]
2166 LDR x0, [sp, #(0 * 16)]
2172 ADD sp, sp #(5 * 16)
2175 p
+= emit_ldr (p
, x2
, sp
, offset_memory_operand (2 * 16));
2176 p
+= emit_ldr (p
, x1
, sp
, offset_memory_operand (1 * 16));
2177 p
+= emit_ldr (p
, x0
, sp
, offset_memory_operand (0 * 16));
2178 p
+= emit_msr (p
, NZCV
, x2
);
2179 p
+= emit_msr (p
, FPSR
, x1
);
2180 p
+= emit_msr (p
, FPCR
, x0
);
2182 p
+= emit_add (p
, sp
, sp
, immediate_operand (5 * 16));
2184 /* Pop general purpose registers:
2188 LDR x30, [sp, #(30 * 16)]
2190 ADD sp, sp, #(31 * 16)
2193 for (i
= 0; i
<= 30; i
+= 1)
2194 p
+= emit_ldr (p
, aarch64_register (i
, 1), sp
,
2195 offset_memory_operand (i
* 16));
2196 p
+= emit_add (p
, sp
, sp
, immediate_operand (31 * 16));
2198 /* Pop SIMD&FP registers:
2202 LDP q30, q31, [sp, #(30 * 16)]
2204 ADD sp, sp, #(32 * 16)
2207 for (i
= 0; i
<= 30; i
+= 2)
2208 p
+= emit_ldp_q_offset (p
, i
, i
+ 1, sp
, i
* 16);
2209 p
+= emit_add (p
, sp
, sp
, immediate_operand (32 * 16));
2211 /* Write the code into the inferior memory. */
2212 append_insns (&buildaddr
, p
- buf
, buf
);
2214 /* Now emit the relocated instruction. */
2215 *adjusted_insn_addr
= buildaddr
;
2216 target_read_uint32 (tpaddr
, &insn
);
2218 insn_data
.base
.insn_addr
= tpaddr
;
2219 insn_data
.new_addr
= buildaddr
;
2220 insn_data
.insn_ptr
= buf
;
2222 aarch64_relocate_instruction (insn
, &visitor
,
2223 (struct aarch64_insn_data
*) &insn_data
);
2225 /* We may not have been able to relocate the instruction. */
2226 if (insn_data
.insn_ptr
== buf
)
2229 "E.Could not relocate instruction from %s to %s.",
2230 core_addr_to_string_nz (tpaddr
),
2231 core_addr_to_string_nz (buildaddr
));
2235 append_insns (&buildaddr
, insn_data
.insn_ptr
- buf
, buf
);
2236 *adjusted_insn_addr_end
= buildaddr
;
2238 /* Go back to the start of the buffer. */
2241 /* Emit a branch back from the jump pad. */
2242 offset
= (tpaddr
+ orig_size
- buildaddr
);
2243 if (!can_encode_int32 (offset
, 28))
2246 "E.Jump back from jump pad too far from tracepoint "
2247 "(offset 0x%" PRIx64
" cannot be encoded in 28 bits).",
2252 p
+= emit_b (p
, 0, offset
);
2253 append_insns (&buildaddr
, p
- buf
, buf
);
2255 /* Give the caller a branch instruction into the jump pad. */
2256 offset
= (*jump_entry
- tpaddr
);
2257 if (!can_encode_int32 (offset
, 28))
2260 "E.Jump pad too far from tracepoint "
2261 "(offset 0x%" PRIx64
" cannot be encoded in 28 bits).",
2266 emit_b ((uint32_t *) jjump_pad_insn
, 0, offset
);
2267 *jjump_pad_insn_size
= 4;
2269 /* Return the end address of our pad. */
2270 *jump_entry
= buildaddr
;
2275 /* Helper function writing LEN instructions from START into
2276 current_insn_ptr. */
2279 emit_ops_insns (const uint32_t *start
, int len
)
2281 CORE_ADDR buildaddr
= current_insn_ptr
;
2284 debug_printf ("Adding %d instrucions at %s\n",
2285 len
, paddress (buildaddr
));
2287 append_insns (&buildaddr
, len
, start
);
2288 current_insn_ptr
= buildaddr
;
2291 /* Pop a register from the stack. */
2294 emit_pop (uint32_t *buf
, struct aarch64_register rt
)
2296 return emit_ldr (buf
, rt
, sp
, postindex_memory_operand (1 * 16));
2299 /* Push a register on the stack. */
2302 emit_push (uint32_t *buf
, struct aarch64_register rt
)
2304 return emit_str (buf
, rt
, sp
, preindex_memory_operand (-1 * 16));
2307 /* Implementation of emit_ops method "emit_prologue". */
2310 aarch64_emit_prologue (void)
2315 /* This function emit a prologue for the following function prototype:
2317 enum eval_result_type f (unsigned char *regs,
2320 The first argument is a buffer of raw registers. The second
2321 argument is the result of
2322 evaluating the expression, which will be set to whatever is on top of
2323 the stack at the end.
2325 The stack set up by the prologue is as such:
2327 High *------------------------------------------------------*
2330 | x1 (ULONGEST *value) |
2331 | x0 (unsigned char *regs) |
2332 Low *------------------------------------------------------*
2334 As we are implementing a stack machine, each opcode can expand the
2335 stack so we never know how far we are from the data saved by this
2336 prologue. In order to be able refer to value and regs later, we save
2337 the current stack pointer in the frame pointer. This way, it is not
2338 clobbered when calling C functions.
2340 Finally, throughtout every operation, we are using register x0 as the
2341 top of the stack, and x1 as a scratch register. */
2343 p
+= emit_stp (p
, x0
, x1
, sp
, preindex_memory_operand (-2 * 16));
2344 p
+= emit_str (p
, lr
, sp
, offset_memory_operand (3 * 8));
2345 p
+= emit_str (p
, fp
, sp
, offset_memory_operand (2 * 8));
2347 p
+= emit_add (p
, fp
, sp
, immediate_operand (2 * 8));
2350 emit_ops_insns (buf
, p
- buf
);
2353 /* Implementation of emit_ops method "emit_epilogue". */
2356 aarch64_emit_epilogue (void)
2361 /* Store the result of the expression (x0) in *value. */
2362 p
+= emit_sub (p
, x1
, fp
, immediate_operand (1 * 8));
2363 p
+= emit_ldr (p
, x1
, x1
, offset_memory_operand (0));
2364 p
+= emit_str (p
, x0
, x1
, offset_memory_operand (0));
2366 /* Restore the previous state. */
2367 p
+= emit_add (p
, sp
, fp
, immediate_operand (2 * 8));
2368 p
+= emit_ldp (p
, fp
, lr
, fp
, offset_memory_operand (0));
2370 /* Return expr_eval_no_error. */
2371 p
+= emit_mov (p
, x0
, immediate_operand (expr_eval_no_error
));
2372 p
+= emit_ret (p
, lr
);
2374 emit_ops_insns (buf
, p
- buf
);
2377 /* Implementation of emit_ops method "emit_add". */
2380 aarch64_emit_add (void)
2385 p
+= emit_pop (p
, x1
);
2386 p
+= emit_add (p
, x0
, x1
, register_operand (x0
));
2388 emit_ops_insns (buf
, p
- buf
);
2391 /* Implementation of emit_ops method "emit_sub". */
2394 aarch64_emit_sub (void)
2399 p
+= emit_pop (p
, x1
);
2400 p
+= emit_sub (p
, x0
, x1
, register_operand (x0
));
2402 emit_ops_insns (buf
, p
- buf
);
2405 /* Implementation of emit_ops method "emit_mul". */
2408 aarch64_emit_mul (void)
2413 p
+= emit_pop (p
, x1
);
2414 p
+= emit_mul (p
, x0
, x1
, x0
);
2416 emit_ops_insns (buf
, p
- buf
);
2419 /* Implementation of emit_ops method "emit_lsh". */
2422 aarch64_emit_lsh (void)
2427 p
+= emit_pop (p
, x1
);
2428 p
+= emit_lslv (p
, x0
, x1
, x0
);
2430 emit_ops_insns (buf
, p
- buf
);
2433 /* Implementation of emit_ops method "emit_rsh_signed". */
2436 aarch64_emit_rsh_signed (void)
2441 p
+= emit_pop (p
, x1
);
2442 p
+= emit_asrv (p
, x0
, x1
, x0
);
2444 emit_ops_insns (buf
, p
- buf
);
2447 /* Implementation of emit_ops method "emit_rsh_unsigned". */
2450 aarch64_emit_rsh_unsigned (void)
2455 p
+= emit_pop (p
, x1
);
2456 p
+= emit_lsrv (p
, x0
, x1
, x0
);
2458 emit_ops_insns (buf
, p
- buf
);
2461 /* Implementation of emit_ops method "emit_ext". */
2464 aarch64_emit_ext (int arg
)
2469 p
+= emit_sbfx (p
, x0
, x0
, 0, arg
);
2471 emit_ops_insns (buf
, p
- buf
);
2474 /* Implementation of emit_ops method "emit_log_not". */
2477 aarch64_emit_log_not (void)
2482 /* If the top of the stack is 0, replace it with 1. Else replace it with
2485 p
+= emit_cmp (p
, x0
, immediate_operand (0));
2486 p
+= emit_cset (p
, x0
, EQ
);
2488 emit_ops_insns (buf
, p
- buf
);
2491 /* Implementation of emit_ops method "emit_bit_and". */
2494 aarch64_emit_bit_and (void)
2499 p
+= emit_pop (p
, x1
);
2500 p
+= emit_and (p
, x0
, x0
, x1
);
2502 emit_ops_insns (buf
, p
- buf
);
2505 /* Implementation of emit_ops method "emit_bit_or". */
2508 aarch64_emit_bit_or (void)
2513 p
+= emit_pop (p
, x1
);
2514 p
+= emit_orr (p
, x0
, x0
, x1
);
2516 emit_ops_insns (buf
, p
- buf
);
2519 /* Implementation of emit_ops method "emit_bit_xor". */
2522 aarch64_emit_bit_xor (void)
2527 p
+= emit_pop (p
, x1
);
2528 p
+= emit_eor (p
, x0
, x0
, x1
);
2530 emit_ops_insns (buf
, p
- buf
);
2533 /* Implementation of emit_ops method "emit_bit_not". */
2536 aarch64_emit_bit_not (void)
2541 p
+= emit_mvn (p
, x0
, x0
);
2543 emit_ops_insns (buf
, p
- buf
);
2546 /* Implementation of emit_ops method "emit_equal". */
2549 aarch64_emit_equal (void)
2554 p
+= emit_pop (p
, x1
);
2555 p
+= emit_cmp (p
, x0
, register_operand (x1
));
2556 p
+= emit_cset (p
, x0
, EQ
);
2558 emit_ops_insns (buf
, p
- buf
);
2561 /* Implementation of emit_ops method "emit_less_signed". */
2564 aarch64_emit_less_signed (void)
2569 p
+= emit_pop (p
, x1
);
2570 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2571 p
+= emit_cset (p
, x0
, LT
);
2573 emit_ops_insns (buf
, p
- buf
);
2576 /* Implementation of emit_ops method "emit_less_unsigned". */
2579 aarch64_emit_less_unsigned (void)
2584 p
+= emit_pop (p
, x1
);
2585 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2586 p
+= emit_cset (p
, x0
, LO
);
2588 emit_ops_insns (buf
, p
- buf
);
2591 /* Implementation of emit_ops method "emit_ref". */
2594 aarch64_emit_ref (int size
)
2602 p
+= emit_ldrb (p
, w0
, x0
, offset_memory_operand (0));
2605 p
+= emit_ldrh (p
, w0
, x0
, offset_memory_operand (0));
2608 p
+= emit_ldr (p
, w0
, x0
, offset_memory_operand (0));
2611 p
+= emit_ldr (p
, x0
, x0
, offset_memory_operand (0));
2614 /* Unknown size, bail on compilation. */
2619 emit_ops_insns (buf
, p
- buf
);
2622 /* Implementation of emit_ops method "emit_if_goto". */
2625 aarch64_emit_if_goto (int *offset_p
, int *size_p
)
2630 /* The Z flag is set or cleared here. */
2631 p
+= emit_cmp (p
, x0
, immediate_operand (0));
2632 /* This instruction must not change the Z flag. */
2633 p
+= emit_pop (p
, x0
);
2634 /* Branch over the next instruction if x0 == 0. */
2635 p
+= emit_bcond (p
, EQ
, 8);
2637 /* The NOP instruction will be patched with an unconditional branch. */
2639 *offset_p
= (p
- buf
) * 4;
2644 emit_ops_insns (buf
, p
- buf
);
/* Implementation of emit_ops method "emit_goto".
   Emit a placeholder NOP to be patched into an unconditional branch;
   report its buffer offset and size through *OFFSET_P and *SIZE_P.  */

static void
aarch64_emit_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = 0;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
2665 /* Implementation of emit_ops method "write_goto_address". */
2668 aarch64_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
2672 emit_b (&insn
, 0, to
- from
);
2673 append_insns (&from
, 1, &insn
);
2676 /* Implementation of emit_ops method "emit_const". */
2679 aarch64_emit_const (LONGEST num
)
2684 p
+= emit_mov_addr (p
, x0
, num
);
2686 emit_ops_insns (buf
, p
- buf
);
2689 /* Implementation of emit_ops method "emit_call". */
2692 aarch64_emit_call (CORE_ADDR fn
)
2697 p
+= emit_mov_addr (p
, ip0
, fn
);
2698 p
+= emit_blr (p
, ip0
);
2700 emit_ops_insns (buf
, p
- buf
);
2703 /* Implementation of emit_ops method "emit_reg". */
2706 aarch64_emit_reg (int reg
)
2711 /* Set x0 to unsigned char *regs. */
2712 p
+= emit_sub (p
, x0
, fp
, immediate_operand (2 * 8));
2713 p
+= emit_ldr (p
, x0
, x0
, offset_memory_operand (0));
2714 p
+= emit_mov (p
, x1
, immediate_operand (reg
));
2716 emit_ops_insns (buf
, p
- buf
);
2718 aarch64_emit_call (get_raw_reg_func_addr ());
2721 /* Implementation of emit_ops method "emit_pop". */
2724 aarch64_emit_pop (void)
2729 p
+= emit_pop (p
, x0
);
2731 emit_ops_insns (buf
, p
- buf
);
2734 /* Implementation of emit_ops method "emit_stack_flush". */
2737 aarch64_emit_stack_flush (void)
2742 p
+= emit_push (p
, x0
);
2744 emit_ops_insns (buf
, p
- buf
);
2747 /* Implementation of emit_ops method "emit_zero_ext". */
2750 aarch64_emit_zero_ext (int arg
)
2755 p
+= emit_ubfx (p
, x0
, x0
, 0, arg
);
2757 emit_ops_insns (buf
, p
- buf
);
2760 /* Implementation of emit_ops method "emit_swap". */
2763 aarch64_emit_swap (void)
2768 p
+= emit_ldr (p
, x1
, sp
, offset_memory_operand (0 * 16));
2769 p
+= emit_str (p
, x0
, sp
, offset_memory_operand (0 * 16));
2770 p
+= emit_mov (p
, x0
, register_operand (x1
));
2772 emit_ops_insns (buf
, p
- buf
);
2775 /* Implementation of emit_ops method "emit_stack_adjust". */
2778 aarch64_emit_stack_adjust (int n
)
2780 /* This is not needed with our design. */
2784 p
+= emit_add (p
, sp
, sp
, immediate_operand (n
* 16));
2786 emit_ops_insns (buf
, p
- buf
);
2789 /* Implementation of emit_ops method "emit_int_call_1". */
2792 aarch64_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
2797 p
+= emit_mov (p
, x0
, immediate_operand (arg1
));
2799 emit_ops_insns (buf
, p
- buf
);
2801 aarch64_emit_call (fn
);
2804 /* Implementation of emit_ops method "emit_void_call_2". */
2807 aarch64_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
2812 /* Push x0 on the stack. */
2813 aarch64_emit_stack_flush ();
2815 /* Setup arguments for the function call:
2818 x1: top of the stack
2823 p
+= emit_mov (p
, x1
, register_operand (x0
));
2824 p
+= emit_mov (p
, x0
, immediate_operand (arg1
));
2826 emit_ops_insns (buf
, p
- buf
);
2828 aarch64_emit_call (fn
);
2831 aarch64_emit_pop ();
2834 /* Implementation of emit_ops method "emit_eq_goto". */
2837 aarch64_emit_eq_goto (int *offset_p
, int *size_p
)
2842 p
+= emit_pop (p
, x1
);
2843 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2844 /* Branch over the next instruction if x0 != x1. */
2845 p
+= emit_bcond (p
, NE
, 8);
2846 /* The NOP instruction will be patched with an unconditional branch. */
2848 *offset_p
= (p
- buf
) * 4;
2853 emit_ops_insns (buf
, p
- buf
);
2856 /* Implementation of emit_ops method "emit_ne_goto". */
2859 aarch64_emit_ne_goto (int *offset_p
, int *size_p
)
2864 p
+= emit_pop (p
, x1
);
2865 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2866 /* Branch over the next instruction if x0 == x1. */
2867 p
+= emit_bcond (p
, EQ
, 8);
2868 /* The NOP instruction will be patched with an unconditional branch. */
2870 *offset_p
= (p
- buf
) * 4;
2875 emit_ops_insns (buf
, p
- buf
);
2878 /* Implementation of emit_ops method "emit_lt_goto". */
2881 aarch64_emit_lt_goto (int *offset_p
, int *size_p
)
2886 p
+= emit_pop (p
, x1
);
2887 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2888 /* Branch over the next instruction if x0 >= x1. */
2889 p
+= emit_bcond (p
, GE
, 8);
2890 /* The NOP instruction will be patched with an unconditional branch. */
2892 *offset_p
= (p
- buf
) * 4;
2897 emit_ops_insns (buf
, p
- buf
);
2900 /* Implementation of emit_ops method "emit_le_goto". */
2903 aarch64_emit_le_goto (int *offset_p
, int *size_p
)
2908 p
+= emit_pop (p
, x1
);
2909 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2910 /* Branch over the next instruction if x0 > x1. */
2911 p
+= emit_bcond (p
, GT
, 8);
2912 /* The NOP instruction will be patched with an unconditional branch. */
2914 *offset_p
= (p
- buf
) * 4;
2919 emit_ops_insns (buf
, p
- buf
);
2922 /* Implementation of emit_ops method "emit_gt_goto". */
2925 aarch64_emit_gt_goto (int *offset_p
, int *size_p
)
2930 p
+= emit_pop (p
, x1
);
2931 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2932 /* Branch over the next instruction if x0 <= x1. */
2933 p
+= emit_bcond (p
, LE
, 8);
2934 /* The NOP instruction will be patched with an unconditional branch. */
2936 *offset_p
= (p
- buf
) * 4;
2941 emit_ops_insns (buf
, p
- buf
);
2944 /* Implementation of emit_ops method "emit_ge_got". */
2947 aarch64_emit_ge_got (int *offset_p
, int *size_p
)
2952 p
+= emit_pop (p
, x1
);
2953 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2954 /* Branch over the next instruction if x0 <= x1. */
2955 p
+= emit_bcond (p
, LT
, 8);
2956 /* The NOP instruction will be patched with an unconditional branch. */
2958 *offset_p
= (p
- buf
) * 4;
2963 emit_ops_insns (buf
, p
- buf
);
2966 static struct emit_ops aarch64_emit_ops_impl
=
2968 aarch64_emit_prologue
,
2969 aarch64_emit_epilogue
,
2974 aarch64_emit_rsh_signed
,
2975 aarch64_emit_rsh_unsigned
,
2977 aarch64_emit_log_not
,
2978 aarch64_emit_bit_and
,
2979 aarch64_emit_bit_or
,
2980 aarch64_emit_bit_xor
,
2981 aarch64_emit_bit_not
,
2983 aarch64_emit_less_signed
,
2984 aarch64_emit_less_unsigned
,
2986 aarch64_emit_if_goto
,
2988 aarch64_write_goto_address
,
2993 aarch64_emit_stack_flush
,
2994 aarch64_emit_zero_ext
,
2996 aarch64_emit_stack_adjust
,
2997 aarch64_emit_int_call_1
,
2998 aarch64_emit_void_call_2
,
2999 aarch64_emit_eq_goto
,
3000 aarch64_emit_ne_goto
,
3001 aarch64_emit_lt_goto
,
3002 aarch64_emit_le_goto
,
3003 aarch64_emit_gt_goto
,
3004 aarch64_emit_ge_got
,
3007 /* Implementation of linux_target_ops method "emit_ops". */
3009 static struct emit_ops
*
3010 aarch64_emit_ops (void)
3012 return &aarch64_emit_ops_impl
;
/* Implementation of linux_target_ops method
   "get_min_fast_tracepoint_insn_len".  */

static int
aarch64_get_min_fast_tracepoint_insn_len (void)
{
  /* Every AArch64 instruction is 4 bytes, so a fast tracepoint only
     needs to overwrite a single instruction.  */
  return 4;
}
/* Implementation of linux_target_ops method "supports_range_stepping".  */

static int
aarch64_supports_range_stepping (void)
{
  return 1;
}
3032 /* Implementation of linux_target_ops method "sw_breakpoint_from_kind". */
3034 static const gdb_byte
*
3035 aarch64_sw_breakpoint_from_kind (int kind
, int *size
)
3037 if (is_64bit_tdesc ())
3039 *size
= aarch64_breakpoint_len
;
3040 return aarch64_breakpoint
;
3043 return arm_sw_breakpoint_from_kind (kind
, size
);
3046 /* Implementation of linux_target_ops method "breakpoint_kind_from_pc". */
3049 aarch64_breakpoint_kind_from_pc (CORE_ADDR
*pcptr
)
3051 if (is_64bit_tdesc ())
3052 return aarch64_breakpoint_len
;
3054 return arm_breakpoint_kind_from_pc (pcptr
);
3057 /* Implementation of the linux_target_ops method
3058 "breakpoint_kind_from_current_state". */
3061 aarch64_breakpoint_kind_from_current_state (CORE_ADDR
*pcptr
)
3063 if (is_64bit_tdesc ())
3064 return aarch64_breakpoint_len
;
3066 return arm_breakpoint_kind_from_current_state (pcptr
);
/* Support for hardware single step.  */

static int
aarch64_supports_hardware_single_step (void)
{
  return 1;
}
3077 struct linux_target_ops the_low_target
=
3081 NULL
, /* cannot_fetch_register */
3082 NULL
, /* cannot_store_register */
3083 NULL
, /* fetch_register */
3086 aarch64_breakpoint_kind_from_pc
,
3087 aarch64_sw_breakpoint_from_kind
,
3088 NULL
, /* get_next_pcs */
3089 0, /* decr_pc_after_break */
3090 aarch64_breakpoint_at
,
3091 aarch64_supports_z_point_type
,
3092 aarch64_insert_point
,
3093 aarch64_remove_point
,
3094 aarch64_stopped_by_watchpoint
,
3095 aarch64_stopped_data_address
,
3096 NULL
, /* collect_ptrace_register */
3097 NULL
, /* supply_ptrace_register */
3098 aarch64_linux_siginfo_fixup
,
3099 aarch64_linux_new_process
,
3100 aarch64_linux_delete_process
,
3101 aarch64_linux_new_thread
,
3102 aarch64_linux_delete_thread
,
3103 aarch64_linux_new_fork
,
3104 aarch64_linux_prepare_to_resume
,
3105 NULL
, /* process_qsupported */
3106 aarch64_supports_tracepoints
,
3107 aarch64_get_thread_area
,
3108 aarch64_install_fast_tracepoint_jump_pad
,
3110 aarch64_get_min_fast_tracepoint_insn_len
,
3111 aarch64_supports_range_stepping
,
3112 aarch64_breakpoint_kind_from_current_state
,
3113 aarch64_supports_hardware_single_step
,
3114 aarch64_get_syscall_trapinfo
,
3118 initialize_low_arch (void)
3120 initialize_low_arch_aarch32 ();
3122 initialize_regsets_info (&aarch64_regsets_info
);
3123 initialize_regsets_info (&aarch64_sve_regsets_info
);
3126 initialize_low_tdesc ();