/* GNU/Linux/AArch64 specific low level interface, for the remote server
   for GDB.

   Copyright (C) 2009-2020 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "linux-low.h"
#include "nat/aarch64-linux.h"
#include "nat/aarch64-linux-hw-point.h"
#include "arch/aarch64-insn.h"
#include "linux-aarch32-low.h"
#include "elf/common.h"
#include "tracepoint.h"

#include "nat/gdb_ptrace.h"
#include <asm/ptrace.h>

#include "gdb_proc_service.h"
#include "arch/aarch64.h"
#include "linux-aarch32-tdesc.h"
#include "linux-aarch64-tdesc.h"
#include "nat/aarch64-sve-linux-ptrace.h"
/* Linux target op definitions for the AArch64 architecture.  */

class aarch64_target : public linux_process_target
{
public:

  const regs_info *get_regs_info () override;

  int breakpoint_kind_from_pc (CORE_ADDR *pcptr) override;

  int breakpoint_kind_from_current_state (CORE_ADDR *pcptr) override;

  const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;

protected:

  void low_arch_setup () override;

  bool low_cannot_fetch_register (int regno) override;

  bool low_cannot_store_register (int regno) override;

  bool low_supports_breakpoints () override;

  CORE_ADDR low_get_pc (regcache *regcache) override;

  void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;
};

/* The singleton target ops object.  */

static aarch64_target the_aarch64_target;
bool
aarch64_target::low_cannot_fetch_register (int regno)
{
  gdb_assert_not_reached ("linux target op low_cannot_fetch_register "
			  "is not implemented by the target");
}

bool
aarch64_target::low_cannot_store_register (int regno)
{
  gdb_assert_not_reached ("linux target op low_cannot_store_register "
			  "is not implemented by the target");
}
/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  /* Hardware breakpoint/watchpoint data.
     The reason for them to be per-process rather than per-thread is
     due to the lack of information in the gdbserver environment;
     gdbserver is not told whether a requested hardware
     breakpoint/watchpoint is thread specific or not, so it has to set
     each hw bp/wp for every thread in the current process.  The
     higher level bp/wp management in gdb will resume a thread if a hw
     bp/wp trap is not expected for it.  Since the hw bp/wp setting is
     the same for each thread, it is reasonable for the data to live
     here.  */
  struct aarch64_debug_reg_state debug_reg_state;
};
/* Return true if the size of register 0 is 8 bytes.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}
/* Return true if the regcache contains the number of SVE registers.  */

static bool
is_sve_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return tdesc_contains_feature (regcache->tdesc, "org.gnu.gdb.aarch64.sve");
}
static void
aarch64_fill_gregset (struct regcache *regcache, void *buf)
{
  struct user_pt_regs *regset = (struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    collect_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  collect_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  collect_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  collect_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}
static void
aarch64_store_gregset (struct regcache *regcache, const void *buf)
{
  const struct user_pt_regs *regset = (const struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    supply_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  supply_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  supply_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  supply_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}
static void
aarch64_fill_fpregset (struct regcache *regcache, void *buf)
{
  struct user_fpsimd_state *regset = (struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    collect_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  collect_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  collect_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}
static void
aarch64_store_fpregset (struct regcache *regcache, const void *buf)
{
  const struct user_fpsimd_state *regset
    = (const struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    supply_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  supply_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  supply_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}
/* Store the pauth registers to regcache.  */

static void
aarch64_store_pauthregset (struct regcache *regcache, const void *buf)
{
  uint64_t *pauth_regset = (uint64_t *) buf;
  int pauth_base = find_regno (regcache->tdesc, "pauth_dmask");

  if (pauth_base == 0)
    return;

  supply_register (regcache, AARCH64_PAUTH_DMASK_REGNUM (pauth_base),
		   &pauth_regset[0]);
  supply_register (regcache, AARCH64_PAUTH_CMASK_REGNUM (pauth_base),
		   &pauth_regset[1]);
}

/* Implementation of linux target ops method "low_supports_breakpoints".  */

bool
aarch64_target::low_supports_breakpoints ()
{
  return true;
}
/* Implementation of linux target ops method "low_get_pc".  */

CORE_ADDR
aarch64_target::low_get_pc (regcache *regcache)
{
  if (register_size (regcache->tdesc, 0) == 8)
    return linux_get_pc_64bit (regcache);
  else
    return linux_get_pc_32bit (regcache);
}
/* Implementation of linux target ops method "low_set_pc".  */

void
aarch64_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
{
  if (register_size (regcache->tdesc, 0) == 8)
    linux_set_pc_64bit (regcache, pc);
  else
    linux_set_pc_32bit (regcache, pc);
}
#define aarch64_breakpoint_len 4

/* AArch64 BRK software debug mode instruction.
   This instruction needs to match gdb/aarch64-tdep.c
   (aarch64_default_breakpoint).  */
static const gdb_byte aarch64_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
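
/* For reference (not part of the original comment): these bytes are the
   little-endian encoding of BRK #0, whose A64 encoding is
   0xd4200000 | imm16 << 5, so 0xd4200000 with imm16 = 0.  A quick sanity
   check, assuming a little-endian host:

     uint32_t insn;
     memcpy (&insn, aarch64_breakpoint, aarch64_breakpoint_len);
     gdb_assert (insn == 0xd4200000);
*/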
/* Implementation of linux_target_ops method "breakpoint_at".  */

static int
aarch64_breakpoint_at (CORE_ADDR where)
{
  if (is_64bit_tdesc ())
    {
      gdb_byte insn[aarch64_breakpoint_len];

      the_target->read_memory (where, (unsigned char *) &insn,
			       aarch64_breakpoint_len);
      if (memcmp (insn, aarch64_breakpoint, aarch64_breakpoint_len) == 0)
	return 1;

      return 0;
    }
  else
    return arm_breakpoint_at (where);
}
static void
aarch64_init_debug_reg_state (struct aarch64_debug_reg_state *state)
{
  int i;

  for (i = 0; i < AARCH64_HBP_MAX_NUM; ++i)
    {
      state->dr_addr_bp[i] = 0;
      state->dr_ctrl_bp[i] = 0;
      state->dr_ref_count_bp[i] = 0;
    }

  for (i = 0; i < AARCH64_HWP_MAX_NUM; ++i)
    {
      state->dr_addr_wp[i] = 0;
      state->dr_ctrl_wp[i] = 0;
      state->dr_ref_count_wp[i] = 0;
    }
}
/* Return the pointer to the debug register state structure in the
   current process' arch-specific data area.  */

struct aarch64_debug_reg_state *
aarch64_get_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}
/* Implementation of linux_target_ops method "supports_z_point_type".  */

static int
aarch64_supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_READ_WP:
    case Z_PACKET_ACCESS_WP:
      return 1;
    default:
      return 0;
    }
}
/* Implementation of linux_target_ops method "insert_point".

   It actually only records the info of the to-be-inserted bp/wp;
   the actual insertion will happen when threads are resumed.  */

static int
aarch64_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
		      int len, struct raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "insert_point on entry (addr=0x%08lx, len=%d)\n",
	     (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  if (targ_type != hw_execute)
    {
      if (aarch64_linux_region_ok_for_watchpoint (addr, len))
	ret = aarch64_handle_watchpoint (targ_type, addr, len,
					 1 /* is_insert */, state);
      else
	ret = -1;
    }
  else
    {
      if (len == 3)
	{
	  /* A LEN of 3 means the breakpoint is set on a 32-bit Thumb
	     instruction.  Set it to 2 to correctly encode the length bit
	     mask in the hardware/watchpoint control register.  */
	  len = 2;
	}
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
				       1 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "insert_point", addr, len,
				  targ_type);

  return ret;
}
/* Implementation of linux_target_ops method "remove_point".

   It actually only records the info of the to-be-removed bp/wp,
   the actual removal will be done when threads are resumed.  */

static int
aarch64_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
		      int len, struct raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "remove_point on entry (addr=0x%08lx, len=%d)\n",
	     (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  /* Set up state pointers.  */
  if (targ_type != hw_execute)
    ret =
      aarch64_handle_watchpoint (targ_type, addr, len, 0 /* is_insert */,
				 state);
  else
    {
      if (len == 3)
	{
	  /* A LEN of 3 means the breakpoint is set on a 32-bit Thumb
	     instruction.  Set it to 2 to correctly encode the length bit
	     mask in the hardware/watchpoint control register.  */
	  len = 2;
	}
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
				       0 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "remove_point", addr, len,
				  targ_type);

  return ret;
}
/* Implementation of linux_target_ops method "stopped_data_address".  */

static CORE_ADDR
aarch64_stopped_data_address (void)
{
  siginfo_t siginfo;
  struct aarch64_debug_reg_state *state;
  int pid;
  int i;

  pid = lwpid_of (current_thread);

  /* Get the siginfo.  */
  if (ptrace (PTRACE_GETSIGINFO, pid, NULL, &siginfo) != 0)
    return (CORE_ADDR) 0;

  /* Need to be a hardware breakpoint/watchpoint trap.  */
  if (siginfo.si_signo != SIGTRAP
      || (siginfo.si_code & 0xffff) != 0x0004 /* TRAP_HWBKPT */)
    return (CORE_ADDR) 0;

  /* Check if the address matches any watched address.  */
  state = aarch64_get_debug_reg_state (pid_of (current_thread));
  for (i = aarch64_num_wp_regs - 1; i >= 0; --i)
    {
      const unsigned int offset
	= aarch64_watchpoint_offset (state->dr_ctrl_wp[i]);
      const unsigned int len = aarch64_watchpoint_length (state->dr_ctrl_wp[i]);
      const CORE_ADDR addr_trap = (CORE_ADDR) siginfo.si_addr;
      const CORE_ADDR addr_watch = state->dr_addr_wp[i] + offset;
      const CORE_ADDR addr_watch_aligned = align_down (state->dr_addr_wp[i], 8);
      const CORE_ADDR addr_orig = state->dr_addr_orig_wp[i];

      if (state->dr_ref_count_wp[i]
	  && DR_CONTROL_ENABLED (state->dr_ctrl_wp[i])
	  && addr_trap >= addr_watch_aligned
	  && addr_trap < addr_watch + len)
	{
	  /* ADDR_TRAP reports the first address of the memory range
	     accessed by the CPU, regardless of what was the memory
	     range watched.  Thus, a large CPU access that straddles
	     the ADDR_WATCH..ADDR_WATCH+LEN range may result in an
	     ADDR_TRAP that is lower than the
	     ADDR_WATCH..ADDR_WATCH+LEN range.  E.g.:

	     addr: |   4   |   5   |   6   |   7   |   8   |
		   |---- range watched ----|
		   |----------- range accessed ------------|

	     In this case, ADDR_TRAP will be 4.

	     To match a watchpoint known to GDB core, we must never
	     report *ADDR_P outside of any ADDR_WATCH..ADDR_WATCH+LEN
	     range.  ADDR_WATCH <= ADDR_TRAP < ADDR_ORIG is a false
	     positive on kernels older than 4.10.  See PR.  */
	  return addr_orig;
	}
    }

  return (CORE_ADDR) 0;
}

/* Implementation of linux_target_ops method "stopped_by_watchpoint".  */

static int
aarch64_stopped_by_watchpoint (void)
{
  if (aarch64_stopped_data_address () != 0)
    return 1;
  else
    return 0;
}
/* Fetch the thread-local storage pointer for libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
  return aarch64_ps_get_thread_area (ph, lwpid, idx, base,
				     is_64bit_tdesc ());
}
/* Implementation of linux_target_ops method "siginfo_fixup".  */

static int
aarch64_linux_siginfo_fixup (siginfo_t *native, gdb_byte *inf, int direction)
{
  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    {
      if (direction == 0)
	aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo *) inf,
					     native);
      else
	aarch64_siginfo_from_compat_siginfo (native,
					     (struct compat_siginfo *) inf);

      return 1;
    }

  return 0;
}
/* Implementation of linux_target_ops method "new_process".  */

static struct arch_process_info *
aarch64_linux_new_process (void)
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  aarch64_init_debug_reg_state (&info->debug_reg_state);

  return info;
}
/* Implementation of linux_target_ops method "delete_process".  */

static void
aarch64_linux_delete_process (struct arch_process_info *info)
{
  xfree (info);
}
/* Implementation of linux_target_ops method "linux_new_fork".  */

static void
aarch64_linux_new_fork (struct process_info *parent,
			struct process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
	      && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
	      && child->priv->arch_private != NULL);

  /* Linux kernel before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     will inherit hardware debug registers from parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug register mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug register mirrors will become zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}
/* Matches HWCAP_PACA in kernel header arch/arm64/include/uapi/asm/hwcap.h.  */
#define AARCH64_HWCAP_PACA (1 << 30)
/* Implementation of linux target ops method "low_arch_setup".  */

void
aarch64_target::low_arch_setup ()
{
  unsigned int machine;
  int is_elf64;
  int tid;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (is_elf64)
    {
      uint64_t vq = aarch64_sve_get_vq (tid);
      unsigned long hwcap = linux_get_hwcap (8);
      bool pauth_p = hwcap & AARCH64_HWCAP_PACA;

      current_process ()->tdesc = aarch64_linux_read_description (vq, pauth_p);
    }
  else
    current_process ()->tdesc = aarch32_linux_read_description ();

  aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread));
}
/* Wrapper for aarch64_sve_regs_copy_to_reg_buf.  */

static void
aarch64_sve_regs_copy_to_regcache (struct regcache *regcache, const void *buf)
{
  return aarch64_sve_regs_copy_to_reg_buf (regcache, buf);
}

/* Wrapper for aarch64_sve_regs_copy_from_reg_buf.  */

static void
aarch64_sve_regs_copy_from_regcache (struct regcache *regcache, void *buf)
{
  return aarch64_sve_regs_copy_from_reg_buf (regcache, buf);
}
static struct regset_info aarch64_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_FPREGSET,
    sizeof (struct user_fpsimd_state), FP_REGS,
    aarch64_fill_fpregset, aarch64_store_fpregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
    AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
    NULL, aarch64_store_pauthregset },
  NULL_REGSET
};

static struct regsets_info aarch64_regsets_info =
  {
    aarch64_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

static struct regs_info regs_info_aarch64 =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs */
    &aarch64_regsets_info,
  };
static struct regset_info aarch64_sve_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_SVE,
    SVE_PT_SIZE (AARCH64_MAX_SVE_VQ, SVE_PT_REGS_SVE), EXTENDED_REGS,
    aarch64_sve_regs_copy_from_regcache, aarch64_sve_regs_copy_to_regcache },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
    AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
    NULL, aarch64_store_pauthregset },
  NULL_REGSET
};

static struct regsets_info aarch64_sve_regsets_info =
  {
    aarch64_sve_regsets, /* regsets.  */
    0, /* num_regsets.  */
    NULL, /* disabled_regsets.  */
  };

static struct regs_info regs_info_aarch64_sve =
  {
    NULL, /* regset_bitmap.  */
    NULL, /* usrregs.  */
    &aarch64_sve_regsets_info,
  };
/* Implementation of linux target ops method "get_regs_info".  */

const regs_info *
aarch64_target::get_regs_info ()
{
  if (!is_64bit_tdesc ())
    return &regs_info_aarch32;

  if (is_sve_tdesc ())
    return &regs_info_aarch64_sve;

  return &regs_info_aarch64;
}
/* Implementation of linux_target_ops method "supports_tracepoints".  */

static int
aarch64_supports_tracepoints (void)
{
  if (current_thread == NULL)
    return 1;
  else
    {
      /* We don't support tracepoints on aarch32 now.  */
      return is_64bit_tdesc ();
    }
}
/* Implementation of linux_target_ops method "get_thread_area".  */

static int
aarch64_get_thread_area (int lwpid, CORE_ADDR *addrp)
{
  struct iovec iovec;
  uint64_t reg;

  iovec.iov_base = &reg;
  iovec.iov_len = sizeof (reg);

  if (ptrace (PTRACE_GETREGSET, lwpid, NT_ARM_TLS, &iovec) != 0)
    return -1;

  *addrp = reg;

  return 0;
}
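
/* Note (editorial cross-reference): the NT_ARM_TLS value read here is the
   same software thread ID register (TPIDR_EL0) that the fast tracepoint
   jump pad built in aarch64_install_fast_tracepoint_jump_pad below stores
   in its collecting_t object, which is how GDBserver can tell which
   thread currently holds the collector lock.  */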
/* Implementation of linux_target_ops method "get_syscall_trapinfo".  */

static void
aarch64_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      collect_register_by_name (regcache, "x8", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "r7", sysno);
}
/* List of condition codes that we need.  */

enum aarch64_condition_codes
{
  EQ = 0x0,
  NE = 0x1,
  LO = 0x3,
  GE = 0xa,
  LT = 0xb,
  GT = 0xc,
  LE = 0xd,
};

enum aarch64_operand_type
{
  OPERAND_IMMEDIATE,
  OPERAND_REGISTER,
};

/* Representation of an operand.  At this time, it only supports register
   and immediate types.  */

struct aarch64_operand
{
  /* Type of the operand.  */
  enum aarch64_operand_type type;

  /* Value of the operand according to the type.  */
  union
    {
      uint32_t imm;
      struct aarch64_register reg;
    };
};
/* List of registers that we are currently using, we can add more here as
   we need to use them.  */

/* General purpose scratch registers (64 bit).  */
static const struct aarch64_register x0 = { 0, 1 };
static const struct aarch64_register x1 = { 1, 1 };
static const struct aarch64_register x2 = { 2, 1 };
static const struct aarch64_register x3 = { 3, 1 };
static const struct aarch64_register x4 = { 4, 1 };

/* General purpose scratch registers (32 bit).  */
static const struct aarch64_register w0 = { 0, 0 };
static const struct aarch64_register w2 = { 2, 0 };

/* Intra-procedure scratch registers.  */
static const struct aarch64_register ip0 = { 16, 1 };

/* Special purpose registers.  */
static const struct aarch64_register fp = { 29, 1 };
static const struct aarch64_register lr = { 30, 1 };
static const struct aarch64_register sp = { 31, 1 };
static const struct aarch64_register xzr = { 31, 1 };
/* Dynamically allocate a new register.  If we know the register
   statically, we should make it a global as above instead of using this
   function.  */

static struct aarch64_register
aarch64_register (unsigned num, int is64)
{
  return (struct aarch64_register) { num, is64 };
}
/* Helper function to create a register operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, register_operand (x1));  */

static struct aarch64_operand
register_operand (struct aarch64_register reg)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_REGISTER;
  operand.reg = reg;

  return operand;
}

/* Helper function to create an immediate operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, immediate_operand (12));  */

static struct aarch64_operand
immediate_operand (uint32_t imm)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_IMMEDIATE;
  operand.imm = imm;

  return operand;
}
/* Helper function to create an offset memory operand.

   For example:
   p += emit_ldr (p, x0, sp, offset_memory_operand (16));  */

static struct aarch64_memory_operand
offset_memory_operand (int32_t offset)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_OFFSET, offset };
}

/* Helper function to create a pre-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, preindex_memory_operand (16));  */

static struct aarch64_memory_operand
preindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_PREINDEX, index };
}

/* Helper function to create a post-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, postindex_memory_operand (16));  */

static struct aarch64_memory_operand
postindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_POSTINDEX, index };
}
/* System control registers.  These special registers can be written and
   read with the MRS and MSR instructions.

   - NZCV: Condition flags.  GDB refers to this register under the CPSR
     name.
   - FPSR: Floating-point status register.
   - FPCR: Floating-point control register.
   - TPIDR_EL0: Software thread ID register.  */

enum aarch64_system_control_registers
{
  /*          op0           op1           crn          crm          op2  */
  NZCV =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
  FPSR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
  FPCR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
  TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
};
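
/* Illustration (editorial, not from the original source): the enum above
   packs the o0/op1/CRn/CRm/op2 fields of a system register operand into
   bits 14..0, ready to be shifted into bits 19..5 of an MRS/MSR
   instruction by emit_mrs/emit_msr below.  Assuming the usual A64 MRS
   opcode value of 0xd5300000, "MRS x2, nzcv" works out to

     0xd5300000 | (NZCV << 5) | 2  ==  0xd53b4202

   which matches what GAS emits for that instruction.  */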
/* Write a BLR instruction into *BUF.

     BLR rn

   RN is the register to branch to.  */

static int
emit_blr (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, BLR | ENCODE (rn.num, 5, 5));
}

/* Write a RET instruction into *BUF.

     RET xn

   RN is the register to branch to.  */

static int
emit_ret (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, RET | ENCODE (rn.num, 5, 5));
}
static int
emit_load_store_pair (uint32_t *buf, enum aarch64_opcodes opcode,
		      struct aarch64_register rt,
		      struct aarch64_register rt2,
		      struct aarch64_register rn,
		      struct aarch64_memory_operand operand)
{
  uint32_t opc;
  uint32_t pre_index;
  uint32_t write_back;

  if (rt.is64)
    opc = ENCODE (2, 2, 30);
  else
    opc = ENCODE (0, 2, 30);

  switch (operand.type)
    {
    case MEMORY_OPERAND_OFFSET:
      {
	pre_index = ENCODE (1, 1, 24);
	write_back = ENCODE (0, 1, 23);
	break;
      }
    case MEMORY_OPERAND_POSTINDEX:
      {
	pre_index = ENCODE (0, 1, 24);
	write_back = ENCODE (1, 1, 23);
	break;
      }
    case MEMORY_OPERAND_PREINDEX:
      {
	pre_index = ENCODE (1, 1, 24);
	write_back = ENCODE (1, 1, 23);
	break;
      }
    default:
      return 0;
    }

  return aarch64_emit_insn (buf, opcode | opc | pre_index | write_back
			    | ENCODE (operand.index >> 3, 7, 15)
			    | ENCODE (rt2.num, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}
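
/* Note (editorial): the immediate is scaled; the 7-bit field at bit 15
   holds OPERAND.INDEX / 8, which is where the -512 .. 504 offset range
   documented for emit_stp/emit_ldp below comes from.  For instance,
   emit_stp (p, x0, x1, sp, preindex_memory_operand (-16)) places
   -16 >> 3 == -2 in that field.  */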
/* Write a STP instruction into *BUF.

     STP rt, rt2, [rn, #offset]
     STP rt, rt2, [rn, #index]!
     STP rt, rt2, [rn], #index

   RT and RT2 are the registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_stp (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rt2, struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, STP, rt, rt2, rn, operand);
}

/* Write a LDP instruction into *BUF.

     LDP rt, rt2, [rn, #offset]
     LDP rt, rt2, [rn, #index]!
     LDP rt, rt2, [rn], #index

   RT and RT2 are the registers to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_ldp (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rt2, struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, LDP, rt, rt2, rn, operand);
}
/* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.

     LDP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_ldp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
		   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, LDP_SIMD_VFP | opc | pre_index
			    | ENCODE (offset >> 4, 7, 15)
			    | ENCODE (rt2, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

/* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.

     STP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_stp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
		   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, STP_SIMD_VFP | opc | pre_index
			    | ENCODE (offset >> 4, 7, 15)
			    | ENCODE (rt2, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}
/* Write a LDRH instruction into *BUF.

     LDRH wt, [xn, #offset]
     LDRH wt, [xn, #index]!
     LDRH wt, [xn], #index

   RT is the register to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   the 0 .. 8190 range (12 bits << 1).  */

static int
emit_ldrh (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn,
	   struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 1, LDR, rt, rn, operand);
}

/* Write a LDRB instruction into *BUF.

     LDRB wt, [xn, #offset]
     LDRB wt, [xn, #index]!
     LDRB wt, [xn], #index

   RT is the register to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   the 0 .. 4095 range (12 bits).  */

static int
emit_ldrb (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn,
	   struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 0, LDR, rt, rn, operand);
}
/* Write a STR instruction into *BUF.

     STR rt, [rn, #offset]
     STR rt, [rn, #index]!
     STR rt, [rn], #index

   RT is the register to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_str (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, rt.is64 ? 3 : 2, STR, rt, rn, operand);
}
/* Helper function emitting an exclusive load or store instruction.  */

static int
emit_load_store_exclusive (uint32_t *buf, uint32_t size,
			   enum aarch64_opcodes opcode,
			   struct aarch64_register rs,
			   struct aarch64_register rt,
			   struct aarch64_register rt2,
			   struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, opcode | ENCODE (size, 2, 30)
			    | ENCODE (rs.num, 5, 16) | ENCODE (rt2.num, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}
/* Write a LDAXR instruction into *BUF.

     LDAXR rt, [xn]

   RT is the destination register.
   RN is the base address register.  */

static int
emit_ldaxr (uint32_t *buf, struct aarch64_register rt,
	    struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, LDAXR, xzr, rt,
				    xzr, rn);
}
/* Write a STXR instruction into *BUF.

     STXR ws, rt, [xn]

   RS is the result register, it indicates if the store succeeded or not.
   RT is the destination register.
   RN is the base address register.  */

static int
emit_stxr (uint32_t *buf, struct aarch64_register rs,
	   struct aarch64_register rt, struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STXR, rs, rt,
				    xzr, rn);
}
/* Write a STLR instruction into *BUF.

     STLR rt, [xn]

   RT is the register to store.
   RN is the base address register.  */

static int
emit_stlr (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STLR, xzr, rt,
				    xzr, rn);
}
/* Helper function for data processing instructions with register sources.  */

static int
emit_data_processing_reg (uint32_t *buf, uint32_t opcode,
			  struct aarch64_register rd,
			  struct aarch64_register rn,
			  struct aarch64_register rm)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, opcode | size | ENCODE (rm.num, 5, 16)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rd.num, 5, 0));
}
/* Helper function for data processing instructions taking either a register
   or an immediate as the source operand.  */

static int
emit_data_processing (uint32_t *buf, enum aarch64_opcodes opcode,
		      struct aarch64_register rd,
		      struct aarch64_register rn,
		      struct aarch64_operand operand)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  /* The opcode is different for register and immediate source operands.  */
  uint32_t operand_opcode;

  if (operand.type == OPERAND_IMMEDIATE)
    {
      /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (8, 4, 25);

      return aarch64_emit_insn (buf, opcode | operand_opcode | size
				| ENCODE (operand.imm, 12, 10)
				| ENCODE (rn.num, 5, 5)
				| ENCODE (rd.num, 5, 0));
    }
  else
    {
      /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (5, 4, 25);

      return emit_data_processing_reg (buf, opcode | operand_opcode, rd,
				       rn, operand.reg);
    }
}
/* Write an ADD instruction into *BUF.

     ADD rd, rn, #imm
     ADD rd, rn, rm

   This function handles both an immediate and register add.

   RD is the destination register.
   RN is the input register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_add (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, ADD, rd, rn, operand);
}
/* Write a SUB instruction into *BUF.

     SUB rd, rn, #imm
     SUB rd, rn, rm

   This function handles both an immediate and register sub.

   RD is the destination register.
   RN is the input register.
   OPERAND is the source operand, either an immediate or a register,
   to subtract from RN.  */

static int
emit_sub (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUB, rd, rn, operand);
}
/* Write a MOV instruction into *BUF.

     MOV rd, #imm
     MOV rd, rm

   This function handles both a wide immediate move and a register move,
   with the condition that the source register is not xzr.  xzr and the
   stack pointer share the same encoding and this function only supports
   the stack pointer.

   RD is the destination register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_mov (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_operand operand)
{
  if (operand.type == OPERAND_IMMEDIATE)
    {
      uint32_t size = ENCODE (rd.is64, 1, 31);
      /* Do not shift the immediate.  */
      uint32_t shift = ENCODE (0, 2, 21);

      return aarch64_emit_insn (buf, MOV | size | shift
				| ENCODE (operand.imm, 16, 5)
				| ENCODE (rd.num, 5, 0));
    }
  else
    return emit_add (buf, rd, operand.reg, immediate_operand (0));
}
/* Write a MOVK instruction into *BUF.

     MOVK rd, #imm, lsl #shift

   RD is the destination register.
   IMM is the immediate.
   SHIFT is the logical shift left to apply to IMM.  */

static int
emit_movk (uint32_t *buf, struct aarch64_register rd, uint32_t imm,
	   unsigned shift)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, MOVK | size | ENCODE (shift, 2, 21) |
			    ENCODE (imm, 16, 5) | ENCODE (rd.num, 5, 0));
}
/* Write instructions into *BUF in order to move ADDR into a register.
   ADDR can be a 64-bit value.

   This function will emit a series of MOV and MOVK instructions, such as:

     MOV  xd, #(addr)
     MOVK xd, #(addr >> 16), lsl #16
     MOVK xd, #(addr >> 32), lsl #32
     MOVK xd, #(addr >> 48), lsl #48  */

static int
emit_mov_addr (uint32_t *buf, struct aarch64_register rd, CORE_ADDR addr)
{
  uint32_t *p = buf;

  /* The MOV (wide immediate) instruction clears the top bits of the
     destination register.  */
  p += emit_mov (p, rd, immediate_operand (addr & 0xffff));

  if ((addr >> 16) != 0)
    p += emit_movk (p, rd, (addr >> 16) & 0xffff, 1);
  else
    return p - buf;

  if ((addr >> 32) != 0)
    p += emit_movk (p, rd, (addr >> 32) & 0xffff, 2);
  else
    return p - buf;

  if ((addr >> 48) != 0)
    p += emit_movk (p, rd, (addr >> 48) & 0xffff, 3);

  return p - buf;
}
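
/* For example (address made up for illustration), moving
   ADDR = 0x0000007fb7d01a40 into x0 emits three instructions, the last
   MOVK being skipped because bits 63..48 are zero:

     MOV  x0, #0x1a40
     MOVK x0, #0xb7d0, lsl #16
     MOVK x0, #0x7f, lsl #32
*/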
/* Write a SUBS instruction into *BUF.

     SUBS rd, rn, rm

   This instruction updates the condition flags.

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_subs (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUBS, rd, rn, operand);
}
/* Write a CMP instruction into *BUF.

     CMP rn, rm

   This instruction is an alias of SUBS xzr, rn, rm.

   RN and RM are the registers to compare.  */

static int
emit_cmp (uint32_t *buf, struct aarch64_register rn,
	  struct aarch64_operand operand)
{
  return emit_subs (buf, xzr, rn, operand);
}
/* Write an AND instruction into *BUF.

     AND rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_and (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, AND, rd, rn, rm);
}
/* Write an ORR instruction into *BUF.

     ORR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orr (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORR, rd, rn, rm);
}
/* Write an ORN instruction into *BUF.

     ORN rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orn (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORN, rd, rn, rm);
}
/* Write an EOR instruction into *BUF.

     EOR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_eor (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, EOR, rd, rn, rm);
}
/* Write a MVN instruction into *BUF.

     MVN rd, rm

   This is an alias for ORN rd, xzr, rm.

   RD is the destination register.
   RM is the source register.  */

static int
emit_mvn (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rm)
{
  return emit_orn (buf, rd, xzr, rm);
}
/* Write a LSLV instruction into *BUF.

     LSLV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lslv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSLV, rd, rn, rm);
}
/* Write a LSRV instruction into *BUF.

     LSRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lsrv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSRV, rd, rn, rm);
}
/* Write an ASRV instruction into *BUF.

     ASRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_asrv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ASRV, rd, rn, rm);
}
/* Write a MUL instruction into *BUF.

     MUL rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_mul (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, MUL, rd, rn, rm);
}
/* Write an MRS instruction into *BUF.  The register size is 64-bit.

     MRS xt, system_reg

   RT is the destination register.
   SYSTEM_REG is the special purpose register to read.  */

static int
emit_mrs (uint32_t *buf, struct aarch64_register rt,
	  enum aarch64_system_control_registers system_reg)
{
  return aarch64_emit_insn (buf, MRS | ENCODE (system_reg, 15, 5)
			    | ENCODE (rt.num, 5, 0));
}
/* Write an MSR instruction into *BUF.  The register size is 64-bit.

     MSR system_reg, xt

   SYSTEM_REG is the special purpose register to write.
   RT is the input register.  */

static int
emit_msr (uint32_t *buf, enum aarch64_system_control_registers system_reg,
	  struct aarch64_register rt)
{
  return aarch64_emit_insn (buf, MSR | ENCODE (system_reg, 15, 5)
			    | ENCODE (rt.num, 5, 0));
}
/* Write a SEVL instruction into *BUF.

   This is a hint instruction telling the hardware to trigger an event.  */

static int
emit_sevl (uint32_t *buf)
{
  return aarch64_emit_insn (buf, SEVL);
}

/* Write a WFE instruction into *BUF.

   This is a hint instruction telling the hardware to wait for an event.  */

static int
emit_wfe (uint32_t *buf)
{
  return aarch64_emit_insn (buf, WFE);
}
/* Write a SBFM instruction into *BUF.

     SBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, sign extending the result.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_sbfm (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, SBFM | size | n | ENCODE (immr, 6, 16)
			    | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}
/* Write a SBFX instruction into *BUF.

     SBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination, sign
   extending the result.  This is an alias for:

     SBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_sbfx (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_sbfm (buf, rd, rn, lsb, lsb + width - 1);
}
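
/* For example, emit_sbfx (p, x0, x1, 0, 32) emits
   SBFM x0, x1, #0, #31, which sign-extends the low 32 bits of x1
   into x0.  */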
/* Write a UBFM instruction into *BUF.

     UBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, extending the result with zeros.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_ubfm (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, UBFM | size | n | ENCODE (immr, 6, 16)
			    | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}
/* Write a UBFX instruction into *BUF.

     UBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination,
   extending the result with zeros.  This is an alias for:

     UBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_ubfx (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_ubfm (buf, rd, rn, lsb, lsb + width - 1);
}
/* Write a CSINC instruction into *BUF.

     CSINC rd, rn, rm, cond

   This instruction conditionally increments rn or rm and places the result
   in rd.  RN is chosen if the condition is true.

   RD is the destination register.
   RN and RM are the source registers.
   COND is the encoded condition.  */

static int
emit_csinc (uint32_t *buf, struct aarch64_register rd,
	    struct aarch64_register rn, struct aarch64_register rm,
	    unsigned cond)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, CSINC | size | ENCODE (rm.num, 5, 16)
			    | ENCODE (cond, 4, 12) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}
/* Write a CSET instruction into *BUF.

     CSET rd, cond

   This instruction conditionally writes 1 or 0 in the destination register.
   1 is written if the condition is true.  This is an alias for:

     CSINC rd, xzr, xzr, !cond

   Note that the condition needs to be inverted.

   RD is the destination register.
   COND is the encoded condition.  */

static int
emit_cset (uint32_t *buf, struct aarch64_register rd, unsigned cond)
{
  /* The least significant bit of the condition needs toggling in order
     to invert it.  */
  return emit_csinc (buf, rd, xzr, xzr, cond ^ 0x1);
}
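
/* For example, "CSET x0, EQ" is "CSINC x0, xzr, xzr, NE": with EQ = 0x0
   and NE = 0x1, toggling the least significant bit of the condition
   turns one into the other, so emit_cset (p, x0, EQ) emits the same
   instruction word as emit_csinc (p, x0, xzr, xzr, NE).  */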
/* Write LEN instructions from BUF into the inferior memory at *TO.

   Note instructions are always little endian on AArch64, unlike data.  */

static void
append_insns (CORE_ADDR *to, size_t len, const uint32_t *buf)
{
  size_t byte_len = len * sizeof (uint32_t);
#if (__BYTE_ORDER == __BIG_ENDIAN)
  uint32_t *le_buf = (uint32_t *) xmalloc (byte_len);
  size_t i;

  for (i = 0; i < len; i++)
    le_buf[i] = htole32 (buf[i]);

  target_write_memory (*to, (const unsigned char *) le_buf, byte_len);

  xfree (le_buf);
#else
  target_write_memory (*to, (const unsigned char *) buf, byte_len);
#endif

  *to += byte_len;
}
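
/* A sketch of typical usage, as in the jump pad builder further below
   (TO and SOME_ADDR are illustrative names, not from the original):

     uint32_t buf[16];
     uint32_t *p = buf;

     p += emit_mov_addr (p, x0, some_addr);
     p += emit_blr (p, x0);
     append_insns (&to, p - buf, buf);   /+ also advances TO past the code +/
*/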
/* Sub-class of struct aarch64_insn_data, store information of
   instruction relocation for fast tracepoint.  Visitor can
   relocate an instruction from BASE.INSN_ADDR to NEW_ADDR and save
   the relocated instructions in buffer pointed by INSN_PTR.  */

struct aarch64_insn_relocation_data
{
  struct aarch64_insn_data base;

  /* The new address the instruction is relocated to.  */
  CORE_ADDR new_addr;

  /* Pointer to the buffer of relocated instruction(s).  */
  uint32_t *insn_ptr;
};
/* Implementation of aarch64_insn_visitor method "b".  */

static void
aarch64_ftrace_insn_reloc_b (const int is_bl, const int32_t offset,
			     struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 28))
    insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, is_bl, new_offset);
}
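
/* Worked example (addresses made up for illustration): a "B +0x40" at
   0x1000 relocated to a scratch pad at 0x7000 must still land on 0x1040,
   so the rewritten branch gets
   new_offset = 0x1000 - 0x7000 + 0x40 = -0x5fc0, and indeed
   0x7000 + (-0x5fc0) = 0x1040.  */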
/* Implementation of aarch64_insn_visitor method "b_cond".  */

static void
aarch64_ftrace_insn_reloc_b_cond (const unsigned cond, const int32_t offset,
				  struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 21))
    {
      insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond,
					  new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a conditional branch
	 instruction but not for an unconditional branch.  We can use
	 the following instructions instead:

	 B.COND TAKEN    ; If cond is true, then jump to TAKEN.
	 B NOT_TAKEN     ; Else jump over TAKEN and continue.
	 TAKEN:
	 B #(offset - 8)
	 NOT_TAKEN:  */

      insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
    }
}
/* Implementation of aarch64_insn_visitor method "cb".  */

static void
aarch64_ftrace_insn_reloc_cb (const int32_t offset, const int is_cbnz,
			      const unsigned rn, int is64,
			      struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 21))
    {
      insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
				       aarch64_register (rn, is64),
				       new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a compare and branch
	 instruction but not for an unconditional branch.  We can use
	 the following instructions instead:

	 CBZ xn, TAKEN   ; xn == 0, then jump to TAKEN.
	 B NOT_TAKEN     ; Else jump over TAKEN and continue.
	 TAKEN:
	 B #(offset - 8)
	 NOT_TAKEN:  */

      insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
				       aarch64_register (rn, is64), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
    }
}
/* Implementation of aarch64_insn_visitor method "tb".  */

static void
aarch64_ftrace_insn_reloc_tb (const int32_t offset, int is_tbnz,
			      const unsigned rt, unsigned bit,
			      struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 16))
    {
      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
				       aarch64_register (rt, 1), new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a test bit and branch
	 instruction but not for an unconditional branch.  We can use
	 the following instructions instead:

	 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
	 B NOT_TAKEN         ; Else jump over TAKEN and continue.
	 TAKEN:
	 B #(offset - 8)
	 NOT_TAKEN:  */

      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
				       aarch64_register (rt, 1), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
				      new_offset - 8);
    }
}
/* Implementation of aarch64_insn_visitor method "adr".  */

static void
aarch64_ftrace_insn_reloc_adr (const int32_t offset, const unsigned rd,
			       const int is_adrp,
			       struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  /* We know exactly the address the ADR{P,} instruction will compute.
     We can just write it to the destination register.  */
  CORE_ADDR address = data->insn_addr + offset;

  if (is_adrp)
    {
      /* Clear the lower 12 bits of the offset to get the 4K page.  */
      insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
					     aarch64_register (rd, 1),
					     address & ~0xfff);
    }
  else
    insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
					   aarch64_register (rd, 1), address);
}
/* Implementation of aarch64_insn_visitor method "ldr_literal".  */

static void
aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset, const int is_sw,
				       const unsigned rt, const int is64,
				       struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  CORE_ADDR address = data->insn_addr + offset;

  insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
					 aarch64_register (rt, 1), address);

  /* We know exactly what address to load from, and what register we
     can use:

     MOV xd, #(oldloc + offset)
     MOVK xd, #((oldloc + offset) >> 16), lsl #16
     ...

     LDR xd, [xd] ; or LDRSW xd, [xd]  */

  if (is_sw)
    insn_reloc->insn_ptr += emit_ldrsw (insn_reloc->insn_ptr,
					aarch64_register (rt, 1),
					aarch64_register (rt, 1),
					offset_memory_operand (0));
  else
    insn_reloc->insn_ptr += emit_ldr (insn_reloc->insn_ptr,
				      aarch64_register (rt, is64),
				      aarch64_register (rt, 1),
				      offset_memory_operand (0));
}
/* Implementation of aarch64_insn_visitor method "others".  */

static void
aarch64_ftrace_insn_reloc_others (const uint32_t insn,
				  struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;

  /* The instruction is not PC relative.  Just re-emit it at the new
     location.  */
  insn_reloc->insn_ptr += aarch64_emit_insn (insn_reloc->insn_ptr, insn);
}
static const struct aarch64_insn_visitor visitor =
{
  aarch64_ftrace_insn_reloc_b,
  aarch64_ftrace_insn_reloc_b_cond,
  aarch64_ftrace_insn_reloc_cb,
  aarch64_ftrace_insn_reloc_tb,
  aarch64_ftrace_insn_reloc_adr,
  aarch64_ftrace_insn_reloc_ldr_literal,
  aarch64_ftrace_insn_reloc_others,
};
/* Implementation of linux_target_ops method
   "install_fast_tracepoint_jump_pad".  */

static int
aarch64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
					  CORE_ADDR tpaddr,
					  CORE_ADDR collector,
					  CORE_ADDR lockaddr,
					  ULONGEST orig_size,
					  CORE_ADDR *jump_entry,
					  CORE_ADDR *trampoline,
					  ULONGEST *trampoline_size,
					  unsigned char *jjump_pad_insn,
					  ULONGEST *jjump_pad_insn_size,
					  CORE_ADDR *adjusted_insn_addr,
					  CORE_ADDR *adjusted_insn_addr_end,
					  char *err)
{
  uint32_t buf[256];
  uint32_t *p = buf;
  int64_t offset;
  uint32_t insn;
  int i;
  CORE_ADDR buildaddr = *jump_entry;
  struct aarch64_insn_relocation_data insn_data;

  /* We need to save the current state on the stack both to restore it
     later and to collect register values when the tracepoint is hit.

     The saved registers are pushed in a layout that needs to be in sync
     with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c).  Later on
     the supply_fast_tracepoint_registers function will fill in the
     register cache from a pointer to saved registers on the stack we build
     here.

     For simplicity, we set the size of each cell on the stack to 16 bytes.
     This way one cell can hold any register type, from system registers
     to the 128 bit SIMD&FP registers.  Furthermore, the stack pointer
     has to be 16 bytes aligned anyway.

     Note that the CPSR register does not exist on AArch64.  Instead we
     can access system bits describing the process state with the
     MRS/MSR instructions, namely the condition flags.  We save them as
     if they are part of a CPSR register because that's how GDB
     interprets these system bits.  At the moment, only the condition
     flags are saved in CPSR (NZCV).

     Stack layout, each cell is 16 bytes (descending):

     High *-------- SIMD&FP registers from 31 down to 0. --------*
	  | q31                                                  |
	  .                                                      .
	  .                                                      .
	  | q0                                                   |
	  *---- General purpose registers from 30 down to 0. ----*
	  | x30                                                  |
	  .                                                      .
	  .                                                      .
	  | x0                                                   |
	  *------------- Special purpose registers. -------------*
	  | SP                                                   |
	  | PC                                                   |
	  | CPSR (NZCV)                                          | 5 cells
	  | FPSR                                                 |
	  | FPCR                                                 |
	  *------------- collecting_t object --------------------*
	  | TPIDR_EL0                  | struct tracepoint *     |
     Low  *------------------------------------------------------*

     After this stack is set up, we issue a call to the collector, passing
     it the saved registers at (SP + 16).  */
  /* Push SIMD&FP registers on the stack:

       SUB sp, sp, #(32 * 16)

       STP q30, q31, [sp, #(30 * 16)]
       ...
       STP q0, q1, [sp]

     */
  p += emit_sub (p, sp, sp, immediate_operand (32 * 16));
  for (i = 30; i >= 0; i -= 2)
    p += emit_stp_q_offset (p, i, i + 1, sp, i * 16);

  /* Push general purpose registers on the stack.  Note that we do not need
     to push x31 as it represents the xzr register and not the stack
     pointer in a STR instruction.

       SUB sp, sp, #(31 * 16)

       STR x30, [sp, #(30 * 16)]
       ...
       STR x0, [sp]

     */
  p += emit_sub (p, sp, sp, immediate_operand (31 * 16));
  for (i = 30; i >= 0; i -= 1)
    p += emit_str (p, aarch64_register (i, 1), sp,
		   offset_memory_operand (i * 16));

  /* Make space for 5 more cells.

       SUB sp, sp, #(5 * 16)

     */
  p += emit_sub (p, sp, sp, immediate_operand (5 * 16));

  /* Save SP:

       ADD x4, sp, #((32 + 31 + 5) * 16)
       STR x4, [sp, #(4 * 16)]

     */
  p += emit_add (p, x4, sp, immediate_operand ((32 + 31 + 5) * 16));
  p += emit_str (p, x4, sp, offset_memory_operand (4 * 16));

  /* Save PC (tracepoint address):

       MOV  x3, #(tpaddr)
       ...

       STR x3, [sp, #(3 * 16)]

     */
  p += emit_mov_addr (p, x3, tpaddr);
  p += emit_str (p, x3, sp, offset_memory_operand (3 * 16));

  /* Save CPSR (NZCV), FPSR and FPCR:

       MRS x2, nzcv
       MRS x1, fpsr
       MRS x0, fpcr

       STR x2, [sp, #(2 * 16)]
       STR x1, [sp, #(1 * 16)]
       STR x0, [sp, #(0 * 16)]

     */
  p += emit_mrs (p, x2, NZCV);
  p += emit_mrs (p, x1, FPSR);
  p += emit_mrs (p, x0, FPCR);
  p += emit_str (p, x2, sp, offset_memory_operand (2 * 16));
  p += emit_str (p, x1, sp, offset_memory_operand (1 * 16));
  p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));

  /* Push the collecting_t object.  It consists of the address of the
     tracepoint and an ID for the current thread.  We get the latter by
     reading the tpidr_el0 system register.  It corresponds to the
     NT_ARM_TLS register accessible with ptrace.

       MOV x0, #(tpoint)
       ...

       MRS x1, tpidr_el0

       STP x0, x1, [sp, #-16]!

     */
  p += emit_mov_addr (p, x0, tpoint);
  p += emit_mrs (p, x1, TPIDR_EL0);
  p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-16));
  /* Spin-lock:

     The shared memory for the lock is at lockaddr.  It will hold zero
     if no-one is holding the lock, otherwise it contains the address of
     the collecting_t object on the stack of the thread which acquired it.

     At this stage, the stack pointer points to this thread's collecting_t
     object.

     We use the following registers:
     - x0: Address of the lock.
     - x1: Pointer to collecting_t object.
     - x2: Scratch register.

       MOV x0, #(lockaddr)
       ...
       MOV x1, sp

       ; Trigger an event local to this core.  So the following WFE
       ; instruction is ignored.
       SEVL
     again:
       ; Wait for an event.  The event is triggered by either the SEVL
       ; or STLR instructions (store release).
       WFE

       ; Atomically read at lockaddr.  This marks the memory location as
       ; exclusive.  This instruction also has memory constraints which
       ; make sure all previous data reads and writes are done before
       ; executing it.
       LDAXR x2, [x0]

       ; Try again if another thread holds the lock.
       CBNZ x2, again

       ; We can lock it!  Write the address of the collecting_t object.
       ; This instruction will fail if the memory location is not marked
       ; as exclusive anymore.  If it succeeds, it will remove the
       ; exclusive mark on the memory location.  This way, if another
       ; thread executes this instruction before us, we will fail and try
       ; all over again.
       STXR w2, x1, [x0]
       CBNZ w2, again

     */
  p += emit_mov_addr (p, x0, lockaddr);
  p += emit_mov (p, x1, register_operand (sp));
  p += emit_sevl (p);
  p += emit_wfe (p);
  p += emit_ldaxr (p, x2, x0);
  p += emit_cb (p, 1, w2, -2 * 4);
  p += emit_stxr (p, w2, x1, x0);
  p += emit_cb (p, 1, x2, -4 * 4);
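
  /* A note on the branch offsets above (editorial): emit_cb offsets are
     in bytes relative to the CBNZ instruction itself, so -2 * 4 jumps
     back two instructions to the WFE, and -4 * 4 jumps back four
     instructions, restarting the wait loop at the same WFE.  */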
  /* Call collector (struct tracepoint *, unsigned char *):

       MOV x0, #(tpoint)
       ...

       ; Saved registers start after the collecting_t object.
       ADD x1, sp, #16

       ; We use an intra-procedure-call scratch register.
       MOV ip0, #(collector)
       ...

       ; And call back to C!
       BLR ip0

     */
  p += emit_mov_addr (p, x0, tpoint);
  p += emit_add (p, x1, sp, immediate_operand (16));

  p += emit_mov_addr (p, ip0, collector);
  p += emit_blr (p, ip0);

  /* Release the lock.

       MOV x0, #(lockaddr)
       ...

       ; This instruction is a normal store with memory ordering
       ; constraints.  Thanks to this we do not have to put a data
       ; barrier instruction to make sure all data read and writes are done
       ; before this instruction is executed.  Furthermore, this instruction
       ; will trigger an event, letting other threads know they can grab
       ; the lock.
       STLR xzr, [x0]

     */
  p += emit_mov_addr (p, x0, lockaddr);
  p += emit_stlr (p, xzr, x0);

  /* Free collecting_t object:

       ADD sp, sp, #16

     */
  p += emit_add (p, sp, sp, immediate_operand (16));

  /* Restore CPSR (NZCV), FPSR and FPCR.  And free all special purpose
     registers from the stack.

       LDR x2, [sp, #(2 * 16)]
       LDR x1, [sp, #(1 * 16)]
       LDR x0, [sp, #(0 * 16)]

       MSR NZCV, x2
       MSR FPSR, x1
       MSR FPCR, x0

       ADD sp, sp, #(5 * 16)

     */
  p += emit_ldr (p, x2, sp, offset_memory_operand (2 * 16));
  p += emit_ldr (p, x1, sp, offset_memory_operand (1 * 16));
  p += emit_ldr (p, x0, sp, offset_memory_operand (0 * 16));
  p += emit_msr (p, NZCV, x2);
  p += emit_msr (p, FPSR, x1);
  p += emit_msr (p, FPCR, x0);
  p += emit_add (p, sp, sp, immediate_operand (5 * 16));

  /* Pop general purpose registers:

       LDR x0, [sp]
       ...
       LDR x30, [sp, #(30 * 16)]

       ADD sp, sp, #(31 * 16)

     */
  for (i = 0; i <= 30; i += 1)
    p += emit_ldr (p, aarch64_register (i, 1), sp,
		   offset_memory_operand (i * 16));
  p += emit_add (p, sp, sp, immediate_operand (31 * 16));

  /* Pop SIMD&FP registers:

       LDP q0, q1, [sp]
       ...
       LDP q30, q31, [sp, #(30 * 16)]

       ADD sp, sp, #(32 * 16)

     */
  for (i = 0; i <= 30; i += 2)
    p += emit_ldp_q_offset (p, i, i + 1, sp, i * 16);
  p += emit_add (p, sp, sp, immediate_operand (32 * 16));
  /* Write the code into the inferior memory.  */
  append_insns (&buildaddr, p - buf, buf);

  /* Now emit the relocated instruction.  */
  *adjusted_insn_addr = buildaddr;
  target_read_uint32 (tpaddr, &insn);

  insn_data.base.insn_addr = tpaddr;
  insn_data.new_addr = buildaddr;
  insn_data.insn_ptr = buf;

  aarch64_relocate_instruction (insn, &visitor,
				(struct aarch64_insn_data *) &insn_data);

  /* We may not have been able to relocate the instruction.  */
  if (insn_data.insn_ptr == buf)
    {
      sprintf (err,
	       "E.Could not relocate instruction from %s to %s.",
	       core_addr_to_string_nz (tpaddr),
	       core_addr_to_string_nz (buildaddr));
      return 1;
    }
  else
    {
      append_insns (&buildaddr, insn_data.insn_ptr - buf, buf);
      *adjusted_insn_addr_end = buildaddr;
    }

  /* Go back to the start of the buffer.  */
  p = buf;

  /* Emit a branch back from the jump pad.  */
  offset = (tpaddr + orig_size - buildaddr);
  if (!can_encode_int32 (offset, 28))
    {
      sprintf (err,
	       "E.Jump back from jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
	       offset);
      return 1;
    }

  p += emit_b (p, 0, offset);
  append_insns (&buildaddr, p - buf, buf);

  /* Give the caller a branch instruction into the jump pad.  */
  offset = (*jump_entry - tpaddr);
  if (!can_encode_int32 (offset, 28))
    {
      sprintf (err,
	       "E.Jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
	       offset);
      return 1;
    }

  emit_b ((uint32_t *) jjump_pad_insn, 0, offset);
  *jjump_pad_insn_size = 4;

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
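/* A note on the two range checks above: an AArch64 B instruction encodes
   a signed 26-bit word offset, i.e. a byte offset within +/-128 MiB, so
   the byte offset must fit in 28 signed bits.  A minimal sketch of such
   a check, using a hypothetical helper (can_encode_int32 is the real
   one):

     static int
     fits_in_signed_bits (int64_t val, unsigned int bits)
     {
       int64_t limit = (int64_t) 1 << (bits - 1);

       return val >= -limit && val < limit;
     }
*/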
/* Helper function writing LEN instructions from START into
   current_insn_ptr.  */

static void
emit_ops_insns (const uint32_t *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d instructions at %s\n",
		  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}

/* Pop a register from the stack.  */

static int
emit_pop (uint32_t *buf, struct aarch64_register rt)
{
  return emit_ldr (buf, rt, sp, postindex_memory_operand (1 * 16));
}
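/* For example, emit_pop (p, x0) assembles to "LDR x0, [sp], #16": each
   slot of the compiled-expression stack is 16 bytes wide so that SP
   stays 16-byte aligned, as AArch64 requires for SP-based accesses.  */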
/* Push a register on the stack.  */

static int
emit_push (uint32_t *buf, struct aarch64_register rt)
{
  return emit_str (buf, rt, sp, preindex_memory_operand (-1 * 16));
}

/* Implementation of emit_ops method "emit_prologue".  */

static void
aarch64_emit_prologue (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* This function emits a prologue for the following function prototype:

     enum eval_result_type f (unsigned char *regs,
			      ULONGEST *value);

     The first argument is a buffer of raw registers.  The second
     argument points to the result of evaluating the expression, which
     will be set to whatever is on top of the stack at the end.

     The stack set up by the prologue is as follows:

     High *------------------------------------------------------*
	  | LR                                                   |
	  | FP                                                   | <- FP
	  | x1  (ULONGEST *value)                                |
	  | x0  (unsigned char *regs)                            |
     Low  *------------------------------------------------------*

     As we are implementing a stack machine, each opcode can expand the
     stack so we never know how far we are from the data saved by this
     prologue.  In order to be able to refer to value and regs later, we
     save the current stack pointer in the frame pointer.  This way, it
     is not clobbered when calling C functions.

     Finally, throughout every operation, we are using register x0 as the
     top of the stack, and x1 as a scratch register.  */

  p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-2 * 16));
  p += emit_str (p, lr, sp, offset_memory_operand (3 * 8));
  p += emit_str (p, fp, sp, offset_memory_operand (2 * 8));

  p += emit_add (p, fp, sp, immediate_operand (2 * 8));

  emit_ops_insns (buf, p - buf);
}
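/* For the record, the prologue above assembles to the following
   (illustrative listing; x29/x30 are the fp/lr aliases):

     STP x0, x1, [sp, #-32]!
     STR x30, [sp, #24]
     STR x29, [sp, #16]
     ADD x29, sp, #16  */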
/* Implementation of emit_ops method "emit_epilogue".  */

static void
aarch64_emit_epilogue (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Store the result of the expression (x0) in *value.  */
  p += emit_sub (p, x1, fp, immediate_operand (1 * 8));
  p += emit_ldr (p, x1, x1, offset_memory_operand (0));
  p += emit_str (p, x0, x1, offset_memory_operand (0));

  /* Restore the previous state.  */
  p += emit_add (p, sp, fp, immediate_operand (2 * 8));
  p += emit_ldp (p, fp, lr, fp, offset_memory_operand (0));

  /* Return expr_eval_no_error.  */
  p += emit_mov (p, x0, immediate_operand (expr_eval_no_error));
  p += emit_ret (p, lr);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_add".  */

static void
aarch64_emit_add (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_add (p, x0, x1, register_operand (x0));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_sub".  */

static void
aarch64_emit_sub (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_sub (p, x0, x1, register_operand (x0));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_mul".  */

static void
aarch64_emit_mul (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_mul (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_lsh".  */

static void
aarch64_emit_lsh (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_lslv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_rsh_signed".  */

static void
aarch64_emit_rsh_signed (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_asrv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_rsh_unsigned".  */

static void
aarch64_emit_rsh_unsigned (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_lsrv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}
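/* All the binary operations above share one stack-machine convention:
   x0 caches the top of the stack, so each binop pops the second operand
   into x1 and leaves the result in x0.  For instance, aarch64_emit_add
   compiles the agent-expression "add" opcode to (illustrative):

     LDR x1, [sp], #16
     ADD x0, x1, x0  */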
/* Implementation of emit_ops method "emit_ext".  */

static void
aarch64_emit_ext (int arg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_sbfx (p, x0, x0, 0, arg);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_log_not".  */

static void
aarch64_emit_log_not (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* If the top of the stack is 0, replace it with 1.  Else replace it
     with 0.

       CMP x0, #0
       CSET x0, eq  */

  p += emit_cmp (p, x0, immediate_operand (0));
  p += emit_cset (p, x0, EQ);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_bit_and".  */

static void
aarch64_emit_bit_and (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_and (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_or".  */

static void
aarch64_emit_bit_or (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_orr (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_xor".  */

static void
aarch64_emit_bit_xor (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_eor (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_not".  */

static void
aarch64_emit_bit_not (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mvn (p, x0, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_equal".  */

static void
aarch64_emit_equal (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x0, register_operand (x1));
  p += emit_cset (p, x0, EQ);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_less_signed".  */

static void
aarch64_emit_less_signed (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  p += emit_cset (p, x0, LT);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_less_unsigned".  */

static void
aarch64_emit_less_unsigned (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  p += emit_cset (p, x0, LO);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_ref".  */

static void
aarch64_emit_ref (int size)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  switch (size)
    {
    case 1:
      p += emit_ldrb (p, w0, x0, offset_memory_operand (0));
      break;
    case 2:
      p += emit_ldrh (p, w0, x0, offset_memory_operand (0));
      break;
    case 4:
      p += emit_ldr (p, w0, x0, offset_memory_operand (0));
      break;
    case 8:
      p += emit_ldr (p, x0, x0, offset_memory_operand (0));
      break;
    default:
      /* Unknown size, bail on compilation.  */
      emit_error = 1;
      break;
    }

  emit_ops_insns (buf, p - buf);
}
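/* For example, aarch64_emit_ref (4) assembles to "LDR w0, [x0]",
   replacing the address cached in x0 (the top of the stack) with the
   32-bit value it points to, zero-extended to 64 bits.  */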
/* Implementation of emit_ops method "emit_if_goto".  */

static void
aarch64_emit_if_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* The Z flag is set or cleared here.  */
  p += emit_cmp (p, x0, immediate_operand (0));
  /* This instruction must not change the Z flag.  */
  p += emit_pop (p, x0);
  /* Branch over the next instruction if x0 == 0.  */
  p += emit_bcond (p, EQ, 8);

  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_goto".  */

static void
aarch64_emit_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = 0;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "write_goto_address".  */

static void
aarch64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  uint32_t insn;

  emit_b (&insn, 0, to - from);
  append_insns (&from, 1, &insn);
}
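/* How the pieces above fit together: emit_if_goto and emit_goto report
   via *offset_p where their placeholder NOP lies within the block they
   emitted; once the bytecode compiler knows the branch target, it calls
   write_goto_address with the placeholder's address (FROM) and the
   destination (TO) to overwrite the NOP with a single "B" instruction.  */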
/* Implementation of emit_ops method "emit_const".  */

static void
aarch64_emit_const (LONGEST num)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov_addr (p, x0, num);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_call".  */

static void
aarch64_emit_call (CORE_ADDR fn)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov_addr (p, ip0, fn);
  p += emit_blr (p, ip0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_reg".  */

static void
aarch64_emit_reg (int reg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Set x0 to unsigned char *regs.  */
  p += emit_sub (p, x0, fp, immediate_operand (2 * 8));
  p += emit_ldr (p, x0, x0, offset_memory_operand (0));
  p += emit_mov (p, x1, immediate_operand (reg));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (get_raw_reg_func_addr ());
}
/* Implementation of emit_ops method "emit_pop".  */

static void
aarch64_emit_pop (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_stack_flush".  */

static void
aarch64_emit_stack_flush (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_push (p, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_zero_ext".  */

static void
aarch64_emit_zero_ext (int arg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_ubfx (p, x0, x0, 0, arg);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_swap".  */

static void
aarch64_emit_swap (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_ldr (p, x1, sp, offset_memory_operand (0 * 16));
  p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
  p += emit_mov (p, x0, register_operand (x1));

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_stack_adjust".  */

static void
aarch64_emit_stack_adjust (int n)
{
  /* This is not needed with our design.  */
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_add (p, sp, sp, immediate_operand (n * 16));

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_int_call_1".  */

static void
aarch64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov (p, x0, immediate_operand (arg1));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (fn);
}

/* Implementation of emit_ops method "emit_void_call_2".  */

static void
aarch64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Push x0 on the stack.  */
  aarch64_emit_stack_flush ();

  /* Set up arguments for the function call:

     x0: arg1
     x1: top of the stack

       MOV x1, x0
       MOV x0, #arg1  */

  p += emit_mov (p, x1, register_operand (x0));
  p += emit_mov (p, x0, immediate_operand (arg1));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (fn);

  /* Restore x0.  */
  aarch64_emit_pop ();
}
/* Implementation of emit_ops method "emit_eq_goto".  */

static void
aarch64_emit_eq_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 != x1.  */
  p += emit_bcond (p, NE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ne_goto".  */

static void
aarch64_emit_ne_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 == x1.  */
  p += emit_bcond (p, EQ, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_lt_goto".  */

static void
aarch64_emit_lt_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 >= x0.  */
  p += emit_bcond (p, GE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_le_goto".  */

static void
aarch64_emit_le_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 > x0.  */
  p += emit_bcond (p, GT, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_gt_goto".  */

static void
aarch64_emit_gt_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 <= x0.  */
  p += emit_bcond (p, LE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ge_goto".  */

static void
aarch64_emit_ge_got (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 < x0.  */
  p += emit_bcond (p, LT, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
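/* Each conditional goto above emits the same four-instruction shape;
   e.g. aarch64_emit_eq_goto produces (illustrative):

     LDR x1, [sp], #16   ; pop the second operand
     CMP x1, x0
     B.NE .+8            ; skip the placeholder when the test fails
     NOP                 ; later patched to "B <target>"  */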
static struct emit_ops aarch64_emit_ops_impl =
{
  aarch64_emit_prologue,
  aarch64_emit_epilogue,
  aarch64_emit_add,
  aarch64_emit_sub,
  aarch64_emit_mul,
  aarch64_emit_lsh,
  aarch64_emit_rsh_signed,
  aarch64_emit_rsh_unsigned,
  aarch64_emit_ext,
  aarch64_emit_log_not,
  aarch64_emit_bit_and,
  aarch64_emit_bit_or,
  aarch64_emit_bit_xor,
  aarch64_emit_bit_not,
  aarch64_emit_equal,
  aarch64_emit_less_signed,
  aarch64_emit_less_unsigned,
  aarch64_emit_ref,
  aarch64_emit_if_goto,
  aarch64_emit_goto,
  aarch64_write_goto_address,
  aarch64_emit_const,
  aarch64_emit_call,
  aarch64_emit_reg,
  aarch64_emit_pop,
  aarch64_emit_stack_flush,
  aarch64_emit_zero_ext,
  aarch64_emit_swap,
  aarch64_emit_stack_adjust,
  aarch64_emit_int_call_1,
  aarch64_emit_void_call_2,
  aarch64_emit_eq_goto,
  aarch64_emit_ne_goto,
  aarch64_emit_lt_goto,
  aarch64_emit_le_goto,
  aarch64_emit_gt_goto,
  aarch64_emit_ge_got,
};

/* Implementation of linux_target_ops method "emit_ops".  */

static struct emit_ops *
aarch64_emit_ops (void)
{
  return &aarch64_emit_ops_impl;
}
/* Implementation of linux_target_ops method
   "get_min_fast_tracepoint_insn_len".  */

static int
aarch64_get_min_fast_tracepoint_insn_len (void)
{
  return 4;
}

/* Implementation of linux_target_ops method "supports_range_stepping".  */

static int
aarch64_supports_range_stepping (void)
{
  return 1;
}
/* Implementation of target ops method "sw_breakpoint_from_kind".  */

const gdb_byte *
aarch64_target::sw_breakpoint_from_kind (int kind, int *size)
{
  if (is_64bit_tdesc ())
    {
      *size = aarch64_breakpoint_len;
      return aarch64_breakpoint;
    }
  else
    return arm_sw_breakpoint_from_kind (kind, size);
}
/* Implementation of target ops method "breakpoint_kind_from_pc".  */

int
aarch64_target::breakpoint_kind_from_pc (CORE_ADDR *pcptr)
{
  if (is_64bit_tdesc ())
    return aarch64_breakpoint_len;
  else
    return arm_breakpoint_kind_from_pc (pcptr);
}

/* Implementation of the target ops method
   "breakpoint_kind_from_current_state".  */

int
aarch64_target::breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
{
  if (is_64bit_tdesc ())
    return aarch64_breakpoint_len;
  else
    return arm_breakpoint_kind_from_current_state (pcptr);
}

/* Support for hardware single step.  */

static int
aarch64_supports_hardware_single_step (void)
{
  return 1;
}
struct linux_target_ops the_low_target =
{
  NULL, /* get_next_pcs */
  0,    /* decr_pc_after_break */
  aarch64_breakpoint_at,
  aarch64_supports_z_point_type,
  aarch64_insert_point,
  aarch64_remove_point,
  aarch64_stopped_by_watchpoint,
  aarch64_stopped_data_address,
  NULL, /* collect_ptrace_register */
  NULL, /* supply_ptrace_register */
  aarch64_linux_siginfo_fixup,
  aarch64_linux_new_process,
  aarch64_linux_delete_process,
  aarch64_linux_new_thread,
  aarch64_linux_delete_thread,
  aarch64_linux_new_fork,
  aarch64_linux_prepare_to_resume,
  NULL, /* process_qsupported */
  aarch64_supports_tracepoints,
  aarch64_get_thread_area,
  aarch64_install_fast_tracepoint_jump_pad,
  aarch64_emit_ops,
  aarch64_get_min_fast_tracepoint_insn_len,
  aarch64_supports_range_stepping,
  aarch64_supports_hardware_single_step,
  aarch64_get_syscall_trapinfo,
};

/* The linux target ops object.  */

linux_process_target *the_linux_target = &the_aarch64_target;

void
initialize_low_arch (void)
{
  initialize_low_arch_aarch32 ();

  initialize_regsets_info (&aarch64_regsets_info);
  initialize_regsets_info (&aarch64_sve_regsets_info);
}