1 /* GNU/Linux/AArch64 specific low level interface, for the remote server for
4 Copyright (C) 2009-2020 Free Software Foundation, Inc.
5 Contributed by ARM Ltd.
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
23 #include "linux-low.h"
24 #include "nat/aarch64-linux.h"
25 #include "nat/aarch64-linux-hw-point.h"
26 #include "arch/aarch64-insn.h"
27 #include "linux-aarch32-low.h"
28 #include "elf/common.h"
30 #include "tracepoint.h"
35 #include "nat/gdb_ptrace.h"
36 #include <asm/ptrace.h>
41 #include "gdb_proc_service.h"
42 #include "arch/aarch64.h"
43 #include "linux-aarch32-tdesc.h"
44 #include "linux-aarch64-tdesc.h"
45 #include "nat/aarch64-sve-linux-ptrace.h"
52 /* Linux target op definitions for the AArch64 architecture. */
54 class aarch64_target
: public linux_process_target
58 const regs_info
*get_regs_info () override
;
60 int breakpoint_kind_from_pc (CORE_ADDR
*pcptr
) override
;
62 int breakpoint_kind_from_current_state (CORE_ADDR
*pcptr
) override
;
64 const gdb_byte
*sw_breakpoint_from_kind (int kind
, int *size
) override
;
66 bool supports_z_point_type (char z_type
) override
;
68 bool supports_tracepoints () override
;
70 bool supports_fast_tracepoints () override
;
72 int install_fast_tracepoint_jump_pad
73 (CORE_ADDR tpoint
, CORE_ADDR tpaddr
, CORE_ADDR collector
,
74 CORE_ADDR lockaddr
, ULONGEST orig_size
, CORE_ADDR
*jump_entry
,
75 CORE_ADDR
*trampoline
, ULONGEST
*trampoline_size
,
76 unsigned char *jjump_pad_insn
, ULONGEST
*jjump_pad_insn_size
,
77 CORE_ADDR
*adjusted_insn_addr
, CORE_ADDR
*adjusted_insn_addr_end
,
80 int get_min_fast_tracepoint_insn_len () override
;
84 void low_arch_setup () override
;
86 bool low_cannot_fetch_register (int regno
) override
;
88 bool low_cannot_store_register (int regno
) override
;
90 bool low_supports_breakpoints () override
;
92 CORE_ADDR
low_get_pc (regcache
*regcache
) override
;
94 void low_set_pc (regcache
*regcache
, CORE_ADDR newpc
) override
;
96 bool low_breakpoint_at (CORE_ADDR pc
) override
;
98 int low_insert_point (raw_bkpt_type type
, CORE_ADDR addr
,
99 int size
, raw_breakpoint
*bp
) override
;
101 int low_remove_point (raw_bkpt_type type
, CORE_ADDR addr
,
102 int size
, raw_breakpoint
*bp
) override
;
104 bool low_stopped_by_watchpoint () override
;
106 CORE_ADDR
low_stopped_data_address () override
;
108 bool low_siginfo_fixup (siginfo_t
*native
, gdb_byte
*inf
,
109 int direction
) override
;
111 arch_process_info
*low_new_process () override
;
113 void low_delete_process (arch_process_info
*info
) override
;
115 void low_new_thread (lwp_info
*) override
;
117 void low_delete_thread (arch_lwp_info
*) override
;
119 void low_new_fork (process_info
*parent
, process_info
*child
) override
;
121 void low_prepare_to_resume (lwp_info
*lwp
) override
;
123 int low_get_thread_area (int lwpid
, CORE_ADDR
*addrp
) override
;
126 /* The singleton target ops object. */
128 static aarch64_target the_aarch64_target
;
131 aarch64_target::low_cannot_fetch_register (int regno
)
133 gdb_assert_not_reached ("linux target op low_cannot_fetch_register "
134 "is not implemented by the target");
138 aarch64_target::low_cannot_store_register (int regno
)
140 gdb_assert_not_reached ("linux target op low_cannot_store_register "
141 "is not implemented by the target");
145 aarch64_target::low_prepare_to_resume (lwp_info
*lwp
)
147 aarch64_linux_prepare_to_resume (lwp
);
150 /* Per-process arch-specific data we want to keep. */
152 struct arch_process_info
154 /* Hardware breakpoint/watchpoint data.
155 The reason for them to be per-process rather than per-thread is
156 due to the lack of information in the gdbserver environment;
157 gdbserver is not told that whether a requested hardware
158 breakpoint/watchpoint is thread specific or not, so it has to set
159 each hw bp/wp for every thread in the current process. The
160 higher level bp/wp management in gdb will resume a thread if a hw
161 bp/wp trap is not expected for it. Since the hw bp/wp setting is
162 same for each thread, it is reasonable for the data to live here.
164 struct aarch64_debug_reg_state debug_reg_state
;
167 /* Return true if the size of register 0 is 8 byte. */
170 is_64bit_tdesc (void)
172 struct regcache
*regcache
= get_thread_regcache (current_thread
, 0);
174 return register_size (regcache
->tdesc
, 0) == 8;
177 /* Return true if the regcache contains the number of SVE registers. */
182 struct regcache
*regcache
= get_thread_regcache (current_thread
, 0);
184 return tdesc_contains_feature (regcache
->tdesc
, "org.gnu.gdb.aarch64.sve");
188 aarch64_fill_gregset (struct regcache
*regcache
, void *buf
)
190 struct user_pt_regs
*regset
= (struct user_pt_regs
*) buf
;
193 for (i
= 0; i
< AARCH64_X_REGS_NUM
; i
++)
194 collect_register (regcache
, AARCH64_X0_REGNUM
+ i
, ®set
->regs
[i
]);
195 collect_register (regcache
, AARCH64_SP_REGNUM
, ®set
->sp
);
196 collect_register (regcache
, AARCH64_PC_REGNUM
, ®set
->pc
);
197 collect_register (regcache
, AARCH64_CPSR_REGNUM
, ®set
->pstate
);
201 aarch64_store_gregset (struct regcache
*regcache
, const void *buf
)
203 const struct user_pt_regs
*regset
= (const struct user_pt_regs
*) buf
;
206 for (i
= 0; i
< AARCH64_X_REGS_NUM
; i
++)
207 supply_register (regcache
, AARCH64_X0_REGNUM
+ i
, ®set
->regs
[i
]);
208 supply_register (regcache
, AARCH64_SP_REGNUM
, ®set
->sp
);
209 supply_register (regcache
, AARCH64_PC_REGNUM
, ®set
->pc
);
210 supply_register (regcache
, AARCH64_CPSR_REGNUM
, ®set
->pstate
);
214 aarch64_fill_fpregset (struct regcache
*regcache
, void *buf
)
216 struct user_fpsimd_state
*regset
= (struct user_fpsimd_state
*) buf
;
219 for (i
= 0; i
< AARCH64_V_REGS_NUM
; i
++)
220 collect_register (regcache
, AARCH64_V0_REGNUM
+ i
, ®set
->vregs
[i
]);
221 collect_register (regcache
, AARCH64_FPSR_REGNUM
, ®set
->fpsr
);
222 collect_register (regcache
, AARCH64_FPCR_REGNUM
, ®set
->fpcr
);
226 aarch64_store_fpregset (struct regcache
*regcache
, const void *buf
)
228 const struct user_fpsimd_state
*regset
229 = (const struct user_fpsimd_state
*) buf
;
232 for (i
= 0; i
< AARCH64_V_REGS_NUM
; i
++)
233 supply_register (regcache
, AARCH64_V0_REGNUM
+ i
, ®set
->vregs
[i
]);
234 supply_register (regcache
, AARCH64_FPSR_REGNUM
, ®set
->fpsr
);
235 supply_register (regcache
, AARCH64_FPCR_REGNUM
, ®set
->fpcr
);
238 /* Store the pauth registers to regcache. */
241 aarch64_store_pauthregset (struct regcache
*regcache
, const void *buf
)
243 uint64_t *pauth_regset
= (uint64_t *) buf
;
244 int pauth_base
= find_regno (regcache
->tdesc
, "pauth_dmask");
249 supply_register (regcache
, AARCH64_PAUTH_DMASK_REGNUM (pauth_base
),
251 supply_register (regcache
, AARCH64_PAUTH_CMASK_REGNUM (pauth_base
),
256 aarch64_target::low_supports_breakpoints ()
261 /* Implementation of linux target ops method "low_get_pc". */
264 aarch64_target::low_get_pc (regcache
*regcache
)
266 if (register_size (regcache
->tdesc
, 0) == 8)
267 return linux_get_pc_64bit (regcache
);
269 return linux_get_pc_32bit (regcache
);
272 /* Implementation of linux target ops method "low_set_pc". */
275 aarch64_target::low_set_pc (regcache
*regcache
, CORE_ADDR pc
)
277 if (register_size (regcache
->tdesc
, 0) == 8)
278 linux_set_pc_64bit (regcache
, pc
);
280 linux_set_pc_32bit (regcache
, pc
);
283 #define aarch64_breakpoint_len 4
285 /* AArch64 BRK software debug mode instruction.
286 This instruction needs to match gdb/aarch64-tdep.c
287 (aarch64_default_breakpoint). */
288 static const gdb_byte aarch64_breakpoint
[] = {0x00, 0x00, 0x20, 0xd4};
290 /* Implementation of linux target ops method "low_breakpoint_at". */
293 aarch64_target::low_breakpoint_at (CORE_ADDR where
)
295 if (is_64bit_tdesc ())
297 gdb_byte insn
[aarch64_breakpoint_len
];
299 read_memory (where
, (unsigned char *) &insn
, aarch64_breakpoint_len
);
300 if (memcmp (insn
, aarch64_breakpoint
, aarch64_breakpoint_len
) == 0)
306 return arm_breakpoint_at (where
);
310 aarch64_init_debug_reg_state (struct aarch64_debug_reg_state
*state
)
314 for (i
= 0; i
< AARCH64_HBP_MAX_NUM
; ++i
)
316 state
->dr_addr_bp
[i
] = 0;
317 state
->dr_ctrl_bp
[i
] = 0;
318 state
->dr_ref_count_bp
[i
] = 0;
321 for (i
= 0; i
< AARCH64_HWP_MAX_NUM
; ++i
)
323 state
->dr_addr_wp
[i
] = 0;
324 state
->dr_ctrl_wp
[i
] = 0;
325 state
->dr_ref_count_wp
[i
] = 0;
329 /* Return the pointer to the debug register state structure in the
330 current process' arch-specific data area. */
332 struct aarch64_debug_reg_state
*
333 aarch64_get_debug_reg_state (pid_t pid
)
335 struct process_info
*proc
= find_process_pid (pid
);
337 return &proc
->priv
->arch_private
->debug_reg_state
;
340 /* Implementation of target ops method "supports_z_point_type". */
343 aarch64_target::supports_z_point_type (char z_type
)
349 case Z_PACKET_WRITE_WP
:
350 case Z_PACKET_READ_WP
:
351 case Z_PACKET_ACCESS_WP
:
358 /* Implementation of linux target ops method "low_insert_point".
360 It actually only records the info of the to-be-inserted bp/wp;
361 the actual insertion will happen when threads are resumed. */
364 aarch64_target::low_insert_point (raw_bkpt_type type
, CORE_ADDR addr
,
365 int len
, raw_breakpoint
*bp
)
368 enum target_hw_bp_type targ_type
;
369 struct aarch64_debug_reg_state
*state
370 = aarch64_get_debug_reg_state (pid_of (current_thread
));
373 fprintf (stderr
, "insert_point on entry (addr=0x%08lx, len=%d)\n",
374 (unsigned long) addr
, len
);
376 /* Determine the type from the raw breakpoint type. */
377 targ_type
= raw_bkpt_type_to_target_hw_bp_type (type
);
379 if (targ_type
!= hw_execute
)
381 if (aarch64_linux_region_ok_for_watchpoint (addr
, len
))
382 ret
= aarch64_handle_watchpoint (targ_type
, addr
, len
,
383 1 /* is_insert */, state
);
391 /* LEN is 3 means the breakpoint is set on a 32-bit thumb
392 instruction. Set it to 2 to correctly encode length bit
393 mask in hardware/watchpoint control register. */
396 ret
= aarch64_handle_breakpoint (targ_type
, addr
, len
,
397 1 /* is_insert */, state
);
401 aarch64_show_debug_reg_state (state
, "insert_point", addr
, len
,
407 /* Implementation of linux target ops method "low_remove_point".
409 It actually only records the info of the to-be-removed bp/wp,
410 the actual removal will be done when threads are resumed. */
413 aarch64_target::low_remove_point (raw_bkpt_type type
, CORE_ADDR addr
,
414 int len
, raw_breakpoint
*bp
)
417 enum target_hw_bp_type targ_type
;
418 struct aarch64_debug_reg_state
*state
419 = aarch64_get_debug_reg_state (pid_of (current_thread
));
422 fprintf (stderr
, "remove_point on entry (addr=0x%08lx, len=%d)\n",
423 (unsigned long) addr
, len
);
425 /* Determine the type from the raw breakpoint type. */
426 targ_type
= raw_bkpt_type_to_target_hw_bp_type (type
);
428 /* Set up state pointers. */
429 if (targ_type
!= hw_execute
)
431 aarch64_handle_watchpoint (targ_type
, addr
, len
, 0 /* is_insert */,
437 /* LEN is 3 means the breakpoint is set on a 32-bit thumb
438 instruction. Set it to 2 to correctly encode length bit
439 mask in hardware/watchpoint control register. */
442 ret
= aarch64_handle_breakpoint (targ_type
, addr
, len
,
443 0 /* is_insert */, state
);
447 aarch64_show_debug_reg_state (state
, "remove_point", addr
, len
,
453 /* Implementation of linux target ops method "low_stopped_data_address". */
456 aarch64_target::low_stopped_data_address ()
460 struct aarch64_debug_reg_state
*state
;
462 pid
= lwpid_of (current_thread
);
464 /* Get the siginfo. */
465 if (ptrace (PTRACE_GETSIGINFO
, pid
, NULL
, &siginfo
) != 0)
466 return (CORE_ADDR
) 0;
468 /* Need to be a hardware breakpoint/watchpoint trap. */
469 if (siginfo
.si_signo
!= SIGTRAP
470 || (siginfo
.si_code
& 0xffff) != 0x0004 /* TRAP_HWBKPT */)
471 return (CORE_ADDR
) 0;
473 /* Check if the address matches any watched address. */
474 state
= aarch64_get_debug_reg_state (pid_of (current_thread
));
475 for (i
= aarch64_num_wp_regs
- 1; i
>= 0; --i
)
477 const unsigned int offset
478 = aarch64_watchpoint_offset (state
->dr_ctrl_wp
[i
]);
479 const unsigned int len
= aarch64_watchpoint_length (state
->dr_ctrl_wp
[i
]);
480 const CORE_ADDR addr_trap
= (CORE_ADDR
) siginfo
.si_addr
;
481 const CORE_ADDR addr_watch
= state
->dr_addr_wp
[i
] + offset
;
482 const CORE_ADDR addr_watch_aligned
= align_down (state
->dr_addr_wp
[i
], 8);
483 const CORE_ADDR addr_orig
= state
->dr_addr_orig_wp
[i
];
485 if (state
->dr_ref_count_wp
[i
]
486 && DR_CONTROL_ENABLED (state
->dr_ctrl_wp
[i
])
487 && addr_trap
>= addr_watch_aligned
488 && addr_trap
< addr_watch
+ len
)
490 /* ADDR_TRAP reports the first address of the memory range
491 accessed by the CPU, regardless of what was the memory
492 range watched. Thus, a large CPU access that straddles
493 the ADDR_WATCH..ADDR_WATCH+LEN range may result in an
494 ADDR_TRAP that is lower than the
495 ADDR_WATCH..ADDR_WATCH+LEN range. E.g.:
497 addr: | 4 | 5 | 6 | 7 | 8 |
498 |---- range watched ----|
499 |----------- range accessed ------------|
501 In this case, ADDR_TRAP will be 4.
503 To match a watchpoint known to GDB core, we must never
504 report *ADDR_P outside of any ADDR_WATCH..ADDR_WATCH+LEN
505 range. ADDR_WATCH <= ADDR_TRAP < ADDR_ORIG is a false
506 positive on kernels older than 4.10. See PR
512 return (CORE_ADDR
) 0;
515 /* Implementation of linux target ops method "low_stopped_by_watchpoint". */
518 aarch64_target::low_stopped_by_watchpoint ()
520 return (low_stopped_data_address () != 0);
523 /* Fetch the thread-local storage pointer for libthread_db. */
526 ps_get_thread_area (struct ps_prochandle
*ph
,
527 lwpid_t lwpid
, int idx
, void **base
)
529 return aarch64_ps_get_thread_area (ph
, lwpid
, idx
, base
,
533 /* Implementation of linux target ops method "low_siginfo_fixup". */
536 aarch64_target::low_siginfo_fixup (siginfo_t
*native
, gdb_byte
*inf
,
539 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
540 if (!is_64bit_tdesc ())
543 aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo
*) inf
,
546 aarch64_siginfo_from_compat_siginfo (native
,
547 (struct compat_siginfo
*) inf
);
555 /* Implementation of linux target ops method "low_new_process". */
558 aarch64_target::low_new_process ()
560 struct arch_process_info
*info
= XCNEW (struct arch_process_info
);
562 aarch64_init_debug_reg_state (&info
->debug_reg_state
);
567 /* Implementation of linux target ops method "low_delete_process". */
570 aarch64_target::low_delete_process (arch_process_info
*info
)
576 aarch64_target::low_new_thread (lwp_info
*lwp
)
578 aarch64_linux_new_thread (lwp
);
582 aarch64_target::low_delete_thread (arch_lwp_info
*arch_lwp
)
584 aarch64_linux_delete_thread (arch_lwp
);
587 /* Implementation of linux target ops method "low_new_fork". */
590 aarch64_target::low_new_fork (process_info
*parent
,
593 /* These are allocated by linux_add_process. */
594 gdb_assert (parent
->priv
!= NULL
595 && parent
->priv
->arch_private
!= NULL
);
596 gdb_assert (child
->priv
!= NULL
597 && child
->priv
->arch_private
!= NULL
);
599 /* Linux kernel before 2.6.33 commit
600 72f674d203cd230426437cdcf7dd6f681dad8b0d
601 will inherit hardware debug registers from parent
602 on fork/vfork/clone. Newer Linux kernels create such tasks with
603 zeroed debug registers.
605 GDB core assumes the child inherits the watchpoints/hw
606 breakpoints of the parent, and will remove them all from the
607 forked off process. Copy the debug registers mirrors into the
608 new process so that all breakpoints and watchpoints can be
609 removed together. The debug registers mirror will become zeroed
610 in the end before detaching the forked off process, thus making
611 this compatible with older Linux kernels too. */
613 *child
->priv
->arch_private
= *parent
->priv
->arch_private
;
616 /* Matches HWCAP_PACA in kernel header arch/arm64/include/uapi/asm/hwcap.h. */
617 #define AARCH64_HWCAP_PACA (1 << 30)
619 /* Implementation of linux target ops method "low_arch_setup". */
622 aarch64_target::low_arch_setup ()
624 unsigned int machine
;
628 tid
= lwpid_of (current_thread
);
630 is_elf64
= linux_pid_exe_is_elf_64_file (tid
, &machine
);
634 uint64_t vq
= aarch64_sve_get_vq (tid
);
635 unsigned long hwcap
= linux_get_hwcap (8);
636 bool pauth_p
= hwcap
& AARCH64_HWCAP_PACA
;
638 current_process ()->tdesc
= aarch64_linux_read_description (vq
, pauth_p
);
641 current_process ()->tdesc
= aarch32_linux_read_description ();
643 aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread
));
646 /* Wrapper for aarch64_sve_regs_copy_to_reg_buf. */
649 aarch64_sve_regs_copy_to_regcache (struct regcache
*regcache
, const void *buf
)
651 return aarch64_sve_regs_copy_to_reg_buf (regcache
, buf
);
654 /* Wrapper for aarch64_sve_regs_copy_from_reg_buf. */
657 aarch64_sve_regs_copy_from_regcache (struct regcache
*regcache
, void *buf
)
659 return aarch64_sve_regs_copy_from_reg_buf (regcache
, buf
);
662 static struct regset_info aarch64_regsets
[] =
664 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_PRSTATUS
,
665 sizeof (struct user_pt_regs
), GENERAL_REGS
,
666 aarch64_fill_gregset
, aarch64_store_gregset
},
667 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_FPREGSET
,
668 sizeof (struct user_fpsimd_state
), FP_REGS
,
669 aarch64_fill_fpregset
, aarch64_store_fpregset
671 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_ARM_PAC_MASK
,
672 AARCH64_PAUTH_REGS_SIZE
, OPTIONAL_REGS
,
673 NULL
, aarch64_store_pauthregset
},
677 static struct regsets_info aarch64_regsets_info
=
679 aarch64_regsets
, /* regsets */
681 NULL
, /* disabled_regsets */
684 static struct regs_info regs_info_aarch64
=
686 NULL
, /* regset_bitmap */
688 &aarch64_regsets_info
,
691 static struct regset_info aarch64_sve_regsets
[] =
693 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_PRSTATUS
,
694 sizeof (struct user_pt_regs
), GENERAL_REGS
,
695 aarch64_fill_gregset
, aarch64_store_gregset
},
696 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_ARM_SVE
,
697 SVE_PT_SIZE (AARCH64_MAX_SVE_VQ
, SVE_PT_REGS_SVE
), EXTENDED_REGS
,
698 aarch64_sve_regs_copy_from_regcache
, aarch64_sve_regs_copy_to_regcache
700 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_ARM_PAC_MASK
,
701 AARCH64_PAUTH_REGS_SIZE
, OPTIONAL_REGS
,
702 NULL
, aarch64_store_pauthregset
},
706 static struct regsets_info aarch64_sve_regsets_info
=
708 aarch64_sve_regsets
, /* regsets. */
709 0, /* num_regsets. */
710 NULL
, /* disabled_regsets. */
713 static struct regs_info regs_info_aarch64_sve
=
715 NULL
, /* regset_bitmap. */
717 &aarch64_sve_regsets_info
,
720 /* Implementation of linux target ops method "get_regs_info". */
723 aarch64_target::get_regs_info ()
725 if (!is_64bit_tdesc ())
726 return ®s_info_aarch32
;
729 return ®s_info_aarch64_sve
;
731 return ®s_info_aarch64
;
734 /* Implementation of target ops method "supports_tracepoints". */
737 aarch64_target::supports_tracepoints ()
739 if (current_thread
== NULL
)
743 /* We don't support tracepoints on aarch32 now. */
744 return is_64bit_tdesc ();
748 /* Implementation of linux target ops method "low_get_thread_area". */
751 aarch64_target::low_get_thread_area (int lwpid
, CORE_ADDR
*addrp
)
756 iovec
.iov_base
= ®
;
757 iovec
.iov_len
= sizeof (reg
);
759 if (ptrace (PTRACE_GETREGSET
, lwpid
, NT_ARM_TLS
, &iovec
) != 0)
767 /* Implementation of linux_target_ops method "get_syscall_trapinfo". */
770 aarch64_get_syscall_trapinfo (struct regcache
*regcache
, int *sysno
)
772 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
778 collect_register_by_name (regcache
, "x8", &l_sysno
);
779 *sysno
= (int) l_sysno
;
782 collect_register_by_name (regcache
, "r7", sysno
);
785 /* List of condition codes that we need. */
787 enum aarch64_condition_codes
798 enum aarch64_operand_type
804 /* Representation of an operand. At this time, it only supports register
805 and immediate types. */
807 struct aarch64_operand
809 /* Type of the operand. */
810 enum aarch64_operand_type type
;
812 /* Value of the operand according to the type. */
816 struct aarch64_register reg
;
820 /* List of registers that we are currently using, we can add more here as
821 we need to use them. */
823 /* General purpose scratch registers (64 bit). */
824 static const struct aarch64_register x0
= { 0, 1 };
825 static const struct aarch64_register x1
= { 1, 1 };
826 static const struct aarch64_register x2
= { 2, 1 };
827 static const struct aarch64_register x3
= { 3, 1 };
828 static const struct aarch64_register x4
= { 4, 1 };
830 /* General purpose scratch registers (32 bit). */
831 static const struct aarch64_register w0
= { 0, 0 };
832 static const struct aarch64_register w2
= { 2, 0 };
834 /* Intra-procedure scratch registers. */
835 static const struct aarch64_register ip0
= { 16, 1 };
837 /* Special purpose registers. */
838 static const struct aarch64_register fp
= { 29, 1 };
839 static const struct aarch64_register lr
= { 30, 1 };
840 static const struct aarch64_register sp
= { 31, 1 };
841 static const struct aarch64_register xzr
= { 31, 1 };
843 /* Dynamically allocate a new register. If we know the register
844 statically, we should make it a global as above instead of using this
847 static struct aarch64_register
848 aarch64_register (unsigned num
, int is64
)
850 return (struct aarch64_register
) { num
, is64
};
853 /* Helper function to create a register operand, for instructions with
854 different types of operands.
857 p += emit_mov (p, x0, register_operand (x1)); */
859 static struct aarch64_operand
860 register_operand (struct aarch64_register reg
)
862 struct aarch64_operand operand
;
864 operand
.type
= OPERAND_REGISTER
;
870 /* Helper function to create an immediate operand, for instructions with
871 different types of operands.
874 p += emit_mov (p, x0, immediate_operand (12)); */
876 static struct aarch64_operand
877 immediate_operand (uint32_t imm
)
879 struct aarch64_operand operand
;
881 operand
.type
= OPERAND_IMMEDIATE
;
887 /* Helper function to create an offset memory operand.
890 p += emit_ldr (p, x0, sp, offset_memory_operand (16)); */
892 static struct aarch64_memory_operand
893 offset_memory_operand (int32_t offset
)
895 return (struct aarch64_memory_operand
) { MEMORY_OPERAND_OFFSET
, offset
};
898 /* Helper function to create a pre-index memory operand.
901 p += emit_ldr (p, x0, sp, preindex_memory_operand (16)); */
903 static struct aarch64_memory_operand
904 preindex_memory_operand (int32_t index
)
906 return (struct aarch64_memory_operand
) { MEMORY_OPERAND_PREINDEX
, index
};
909 /* Helper function to create a post-index memory operand.
912 p += emit_ldr (p, x0, sp, postindex_memory_operand (16)); */
914 static struct aarch64_memory_operand
915 postindex_memory_operand (int32_t index
)
917 return (struct aarch64_memory_operand
) { MEMORY_OPERAND_POSTINDEX
, index
};
/* System control registers.  These special registers can be written and
   read with the MRS and MSR instructions.

   - NZCV: Condition flags.  GDB refers to this register under the CPSR
     name.
   - FPSR: Floating-point status register.
   - FPCR: Floating-point control registers.
   - TPIDR_EL0: Software thread ID register.  */

enum aarch64_system_control_registers
{
  /*          op0           op1           crn          crm          op2  */
  NZCV =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
  FPSR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
  FPCR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
  TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
};
938 /* Write a BLR instruction into *BUF.
942 RN is the register to branch to. */
945 emit_blr (uint32_t *buf
, struct aarch64_register rn
)
947 return aarch64_emit_insn (buf
, BLR
| ENCODE (rn
.num
, 5, 5));
950 /* Write a RET instruction into *BUF.
954 RN is the register to branch to. */
957 emit_ret (uint32_t *buf
, struct aarch64_register rn
)
959 return aarch64_emit_insn (buf
, RET
| ENCODE (rn
.num
, 5, 5));
963 emit_load_store_pair (uint32_t *buf
, enum aarch64_opcodes opcode
,
964 struct aarch64_register rt
,
965 struct aarch64_register rt2
,
966 struct aarch64_register rn
,
967 struct aarch64_memory_operand operand
)
974 opc
= ENCODE (2, 2, 30);
976 opc
= ENCODE (0, 2, 30);
978 switch (operand
.type
)
980 case MEMORY_OPERAND_OFFSET
:
982 pre_index
= ENCODE (1, 1, 24);
983 write_back
= ENCODE (0, 1, 23);
986 case MEMORY_OPERAND_POSTINDEX
:
988 pre_index
= ENCODE (0, 1, 24);
989 write_back
= ENCODE (1, 1, 23);
992 case MEMORY_OPERAND_PREINDEX
:
994 pre_index
= ENCODE (1, 1, 24);
995 write_back
= ENCODE (1, 1, 23);
1002 return aarch64_emit_insn (buf
, opcode
| opc
| pre_index
| write_back
1003 | ENCODE (operand
.index
>> 3, 7, 15)
1004 | ENCODE (rt2
.num
, 5, 10)
1005 | ENCODE (rn
.num
, 5, 5) | ENCODE (rt
.num
, 5, 0));
1008 /* Write a STP instruction into *BUF.
1010 STP rt, rt2, [rn, #offset]
1011 STP rt, rt2, [rn, #index]!
1012 STP rt, rt2, [rn], #index
1014 RT and RT2 are the registers to store.
1015 RN is the base address register.
1016 OFFSET is the immediate to add to the base address. It is limited to a
1017 -512 .. 504 range (7 bits << 3). */
1020 emit_stp (uint32_t *buf
, struct aarch64_register rt
,
1021 struct aarch64_register rt2
, struct aarch64_register rn
,
1022 struct aarch64_memory_operand operand
)
1024 return emit_load_store_pair (buf
, STP
, rt
, rt2
, rn
, operand
);
1027 /* Write a LDP instruction into *BUF.
1029 LDP rt, rt2, [rn, #offset]
1030 LDP rt, rt2, [rn, #index]!
1031 LDP rt, rt2, [rn], #index
1033 RT and RT2 are the registers to store.
1034 RN is the base address register.
1035 OFFSET is the immediate to add to the base address. It is limited to a
1036 -512 .. 504 range (7 bits << 3). */
1039 emit_ldp (uint32_t *buf
, struct aarch64_register rt
,
1040 struct aarch64_register rt2
, struct aarch64_register rn
,
1041 struct aarch64_memory_operand operand
)
1043 return emit_load_store_pair (buf
, LDP
, rt
, rt2
, rn
, operand
);
1046 /* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.
1048 LDP qt, qt2, [rn, #offset]
1050 RT and RT2 are the Q registers to store.
1051 RN is the base address register.
1052 OFFSET is the immediate to add to the base address. It is limited to
1053 -1024 .. 1008 range (7 bits << 4). */
1056 emit_ldp_q_offset (uint32_t *buf
, unsigned rt
, unsigned rt2
,
1057 struct aarch64_register rn
, int32_t offset
)
1059 uint32_t opc
= ENCODE (2, 2, 30);
1060 uint32_t pre_index
= ENCODE (1, 1, 24);
1062 return aarch64_emit_insn (buf
, LDP_SIMD_VFP
| opc
| pre_index
1063 | ENCODE (offset
>> 4, 7, 15)
1064 | ENCODE (rt2
, 5, 10)
1065 | ENCODE (rn
.num
, 5, 5) | ENCODE (rt
, 5, 0));
1068 /* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.
1070 STP qt, qt2, [rn, #offset]
1072 RT and RT2 are the Q registers to store.
1073 RN is the base address register.
1074 OFFSET is the immediate to add to the base address. It is limited to
1075 -1024 .. 1008 range (7 bits << 4). */
1078 emit_stp_q_offset (uint32_t *buf
, unsigned rt
, unsigned rt2
,
1079 struct aarch64_register rn
, int32_t offset
)
1081 uint32_t opc
= ENCODE (2, 2, 30);
1082 uint32_t pre_index
= ENCODE (1, 1, 24);
1084 return aarch64_emit_insn (buf
, STP_SIMD_VFP
| opc
| pre_index
1085 | ENCODE (offset
>> 4, 7, 15)
1086 | ENCODE (rt2
, 5, 10)
1087 | ENCODE (rn
.num
, 5, 5) | ENCODE (rt
, 5, 0));
1090 /* Write a LDRH instruction into *BUF.
1092 LDRH wt, [xn, #offset]
1093 LDRH wt, [xn, #index]!
1094 LDRH wt, [xn], #index
1096 RT is the register to store.
1097 RN is the base address register.
1098 OFFSET is the immediate to add to the base address. It is limited to
1099 0 .. 32760 range (12 bits << 3). */
1102 emit_ldrh (uint32_t *buf
, struct aarch64_register rt
,
1103 struct aarch64_register rn
,
1104 struct aarch64_memory_operand operand
)
1106 return aarch64_emit_load_store (buf
, 1, LDR
, rt
, rn
, operand
);
1109 /* Write a LDRB instruction into *BUF.
1111 LDRB wt, [xn, #offset]
1112 LDRB wt, [xn, #index]!
1113 LDRB wt, [xn], #index
1115 RT is the register to store.
1116 RN is the base address register.
1117 OFFSET is the immediate to add to the base address. It is limited to
1118 0 .. 32760 range (12 bits << 3). */
1121 emit_ldrb (uint32_t *buf
, struct aarch64_register rt
,
1122 struct aarch64_register rn
,
1123 struct aarch64_memory_operand operand
)
1125 return aarch64_emit_load_store (buf
, 0, LDR
, rt
, rn
, operand
);
1130 /* Write a STR instruction into *BUF.
1132 STR rt, [rn, #offset]
1133 STR rt, [rn, #index]!
1134 STR rt, [rn], #index
1136 RT is the register to store.
1137 RN is the base address register.
1138 OFFSET is the immediate to add to the base address. It is limited to
1139 0 .. 32760 range (12 bits << 3). */
1142 emit_str (uint32_t *buf
, struct aarch64_register rt
,
1143 struct aarch64_register rn
,
1144 struct aarch64_memory_operand operand
)
1146 return aarch64_emit_load_store (buf
, rt
.is64
? 3 : 2, STR
, rt
, rn
, operand
);
1149 /* Helper function emitting an exclusive load or store instruction. */
1152 emit_load_store_exclusive (uint32_t *buf
, uint32_t size
,
1153 enum aarch64_opcodes opcode
,
1154 struct aarch64_register rs
,
1155 struct aarch64_register rt
,
1156 struct aarch64_register rt2
,
1157 struct aarch64_register rn
)
1159 return aarch64_emit_insn (buf
, opcode
| ENCODE (size
, 2, 30)
1160 | ENCODE (rs
.num
, 5, 16) | ENCODE (rt2
.num
, 5, 10)
1161 | ENCODE (rn
.num
, 5, 5) | ENCODE (rt
.num
, 5, 0));
1164 /* Write a LAXR instruction into *BUF.
1168 RT is the destination register.
1169 RN is the base address register. */
1172 emit_ldaxr (uint32_t *buf
, struct aarch64_register rt
,
1173 struct aarch64_register rn
)
1175 return emit_load_store_exclusive (buf
, rt
.is64
? 3 : 2, LDAXR
, xzr
, rt
,
1179 /* Write a STXR instruction into *BUF.
1183 RS is the result register, it indicates if the store succeeded or not.
1184 RT is the destination register.
1185 RN is the base address register. */
1188 emit_stxr (uint32_t *buf
, struct aarch64_register rs
,
1189 struct aarch64_register rt
, struct aarch64_register rn
)
1191 return emit_load_store_exclusive (buf
, rt
.is64
? 3 : 2, STXR
, rs
, rt
,
1195 /* Write a STLR instruction into *BUF.
1199 RT is the register to store.
1200 RN is the base address register. */
1203 emit_stlr (uint32_t *buf
, struct aarch64_register rt
,
1204 struct aarch64_register rn
)
1206 return emit_load_store_exclusive (buf
, rt
.is64
? 3 : 2, STLR
, xzr
, rt
,
1210 /* Helper function for data processing instructions with register sources. */
1213 emit_data_processing_reg (uint32_t *buf
, uint32_t opcode
,
1214 struct aarch64_register rd
,
1215 struct aarch64_register rn
,
1216 struct aarch64_register rm
)
1218 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1220 return aarch64_emit_insn (buf
, opcode
| size
| ENCODE (rm
.num
, 5, 16)
1221 | ENCODE (rn
.num
, 5, 5) | ENCODE (rd
.num
, 5, 0));
1224 /* Helper function for data processing instructions taking either a register
1228 emit_data_processing (uint32_t *buf
, enum aarch64_opcodes opcode
,
1229 struct aarch64_register rd
,
1230 struct aarch64_register rn
,
1231 struct aarch64_operand operand
)
1233 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1234 /* The opcode is different for register and immediate source operands. */
1235 uint32_t operand_opcode
;
1237 if (operand
.type
== OPERAND_IMMEDIATE
)
1239 /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
1240 operand_opcode
= ENCODE (8, 4, 25);
1242 return aarch64_emit_insn (buf
, opcode
| operand_opcode
| size
1243 | ENCODE (operand
.imm
, 12, 10)
1244 | ENCODE (rn
.num
, 5, 5)
1245 | ENCODE (rd
.num
, 5, 0));
1249 /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
1250 operand_opcode
= ENCODE (5, 4, 25);
1252 return emit_data_processing_reg (buf
, opcode
| operand_opcode
, rd
,
1257 /* Write an ADD instruction into *BUF.
1262 This function handles both an immediate and register add.
1264 RD is the destination register.
1265 RN is the input register.
1266 OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
1267 OPERAND_REGISTER. */
1270 emit_add (uint32_t *buf
, struct aarch64_register rd
,
1271 struct aarch64_register rn
, struct aarch64_operand operand
)
1273 return emit_data_processing (buf
, ADD
, rd
, rn
, operand
);
1276 /* Write a SUB instruction into *BUF.
1281 This function handles both an immediate and register sub.
1283 RD is the destination register.
1284 RN is the input register.
1285 IMM is the immediate to substract to RN. */
1288 emit_sub (uint32_t *buf
, struct aarch64_register rd
,
1289 struct aarch64_register rn
, struct aarch64_operand operand
)
1291 return emit_data_processing (buf
, SUB
, rd
, rn
, operand
);
1294 /* Write a MOV instruction into *BUF.
1299 This function handles both a wide immediate move and a register move,
1300 with the condition that the source register is not xzr. xzr and the
1301 stack pointer share the same encoding and this function only supports
1304 RD is the destination register.
1305 OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
1306 OPERAND_REGISTER. */
1309 emit_mov (uint32_t *buf
, struct aarch64_register rd
,
1310 struct aarch64_operand operand
)
1312 if (operand
.type
== OPERAND_IMMEDIATE
)
1314 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1315 /* Do not shift the immediate. */
1316 uint32_t shift
= ENCODE (0, 2, 21);
1318 return aarch64_emit_insn (buf
, MOV
| size
| shift
1319 | ENCODE (operand
.imm
, 16, 5)
1320 | ENCODE (rd
.num
, 5, 0));
1323 return emit_add (buf
, rd
, operand
.reg
, immediate_operand (0));
1326 /* Write a MOVK instruction into *BUF.
1328 MOVK rd, #imm, lsl #shift
1330 RD is the destination register.
1331 IMM is the immediate.
1332 SHIFT is the logical shift left to apply to IMM. */
1335 emit_movk (uint32_t *buf
, struct aarch64_register rd
, uint32_t imm
,
1338 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1340 return aarch64_emit_insn (buf
, MOVK
| size
| ENCODE (shift
, 2, 21) |
1341 ENCODE (imm
, 16, 5) | ENCODE (rd
.num
, 5, 0));
1344 /* Write instructions into *BUF in order to move ADDR into a register.
1345 ADDR can be a 64-bit value.
1347 This function will emit a series of MOV and MOVK instructions, such as:
1350 MOVK xd, #(addr >> 16), lsl #16
1351 MOVK xd, #(addr >> 32), lsl #32
1352 MOVK xd, #(addr >> 48), lsl #48 */
1355 emit_mov_addr (uint32_t *buf
, struct aarch64_register rd
, CORE_ADDR addr
)
1359 /* The MOV (wide immediate) instruction clears to top bits of the
1361 p
+= emit_mov (p
, rd
, immediate_operand (addr
& 0xffff));
1363 if ((addr
>> 16) != 0)
1364 p
+= emit_movk (p
, rd
, (addr
>> 16) & 0xffff, 1);
1368 if ((addr
>> 32) != 0)
1369 p
+= emit_movk (p
, rd
, (addr
>> 32) & 0xffff, 2);
1373 if ((addr
>> 48) != 0)
1374 p
+= emit_movk (p
, rd
, (addr
>> 48) & 0xffff, 3);
1379 /* Write a SUBS instruction into *BUF.
1383 This instruction update the condition flags.
1385 RD is the destination register.
1386 RN and RM are the source registers. */
1389 emit_subs (uint32_t *buf
, struct aarch64_register rd
,
1390 struct aarch64_register rn
, struct aarch64_operand operand
)
1392 return emit_data_processing (buf
, SUBS
, rd
, rn
, operand
);
1395 /* Write a CMP instruction into *BUF.
1399 This instruction is an alias of SUBS xzr, rn, rm.
1401 RN and RM are the registers to compare. */
1404 emit_cmp (uint32_t *buf
, struct aarch64_register rn
,
1405 struct aarch64_operand operand
)
1407 return emit_subs (buf
, xzr
, rn
, operand
);
1410 /* Write a AND instruction into *BUF.
1414 RD is the destination register.
1415 RN and RM are the source registers. */
1418 emit_and (uint32_t *buf
, struct aarch64_register rd
,
1419 struct aarch64_register rn
, struct aarch64_register rm
)
1421 return emit_data_processing_reg (buf
, AND
, rd
, rn
, rm
);
1424 /* Write a ORR instruction into *BUF.
1428 RD is the destination register.
1429 RN and RM are the source registers. */
1432 emit_orr (uint32_t *buf
, struct aarch64_register rd
,
1433 struct aarch64_register rn
, struct aarch64_register rm
)
1435 return emit_data_processing_reg (buf
, ORR
, rd
, rn
, rm
);
1438 /* Write a ORN instruction into *BUF.
1442 RD is the destination register.
1443 RN and RM are the source registers. */
1446 emit_orn (uint32_t *buf
, struct aarch64_register rd
,
1447 struct aarch64_register rn
, struct aarch64_register rm
)
1449 return emit_data_processing_reg (buf
, ORN
, rd
, rn
, rm
);
1452 /* Write a EOR instruction into *BUF.
1456 RD is the destination register.
1457 RN and RM are the source registers. */
1460 emit_eor (uint32_t *buf
, struct aarch64_register rd
,
1461 struct aarch64_register rn
, struct aarch64_register rm
)
1463 return emit_data_processing_reg (buf
, EOR
, rd
, rn
, rm
);
1466 /* Write a MVN instruction into *BUF.
1470 This is an alias for ORN rd, xzr, rm.
1472 RD is the destination register.
1473 RM is the source register. */
1476 emit_mvn (uint32_t *buf
, struct aarch64_register rd
,
1477 struct aarch64_register rm
)
1479 return emit_orn (buf
, rd
, xzr
, rm
);
1482 /* Write a LSLV instruction into *BUF.
1486 RD is the destination register.
1487 RN and RM are the source registers. */
1490 emit_lslv (uint32_t *buf
, struct aarch64_register rd
,
1491 struct aarch64_register rn
, struct aarch64_register rm
)
1493 return emit_data_processing_reg (buf
, LSLV
, rd
, rn
, rm
);
1496 /* Write a LSRV instruction into *BUF.
1500 RD is the destination register.
1501 RN and RM are the source registers. */
1504 emit_lsrv (uint32_t *buf
, struct aarch64_register rd
,
1505 struct aarch64_register rn
, struct aarch64_register rm
)
1507 return emit_data_processing_reg (buf
, LSRV
, rd
, rn
, rm
);
1510 /* Write a ASRV instruction into *BUF.
1514 RD is the destination register.
1515 RN and RM are the source registers. */
1518 emit_asrv (uint32_t *buf
, struct aarch64_register rd
,
1519 struct aarch64_register rn
, struct aarch64_register rm
)
1521 return emit_data_processing_reg (buf
, ASRV
, rd
, rn
, rm
);
1524 /* Write a MUL instruction into *BUF.
1528 RD is the destination register.
1529 RN and RM are the source registers. */
1532 emit_mul (uint32_t *buf
, struct aarch64_register rd
,
1533 struct aarch64_register rn
, struct aarch64_register rm
)
1535 return emit_data_processing_reg (buf
, MUL
, rd
, rn
, rm
);
1538 /* Write a MRS instruction into *BUF. The register size is 64-bit.
1542 RT is the destination register.
1543 SYSTEM_REG is special purpose register to read. */
1546 emit_mrs (uint32_t *buf
, struct aarch64_register rt
,
1547 enum aarch64_system_control_registers system_reg
)
1549 return aarch64_emit_insn (buf
, MRS
| ENCODE (system_reg
, 15, 5)
1550 | ENCODE (rt
.num
, 5, 0));
1553 /* Write a MSR instruction into *BUF. The register size is 64-bit.
1557 SYSTEM_REG is special purpose register to write.
1558 RT is the input register. */
1561 emit_msr (uint32_t *buf
, enum aarch64_system_control_registers system_reg
,
1562 struct aarch64_register rt
)
1564 return aarch64_emit_insn (buf
, MSR
| ENCODE (system_reg
, 15, 5)
1565 | ENCODE (rt
.num
, 5, 0));
1568 /* Write a SEVL instruction into *BUF.
1570 This is a hint instruction telling the hardware to trigger an event. */
1573 emit_sevl (uint32_t *buf
)
1575 return aarch64_emit_insn (buf
, SEVL
);
1578 /* Write a WFE instruction into *BUF.
1580 This is a hint instruction telling the hardware to wait for an event. */
1583 emit_wfe (uint32_t *buf
)
1585 return aarch64_emit_insn (buf
, WFE
);
1588 /* Write a SBFM instruction into *BUF.
1590 SBFM rd, rn, #immr, #imms
1592 This instruction moves the bits from #immr to #imms into the
1593 destination, sign extending the result.
1595 RD is the destination register.
1596 RN is the source register.
1597 IMMR is the bit number to start at (least significant bit).
1598 IMMS is the bit number to stop at (most significant bit). */
1601 emit_sbfm (uint32_t *buf
, struct aarch64_register rd
,
1602 struct aarch64_register rn
, uint32_t immr
, uint32_t imms
)
1604 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1605 uint32_t n
= ENCODE (rd
.is64
, 1, 22);
1607 return aarch64_emit_insn (buf
, SBFM
| size
| n
| ENCODE (immr
, 6, 16)
1608 | ENCODE (imms
, 6, 10) | ENCODE (rn
.num
, 5, 5)
1609 | ENCODE (rd
.num
, 5, 0));
1612 /* Write a SBFX instruction into *BUF.
1614 SBFX rd, rn, #lsb, #width
1616 This instruction moves #width bits from #lsb into the destination, sign
1617 extending the result. This is an alias for:
1619 SBFM rd, rn, #lsb, #(lsb + width - 1)
1621 RD is the destination register.
1622 RN is the source register.
1623 LSB is the bit number to start at (least significant bit).
1624 WIDTH is the number of bits to move. */
1627 emit_sbfx (uint32_t *buf
, struct aarch64_register rd
,
1628 struct aarch64_register rn
, uint32_t lsb
, uint32_t width
)
1630 return emit_sbfm (buf
, rd
, rn
, lsb
, lsb
+ width
- 1);
1633 /* Write a UBFM instruction into *BUF.
1635 UBFM rd, rn, #immr, #imms
1637 This instruction moves the bits from #immr to #imms into the
1638 destination, extending the result with zeros.
1640 RD is the destination register.
1641 RN is the source register.
1642 IMMR is the bit number to start at (least significant bit).
1643 IMMS is the bit number to stop at (most significant bit). */
1646 emit_ubfm (uint32_t *buf
, struct aarch64_register rd
,
1647 struct aarch64_register rn
, uint32_t immr
, uint32_t imms
)
1649 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1650 uint32_t n
= ENCODE (rd
.is64
, 1, 22);
1652 return aarch64_emit_insn (buf
, UBFM
| size
| n
| ENCODE (immr
, 6, 16)
1653 | ENCODE (imms
, 6, 10) | ENCODE (rn
.num
, 5, 5)
1654 | ENCODE (rd
.num
, 5, 0));
1657 /* Write a UBFX instruction into *BUF.
1659 UBFX rd, rn, #lsb, #width
1661 This instruction moves #width bits from #lsb into the destination,
1662 extending the result with zeros. This is an alias for:
1664 UBFM rd, rn, #lsb, #(lsb + width - 1)
1666 RD is the destination register.
1667 RN is the source register.
1668 LSB is the bit number to start at (least significant bit).
1669 WIDTH is the number of bits to move. */
1672 emit_ubfx (uint32_t *buf
, struct aarch64_register rd
,
1673 struct aarch64_register rn
, uint32_t lsb
, uint32_t width
)
1675 return emit_ubfm (buf
, rd
, rn
, lsb
, lsb
+ width
- 1);
1678 /* Write a CSINC instruction into *BUF.
1680 CSINC rd, rn, rm, cond
1682 This instruction conditionally increments rn or rm and places the result
1683 in rd. rn is chosen is the condition is true.
1685 RD is the destination register.
1686 RN and RM are the source registers.
1687 COND is the encoded condition. */
1690 emit_csinc (uint32_t *buf
, struct aarch64_register rd
,
1691 struct aarch64_register rn
, struct aarch64_register rm
,
1694 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1696 return aarch64_emit_insn (buf
, CSINC
| size
| ENCODE (rm
.num
, 5, 16)
1697 | ENCODE (cond
, 4, 12) | ENCODE (rn
.num
, 5, 5)
1698 | ENCODE (rd
.num
, 5, 0));
1701 /* Write a CSET instruction into *BUF.
1705 This instruction conditionally write 1 or 0 in the destination register.
1706 1 is written if the condition is true. This is an alias for:
1708 CSINC rd, xzr, xzr, !cond
1710 Note that the condition needs to be inverted.
1712 RD is the destination register.
1713 RN and RM are the source registers.
1714 COND is the encoded condition. */
1717 emit_cset (uint32_t *buf
, struct aarch64_register rd
, unsigned cond
)
1719 /* The least significant bit of the condition needs toggling in order to
1721 return emit_csinc (buf
, rd
, xzr
, xzr
, cond
^ 0x1);
1724 /* Write LEN instructions from BUF into the inferior memory at *TO.
1726 Note instructions are always little endian on AArch64, unlike data. */
1729 append_insns (CORE_ADDR
*to
, size_t len
, const uint32_t *buf
)
1731 size_t byte_len
= len
* sizeof (uint32_t);
1732 #if (__BYTE_ORDER == __BIG_ENDIAN)
1733 uint32_t *le_buf
= (uint32_t *) xmalloc (byte_len
);
1736 for (i
= 0; i
< len
; i
++)
1737 le_buf
[i
] = htole32 (buf
[i
]);
1739 target_write_memory (*to
, (const unsigned char *) le_buf
, byte_len
);
1743 target_write_memory (*to
, (const unsigned char *) buf
, byte_len
);
1749 /* Sub-class of struct aarch64_insn_data, store information of
1750 instruction relocation for fast tracepoint. Visitor can
1751 relocate an instruction from BASE.INSN_ADDR to NEW_ADDR and save
1752 the relocated instructions in buffer pointed by INSN_PTR. */
1754 struct aarch64_insn_relocation_data
1756 struct aarch64_insn_data base
;
1758 /* The new address the instruction is relocated to. */
1760 /* Pointer to the buffer of relocated instruction(s). */
1764 /* Implementation of aarch64_insn_visitor method "b". */
1767 aarch64_ftrace_insn_reloc_b (const int is_bl
, const int32_t offset
,
1768 struct aarch64_insn_data
*data
)
1770 struct aarch64_insn_relocation_data
*insn_reloc
1771 = (struct aarch64_insn_relocation_data
*) data
;
1773 = insn_reloc
->base
.insn_addr
- insn_reloc
->new_addr
+ offset
;
1775 if (can_encode_int32 (new_offset
, 28))
1776 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, is_bl
, new_offset
);
1779 /* Implementation of aarch64_insn_visitor method "b_cond". */
1782 aarch64_ftrace_insn_reloc_b_cond (const unsigned cond
, const int32_t offset
,
1783 struct aarch64_insn_data
*data
)
1785 struct aarch64_insn_relocation_data
*insn_reloc
1786 = (struct aarch64_insn_relocation_data
*) data
;
1788 = insn_reloc
->base
.insn_addr
- insn_reloc
->new_addr
+ offset
;
1790 if (can_encode_int32 (new_offset
, 21))
1792 insn_reloc
->insn_ptr
+= emit_bcond (insn_reloc
->insn_ptr
, cond
,
1795 else if (can_encode_int32 (new_offset
, 28))
1797 /* The offset is out of range for a conditional branch
1798 instruction but not for a unconditional branch. We can use
1799 the following instructions instead:
1801 B.COND TAKEN ; If cond is true, then jump to TAKEN.
1802 B NOT_TAKEN ; Else jump over TAKEN and continue.
1809 insn_reloc
->insn_ptr
+= emit_bcond (insn_reloc
->insn_ptr
, cond
, 8);
1810 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, 0, 8);
1811 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, 0, new_offset
- 8);
1815 /* Implementation of aarch64_insn_visitor method "cb". */
1818 aarch64_ftrace_insn_reloc_cb (const int32_t offset
, const int is_cbnz
,
1819 const unsigned rn
, int is64
,
1820 struct aarch64_insn_data
*data
)
1822 struct aarch64_insn_relocation_data
*insn_reloc
1823 = (struct aarch64_insn_relocation_data
*) data
;
1825 = insn_reloc
->base
.insn_addr
- insn_reloc
->new_addr
+ offset
;
1827 if (can_encode_int32 (new_offset
, 21))
1829 insn_reloc
->insn_ptr
+= emit_cb (insn_reloc
->insn_ptr
, is_cbnz
,
1830 aarch64_register (rn
, is64
), new_offset
);
1832 else if (can_encode_int32 (new_offset
, 28))
1834 /* The offset is out of range for a compare and branch
1835 instruction but not for a unconditional branch. We can use
1836 the following instructions instead:
1838 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
1839 B NOT_TAKEN ; Else jump over TAKEN and continue.
1845 insn_reloc
->insn_ptr
+= emit_cb (insn_reloc
->insn_ptr
, is_cbnz
,
1846 aarch64_register (rn
, is64
), 8);
1847 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, 0, 8);
1848 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, 0, new_offset
- 8);
1852 /* Implementation of aarch64_insn_visitor method "tb". */
1855 aarch64_ftrace_insn_reloc_tb (const int32_t offset
, int is_tbnz
,
1856 const unsigned rt
, unsigned bit
,
1857 struct aarch64_insn_data
*data
)
1859 struct aarch64_insn_relocation_data
*insn_reloc
1860 = (struct aarch64_insn_relocation_data
*) data
;
1862 = insn_reloc
->base
.insn_addr
- insn_reloc
->new_addr
+ offset
;
1864 if (can_encode_int32 (new_offset
, 16))
1866 insn_reloc
->insn_ptr
+= emit_tb (insn_reloc
->insn_ptr
, is_tbnz
, bit
,
1867 aarch64_register (rt
, 1), new_offset
);
1869 else if (can_encode_int32 (new_offset
, 28))
1871 /* The offset is out of range for a test bit and branch
1872 instruction but not for a unconditional branch. We can use
1873 the following instructions instead:
1875 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
1876 B NOT_TAKEN ; Else jump over TAKEN and continue.
1882 insn_reloc
->insn_ptr
+= emit_tb (insn_reloc
->insn_ptr
, is_tbnz
, bit
,
1883 aarch64_register (rt
, 1), 8);
1884 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, 0, 8);
1885 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, 0,
1890 /* Implementation of aarch64_insn_visitor method "adr". */
1893 aarch64_ftrace_insn_reloc_adr (const int32_t offset
, const unsigned rd
,
1895 struct aarch64_insn_data
*data
)
1897 struct aarch64_insn_relocation_data
*insn_reloc
1898 = (struct aarch64_insn_relocation_data
*) data
;
1899 /* We know exactly the address the ADR{P,} instruction will compute.
1900 We can just write it to the destination register. */
1901 CORE_ADDR address
= data
->insn_addr
+ offset
;
1905 /* Clear the lower 12 bits of the offset to get the 4K page. */
1906 insn_reloc
->insn_ptr
+= emit_mov_addr (insn_reloc
->insn_ptr
,
1907 aarch64_register (rd
, 1),
1911 insn_reloc
->insn_ptr
+= emit_mov_addr (insn_reloc
->insn_ptr
,
1912 aarch64_register (rd
, 1), address
);
1915 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
1918 aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset
, const int is_sw
,
1919 const unsigned rt
, const int is64
,
1920 struct aarch64_insn_data
*data
)
1922 struct aarch64_insn_relocation_data
*insn_reloc
1923 = (struct aarch64_insn_relocation_data
*) data
;
1924 CORE_ADDR address
= data
->insn_addr
+ offset
;
1926 insn_reloc
->insn_ptr
+= emit_mov_addr (insn_reloc
->insn_ptr
,
1927 aarch64_register (rt
, 1), address
);
1929 /* We know exactly what address to load from, and what register we
1932 MOV xd, #(oldloc + offset)
1933 MOVK xd, #((oldloc + offset) >> 16), lsl #16
1936 LDR xd, [xd] ; or LDRSW xd, [xd]
1941 insn_reloc
->insn_ptr
+= emit_ldrsw (insn_reloc
->insn_ptr
,
1942 aarch64_register (rt
, 1),
1943 aarch64_register (rt
, 1),
1944 offset_memory_operand (0));
1946 insn_reloc
->insn_ptr
+= emit_ldr (insn_reloc
->insn_ptr
,
1947 aarch64_register (rt
, is64
),
1948 aarch64_register (rt
, 1),
1949 offset_memory_operand (0));
1952 /* Implementation of aarch64_insn_visitor method "others". */
1955 aarch64_ftrace_insn_reloc_others (const uint32_t insn
,
1956 struct aarch64_insn_data
*data
)
1958 struct aarch64_insn_relocation_data
*insn_reloc
1959 = (struct aarch64_insn_relocation_data
*) data
;
1961 /* The instruction is not PC relative. Just re-emit it at the new
1963 insn_reloc
->insn_ptr
+= aarch64_emit_insn (insn_reloc
->insn_ptr
, insn
);
1966 static const struct aarch64_insn_visitor visitor
=
1968 aarch64_ftrace_insn_reloc_b
,
1969 aarch64_ftrace_insn_reloc_b_cond
,
1970 aarch64_ftrace_insn_reloc_cb
,
1971 aarch64_ftrace_insn_reloc_tb
,
1972 aarch64_ftrace_insn_reloc_adr
,
1973 aarch64_ftrace_insn_reloc_ldr_literal
,
1974 aarch64_ftrace_insn_reloc_others
,
1978 aarch64_target::supports_fast_tracepoints ()
1983 /* Implementation of target ops method
1984 "install_fast_tracepoint_jump_pad". */
1987 aarch64_target::install_fast_tracepoint_jump_pad
1988 (CORE_ADDR tpoint
, CORE_ADDR tpaddr
, CORE_ADDR collector
,
1989 CORE_ADDR lockaddr
, ULONGEST orig_size
, CORE_ADDR
*jump_entry
,
1990 CORE_ADDR
*trampoline
, ULONGEST
*trampoline_size
,
1991 unsigned char *jjump_pad_insn
, ULONGEST
*jjump_pad_insn_size
,
1992 CORE_ADDR
*adjusted_insn_addr
, CORE_ADDR
*adjusted_insn_addr_end
,
2000 CORE_ADDR buildaddr
= *jump_entry
;
2001 struct aarch64_insn_relocation_data insn_data
;
2003 /* We need to save the current state on the stack both to restore it
2004 later and to collect register values when the tracepoint is hit.
2006 The saved registers are pushed in a layout that needs to be in sync
2007 with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c). Later on
2008 the supply_fast_tracepoint_registers function will fill in the
2009 register cache from a pointer to saved registers on the stack we build
2012 For simplicity, we set the size of each cell on the stack to 16 bytes.
2013 This way one cell can hold any register type, from system registers
2014 to the 128 bit SIMD&FP registers. Furthermore, the stack pointer
2015 has to be 16 bytes aligned anyway.
2017 Note that the CPSR register does not exist on AArch64. Instead we
2018 can access system bits describing the process state with the
2019 MRS/MSR instructions, namely the condition flags. We save them as
2020 if they are part of a CPSR register because that's how GDB
2021 interprets these system bits. At the moment, only the condition
2022 flags are saved in CPSR (NZCV).
2024 Stack layout, each cell is 16 bytes (descending):
2026 High *-------- SIMD&FP registers from 31 down to 0. --------*
2032 *---- General purpose registers from 30 down to 0. ----*
2038 *------------- Special purpose registers. -------------*
2041 | CPSR (NZCV) | 5 cells
2044 *------------- collecting_t object --------------------*
2045 | TPIDR_EL0 | struct tracepoint * |
2046 Low *------------------------------------------------------*
2048 After this stack is set up, we issue a call to the collector, passing
2049 it the saved registers at (SP + 16). */
2051 /* Push SIMD&FP registers on the stack:
2053 SUB sp, sp, #(32 * 16)
2055 STP q30, q31, [sp, #(30 * 16)]
2060 p
+= emit_sub (p
, sp
, sp
, immediate_operand (32 * 16));
2061 for (i
= 30; i
>= 0; i
-= 2)
2062 p
+= emit_stp_q_offset (p
, i
, i
+ 1, sp
, i
* 16);
2064 /* Push general purpose registers on the stack. Note that we do not need
2065 to push x31 as it represents the xzr register and not the stack
2066 pointer in a STR instruction.
2068 SUB sp, sp, #(31 * 16)
2070 STR x30, [sp, #(30 * 16)]
2075 p
+= emit_sub (p
, sp
, sp
, immediate_operand (31 * 16));
2076 for (i
= 30; i
>= 0; i
-= 1)
2077 p
+= emit_str (p
, aarch64_register (i
, 1), sp
,
2078 offset_memory_operand (i
* 16));
2080 /* Make space for 5 more cells.
2082 SUB sp, sp, #(5 * 16)
2085 p
+= emit_sub (p
, sp
, sp
, immediate_operand (5 * 16));
2090 ADD x4, sp, #((32 + 31 + 5) * 16)
2091 STR x4, [sp, #(4 * 16)]
2094 p
+= emit_add (p
, x4
, sp
, immediate_operand ((32 + 31 + 5) * 16));
2095 p
+= emit_str (p
, x4
, sp
, offset_memory_operand (4 * 16));
2097 /* Save PC (tracepoint address):
2102 STR x3, [sp, #(3 * 16)]
2106 p
+= emit_mov_addr (p
, x3
, tpaddr
);
2107 p
+= emit_str (p
, x3
, sp
, offset_memory_operand (3 * 16));
2109 /* Save CPSR (NZCV), FPSR and FPCR:
2115 STR x2, [sp, #(2 * 16)]
2116 STR x1, [sp, #(1 * 16)]
2117 STR x0, [sp, #(0 * 16)]
2120 p
+= emit_mrs (p
, x2
, NZCV
);
2121 p
+= emit_mrs (p
, x1
, FPSR
);
2122 p
+= emit_mrs (p
, x0
, FPCR
);
2123 p
+= emit_str (p
, x2
, sp
, offset_memory_operand (2 * 16));
2124 p
+= emit_str (p
, x1
, sp
, offset_memory_operand (1 * 16));
2125 p
+= emit_str (p
, x0
, sp
, offset_memory_operand (0 * 16));
2127 /* Push the collecting_t object. It consist of the address of the
2128 tracepoint and an ID for the current thread. We get the latter by
2129 reading the tpidr_el0 system register. It corresponds to the
2130 NT_ARM_TLS register accessible with ptrace.
2137 STP x0, x1, [sp, #-16]!
2141 p
+= emit_mov_addr (p
, x0
, tpoint
);
2142 p
+= emit_mrs (p
, x1
, TPIDR_EL0
);
2143 p
+= emit_stp (p
, x0
, x1
, sp
, preindex_memory_operand (-16));
2147 The shared memory for the lock is at lockaddr. It will hold zero
2148 if no-one is holding the lock, otherwise it contains the address of
2149 the collecting_t object on the stack of the thread which acquired it.
2151 At this stage, the stack pointer points to this thread's collecting_t
2154 We use the following registers:
2155 - x0: Address of the lock.
2156 - x1: Pointer to collecting_t object.
2157 - x2: Scratch register.
2163 ; Trigger an event local to this core. So the following WFE
2164 ; instruction is ignored.
2167 ; Wait for an event. The event is triggered by either the SEVL
2168 ; or STLR instructions (store release).
2171 ; Atomically read at lockaddr. This marks the memory location as
2172 ; exclusive. This instruction also has memory constraints which
2173 ; make sure all previous data reads and writes are done before
2177 ; Try again if another thread holds the lock.
2180 ; We can lock it! Write the address of the collecting_t object.
2181 ; This instruction will fail if the memory location is not marked
2182 ; as exclusive anymore. If it succeeds, it will remove the
2183 ; exclusive mark on the memory location. This way, if another
2184 ; thread executes this instruction before us, we will fail and try
2191 p
+= emit_mov_addr (p
, x0
, lockaddr
);
2192 p
+= emit_mov (p
, x1
, register_operand (sp
));
2196 p
+= emit_ldaxr (p
, x2
, x0
);
2197 p
+= emit_cb (p
, 1, w2
, -2 * 4);
2198 p
+= emit_stxr (p
, w2
, x1
, x0
);
2199 p
+= emit_cb (p
, 1, x2
, -4 * 4);
2201 /* Call collector (struct tracepoint *, unsigned char *):
2206 ; Saved registers start after the collecting_t object.
2209 ; We use an intra-procedure-call scratch register.
2210 MOV ip0, #(collector)
2213 ; And call back to C!
2218 p
+= emit_mov_addr (p
, x0
, tpoint
);
2219 p
+= emit_add (p
, x1
, sp
, immediate_operand (16));
2221 p
+= emit_mov_addr (p
, ip0
, collector
);
2222 p
+= emit_blr (p
, ip0
);
2224 /* Release the lock.
2229 ; This instruction is a normal store with memory ordering
2230 ; constraints. Thanks to this we do not have to put a data
2231 ; barrier instruction to make sure all data read and writes are done
2232 ; before this instruction is executed. Furthermore, this instruction
2233 ; will trigger an event, letting other threads know they can grab
2238 p
+= emit_mov_addr (p
, x0
, lockaddr
);
2239 p
+= emit_stlr (p
, xzr
, x0
);
2241 /* Free collecting_t object:
2246 p
+= emit_add (p
, sp
, sp
, immediate_operand (16));
2248 /* Restore CPSR (NZCV), FPSR and FPCR. And free all special purpose
2249 registers from the stack.
2251 LDR x2, [sp, #(2 * 16)]
2252 LDR x1, [sp, #(1 * 16)]
2253 LDR x0, [sp, #(0 * 16)]
2259 ADD sp, sp #(5 * 16)
2262 p
+= emit_ldr (p
, x2
, sp
, offset_memory_operand (2 * 16));
2263 p
+= emit_ldr (p
, x1
, sp
, offset_memory_operand (1 * 16));
2264 p
+= emit_ldr (p
, x0
, sp
, offset_memory_operand (0 * 16));
2265 p
+= emit_msr (p
, NZCV
, x2
);
2266 p
+= emit_msr (p
, FPSR
, x1
);
2267 p
+= emit_msr (p
, FPCR
, x0
);
2269 p
+= emit_add (p
, sp
, sp
, immediate_operand (5 * 16));
2271 /* Pop general purpose registers:
2275 LDR x30, [sp, #(30 * 16)]
2277 ADD sp, sp, #(31 * 16)
2280 for (i
= 0; i
<= 30; i
+= 1)
2281 p
+= emit_ldr (p
, aarch64_register (i
, 1), sp
,
2282 offset_memory_operand (i
* 16));
2283 p
+= emit_add (p
, sp
, sp
, immediate_operand (31 * 16));
2285 /* Pop SIMD&FP registers:
2289 LDP q30, q31, [sp, #(30 * 16)]
2291 ADD sp, sp, #(32 * 16)
2294 for (i
= 0; i
<= 30; i
+= 2)
2295 p
+= emit_ldp_q_offset (p
, i
, i
+ 1, sp
, i
* 16);
2296 p
+= emit_add (p
, sp
, sp
, immediate_operand (32 * 16));
2298 /* Write the code into the inferior memory. */
2299 append_insns (&buildaddr
, p
- buf
, buf
);
2301 /* Now emit the relocated instruction. */
2302 *adjusted_insn_addr
= buildaddr
;
2303 target_read_uint32 (tpaddr
, &insn
);
2305 insn_data
.base
.insn_addr
= tpaddr
;
2306 insn_data
.new_addr
= buildaddr
;
2307 insn_data
.insn_ptr
= buf
;
2309 aarch64_relocate_instruction (insn
, &visitor
,
2310 (struct aarch64_insn_data
*) &insn_data
);
2312 /* We may not have been able to relocate the instruction. */
2313 if (insn_data
.insn_ptr
== buf
)
2316 "E.Could not relocate instruction from %s to %s.",
2317 core_addr_to_string_nz (tpaddr
),
2318 core_addr_to_string_nz (buildaddr
));
2322 append_insns (&buildaddr
, insn_data
.insn_ptr
- buf
, buf
);
2323 *adjusted_insn_addr_end
= buildaddr
;
2325 /* Go back to the start of the buffer. */
2328 /* Emit a branch back from the jump pad. */
2329 offset
= (tpaddr
+ orig_size
- buildaddr
);
2330 if (!can_encode_int32 (offset
, 28))
2333 "E.Jump back from jump pad too far from tracepoint "
2334 "(offset 0x%" PRIx64
" cannot be encoded in 28 bits).",
2339 p
+= emit_b (p
, 0, offset
);
2340 append_insns (&buildaddr
, p
- buf
, buf
);
2342 /* Give the caller a branch instruction into the jump pad. */
2343 offset
= (*jump_entry
- tpaddr
);
2344 if (!can_encode_int32 (offset
, 28))
2347 "E.Jump pad too far from tracepoint "
2348 "(offset 0x%" PRIx64
" cannot be encoded in 28 bits).",
2353 emit_b ((uint32_t *) jjump_pad_insn
, 0, offset
);
2354 *jjump_pad_insn_size
= 4;
2356 /* Return the end address of our pad. */
2357 *jump_entry
= buildaddr
;
2362 /* Helper function writing LEN instructions from START into
2363 current_insn_ptr. */
2366 emit_ops_insns (const uint32_t *start
, int len
)
2368 CORE_ADDR buildaddr
= current_insn_ptr
;
2371 debug_printf ("Adding %d instrucions at %s\n",
2372 len
, paddress (buildaddr
));
2374 append_insns (&buildaddr
, len
, start
);
2375 current_insn_ptr
= buildaddr
;
2378 /* Pop a register from the stack. */
2381 emit_pop (uint32_t *buf
, struct aarch64_register rt
)
2383 return emit_ldr (buf
, rt
, sp
, postindex_memory_operand (1 * 16));
2386 /* Push a register on the stack. */
2389 emit_push (uint32_t *buf
, struct aarch64_register rt
)
2391 return emit_str (buf
, rt
, sp
, preindex_memory_operand (-1 * 16));
2394 /* Implementation of emit_ops method "emit_prologue". */
2397 aarch64_emit_prologue (void)
2402 /* This function emit a prologue for the following function prototype:
2404 enum eval_result_type f (unsigned char *regs,
2407 The first argument is a buffer of raw registers. The second
2408 argument is the result of
2409 evaluating the expression, which will be set to whatever is on top of
2410 the stack at the end.
2412 The stack set up by the prologue is as such:
2414 High *------------------------------------------------------*
2417 | x1 (ULONGEST *value) |
2418 | x0 (unsigned char *regs) |
2419 Low *------------------------------------------------------*
2421 As we are implementing a stack machine, each opcode can expand the
2422 stack so we never know how far we are from the data saved by this
2423 prologue. In order to be able refer to value and regs later, we save
2424 the current stack pointer in the frame pointer. This way, it is not
2425 clobbered when calling C functions.
2427 Finally, throughout every operation, we are using register x0 as the
2428 top of the stack, and x1 as a scratch register. */
2430 p
+= emit_stp (p
, x0
, x1
, sp
, preindex_memory_operand (-2 * 16));
2431 p
+= emit_str (p
, lr
, sp
, offset_memory_operand (3 * 8));
2432 p
+= emit_str (p
, fp
, sp
, offset_memory_operand (2 * 8));
2434 p
+= emit_add (p
, fp
, sp
, immediate_operand (2 * 8));
2437 emit_ops_insns (buf
, p
- buf
);
2440 /* Implementation of emit_ops method "emit_epilogue". */
2443 aarch64_emit_epilogue (void)
2448 /* Store the result of the expression (x0) in *value. */
2449 p
+= emit_sub (p
, x1
, fp
, immediate_operand (1 * 8));
2450 p
+= emit_ldr (p
, x1
, x1
, offset_memory_operand (0));
2451 p
+= emit_str (p
, x0
, x1
, offset_memory_operand (0));
2453 /* Restore the previous state. */
2454 p
+= emit_add (p
, sp
, fp
, immediate_operand (2 * 8));
2455 p
+= emit_ldp (p
, fp
, lr
, fp
, offset_memory_operand (0));
2457 /* Return expr_eval_no_error. */
2458 p
+= emit_mov (p
, x0
, immediate_operand (expr_eval_no_error
));
2459 p
+= emit_ret (p
, lr
);
2461 emit_ops_insns (buf
, p
- buf
);
2464 /* Implementation of emit_ops method "emit_add". */
2467 aarch64_emit_add (void)
2472 p
+= emit_pop (p
, x1
);
2473 p
+= emit_add (p
, x0
, x1
, register_operand (x0
));
2475 emit_ops_insns (buf
, p
- buf
);
2478 /* Implementation of emit_ops method "emit_sub". */
2481 aarch64_emit_sub (void)
2486 p
+= emit_pop (p
, x1
);
2487 p
+= emit_sub (p
, x0
, x1
, register_operand (x0
));
2489 emit_ops_insns (buf
, p
- buf
);
2492 /* Implementation of emit_ops method "emit_mul". */
2495 aarch64_emit_mul (void)
2500 p
+= emit_pop (p
, x1
);
2501 p
+= emit_mul (p
, x0
, x1
, x0
);
2503 emit_ops_insns (buf
, p
- buf
);
2506 /* Implementation of emit_ops method "emit_lsh". */
2509 aarch64_emit_lsh (void)
2514 p
+= emit_pop (p
, x1
);
2515 p
+= emit_lslv (p
, x0
, x1
, x0
);
2517 emit_ops_insns (buf
, p
- buf
);
2520 /* Implementation of emit_ops method "emit_rsh_signed". */
2523 aarch64_emit_rsh_signed (void)
2528 p
+= emit_pop (p
, x1
);
2529 p
+= emit_asrv (p
, x0
, x1
, x0
);
2531 emit_ops_insns (buf
, p
- buf
);
2534 /* Implementation of emit_ops method "emit_rsh_unsigned". */
2537 aarch64_emit_rsh_unsigned (void)
2542 p
+= emit_pop (p
, x1
);
2543 p
+= emit_lsrv (p
, x0
, x1
, x0
);
2545 emit_ops_insns (buf
, p
- buf
);
2548 /* Implementation of emit_ops method "emit_ext". */
2551 aarch64_emit_ext (int arg
)
2556 p
+= emit_sbfx (p
, x0
, x0
, 0, arg
);
2558 emit_ops_insns (buf
, p
- buf
);
2561 /* Implementation of emit_ops method "emit_log_not". */
2564 aarch64_emit_log_not (void)
2569 /* If the top of the stack is 0, replace it with 1. Else replace it with
2572 p
+= emit_cmp (p
, x0
, immediate_operand (0));
2573 p
+= emit_cset (p
, x0
, EQ
);
2575 emit_ops_insns (buf
, p
- buf
);
2578 /* Implementation of emit_ops method "emit_bit_and". */
2581 aarch64_emit_bit_and (void)
2586 p
+= emit_pop (p
, x1
);
2587 p
+= emit_and (p
, x0
, x0
, x1
);
2589 emit_ops_insns (buf
, p
- buf
);
2592 /* Implementation of emit_ops method "emit_bit_or". */
2595 aarch64_emit_bit_or (void)
2600 p
+= emit_pop (p
, x1
);
2601 p
+= emit_orr (p
, x0
, x0
, x1
);
2603 emit_ops_insns (buf
, p
- buf
);
2606 /* Implementation of emit_ops method "emit_bit_xor". */
2609 aarch64_emit_bit_xor (void)
2614 p
+= emit_pop (p
, x1
);
2615 p
+= emit_eor (p
, x0
, x0
, x1
);
2617 emit_ops_insns (buf
, p
- buf
);
2620 /* Implementation of emit_ops method "emit_bit_not". */
2623 aarch64_emit_bit_not (void)
2628 p
+= emit_mvn (p
, x0
, x0
);
2630 emit_ops_insns (buf
, p
- buf
);
2633 /* Implementation of emit_ops method "emit_equal". */
2636 aarch64_emit_equal (void)
2641 p
+= emit_pop (p
, x1
);
2642 p
+= emit_cmp (p
, x0
, register_operand (x1
));
2643 p
+= emit_cset (p
, x0
, EQ
);
2645 emit_ops_insns (buf
, p
- buf
);
2648 /* Implementation of emit_ops method "emit_less_signed". */
2651 aarch64_emit_less_signed (void)
2656 p
+= emit_pop (p
, x1
);
2657 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2658 p
+= emit_cset (p
, x0
, LT
);
2660 emit_ops_insns (buf
, p
- buf
);
2663 /* Implementation of emit_ops method "emit_less_unsigned". */
2666 aarch64_emit_less_unsigned (void)
2671 p
+= emit_pop (p
, x1
);
2672 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2673 p
+= emit_cset (p
, x0
, LO
);
2675 emit_ops_insns (buf
, p
- buf
);
2678 /* Implementation of emit_ops method "emit_ref". */
2681 aarch64_emit_ref (int size
)
2689 p
+= emit_ldrb (p
, w0
, x0
, offset_memory_operand (0));
2692 p
+= emit_ldrh (p
, w0
, x0
, offset_memory_operand (0));
2695 p
+= emit_ldr (p
, w0
, x0
, offset_memory_operand (0));
2698 p
+= emit_ldr (p
, x0
, x0
, offset_memory_operand (0));
2701 /* Unknown size, bail on compilation. */
2706 emit_ops_insns (buf
, p
- buf
);
2709 /* Implementation of emit_ops method "emit_if_goto". */
2712 aarch64_emit_if_goto (int *offset_p
, int *size_p
)
2717 /* The Z flag is set or cleared here. */
2718 p
+= emit_cmp (p
, x0
, immediate_operand (0));
2719 /* This instruction must not change the Z flag. */
2720 p
+= emit_pop (p
, x0
);
2721 /* Branch over the next instruction if x0 == 0. */
2722 p
+= emit_bcond (p
, EQ
, 8);
2724 /* The NOP instruction will be patched with an unconditional branch. */
2726 *offset_p
= (p
- buf
) * 4;
2731 emit_ops_insns (buf
, p
- buf
);
2734 /* Implementation of emit_ops method "emit_goto". */
2737 aarch64_emit_goto (int *offset_p
, int *size_p
)
2742 /* The NOP instruction will be patched with an unconditional branch. */
2749 emit_ops_insns (buf
, p
- buf
);
2752 /* Implementation of emit_ops method "write_goto_address". */
2755 aarch64_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
2759 emit_b (&insn
, 0, to
- from
);
2760 append_insns (&from
, 1, &insn
);
2763 /* Implementation of emit_ops method "emit_const". */
2766 aarch64_emit_const (LONGEST num
)
2771 p
+= emit_mov_addr (p
, x0
, num
);
2773 emit_ops_insns (buf
, p
- buf
);
2776 /* Implementation of emit_ops method "emit_call". */
2779 aarch64_emit_call (CORE_ADDR fn
)
2784 p
+= emit_mov_addr (p
, ip0
, fn
);
2785 p
+= emit_blr (p
, ip0
);
2787 emit_ops_insns (buf
, p
- buf
);
2790 /* Implementation of emit_ops method "emit_reg". */
2793 aarch64_emit_reg (int reg
)
2798 /* Set x0 to unsigned char *regs. */
2799 p
+= emit_sub (p
, x0
, fp
, immediate_operand (2 * 8));
2800 p
+= emit_ldr (p
, x0
, x0
, offset_memory_operand (0));
2801 p
+= emit_mov (p
, x1
, immediate_operand (reg
));
2803 emit_ops_insns (buf
, p
- buf
);
2805 aarch64_emit_call (get_raw_reg_func_addr ());
2808 /* Implementation of emit_ops method "emit_pop". */
2811 aarch64_emit_pop (void)
2816 p
+= emit_pop (p
, x0
);
2818 emit_ops_insns (buf
, p
- buf
);
2821 /* Implementation of emit_ops method "emit_stack_flush". */
2824 aarch64_emit_stack_flush (void)
2829 p
+= emit_push (p
, x0
);
2831 emit_ops_insns (buf
, p
- buf
);
2834 /* Implementation of emit_ops method "emit_zero_ext". */
2837 aarch64_emit_zero_ext (int arg
)
2842 p
+= emit_ubfx (p
, x0
, x0
, 0, arg
);
2844 emit_ops_insns (buf
, p
- buf
);
2847 /* Implementation of emit_ops method "emit_swap". */
2850 aarch64_emit_swap (void)
2855 p
+= emit_ldr (p
, x1
, sp
, offset_memory_operand (0 * 16));
2856 p
+= emit_str (p
, x0
, sp
, offset_memory_operand (0 * 16));
2857 p
+= emit_mov (p
, x0
, register_operand (x1
));
2859 emit_ops_insns (buf
, p
- buf
);
2862 /* Implementation of emit_ops method "emit_stack_adjust". */
2865 aarch64_emit_stack_adjust (int n
)
2867 /* This is not needed with our design. */
2871 p
+= emit_add (p
, sp
, sp
, immediate_operand (n
* 16));
2873 emit_ops_insns (buf
, p
- buf
);
2876 /* Implementation of emit_ops method "emit_int_call_1". */
2879 aarch64_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
2884 p
+= emit_mov (p
, x0
, immediate_operand (arg1
));
2886 emit_ops_insns (buf
, p
- buf
);
2888 aarch64_emit_call (fn
);
2891 /* Implementation of emit_ops method "emit_void_call_2". */
2894 aarch64_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
2899 /* Push x0 on the stack. */
2900 aarch64_emit_stack_flush ();
2902 /* Setup arguments for the function call:
2905 x1: top of the stack
2910 p
+= emit_mov (p
, x1
, register_operand (x0
));
2911 p
+= emit_mov (p
, x0
, immediate_operand (arg1
));
2913 emit_ops_insns (buf
, p
- buf
);
2915 aarch64_emit_call (fn
);
2918 aarch64_emit_pop ();
2921 /* Implementation of emit_ops method "emit_eq_goto". */
2924 aarch64_emit_eq_goto (int *offset_p
, int *size_p
)
2929 p
+= emit_pop (p
, x1
);
2930 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2931 /* Branch over the next instruction if x0 != x1. */
2932 p
+= emit_bcond (p
, NE
, 8);
2933 /* The NOP instruction will be patched with an unconditional branch. */
2935 *offset_p
= (p
- buf
) * 4;
2940 emit_ops_insns (buf
, p
- buf
);
2943 /* Implementation of emit_ops method "emit_ne_goto". */
2946 aarch64_emit_ne_goto (int *offset_p
, int *size_p
)
2951 p
+= emit_pop (p
, x1
);
2952 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2953 /* Branch over the next instruction if x0 == x1. */
2954 p
+= emit_bcond (p
, EQ
, 8);
2955 /* The NOP instruction will be patched with an unconditional branch. */
2957 *offset_p
= (p
- buf
) * 4;
2962 emit_ops_insns (buf
, p
- buf
);
2965 /* Implementation of emit_ops method "emit_lt_goto". */
2968 aarch64_emit_lt_goto (int *offset_p
, int *size_p
)
2973 p
+= emit_pop (p
, x1
);
2974 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2975 /* Branch over the next instruction if x0 >= x1. */
2976 p
+= emit_bcond (p
, GE
, 8);
2977 /* The NOP instruction will be patched with an unconditional branch. */
2979 *offset_p
= (p
- buf
) * 4;
2984 emit_ops_insns (buf
, p
- buf
);
2987 /* Implementation of emit_ops method "emit_le_goto". */
2990 aarch64_emit_le_goto (int *offset_p
, int *size_p
)
2995 p
+= emit_pop (p
, x1
);
2996 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2997 /* Branch over the next instruction if x0 > x1. */
2998 p
+= emit_bcond (p
, GT
, 8);
2999 /* The NOP instruction will be patched with an unconditional branch. */
3001 *offset_p
= (p
- buf
) * 4;
3006 emit_ops_insns (buf
, p
- buf
);
3009 /* Implementation of emit_ops method "emit_gt_goto". */
3012 aarch64_emit_gt_goto (int *offset_p
, int *size_p
)
3017 p
+= emit_pop (p
, x1
);
3018 p
+= emit_cmp (p
, x1
, register_operand (x0
));
3019 /* Branch over the next instruction if x0 <= x1. */
3020 p
+= emit_bcond (p
, LE
, 8);
3021 /* The NOP instruction will be patched with an unconditional branch. */
3023 *offset_p
= (p
- buf
) * 4;
3028 emit_ops_insns (buf
, p
- buf
);
3031 /* Implementation of emit_ops method "emit_ge_got". */
3034 aarch64_emit_ge_got (int *offset_p
, int *size_p
)
3039 p
+= emit_pop (p
, x1
);
3040 p
+= emit_cmp (p
, x1
, register_operand (x0
));
3041 /* Branch over the next instruction if x0 <= x1. */
3042 p
+= emit_bcond (p
, LT
, 8);
3043 /* The NOP instruction will be patched with an unconditional branch. */
3045 *offset_p
= (p
- buf
) * 4;
3050 emit_ops_insns (buf
, p
- buf
);
3053 static struct emit_ops aarch64_emit_ops_impl
=
3055 aarch64_emit_prologue
,
3056 aarch64_emit_epilogue
,
3061 aarch64_emit_rsh_signed
,
3062 aarch64_emit_rsh_unsigned
,
3064 aarch64_emit_log_not
,
3065 aarch64_emit_bit_and
,
3066 aarch64_emit_bit_or
,
3067 aarch64_emit_bit_xor
,
3068 aarch64_emit_bit_not
,
3070 aarch64_emit_less_signed
,
3071 aarch64_emit_less_unsigned
,
3073 aarch64_emit_if_goto
,
3075 aarch64_write_goto_address
,
3080 aarch64_emit_stack_flush
,
3081 aarch64_emit_zero_ext
,
3083 aarch64_emit_stack_adjust
,
3084 aarch64_emit_int_call_1
,
3085 aarch64_emit_void_call_2
,
3086 aarch64_emit_eq_goto
,
3087 aarch64_emit_ne_goto
,
3088 aarch64_emit_lt_goto
,
3089 aarch64_emit_le_goto
,
3090 aarch64_emit_gt_goto
,
3091 aarch64_emit_ge_got
,
3094 /* Implementation of linux_target_ops method "emit_ops". */
3096 static struct emit_ops
*
3097 aarch64_emit_ops (void)
3099 return &aarch64_emit_ops_impl
;
3102 /* Implementation of target ops method
3103 "get_min_fast_tracepoint_insn_len". */
3106 aarch64_target::get_min_fast_tracepoint_insn_len ()
/* Implementation of linux_target_ops method "supports_range_stepping".  */

static int
aarch64_supports_range_stepping (void)
{
  /* Body was lost in this copy; presumably unconditionally supported
     -- TODO confirm against upstream.  */
  return 1;
}
3119 /* Implementation of target ops method "sw_breakpoint_from_kind". */
3122 aarch64_target::sw_breakpoint_from_kind (int kind
, int *size
)
3124 if (is_64bit_tdesc ())
3126 *size
= aarch64_breakpoint_len
;
3127 return aarch64_breakpoint
;
3130 return arm_sw_breakpoint_from_kind (kind
, size
);
3133 /* Implementation of target ops method "breakpoint_kind_from_pc". */
3136 aarch64_target::breakpoint_kind_from_pc (CORE_ADDR
*pcptr
)
3138 if (is_64bit_tdesc ())
3139 return aarch64_breakpoint_len
;
3141 return arm_breakpoint_kind_from_pc (pcptr
);
3144 /* Implementation of the target ops method
3145 "breakpoint_kind_from_current_state". */
3148 aarch64_target::breakpoint_kind_from_current_state (CORE_ADDR
*pcptr
)
3150 if (is_64bit_tdesc ())
3151 return aarch64_breakpoint_len
;
3153 return arm_breakpoint_kind_from_current_state (pcptr
);
/* Support for hardware single step.  */

static int
aarch64_supports_hardware_single_step (void)
{
  /* Body was lost in this copy; presumably unconditionally supported
     -- TODO confirm against upstream.  */
  return 1;
}
3164 struct linux_target_ops the_low_target
=
3167 aarch64_supports_range_stepping
,
3168 aarch64_supports_hardware_single_step
,
3169 aarch64_get_syscall_trapinfo
,
3172 /* The linux target ops object. */
3174 linux_process_target
*the_linux_target
= &the_aarch64_target
;
3177 initialize_low_arch (void)
3179 initialize_low_arch_aarch32 ();
3181 initialize_regsets_info (&aarch64_regsets_info
);
3182 initialize_regsets_info (&aarch64_sve_regsets_info
);