1 /* GNU/Linux/AArch64 specific low level interface, for the remote server for
4 Copyright (C) 2009-2020 Free Software Foundation, Inc.
5 Contributed by ARM Ltd.
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
23 #include "linux-low.h"
24 #include "nat/aarch64-linux.h"
25 #include "nat/aarch64-linux-hw-point.h"
26 #include "arch/aarch64-insn.h"
27 #include "linux-aarch32-low.h"
28 #include "elf/common.h"
30 #include "tracepoint.h"
35 #include "nat/gdb_ptrace.h"
36 #include <asm/ptrace.h>
41 #include "gdb_proc_service.h"
42 #include "arch/aarch64.h"
43 #include "linux-aarch32-tdesc.h"
44 #include "linux-aarch64-tdesc.h"
45 #include "nat/aarch64-sve-linux-ptrace.h"
/* Linux target op definitions for the AArch64 architecture.  */

class aarch64_target : public linux_process_target
{
public:

  /* Return the regs_info describing the active (aarch32/aarch64/SVE)
     register set layout.  */
  const regs_info *get_regs_info () override;

  int breakpoint_kind_from_pc (CORE_ADDR *pcptr) override;

  int breakpoint_kind_from_current_state (CORE_ADDR *pcptr) override;

  const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;

  bool supports_z_point_type (char z_type) override;

  bool supports_tracepoints () override;

protected:

  void low_arch_setup () override;

  bool low_cannot_fetch_register (int regno) override;

  bool low_cannot_store_register (int regno) override;

  bool low_supports_breakpoints () override;

  CORE_ADDR low_get_pc (regcache *regcache) override;

  void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;

  bool low_breakpoint_at (CORE_ADDR pc) override;

  int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  bool low_stopped_by_watchpoint () override;

  CORE_ADDR low_stopped_data_address () override;

  bool low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
			  int direction) override;

  arch_process_info *low_new_process () override;

  void low_delete_process (arch_process_info *info) override;

  void low_new_thread (lwp_info *) override;

  void low_delete_thread (arch_lwp_info *) override;

  void low_new_fork (process_info *parent, process_info *child) override;

  void low_prepare_to_resume (lwp_info *lwp) override;
};

/* The singleton target ops object.  */

static aarch64_target the_aarch64_target;
/* Implementation of linux target ops method "low_cannot_fetch_register".
   Never called on this target: all registers are fetched via regsets.  */

bool
aarch64_target::low_cannot_fetch_register (int regno)
{
  gdb_assert_not_reached ("linux target op low_cannot_fetch_register "
			  "is not implemented by the target");
}

/* Implementation of linux target ops method "low_cannot_store_register".
   Never called on this target: all registers are stored via regsets.  */

bool
aarch64_target::low_cannot_store_register (int regno)
{
  gdb_assert_not_reached ("linux target op low_cannot_store_register "
			  "is not implemented by the target");
}

/* Implementation of linux target ops method "low_prepare_to_resume".
   Flushes any pending hardware debug register changes to LWP.  */

void
aarch64_target::low_prepare_to_resume (lwp_info *lwp)
{
  aarch64_linux_prepare_to_resume (lwp);
}
/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  /* Hardware breakpoint/watchpoint data.
     The reason for them to be per-process rather than per-thread is
     due to the lack of information in the gdbserver environment;
     gdbserver is not told that whether a requested hardware
     breakpoint/watchpoint is thread specific or not, so it has to set
     each hw bp/wp for every thread in the current process.  The
     higher level bp/wp management in gdb will resume a thread if a hw
     bp/wp trap is not expected for it.  Since the hw bp/wp setting is
     same for each thread, it is reasonable for the data to live here.  */
  struct aarch64_debug_reg_state debug_reg_state;
};
/* Return true if the size of register 0 is 8 byte.  I.e. the current
   thread runs in 64-bit (AArch64) rather than 32-bit (AArch32) mode.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

/* Return true if the regcache contains the number of SVE registers.  */

static bool
is_sve_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return tdesc_contains_feature (regcache->tdesc, "org.gnu.gdb.aarch64.sve");
}
/* Copy the general-purpose registers (X0-X30, SP, PC, CPSR) from
   REGCACHE into the NT_PRSTATUS ptrace buffer BUF.  */

static void
aarch64_fill_gregset (struct regcache *regcache, void *buf)
{
  struct user_pt_regs *regset = (struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    collect_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  collect_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  collect_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  collect_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}

/* Copy the general-purpose registers from the NT_PRSTATUS ptrace
   buffer BUF into REGCACHE.  Inverse of aarch64_fill_gregset.  */

static void
aarch64_store_gregset (struct regcache *regcache, const void *buf)
{
  const struct user_pt_regs *regset = (const struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    supply_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  supply_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  supply_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  supply_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}
/* Copy the FP/SIMD registers (V0-V31, FPSR, FPCR) from REGCACHE into
   the NT_FPREGSET ptrace buffer BUF.  */

static void
aarch64_fill_fpregset (struct regcache *regcache, void *buf)
{
  struct user_fpsimd_state *regset = (struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    collect_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  collect_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  collect_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}

/* Copy the FP/SIMD registers from the NT_FPREGSET ptrace buffer BUF
   into REGCACHE.  Inverse of aarch64_fill_fpregset.  */

static void
aarch64_store_fpregset (struct regcache *regcache, const void *buf)
{
  const struct user_fpsimd_state *regset
    = (const struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    supply_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  supply_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  supply_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}
/* Store the pauth registers to regcache.  BUF holds the two
   NT_ARM_PAC_MASK values (data mask, then code mask).  */

static void
aarch64_store_pauthregset (struct regcache *regcache, const void *buf)
{
  uint64_t *pauth_regset = (uint64_t *) buf;
  int pauth_base = find_regno (regcache->tdesc, "pauth_dmask");

  /* Nothing to do when the tdesc has no pauth feature.  */
  if (pauth_base == 0)
    return;

  supply_register (regcache, AARCH64_PAUTH_DMASK_REGNUM (pauth_base),
		   &pauth_regset[0]);
  supply_register (regcache, AARCH64_PAUTH_CMASK_REGNUM (pauth_base),
		   &pauth_regset[1]);
}
/* Implementation of linux target ops method "low_supports_breakpoints".  */

bool
aarch64_target::low_supports_breakpoints ()
{
  return true;
}

/* Implementation of linux target ops method "low_get_pc".  */

CORE_ADDR
aarch64_target::low_get_pc (regcache *regcache)
{
  /* Register 0 is 8 bytes wide only for 64-bit (AArch64) inferiors.  */
  if (register_size (regcache->tdesc, 0) == 8)
    return linux_get_pc_64bit (regcache);
  else
    return linux_get_pc_32bit (regcache);
}

/* Implementation of linux target ops method "low_set_pc".  */

void
aarch64_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
{
  if (register_size (regcache->tdesc, 0) == 8)
    linux_set_pc_64bit (regcache, pc);
  else
    linux_set_pc_32bit (regcache, pc);
}
#define aarch64_breakpoint_len 4

/* AArch64 BRK software debug mode instruction.
   This instruction needs to match gdb/aarch64-tdep.c
   (aarch64_default_breakpoint).  */
static const gdb_byte aarch64_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};

/* Implementation of linux target ops method "low_breakpoint_at".
   Return true if the instruction at WHERE is our breakpoint insn.  */

bool
aarch64_target::low_breakpoint_at (CORE_ADDR where)
{
  if (is_64bit_tdesc ())
    {
      gdb_byte insn[aarch64_breakpoint_len];

      read_memory (where, (unsigned char *) &insn, aarch64_breakpoint_len);
      if (memcmp (insn, aarch64_breakpoint, aarch64_breakpoint_len) == 0)
	return true;

      return false;
    }
  else
    /* 32-bit inferiors use the ARM/Thumb breakpoint check.  */
    return arm_breakpoint_at (where);
}
/* Clear the hardware breakpoint and watchpoint mirror state in STATE.  */

static void
aarch64_init_debug_reg_state (struct aarch64_debug_reg_state *state)
{
  int i;

  for (i = 0; i < AARCH64_HBP_MAX_NUM; ++i)
    {
      state->dr_addr_bp[i] = 0;
      state->dr_ctrl_bp[i] = 0;
      state->dr_ref_count_bp[i] = 0;
    }

  for (i = 0; i < AARCH64_HWP_MAX_NUM; ++i)
    {
      state->dr_addr_wp[i] = 0;
      state->dr_ctrl_wp[i] = 0;
      state->dr_ref_count_wp[i] = 0;
    }
}
/* Return the pointer to the debug register state structure in the
   current process' arch-specific data area.  */

struct aarch64_debug_reg_state *
aarch64_get_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}

/* Implementation of target ops method "supports_z_point_type".  */

bool
aarch64_target::supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_READ_WP:
    case Z_PACKET_ACCESS_WP:
      return true;
    default:
      return false;
    }
}
/* Implementation of linux target ops method "low_insert_point".

   It actually only records the info of the to-be-inserted bp/wp;
   the actual insertion will happen when threads are resumed.  */

int
aarch64_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
				  int len, raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "insert_point on entry (addr=0x%08lx, len=%d)\n",
	     (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  if (targ_type != hw_execute)
    {
      if (aarch64_linux_region_ok_for_watchpoint (addr, len))
	ret = aarch64_handle_watchpoint (targ_type, addr, len,
					 1 /* is_insert */, state);
      else
	ret = -1;
    }
  else
    {
      if (len == 3)
	{
	  /* LEN is 3 means the breakpoint is set on a 32-bit thumb
	     instruction.   Set it to 2 to correctly encode length bit
	     mask in hardware/watchpoint control register.  */
	  len = 2;
	}
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
				       1 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "insert_point", addr, len,
				  targ_type);

  return ret;
}

/* Implementation of linux target ops method "low_remove_point".

   It actually only records the info of the to-be-removed bp/wp,
   the actual removal will be done when threads are resumed.  */

int
aarch64_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
				  int len, raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "remove_point on entry (addr=0x%08lx, len=%d)\n",
	     (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  /* Set up state pointers.  */
  if (targ_type != hw_execute)
    ret = aarch64_handle_watchpoint (targ_type, addr, len, 0 /* is_insert */,
				     state);
  else
    {
      if (len == 3)
	{
	  /* LEN is 3 means the breakpoint is set on a 32-bit thumb
	     instruction.   Set it to 2 to correctly encode length bit
	     mask in hardware/watchpoint control register.  */
	  len = 2;
	}
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
				       0 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "remove_point", addr, len,
				  targ_type);

  return ret;
}
/* Implementation of linux target ops method "low_stopped_data_address".
   Returns the watched address that caused the current stop, or 0 when
   the stop was not a hardware watchpoint trap.  */

CORE_ADDR
aarch64_target::low_stopped_data_address ()
{
  siginfo_t siginfo;
  int pid, i;
  struct aarch64_debug_reg_state *state;

  pid = lwpid_of (current_thread);

  /* Get the siginfo.  */
  if (ptrace (PTRACE_GETSIGINFO, pid, NULL, &siginfo) != 0)
    return (CORE_ADDR) 0;

  /* Need to be a hardware breakpoint/watchpoint trap.  */
  if (siginfo.si_signo != SIGTRAP
      || (siginfo.si_code & 0xffff) != 0x0004 /* TRAP_HWBKPT */)
    return (CORE_ADDR) 0;

  /* Check if the address matches any watched address.  */
  state = aarch64_get_debug_reg_state (pid_of (current_thread));
  for (i = aarch64_num_wp_regs - 1; i >= 0; --i)
    {
      const unsigned int offset
	= aarch64_watchpoint_offset (state->dr_ctrl_wp[i]);
      const unsigned int len = aarch64_watchpoint_length (state->dr_ctrl_wp[i]);
      const CORE_ADDR addr_trap = (CORE_ADDR) siginfo.si_addr;
      const CORE_ADDR addr_watch = state->dr_addr_wp[i] + offset;
      const CORE_ADDR addr_watch_aligned = align_down (state->dr_addr_wp[i], 8);
      const CORE_ADDR addr_orig = state->dr_addr_orig_wp[i];

      if (state->dr_ref_count_wp[i]
	  && DR_CONTROL_ENABLED (state->dr_ctrl_wp[i])
	  && addr_trap >= addr_watch_aligned
	  && addr_trap < addr_watch + len)
	{
	  /* ADDR_TRAP reports the first address of the memory range
	     accessed by the CPU, regardless of what was the memory
	     range watched.  Thus, a large CPU access that straddles
	     the ADDR_WATCH..ADDR_WATCH+LEN range may result in an
	     ADDR_TRAP that is lower than the
	     ADDR_WATCH..ADDR_WATCH+LEN range.  E.g.:

	     addr: |   4   |   5   |   6   |   7   |   8   |
				   |---- range watched ----|
		   |----------- range accessed ------------|

	     In this case, ADDR_TRAP will be 4.

	     To match a watchpoint known to GDB core, we must never
	     report *ADDR_P outside of any ADDR_WATCH..ADDR_WATCH+LEN
	     range.  ADDR_WATCH <= ADDR_TRAP < ADDR_ORIG is a false
	     positive on kernels older than 4.10.  See PR
	     external/20207.  */
	  return addr_orig;
	}
    }

  return (CORE_ADDR) 0;
}

/* Implementation of linux target ops method "low_stopped_by_watchpoint".  */

bool
aarch64_target::low_stopped_by_watchpoint ()
{
  return (low_stopped_data_address () != 0);
}
/* Fetch the thread-local storage pointer for libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
  return aarch64_ps_get_thread_area (ph, lwpid, idx, base,
				     is_64bit_tdesc ());
}

/* Implementation of linux target ops method "low_siginfo_fixup".
   DIRECTION 0 converts native to inferior layout, otherwise the
   reverse.  Returns true if a conversion was done.  */

bool
aarch64_target::low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
				   int direction)
{
  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    {
      if (direction == 0)
	aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo *) inf,
					     native);
      else
	aarch64_siginfo_from_compat_siginfo (native,
					     (struct compat_siginfo *) inf);

      return true;
    }

  return false;
}
/* Implementation of linux target ops method "low_new_process".
   Allocates the per-process debug register mirror; freed by
   low_delete_process.  */

arch_process_info *
aarch64_target::low_new_process ()
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  aarch64_init_debug_reg_state (&info->debug_reg_state);

  return info;
}

/* Implementation of linux target ops method "low_delete_process".  */

void
aarch64_target::low_delete_process (arch_process_info *info)
{
  xfree (info);
}

/* Implementation of linux target ops method "low_new_thread".  */

void
aarch64_target::low_new_thread (lwp_info *lwp)
{
  aarch64_linux_new_thread (lwp);
}

/* Implementation of linux target ops method "low_delete_thread".  */

void
aarch64_target::low_delete_thread (arch_lwp_info *arch_lwp)
{
  aarch64_linux_delete_thread (arch_lwp);
}
/* Implementation of linux target ops method "low_new_fork".  */

void
aarch64_target::low_new_fork (process_info *parent,
			      process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
	      && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
	      && child->priv->arch_private != NULL);

  /* Linux kernel before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     will inherit hardware debug registers from parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug registers mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug registers mirror will become zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}

/* Matches HWCAP_PACA in kernel header arch/arm64/include/uapi/asm/hwcap.h.  */
#define AARCH64_HWCAP_PACA (1 << 30)

/* Implementation of linux target ops method "low_arch_setup".
   Selects the 32-bit or 64-bit (optionally SVE/pauth) target
   description for the current process.  */

void
aarch64_target::low_arch_setup ()
{
  unsigned int machine;
  int is_elf64;
  int tid;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (is_elf64)
    {
      uint64_t vq = aarch64_sve_get_vq (tid);
      unsigned long hwcap = linux_get_hwcap (8);
      bool pauth_p = hwcap & AARCH64_HWCAP_PACA;

      current_process ()->tdesc = aarch64_linux_read_description (vq, pauth_p);
    }
  else
    current_process ()->tdesc = aarch32_linux_read_description ();

  aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread));
}
/* Wrapper for aarch64_sve_regs_copy_to_reg_buf.  */

static void
aarch64_sve_regs_copy_to_regcache (struct regcache *regcache, const void *buf)
{
  return aarch64_sve_regs_copy_to_reg_buf (regcache, buf);
}

/* Wrapper for aarch64_sve_regs_copy_from_reg_buf.  */

static void
aarch64_sve_regs_copy_from_regcache (struct regcache *regcache, void *buf)
{
  return aarch64_sve_regs_copy_from_reg_buf (regcache, buf);
}
/* Register sets used by a standard (non-SVE) AArch64 inferior.  */

static struct regset_info aarch64_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_FPREGSET,
    sizeof (struct user_fpsimd_state), FP_REGS,
    aarch64_fill_fpregset, aarch64_store_fpregset
  },
  /* The pauth masks are read-only; no fill function.  */
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
    AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
    NULL, aarch64_store_pauthregset },
  NULL_REGSET
};

static struct regsets_info aarch64_regsets_info =
  {
    aarch64_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

static struct regs_info regs_info_aarch64 =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs */
    &aarch64_regsets_info,
  };

/* Register sets used when the inferior has the SVE feature.  */

static struct regset_info aarch64_sve_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  /* NT_ARM_SVE replaces the FPSIMD regset; sized for the maximum
     vector quotient so any vector length fits.  */
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_SVE,
    SVE_PT_SIZE (AARCH64_MAX_SVE_VQ, SVE_PT_REGS_SVE), EXTENDED_REGS,
    aarch64_sve_regs_copy_from_regcache, aarch64_sve_regs_copy_to_regcache
  },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
    AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
    NULL, aarch64_store_pauthregset },
  NULL_REGSET
};

static struct regsets_info aarch64_sve_regsets_info =
  {
    aarch64_sve_regsets, /* regsets.  */
    0, /* num_regsets.  */
    NULL, /* disabled_regsets.  */
  };

static struct regs_info regs_info_aarch64_sve =
  {
    NULL, /* regset_bitmap.  */
    NULL, /* usrregs.  */
    &aarch64_sve_regsets_info,
  };
/* Implementation of linux target ops method "get_regs_info".  */

const regs_info *
aarch64_target::get_regs_info ()
{
  if (!is_64bit_tdesc ())
    return &regs_info_aarch32;

  if (is_sve_tdesc ())
    return &regs_info_aarch64_sve;

  return &regs_info_aarch64;
}

/* Implementation of target ops method "supports_tracepoints".  */

bool
aarch64_target::supports_tracepoints ()
{
  if (current_thread == NULL)
    return true;
  else
    {
      /* We don't support tracepoints on aarch32 now.  */
      return is_64bit_tdesc ();
    }
}
/* Implementation of linux_target_ops method "get_thread_area".
   Reads the TLS base (TPIDR_EL0) of LWPID into *ADDRP; returns 0 on
   success, -1 on ptrace failure.  */

static int
aarch64_get_thread_area (int lwpid, CORE_ADDR *addrp)
{
  uint64_t reg;
  struct iovec iovec;

  iovec.iov_base = &reg;
  iovec.iov_len = sizeof (reg);

  if (ptrace (PTRACE_GETREGSET, lwpid, NT_ARM_TLS, &iovec) != 0)
    return -1;

  *addrp = reg;

  return 0;
}

/* Implementation of linux_target_ops method "get_syscall_trapinfo".  */

static void
aarch64_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      /* The syscall number lives in X8 on AArch64, R7 on AArch32.  */
      collect_register_by_name (regcache, "x8", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "r7", sysno);
}
/* List of condition codes that we need.  */

enum aarch64_condition_codes
{
  EQ = 0x0,
  NE = 0x1,
  LO = 0x3,
  GE = 0xa,
  LT = 0xb,
  GT = 0xc,
  LE = 0xd,
};

enum aarch64_operand_type
{
  OPERAND_IMMEDIATE,
  OPERAND_REGISTER,
};

/* Representation of an operand.  At this time, it only supports register
   and immediate types.  */

struct aarch64_operand
{
  /* Type of the operand.  */
  enum aarch64_operand_type type;

  /* Value of the operand according to the type.  */
  union
    {
      uint32_t imm;
      struct aarch64_register reg;
    };
};

/* List of registers that we are currently using, we can add more here as
   we need to use them.  */

/* General purpose scratch registers (64 bit).  */
static const struct aarch64_register x0 = { 0, 1 };
static const struct aarch64_register x1 = { 1, 1 };
static const struct aarch64_register x2 = { 2, 1 };
static const struct aarch64_register x3 = { 3, 1 };
static const struct aarch64_register x4 = { 4, 1 };

/* General purpose scratch registers (32 bit).  */
static const struct aarch64_register w0 = { 0, 0 };
static const struct aarch64_register w2 = { 2, 0 };

/* Intra-procedure scratch registers.  */
static const struct aarch64_register ip0 = { 16, 1 };

/* Special purpose registers.  */
static const struct aarch64_register fp = { 29, 1 };
static const struct aarch64_register lr = { 30, 1 };
static const struct aarch64_register sp = { 31, 1 };
static const struct aarch64_register xzr = { 31, 1 };

/* Dynamically allocate a new register.  If we know the register
   statically, we should make it a global as above instead of using this
   helper function.  */

static struct aarch64_register
aarch64_register (unsigned num, int is64)
{
  return (struct aarch64_register) { num, is64 };
}
/* Helper function to create a register operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, register_operand (x1));  */

static struct aarch64_operand
register_operand (struct aarch64_register reg)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_REGISTER;
  operand.reg = reg;

  return operand;
}

/* Helper function to create an immediate operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, immediate_operand (12));  */

static struct aarch64_operand
immediate_operand (uint32_t imm)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_IMMEDIATE;
  operand.imm = imm;

  return operand;
}

/* Helper function to create an offset memory operand.

   For example:
   p += emit_ldr (p, x0, sp, offset_memory_operand (16));  */

static struct aarch64_memory_operand
offset_memory_operand (int32_t offset)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_OFFSET, offset };
}

/* Helper function to create a pre-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, preindex_memory_operand (16));  */

static struct aarch64_memory_operand
preindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_PREINDEX, index };
}

/* Helper function to create a post-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, postindex_memory_operand (16));  */

static struct aarch64_memory_operand
postindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_POSTINDEX, index };
}
/* System control registers.  These special registers can be written and
   read with the MRS and MSR instructions.

   - NZCV: Condition flags.  GDB refers to this register under the CPSR
     name.
   - FPSR: Floating-point status register.
   - FPCR: Floating-point control registers.
   - TPIDR_EL0: Software thread ID register.  */

enum aarch64_system_control_registers
{
  /*          op0           op1           crn          crm          op2  */
  NZCV =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
  FPSR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
  FPCR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
  TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
};

/* Write a BLR instruction into *BUF.

   BLR rn

   RN is the register to branch to.  */

static int
emit_blr (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, BLR | ENCODE (rn.num, 5, 5));
}

/* Write a RET instruction into *BUF.

   RET xn

   RN is the register to branch to.  */

static int
emit_ret (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, RET | ENCODE (rn.num, 5, 5));
}
/* Helper function emitting a load or store pair (LDP/STP) instruction.  */

static int
emit_load_store_pair (uint32_t *buf, enum aarch64_opcodes opcode,
		      struct aarch64_register rt,
		      struct aarch64_register rt2,
		      struct aarch64_register rn,
		      struct aarch64_memory_operand operand)
{
  uint32_t opc;
  uint32_t pre_index;
  uint32_t write_back;

  if (rt.is64)
    opc = ENCODE (2, 2, 30);
  else
    opc = ENCODE (0, 2, 30);

  switch (operand.type)
    {
    case MEMORY_OPERAND_OFFSET:
      {
	pre_index = ENCODE (1, 1, 24);
	write_back = ENCODE (0, 1, 23);
	break;
      }
    case MEMORY_OPERAND_POSTINDEX:
      {
	pre_index = ENCODE (0, 1, 24);
	write_back = ENCODE (1, 1, 23);
	break;
      }
    case MEMORY_OPERAND_PREINDEX:
      {
	pre_index = ENCODE (1, 1, 24);
	write_back = ENCODE (1, 1, 23);
	break;
      }
    default:
      return 0;
    }

  /* The 7-bit immediate is scaled by 8 (hence the >> 3).  */
  return aarch64_emit_insn (buf, opcode | opc | pre_index | write_back
			    | ENCODE (operand.index >> 3, 7, 15)
			    | ENCODE (rt2.num, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}

/* Write a STP instruction into *BUF.

   STP rt, rt2, [rn, #offset]
   STP rt, rt2, [rn, #index]!
   STP rt, rt2, [rn], #index

   RT and RT2 are the registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_stp (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rt2, struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, STP, rt, rt2, rn, operand);
}

/* Write a LDP instruction into *BUF.

   LDP rt, rt2, [rn, #offset]
   LDP rt, rt2, [rn, #index]!
   LDP rt, rt2, [rn], #index

   RT and RT2 are the registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_ldp (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rt2, struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, LDP, rt, rt2, rn, operand);
}
/* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.

   LDP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_ldp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
		   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  /* The 7-bit immediate is scaled by 16 (hence the >> 4).  */
  return aarch64_emit_insn (buf, LDP_SIMD_VFP | opc | pre_index
			    | ENCODE (offset >> 4, 7, 15)
			    | ENCODE (rt2, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

/* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.

   STP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_stp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
		   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, STP_SIMD_VFP | opc | pre_index
			    | ENCODE (offset >> 4, 7, 15)
			    | ENCODE (rt2, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}
/* Write a LDRH instruction into *BUF.

   LDRH wt, [xn, #offset]
   LDRH wt, [xn, #index]!
   LDRH wt, [xn], #index

   RT is the register to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_ldrh (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn,
	   struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 1, LDR, rt, rn, operand);
}

/* Write a LDRB instruction into *BUF.

   LDRB wt, [xn, #offset]
   LDRB wt, [xn, #index]!
   LDRB wt, [xn], #index

   RT is the register to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_ldrb (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn,
	   struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 0, LDR, rt, rn, operand);
}

/* Write a STR instruction into *BUF.

   STR rt, [rn, #offset]
   STR rt, [rn, #index]!
   STR rt, [rn], #index

   RT is the register to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_str (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  /* Size field: 3 = 64-bit, 2 = 32-bit access.  */
  return aarch64_emit_load_store (buf, rt.is64 ? 3 : 2, STR, rt, rn, operand);
}
/* Helper function emitting an exclusive load or store instruction.  */

static int
emit_load_store_exclusive (uint32_t *buf, uint32_t size,
			   enum aarch64_opcodes opcode,
			   struct aarch64_register rs,
			   struct aarch64_register rt,
			   struct aarch64_register rt2,
			   struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, opcode | ENCODE (size, 2, 30)
			    | ENCODE (rs.num, 5, 16) | ENCODE (rt2.num, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}

/* Write a LAXR instruction into *BUF.

   LDAXR rt, [xn]

   RT is the destination register.
   RN is the base address register.  */

static int
emit_ldaxr (uint32_t *buf, struct aarch64_register rt,
	    struct aarch64_register rn)
{
  /* RS and RT2 are unused here; encode them as XZR.  */
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, LDAXR, xzr, rt,
				    xzr, rn);
}

/* Write a STXR instruction into *BUF.

   STXR ws, rt, [xn]

   RS is the result register, it indicates if the store succeeded or not.
   RT is the destination register.
   RN is the base address register.  */

static int
emit_stxr (uint32_t *buf, struct aarch64_register rs,
	   struct aarch64_register rt, struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STXR, rs, rt,
				    xzr, rn);
}

/* Write a STLR instruction into *BUF.

   STLR rt, [xn]

   RT is the register to store.
   RN is the base address register.  */

static int
emit_stlr (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STLR, xzr, rt,
				    xzr, rn);
}
/* Helper function for data processing instructions with register sources.  */

static int
emit_data_processing_reg (uint32_t *buf, uint32_t opcode,
			  struct aarch64_register rd,
			  struct aarch64_register rn,
			  struct aarch64_register rm)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, opcode | size | ENCODE (rm.num, 5, 16)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rd.num, 5, 0));
}

/* Helper function for data processing instructions taking either a register
   or an immediate.  */

static int
emit_data_processing (uint32_t *buf, enum aarch64_opcodes opcode,
		      struct aarch64_register rd,
		      struct aarch64_register rn,
		      struct aarch64_operand operand)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  /* The opcode is different for register and immediate source operands.  */
  uint32_t operand_opcode;

  if (operand.type == OPERAND_IMMEDIATE)
    {
      /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (8, 4, 25);

      return aarch64_emit_insn (buf, opcode | operand_opcode | size
				| ENCODE (operand.imm, 12, 10)
				| ENCODE (rn.num, 5, 5)
				| ENCODE (rd.num, 5, 0));
    }
  else
    {
      /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (5, 4, 25);

      return emit_data_processing_reg (buf, opcode | operand_opcode, rd,
				       rn, operand.reg);
    }
}

/* Write an ADD instruction into *BUF.

   ADD rd, rn, #imm
   ADD rd, rn, rm

   This function handles both an immediate and register add.

   RD is the destination register.
   RN is the input register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_add (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, ADD, rd, rn, operand);
}

/* Write a SUB instruction into *BUF.

   SUB rd, rn, #imm
   SUB rd, rn, rm

   This function handles both an immediate and register sub.

   RD is the destination register.
   RN is the input register.
   IMM is the immediate to substract to RN.  */

static int
emit_sub (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUB, rd, rn, operand);
}
1280 /* Write a MOV instruction into *BUF.
1285 This function handles both a wide immediate move and a register move,
1286 with the condition that the source register is not xzr. xzr and the
1287 stack pointer share the same encoding and this function only supports
1290 RD is the destination register.
1291 OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
1292 OPERAND_REGISTER. */
1295 emit_mov (uint32_t *buf
, struct aarch64_register rd
,
1296 struct aarch64_operand operand
)
1298 if (operand
.type
== OPERAND_IMMEDIATE
)
1300 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1301 /* Do not shift the immediate. */
1302 uint32_t shift
= ENCODE (0, 2, 21);
1304 return aarch64_emit_insn (buf
, MOV
| size
| shift
1305 | ENCODE (operand
.imm
, 16, 5)
1306 | ENCODE (rd
.num
, 5, 0));
1309 return emit_add (buf
, rd
, operand
.reg
, immediate_operand (0));
1312 /* Write a MOVK instruction into *BUF.
1314 MOVK rd, #imm, lsl #shift
1316 RD is the destination register.
1317 IMM is the immediate.
1318 SHIFT is the logical shift left to apply to IMM. */
1321 emit_movk (uint32_t *buf
, struct aarch64_register rd
, uint32_t imm
,
1324 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1326 return aarch64_emit_insn (buf
, MOVK
| size
| ENCODE (shift
, 2, 21) |
1327 ENCODE (imm
, 16, 5) | ENCODE (rd
.num
, 5, 0));
1330 /* Write instructions into *BUF in order to move ADDR into a register.
1331 ADDR can be a 64-bit value.
1333 This function will emit a series of MOV and MOVK instructions, such as:
1336 MOVK xd, #(addr >> 16), lsl #16
1337 MOVK xd, #(addr >> 32), lsl #32
1338 MOVK xd, #(addr >> 48), lsl #48 */
1341 emit_mov_addr (uint32_t *buf
, struct aarch64_register rd
, CORE_ADDR addr
)
1345 /* The MOV (wide immediate) instruction clears to top bits of the
1347 p
+= emit_mov (p
, rd
, immediate_operand (addr
& 0xffff));
1349 if ((addr
>> 16) != 0)
1350 p
+= emit_movk (p
, rd
, (addr
>> 16) & 0xffff, 1);
1354 if ((addr
>> 32) != 0)
1355 p
+= emit_movk (p
, rd
, (addr
>> 32) & 0xffff, 2);
1359 if ((addr
>> 48) != 0)
1360 p
+= emit_movk (p
, rd
, (addr
>> 48) & 0xffff, 3);
1365 /* Write a SUBS instruction into *BUF.
1369 This instruction update the condition flags.
1371 RD is the destination register.
1372 RN and RM are the source registers. */
1375 emit_subs (uint32_t *buf
, struct aarch64_register rd
,
1376 struct aarch64_register rn
, struct aarch64_operand operand
)
1378 return emit_data_processing (buf
, SUBS
, rd
, rn
, operand
);
1381 /* Write a CMP instruction into *BUF.
1385 This instruction is an alias of SUBS xzr, rn, rm.
1387 RN and RM are the registers to compare. */
1390 emit_cmp (uint32_t *buf
, struct aarch64_register rn
,
1391 struct aarch64_operand operand
)
1393 return emit_subs (buf
, xzr
, rn
, operand
);
1396 /* Write a AND instruction into *BUF.
1400 RD is the destination register.
1401 RN and RM are the source registers. */
1404 emit_and (uint32_t *buf
, struct aarch64_register rd
,
1405 struct aarch64_register rn
, struct aarch64_register rm
)
1407 return emit_data_processing_reg (buf
, AND
, rd
, rn
, rm
);
1410 /* Write a ORR instruction into *BUF.
1414 RD is the destination register.
1415 RN and RM are the source registers. */
1418 emit_orr (uint32_t *buf
, struct aarch64_register rd
,
1419 struct aarch64_register rn
, struct aarch64_register rm
)
1421 return emit_data_processing_reg (buf
, ORR
, rd
, rn
, rm
);
1424 /* Write a ORN instruction into *BUF.
1428 RD is the destination register.
1429 RN and RM are the source registers. */
1432 emit_orn (uint32_t *buf
, struct aarch64_register rd
,
1433 struct aarch64_register rn
, struct aarch64_register rm
)
1435 return emit_data_processing_reg (buf
, ORN
, rd
, rn
, rm
);
1438 /* Write a EOR instruction into *BUF.
1442 RD is the destination register.
1443 RN and RM are the source registers. */
1446 emit_eor (uint32_t *buf
, struct aarch64_register rd
,
1447 struct aarch64_register rn
, struct aarch64_register rm
)
1449 return emit_data_processing_reg (buf
, EOR
, rd
, rn
, rm
);
1452 /* Write a MVN instruction into *BUF.
1456 This is an alias for ORN rd, xzr, rm.
1458 RD is the destination register.
1459 RM is the source register. */
1462 emit_mvn (uint32_t *buf
, struct aarch64_register rd
,
1463 struct aarch64_register rm
)
1465 return emit_orn (buf
, rd
, xzr
, rm
);
1468 /* Write a LSLV instruction into *BUF.
1472 RD is the destination register.
1473 RN and RM are the source registers. */
1476 emit_lslv (uint32_t *buf
, struct aarch64_register rd
,
1477 struct aarch64_register rn
, struct aarch64_register rm
)
1479 return emit_data_processing_reg (buf
, LSLV
, rd
, rn
, rm
);
1482 /* Write a LSRV instruction into *BUF.
1486 RD is the destination register.
1487 RN and RM are the source registers. */
1490 emit_lsrv (uint32_t *buf
, struct aarch64_register rd
,
1491 struct aarch64_register rn
, struct aarch64_register rm
)
1493 return emit_data_processing_reg (buf
, LSRV
, rd
, rn
, rm
);
1496 /* Write a ASRV instruction into *BUF.
1500 RD is the destination register.
1501 RN and RM are the source registers. */
1504 emit_asrv (uint32_t *buf
, struct aarch64_register rd
,
1505 struct aarch64_register rn
, struct aarch64_register rm
)
1507 return emit_data_processing_reg (buf
, ASRV
, rd
, rn
, rm
);
1510 /* Write a MUL instruction into *BUF.
1514 RD is the destination register.
1515 RN and RM are the source registers. */
1518 emit_mul (uint32_t *buf
, struct aarch64_register rd
,
1519 struct aarch64_register rn
, struct aarch64_register rm
)
1521 return emit_data_processing_reg (buf
, MUL
, rd
, rn
, rm
);
1524 /* Write a MRS instruction into *BUF. The register size is 64-bit.
1528 RT is the destination register.
1529 SYSTEM_REG is special purpose register to read. */
1532 emit_mrs (uint32_t *buf
, struct aarch64_register rt
,
1533 enum aarch64_system_control_registers system_reg
)
1535 return aarch64_emit_insn (buf
, MRS
| ENCODE (system_reg
, 15, 5)
1536 | ENCODE (rt
.num
, 5, 0));
1539 /* Write a MSR instruction into *BUF. The register size is 64-bit.
1543 SYSTEM_REG is special purpose register to write.
1544 RT is the input register. */
1547 emit_msr (uint32_t *buf
, enum aarch64_system_control_registers system_reg
,
1548 struct aarch64_register rt
)
1550 return aarch64_emit_insn (buf
, MSR
| ENCODE (system_reg
, 15, 5)
1551 | ENCODE (rt
.num
, 5, 0));
1554 /* Write a SEVL instruction into *BUF.
1556 This is a hint instruction telling the hardware to trigger an event. */
1559 emit_sevl (uint32_t *buf
)
1561 return aarch64_emit_insn (buf
, SEVL
);
1564 /* Write a WFE instruction into *BUF.
1566 This is a hint instruction telling the hardware to wait for an event. */
1569 emit_wfe (uint32_t *buf
)
1571 return aarch64_emit_insn (buf
, WFE
);
1574 /* Write a SBFM instruction into *BUF.
1576 SBFM rd, rn, #immr, #imms
1578 This instruction moves the bits from #immr to #imms into the
1579 destination, sign extending the result.
1581 RD is the destination register.
1582 RN is the source register.
1583 IMMR is the bit number to start at (least significant bit).
1584 IMMS is the bit number to stop at (most significant bit). */
1587 emit_sbfm (uint32_t *buf
, struct aarch64_register rd
,
1588 struct aarch64_register rn
, uint32_t immr
, uint32_t imms
)
1590 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1591 uint32_t n
= ENCODE (rd
.is64
, 1, 22);
1593 return aarch64_emit_insn (buf
, SBFM
| size
| n
| ENCODE (immr
, 6, 16)
1594 | ENCODE (imms
, 6, 10) | ENCODE (rn
.num
, 5, 5)
1595 | ENCODE (rd
.num
, 5, 0));
1598 /* Write a SBFX instruction into *BUF.
1600 SBFX rd, rn, #lsb, #width
1602 This instruction moves #width bits from #lsb into the destination, sign
1603 extending the result. This is an alias for:
1605 SBFM rd, rn, #lsb, #(lsb + width - 1)
1607 RD is the destination register.
1608 RN is the source register.
1609 LSB is the bit number to start at (least significant bit).
1610 WIDTH is the number of bits to move. */
1613 emit_sbfx (uint32_t *buf
, struct aarch64_register rd
,
1614 struct aarch64_register rn
, uint32_t lsb
, uint32_t width
)
1616 return emit_sbfm (buf
, rd
, rn
, lsb
, lsb
+ width
- 1);
1619 /* Write a UBFM instruction into *BUF.
1621 UBFM rd, rn, #immr, #imms
1623 This instruction moves the bits from #immr to #imms into the
1624 destination, extending the result with zeros.
1626 RD is the destination register.
1627 RN is the source register.
1628 IMMR is the bit number to start at (least significant bit).
1629 IMMS is the bit number to stop at (most significant bit). */
1632 emit_ubfm (uint32_t *buf
, struct aarch64_register rd
,
1633 struct aarch64_register rn
, uint32_t immr
, uint32_t imms
)
1635 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1636 uint32_t n
= ENCODE (rd
.is64
, 1, 22);
1638 return aarch64_emit_insn (buf
, UBFM
| size
| n
| ENCODE (immr
, 6, 16)
1639 | ENCODE (imms
, 6, 10) | ENCODE (rn
.num
, 5, 5)
1640 | ENCODE (rd
.num
, 5, 0));
1643 /* Write a UBFX instruction into *BUF.
1645 UBFX rd, rn, #lsb, #width
1647 This instruction moves #width bits from #lsb into the destination,
1648 extending the result with zeros. This is an alias for:
1650 UBFM rd, rn, #lsb, #(lsb + width - 1)
1652 RD is the destination register.
1653 RN is the source register.
1654 LSB is the bit number to start at (least significant bit).
1655 WIDTH is the number of bits to move. */
1658 emit_ubfx (uint32_t *buf
, struct aarch64_register rd
,
1659 struct aarch64_register rn
, uint32_t lsb
, uint32_t width
)
1661 return emit_ubfm (buf
, rd
, rn
, lsb
, lsb
+ width
- 1);
1664 /* Write a CSINC instruction into *BUF.
1666 CSINC rd, rn, rm, cond
1668 This instruction conditionally increments rn or rm and places the result
1669 in rd. rn is chosen is the condition is true.
1671 RD is the destination register.
1672 RN and RM are the source registers.
1673 COND is the encoded condition. */
1676 emit_csinc (uint32_t *buf
, struct aarch64_register rd
,
1677 struct aarch64_register rn
, struct aarch64_register rm
,
1680 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1682 return aarch64_emit_insn (buf
, CSINC
| size
| ENCODE (rm
.num
, 5, 16)
1683 | ENCODE (cond
, 4, 12) | ENCODE (rn
.num
, 5, 5)
1684 | ENCODE (rd
.num
, 5, 0));
1687 /* Write a CSET instruction into *BUF.
1691 This instruction conditionally write 1 or 0 in the destination register.
1692 1 is written if the condition is true. This is an alias for:
1694 CSINC rd, xzr, xzr, !cond
1696 Note that the condition needs to be inverted.
1698 RD is the destination register.
1699 RN and RM are the source registers.
1700 COND is the encoded condition. */
1703 emit_cset (uint32_t *buf
, struct aarch64_register rd
, unsigned cond
)
1705 /* The least significant bit of the condition needs toggling in order to
1707 return emit_csinc (buf
, rd
, xzr
, xzr
, cond
^ 0x1);
1710 /* Write LEN instructions from BUF into the inferior memory at *TO.
1712 Note instructions are always little endian on AArch64, unlike data. */
1715 append_insns (CORE_ADDR
*to
, size_t len
, const uint32_t *buf
)
1717 size_t byte_len
= len
* sizeof (uint32_t);
1718 #if (__BYTE_ORDER == __BIG_ENDIAN)
1719 uint32_t *le_buf
= (uint32_t *) xmalloc (byte_len
);
1722 for (i
= 0; i
< len
; i
++)
1723 le_buf
[i
] = htole32 (buf
[i
]);
1725 target_write_memory (*to
, (const unsigned char *) le_buf
, byte_len
);
1729 target_write_memory (*to
, (const unsigned char *) buf
, byte_len
);
1735 /* Sub-class of struct aarch64_insn_data, store information of
1736 instruction relocation for fast tracepoint. Visitor can
1737 relocate an instruction from BASE.INSN_ADDR to NEW_ADDR and save
1738 the relocated instructions in buffer pointed by INSN_PTR. */
1740 struct aarch64_insn_relocation_data
1742 struct aarch64_insn_data base
;
1744 /* The new address the instruction is relocated to. */
1746 /* Pointer to the buffer of relocated instruction(s). */
1750 /* Implementation of aarch64_insn_visitor method "b". */
1753 aarch64_ftrace_insn_reloc_b (const int is_bl
, const int32_t offset
,
1754 struct aarch64_insn_data
*data
)
1756 struct aarch64_insn_relocation_data
*insn_reloc
1757 = (struct aarch64_insn_relocation_data
*) data
;
1759 = insn_reloc
->base
.insn_addr
- insn_reloc
->new_addr
+ offset
;
1761 if (can_encode_int32 (new_offset
, 28))
1762 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, is_bl
, new_offset
);
1765 /* Implementation of aarch64_insn_visitor method "b_cond". */
1768 aarch64_ftrace_insn_reloc_b_cond (const unsigned cond
, const int32_t offset
,
1769 struct aarch64_insn_data
*data
)
1771 struct aarch64_insn_relocation_data
*insn_reloc
1772 = (struct aarch64_insn_relocation_data
*) data
;
1774 = insn_reloc
->base
.insn_addr
- insn_reloc
->new_addr
+ offset
;
1776 if (can_encode_int32 (new_offset
, 21))
1778 insn_reloc
->insn_ptr
+= emit_bcond (insn_reloc
->insn_ptr
, cond
,
1781 else if (can_encode_int32 (new_offset
, 28))
1783 /* The offset is out of range for a conditional branch
1784 instruction but not for a unconditional branch. We can use
1785 the following instructions instead:
1787 B.COND TAKEN ; If cond is true, then jump to TAKEN.
1788 B NOT_TAKEN ; Else jump over TAKEN and continue.
1795 insn_reloc
->insn_ptr
+= emit_bcond (insn_reloc
->insn_ptr
, cond
, 8);
1796 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, 0, 8);
1797 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, 0, new_offset
- 8);
1801 /* Implementation of aarch64_insn_visitor method "cb". */
1804 aarch64_ftrace_insn_reloc_cb (const int32_t offset
, const int is_cbnz
,
1805 const unsigned rn
, int is64
,
1806 struct aarch64_insn_data
*data
)
1808 struct aarch64_insn_relocation_data
*insn_reloc
1809 = (struct aarch64_insn_relocation_data
*) data
;
1811 = insn_reloc
->base
.insn_addr
- insn_reloc
->new_addr
+ offset
;
1813 if (can_encode_int32 (new_offset
, 21))
1815 insn_reloc
->insn_ptr
+= emit_cb (insn_reloc
->insn_ptr
, is_cbnz
,
1816 aarch64_register (rn
, is64
), new_offset
);
1818 else if (can_encode_int32 (new_offset
, 28))
1820 /* The offset is out of range for a compare and branch
1821 instruction but not for a unconditional branch. We can use
1822 the following instructions instead:
1824 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
1825 B NOT_TAKEN ; Else jump over TAKEN and continue.
1831 insn_reloc
->insn_ptr
+= emit_cb (insn_reloc
->insn_ptr
, is_cbnz
,
1832 aarch64_register (rn
, is64
), 8);
1833 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, 0, 8);
1834 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, 0, new_offset
- 8);
1838 /* Implementation of aarch64_insn_visitor method "tb". */
1841 aarch64_ftrace_insn_reloc_tb (const int32_t offset
, int is_tbnz
,
1842 const unsigned rt
, unsigned bit
,
1843 struct aarch64_insn_data
*data
)
1845 struct aarch64_insn_relocation_data
*insn_reloc
1846 = (struct aarch64_insn_relocation_data
*) data
;
1848 = insn_reloc
->base
.insn_addr
- insn_reloc
->new_addr
+ offset
;
1850 if (can_encode_int32 (new_offset
, 16))
1852 insn_reloc
->insn_ptr
+= emit_tb (insn_reloc
->insn_ptr
, is_tbnz
, bit
,
1853 aarch64_register (rt
, 1), new_offset
);
1855 else if (can_encode_int32 (new_offset
, 28))
1857 /* The offset is out of range for a test bit and branch
1858 instruction but not for a unconditional branch. We can use
1859 the following instructions instead:
1861 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
1862 B NOT_TAKEN ; Else jump over TAKEN and continue.
1868 insn_reloc
->insn_ptr
+= emit_tb (insn_reloc
->insn_ptr
, is_tbnz
, bit
,
1869 aarch64_register (rt
, 1), 8);
1870 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, 0, 8);
1871 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, 0,
1876 /* Implementation of aarch64_insn_visitor method "adr". */
1879 aarch64_ftrace_insn_reloc_adr (const int32_t offset
, const unsigned rd
,
1881 struct aarch64_insn_data
*data
)
1883 struct aarch64_insn_relocation_data
*insn_reloc
1884 = (struct aarch64_insn_relocation_data
*) data
;
1885 /* We know exactly the address the ADR{P,} instruction will compute.
1886 We can just write it to the destination register. */
1887 CORE_ADDR address
= data
->insn_addr
+ offset
;
1891 /* Clear the lower 12 bits of the offset to get the 4K page. */
1892 insn_reloc
->insn_ptr
+= emit_mov_addr (insn_reloc
->insn_ptr
,
1893 aarch64_register (rd
, 1),
1897 insn_reloc
->insn_ptr
+= emit_mov_addr (insn_reloc
->insn_ptr
,
1898 aarch64_register (rd
, 1), address
);
1901 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
1904 aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset
, const int is_sw
,
1905 const unsigned rt
, const int is64
,
1906 struct aarch64_insn_data
*data
)
1908 struct aarch64_insn_relocation_data
*insn_reloc
1909 = (struct aarch64_insn_relocation_data
*) data
;
1910 CORE_ADDR address
= data
->insn_addr
+ offset
;
1912 insn_reloc
->insn_ptr
+= emit_mov_addr (insn_reloc
->insn_ptr
,
1913 aarch64_register (rt
, 1), address
);
1915 /* We know exactly what address to load from, and what register we
1918 MOV xd, #(oldloc + offset)
1919 MOVK xd, #((oldloc + offset) >> 16), lsl #16
1922 LDR xd, [xd] ; or LDRSW xd, [xd]
1927 insn_reloc
->insn_ptr
+= emit_ldrsw (insn_reloc
->insn_ptr
,
1928 aarch64_register (rt
, 1),
1929 aarch64_register (rt
, 1),
1930 offset_memory_operand (0));
1932 insn_reloc
->insn_ptr
+= emit_ldr (insn_reloc
->insn_ptr
,
1933 aarch64_register (rt
, is64
),
1934 aarch64_register (rt
, 1),
1935 offset_memory_operand (0));
1938 /* Implementation of aarch64_insn_visitor method "others". */
1941 aarch64_ftrace_insn_reloc_others (const uint32_t insn
,
1942 struct aarch64_insn_data
*data
)
1944 struct aarch64_insn_relocation_data
*insn_reloc
1945 = (struct aarch64_insn_relocation_data
*) data
;
1947 /* The instruction is not PC relative. Just re-emit it at the new
1949 insn_reloc
->insn_ptr
+= aarch64_emit_insn (insn_reloc
->insn_ptr
, insn
);
/* Visitor dispatch table used by aarch64_relocate_instruction; entry
   order must match the aarch64_insn_visitor member order.  */
static const struct aarch64_insn_visitor visitor =
{
  aarch64_ftrace_insn_reloc_b,
  aarch64_ftrace_insn_reloc_b_cond,
  aarch64_ftrace_insn_reloc_cb,
  aarch64_ftrace_insn_reloc_tb,
  aarch64_ftrace_insn_reloc_adr,
  aarch64_ftrace_insn_reloc_ldr_literal,
  aarch64_ftrace_insn_reloc_others,
};
1963 /* Implementation of linux_target_ops method
1964 "install_fast_tracepoint_jump_pad". */
1967 aarch64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
,
1969 CORE_ADDR collector
,
1972 CORE_ADDR
*jump_entry
,
1973 CORE_ADDR
*trampoline
,
1974 ULONGEST
*trampoline_size
,
1975 unsigned char *jjump_pad_insn
,
1976 ULONGEST
*jjump_pad_insn_size
,
1977 CORE_ADDR
*adjusted_insn_addr
,
1978 CORE_ADDR
*adjusted_insn_addr_end
,
1986 CORE_ADDR buildaddr
= *jump_entry
;
1987 struct aarch64_insn_relocation_data insn_data
;
1989 /* We need to save the current state on the stack both to restore it
1990 later and to collect register values when the tracepoint is hit.
1992 The saved registers are pushed in a layout that needs to be in sync
1993 with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c). Later on
1994 the supply_fast_tracepoint_registers function will fill in the
1995 register cache from a pointer to saved registers on the stack we build
1998 For simplicity, we set the size of each cell on the stack to 16 bytes.
1999 This way one cell can hold any register type, from system registers
2000 to the 128 bit SIMD&FP registers. Furthermore, the stack pointer
2001 has to be 16 bytes aligned anyway.
2003 Note that the CPSR register does not exist on AArch64. Instead we
2004 can access system bits describing the process state with the
2005 MRS/MSR instructions, namely the condition flags. We save them as
2006 if they are part of a CPSR register because that's how GDB
2007 interprets these system bits. At the moment, only the condition
2008 flags are saved in CPSR (NZCV).
2010 Stack layout, each cell is 16 bytes (descending):
2012 High *-------- SIMD&FP registers from 31 down to 0. --------*
2018 *---- General purpose registers from 30 down to 0. ----*
2024 *------------- Special purpose registers. -------------*
2027 | CPSR (NZCV) | 5 cells
2030 *------------- collecting_t object --------------------*
2031 | TPIDR_EL0 | struct tracepoint * |
2032 Low *------------------------------------------------------*
2034 After this stack is set up, we issue a call to the collector, passing
2035 it the saved registers at (SP + 16). */
2037 /* Push SIMD&FP registers on the stack:
2039 SUB sp, sp, #(32 * 16)
2041 STP q30, q31, [sp, #(30 * 16)]
2046 p
+= emit_sub (p
, sp
, sp
, immediate_operand (32 * 16));
2047 for (i
= 30; i
>= 0; i
-= 2)
2048 p
+= emit_stp_q_offset (p
, i
, i
+ 1, sp
, i
* 16);
2050 /* Push general purpose registers on the stack. Note that we do not need
2051 to push x31 as it represents the xzr register and not the stack
2052 pointer in a STR instruction.
2054 SUB sp, sp, #(31 * 16)
2056 STR x30, [sp, #(30 * 16)]
2061 p
+= emit_sub (p
, sp
, sp
, immediate_operand (31 * 16));
2062 for (i
= 30; i
>= 0; i
-= 1)
2063 p
+= emit_str (p
, aarch64_register (i
, 1), sp
,
2064 offset_memory_operand (i
* 16));
2066 /* Make space for 5 more cells.
2068 SUB sp, sp, #(5 * 16)
2071 p
+= emit_sub (p
, sp
, sp
, immediate_operand (5 * 16));
2076 ADD x4, sp, #((32 + 31 + 5) * 16)
2077 STR x4, [sp, #(4 * 16)]
2080 p
+= emit_add (p
, x4
, sp
, immediate_operand ((32 + 31 + 5) * 16));
2081 p
+= emit_str (p
, x4
, sp
, offset_memory_operand (4 * 16));
2083 /* Save PC (tracepoint address):
2088 STR x3, [sp, #(3 * 16)]
2092 p
+= emit_mov_addr (p
, x3
, tpaddr
);
2093 p
+= emit_str (p
, x3
, sp
, offset_memory_operand (3 * 16));
2095 /* Save CPSR (NZCV), FPSR and FPCR:
2101 STR x2, [sp, #(2 * 16)]
2102 STR x1, [sp, #(1 * 16)]
2103 STR x0, [sp, #(0 * 16)]
2106 p
+= emit_mrs (p
, x2
, NZCV
);
2107 p
+= emit_mrs (p
, x1
, FPSR
);
2108 p
+= emit_mrs (p
, x0
, FPCR
);
2109 p
+= emit_str (p
, x2
, sp
, offset_memory_operand (2 * 16));
2110 p
+= emit_str (p
, x1
, sp
, offset_memory_operand (1 * 16));
2111 p
+= emit_str (p
, x0
, sp
, offset_memory_operand (0 * 16));
2113 /* Push the collecting_t object. It consist of the address of the
2114 tracepoint and an ID for the current thread. We get the latter by
2115 reading the tpidr_el0 system register. It corresponds to the
2116 NT_ARM_TLS register accessible with ptrace.
2123 STP x0, x1, [sp, #-16]!
2127 p
+= emit_mov_addr (p
, x0
, tpoint
);
2128 p
+= emit_mrs (p
, x1
, TPIDR_EL0
);
2129 p
+= emit_stp (p
, x0
, x1
, sp
, preindex_memory_operand (-16));
2133 The shared memory for the lock is at lockaddr. It will hold zero
2134 if no-one is holding the lock, otherwise it contains the address of
2135 the collecting_t object on the stack of the thread which acquired it.
2137 At this stage, the stack pointer points to this thread's collecting_t
2140 We use the following registers:
2141 - x0: Address of the lock.
2142 - x1: Pointer to collecting_t object.
2143 - x2: Scratch register.
2149 ; Trigger an event local to this core. So the following WFE
2150 ; instruction is ignored.
2153 ; Wait for an event. The event is triggered by either the SEVL
2154 ; or STLR instructions (store release).
2157 ; Atomically read at lockaddr. This marks the memory location as
2158 ; exclusive. This instruction also has memory constraints which
2159 ; make sure all previous data reads and writes are done before
2163 ; Try again if another thread holds the lock.
2166 ; We can lock it! Write the address of the collecting_t object.
2167 ; This instruction will fail if the memory location is not marked
2168 ; as exclusive anymore. If it succeeds, it will remove the
2169 ; exclusive mark on the memory location. This way, if another
2170 ; thread executes this instruction before us, we will fail and try
2177 p
+= emit_mov_addr (p
, x0
, lockaddr
);
2178 p
+= emit_mov (p
, x1
, register_operand (sp
));
2182 p
+= emit_ldaxr (p
, x2
, x0
);
2183 p
+= emit_cb (p
, 1, w2
, -2 * 4);
2184 p
+= emit_stxr (p
, w2
, x1
, x0
);
2185 p
+= emit_cb (p
, 1, x2
, -4 * 4);
2187 /* Call collector (struct tracepoint *, unsigned char *):
2192 ; Saved registers start after the collecting_t object.
2195 ; We use an intra-procedure-call scratch register.
2196 MOV ip0, #(collector)
2199 ; And call back to C!
2204 p
+= emit_mov_addr (p
, x0
, tpoint
);
2205 p
+= emit_add (p
, x1
, sp
, immediate_operand (16));
2207 p
+= emit_mov_addr (p
, ip0
, collector
);
2208 p
+= emit_blr (p
, ip0
);
2210 /* Release the lock.
2215 ; This instruction is a normal store with memory ordering
2216 ; constraints. Thanks to this we do not have to put a data
2217 ; barrier instruction to make sure all data read and writes are done
2218 ; before this instruction is executed. Furthermore, this instruction
2219 ; will trigger an event, letting other threads know they can grab
2224 p
+= emit_mov_addr (p
, x0
, lockaddr
);
2225 p
+= emit_stlr (p
, xzr
, x0
);
2227 /* Free collecting_t object:
2232 p
+= emit_add (p
, sp
, sp
, immediate_operand (16));
2234 /* Restore CPSR (NZCV), FPSR and FPCR. And free all special purpose
2235 registers from the stack.
2237 LDR x2, [sp, #(2 * 16)]
2238 LDR x1, [sp, #(1 * 16)]
2239 LDR x0, [sp, #(0 * 16)]
2245 ADD sp, sp #(5 * 16)
2248 p
+= emit_ldr (p
, x2
, sp
, offset_memory_operand (2 * 16));
2249 p
+= emit_ldr (p
, x1
, sp
, offset_memory_operand (1 * 16));
2250 p
+= emit_ldr (p
, x0
, sp
, offset_memory_operand (0 * 16));
2251 p
+= emit_msr (p
, NZCV
, x2
);
2252 p
+= emit_msr (p
, FPSR
, x1
);
2253 p
+= emit_msr (p
, FPCR
, x0
);
2255 p
+= emit_add (p
, sp
, sp
, immediate_operand (5 * 16));
2257 /* Pop general purpose registers:
2261 LDR x30, [sp, #(30 * 16)]
2263 ADD sp, sp, #(31 * 16)
2266 for (i
= 0; i
<= 30; i
+= 1)
2267 p
+= emit_ldr (p
, aarch64_register (i
, 1), sp
,
2268 offset_memory_operand (i
* 16));
2269 p
+= emit_add (p
, sp
, sp
, immediate_operand (31 * 16));
2271 /* Pop SIMD&FP registers:
2275 LDP q30, q31, [sp, #(30 * 16)]
2277 ADD sp, sp, #(32 * 16)
2280 for (i
= 0; i
<= 30; i
+= 2)
2281 p
+= emit_ldp_q_offset (p
, i
, i
+ 1, sp
, i
* 16);
2282 p
+= emit_add (p
, sp
, sp
, immediate_operand (32 * 16));
2284 /* Write the code into the inferior memory. */
2285 append_insns (&buildaddr
, p
- buf
, buf
);
2287 /* Now emit the relocated instruction. */
2288 *adjusted_insn_addr
= buildaddr
;
2289 target_read_uint32 (tpaddr
, &insn
);
2291 insn_data
.base
.insn_addr
= tpaddr
;
2292 insn_data
.new_addr
= buildaddr
;
2293 insn_data
.insn_ptr
= buf
;
2295 aarch64_relocate_instruction (insn
, &visitor
,
2296 (struct aarch64_insn_data
*) &insn_data
);
2298 /* We may not have been able to relocate the instruction. */
2299 if (insn_data
.insn_ptr
== buf
)
2302 "E.Could not relocate instruction from %s to %s.",
2303 core_addr_to_string_nz (tpaddr
),
2304 core_addr_to_string_nz (buildaddr
));
2308 append_insns (&buildaddr
, insn_data
.insn_ptr
- buf
, buf
);
2309 *adjusted_insn_addr_end
= buildaddr
;
2311 /* Go back to the start of the buffer. */
2314 /* Emit a branch back from the jump pad. */
2315 offset
= (tpaddr
+ orig_size
- buildaddr
);
2316 if (!can_encode_int32 (offset
, 28))
2319 "E.Jump back from jump pad too far from tracepoint "
2320 "(offset 0x%" PRIx64
" cannot be encoded in 28 bits).",
2325 p
+= emit_b (p
, 0, offset
);
2326 append_insns (&buildaddr
, p
- buf
, buf
);
2328 /* Give the caller a branch instruction into the jump pad. */
2329 offset
= (*jump_entry
- tpaddr
);
2330 if (!can_encode_int32 (offset
, 28))
2333 "E.Jump pad too far from tracepoint "
2334 "(offset 0x%" PRIx64
" cannot be encoded in 28 bits).",
2339 emit_b ((uint32_t *) jjump_pad_insn
, 0, offset
);
2340 *jjump_pad_insn_size
= 4;
2342 /* Return the end address of our pad. */
2343 *jump_entry
= buildaddr
;
2348 /* Helper function writing LEN instructions from START into
2349 current_insn_ptr. */
2352 emit_ops_insns (const uint32_t *start
, int len
)
2354 CORE_ADDR buildaddr
= current_insn_ptr
;
2357 debug_printf ("Adding %d instrucions at %s\n",
2358 len
, paddress (buildaddr
));
2360 append_insns (&buildaddr
, len
, start
);
2361 current_insn_ptr
= buildaddr
;
2364 /* Pop a register from the stack. */
2367 emit_pop (uint32_t *buf
, struct aarch64_register rt
)
2369 return emit_ldr (buf
, rt
, sp
, postindex_memory_operand (1 * 16));
2372 /* Push a register on the stack. */
2375 emit_push (uint32_t *buf
, struct aarch64_register rt
)
2377 return emit_str (buf
, rt
, sp
, preindex_memory_operand (-1 * 16));
2380 /* Implementation of emit_ops method "emit_prologue". */
2383 aarch64_emit_prologue (void)
2388 /* This function emit a prologue for the following function prototype:
2390 enum eval_result_type f (unsigned char *regs,
2393 The first argument is a buffer of raw registers. The second
2394 argument is the result of
2395 evaluating the expression, which will be set to whatever is on top of
2396 the stack at the end.
2398 The stack set up by the prologue is as such:
2400 High *------------------------------------------------------*
2403 | x1 (ULONGEST *value) |
2404 | x0 (unsigned char *regs) |
2405 Low *------------------------------------------------------*
2407 As we are implementing a stack machine, each opcode can expand the
2408 stack so we never know how far we are from the data saved by this
2409 prologue. In order to be able refer to value and regs later, we save
2410 the current stack pointer in the frame pointer. This way, it is not
2411 clobbered when calling C functions.
2413 Finally, throughout every operation, we are using register x0 as the
2414 top of the stack, and x1 as a scratch register. */
2416 p
+= emit_stp (p
, x0
, x1
, sp
, preindex_memory_operand (-2 * 16));
2417 p
+= emit_str (p
, lr
, sp
, offset_memory_operand (3 * 8));
2418 p
+= emit_str (p
, fp
, sp
, offset_memory_operand (2 * 8));
2420 p
+= emit_add (p
, fp
, sp
, immediate_operand (2 * 8));
2423 emit_ops_insns (buf
, p
- buf
);
2426 /* Implementation of emit_ops method "emit_epilogue". */
2429 aarch64_emit_epilogue (void)
2434 /* Store the result of the expression (x0) in *value. */
2435 p
+= emit_sub (p
, x1
, fp
, immediate_operand (1 * 8));
2436 p
+= emit_ldr (p
, x1
, x1
, offset_memory_operand (0));
2437 p
+= emit_str (p
, x0
, x1
, offset_memory_operand (0));
2439 /* Restore the previous state. */
2440 p
+= emit_add (p
, sp
, fp
, immediate_operand (2 * 8));
2441 p
+= emit_ldp (p
, fp
, lr
, fp
, offset_memory_operand (0));
2443 /* Return expr_eval_no_error. */
2444 p
+= emit_mov (p
, x0
, immediate_operand (expr_eval_no_error
));
2445 p
+= emit_ret (p
, lr
);
2447 emit_ops_insns (buf
, p
- buf
);
2450 /* Implementation of emit_ops method "emit_add". */
2453 aarch64_emit_add (void)
2458 p
+= emit_pop (p
, x1
);
2459 p
+= emit_add (p
, x0
, x1
, register_operand (x0
));
2461 emit_ops_insns (buf
, p
- buf
);
2464 /* Implementation of emit_ops method "emit_sub". */
2467 aarch64_emit_sub (void)
2472 p
+= emit_pop (p
, x1
);
2473 p
+= emit_sub (p
, x0
, x1
, register_operand (x0
));
2475 emit_ops_insns (buf
, p
- buf
);
2478 /* Implementation of emit_ops method "emit_mul". */
2481 aarch64_emit_mul (void)
2486 p
+= emit_pop (p
, x1
);
2487 p
+= emit_mul (p
, x0
, x1
, x0
);
2489 emit_ops_insns (buf
, p
- buf
);
2492 /* Implementation of emit_ops method "emit_lsh". */
2495 aarch64_emit_lsh (void)
2500 p
+= emit_pop (p
, x1
);
2501 p
+= emit_lslv (p
, x0
, x1
, x0
);
2503 emit_ops_insns (buf
, p
- buf
);
2506 /* Implementation of emit_ops method "emit_rsh_signed". */
2509 aarch64_emit_rsh_signed (void)
2514 p
+= emit_pop (p
, x1
);
2515 p
+= emit_asrv (p
, x0
, x1
, x0
);
2517 emit_ops_insns (buf
, p
- buf
);
2520 /* Implementation of emit_ops method "emit_rsh_unsigned". */
2523 aarch64_emit_rsh_unsigned (void)
2528 p
+= emit_pop (p
, x1
);
2529 p
+= emit_lsrv (p
, x0
, x1
, x0
);
2531 emit_ops_insns (buf
, p
- buf
);
2534 /* Implementation of emit_ops method "emit_ext". */
2537 aarch64_emit_ext (int arg
)
2542 p
+= emit_sbfx (p
, x0
, x0
, 0, arg
);
2544 emit_ops_insns (buf
, p
- buf
);
2547 /* Implementation of emit_ops method "emit_log_not". */
2550 aarch64_emit_log_not (void)
2555 /* If the top of the stack is 0, replace it with 1. Else replace it with
2558 p
+= emit_cmp (p
, x0
, immediate_operand (0));
2559 p
+= emit_cset (p
, x0
, EQ
);
2561 emit_ops_insns (buf
, p
- buf
);
2564 /* Implementation of emit_ops method "emit_bit_and". */
2567 aarch64_emit_bit_and (void)
2572 p
+= emit_pop (p
, x1
);
2573 p
+= emit_and (p
, x0
, x0
, x1
);
2575 emit_ops_insns (buf
, p
- buf
);
2578 /* Implementation of emit_ops method "emit_bit_or". */
2581 aarch64_emit_bit_or (void)
2586 p
+= emit_pop (p
, x1
);
2587 p
+= emit_orr (p
, x0
, x0
, x1
);
2589 emit_ops_insns (buf
, p
- buf
);
2592 /* Implementation of emit_ops method "emit_bit_xor". */
2595 aarch64_emit_bit_xor (void)
2600 p
+= emit_pop (p
, x1
);
2601 p
+= emit_eor (p
, x0
, x0
, x1
);
2603 emit_ops_insns (buf
, p
- buf
);
2606 /* Implementation of emit_ops method "emit_bit_not". */
2609 aarch64_emit_bit_not (void)
2614 p
+= emit_mvn (p
, x0
, x0
);
2616 emit_ops_insns (buf
, p
- buf
);
2619 /* Implementation of emit_ops method "emit_equal". */
2622 aarch64_emit_equal (void)
2627 p
+= emit_pop (p
, x1
);
2628 p
+= emit_cmp (p
, x0
, register_operand (x1
));
2629 p
+= emit_cset (p
, x0
, EQ
);
2631 emit_ops_insns (buf
, p
- buf
);
2634 /* Implementation of emit_ops method "emit_less_signed". */
2637 aarch64_emit_less_signed (void)
2642 p
+= emit_pop (p
, x1
);
2643 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2644 p
+= emit_cset (p
, x0
, LT
);
2646 emit_ops_insns (buf
, p
- buf
);
2649 /* Implementation of emit_ops method "emit_less_unsigned". */
2652 aarch64_emit_less_unsigned (void)
2657 p
+= emit_pop (p
, x1
);
2658 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2659 p
+= emit_cset (p
, x0
, LO
);
2661 emit_ops_insns (buf
, p
- buf
);
2664 /* Implementation of emit_ops method "emit_ref". */
2667 aarch64_emit_ref (int size
)
2675 p
+= emit_ldrb (p
, w0
, x0
, offset_memory_operand (0));
2678 p
+= emit_ldrh (p
, w0
, x0
, offset_memory_operand (0));
2681 p
+= emit_ldr (p
, w0
, x0
, offset_memory_operand (0));
2684 p
+= emit_ldr (p
, x0
, x0
, offset_memory_operand (0));
2687 /* Unknown size, bail on compilation. */
2692 emit_ops_insns (buf
, p
- buf
);
2695 /* Implementation of emit_ops method "emit_if_goto". */
2698 aarch64_emit_if_goto (int *offset_p
, int *size_p
)
2703 /* The Z flag is set or cleared here. */
2704 p
+= emit_cmp (p
, x0
, immediate_operand (0));
2705 /* This instruction must not change the Z flag. */
2706 p
+= emit_pop (p
, x0
);
2707 /* Branch over the next instruction if x0 == 0. */
2708 p
+= emit_bcond (p
, EQ
, 8);
2710 /* The NOP instruction will be patched with an unconditional branch. */
2712 *offset_p
= (p
- buf
) * 4;
2717 emit_ops_insns (buf
, p
- buf
);
2720 /* Implementation of emit_ops method "emit_goto". */
2723 aarch64_emit_goto (int *offset_p
, int *size_p
)
2728 /* The NOP instruction will be patched with an unconditional branch. */
2735 emit_ops_insns (buf
, p
- buf
);
2738 /* Implementation of emit_ops method "write_goto_address". */
2741 aarch64_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
2745 emit_b (&insn
, 0, to
- from
);
2746 append_insns (&from
, 1, &insn
);
2749 /* Implementation of emit_ops method "emit_const". */
2752 aarch64_emit_const (LONGEST num
)
2757 p
+= emit_mov_addr (p
, x0
, num
);
2759 emit_ops_insns (buf
, p
- buf
);
2762 /* Implementation of emit_ops method "emit_call". */
2765 aarch64_emit_call (CORE_ADDR fn
)
2770 p
+= emit_mov_addr (p
, ip0
, fn
);
2771 p
+= emit_blr (p
, ip0
);
2773 emit_ops_insns (buf
, p
- buf
);
2776 /* Implementation of emit_ops method "emit_reg". */
2779 aarch64_emit_reg (int reg
)
2784 /* Set x0 to unsigned char *regs. */
2785 p
+= emit_sub (p
, x0
, fp
, immediate_operand (2 * 8));
2786 p
+= emit_ldr (p
, x0
, x0
, offset_memory_operand (0));
2787 p
+= emit_mov (p
, x1
, immediate_operand (reg
));
2789 emit_ops_insns (buf
, p
- buf
);
2791 aarch64_emit_call (get_raw_reg_func_addr ());
2794 /* Implementation of emit_ops method "emit_pop". */
2797 aarch64_emit_pop (void)
2802 p
+= emit_pop (p
, x0
);
2804 emit_ops_insns (buf
, p
- buf
);
2807 /* Implementation of emit_ops method "emit_stack_flush". */
2810 aarch64_emit_stack_flush (void)
2815 p
+= emit_push (p
, x0
);
2817 emit_ops_insns (buf
, p
- buf
);
2820 /* Implementation of emit_ops method "emit_zero_ext". */
2823 aarch64_emit_zero_ext (int arg
)
2828 p
+= emit_ubfx (p
, x0
, x0
, 0, arg
);
2830 emit_ops_insns (buf
, p
- buf
);
2833 /* Implementation of emit_ops method "emit_swap". */
2836 aarch64_emit_swap (void)
2841 p
+= emit_ldr (p
, x1
, sp
, offset_memory_operand (0 * 16));
2842 p
+= emit_str (p
, x0
, sp
, offset_memory_operand (0 * 16));
2843 p
+= emit_mov (p
, x0
, register_operand (x1
));
2845 emit_ops_insns (buf
, p
- buf
);
2848 /* Implementation of emit_ops method "emit_stack_adjust". */
2851 aarch64_emit_stack_adjust (int n
)
2853 /* This is not needed with our design. */
2857 p
+= emit_add (p
, sp
, sp
, immediate_operand (n
* 16));
2859 emit_ops_insns (buf
, p
- buf
);
2862 /* Implementation of emit_ops method "emit_int_call_1". */
2865 aarch64_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
2870 p
+= emit_mov (p
, x0
, immediate_operand (arg1
));
2872 emit_ops_insns (buf
, p
- buf
);
2874 aarch64_emit_call (fn
);
2877 /* Implementation of emit_ops method "emit_void_call_2". */
2880 aarch64_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
2885 /* Push x0 on the stack. */
2886 aarch64_emit_stack_flush ();
2888 /* Setup arguments for the function call:
2891 x1: top of the stack
2896 p
+= emit_mov (p
, x1
, register_operand (x0
));
2897 p
+= emit_mov (p
, x0
, immediate_operand (arg1
));
2899 emit_ops_insns (buf
, p
- buf
);
2901 aarch64_emit_call (fn
);
2904 aarch64_emit_pop ();
2907 /* Implementation of emit_ops method "emit_eq_goto". */
2910 aarch64_emit_eq_goto (int *offset_p
, int *size_p
)
2915 p
+= emit_pop (p
, x1
);
2916 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2917 /* Branch over the next instruction if x0 != x1. */
2918 p
+= emit_bcond (p
, NE
, 8);
2919 /* The NOP instruction will be patched with an unconditional branch. */
2921 *offset_p
= (p
- buf
) * 4;
2926 emit_ops_insns (buf
, p
- buf
);
2929 /* Implementation of emit_ops method "emit_ne_goto". */
2932 aarch64_emit_ne_goto (int *offset_p
, int *size_p
)
2937 p
+= emit_pop (p
, x1
);
2938 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2939 /* Branch over the next instruction if x0 == x1. */
2940 p
+= emit_bcond (p
, EQ
, 8);
2941 /* The NOP instruction will be patched with an unconditional branch. */
2943 *offset_p
= (p
- buf
) * 4;
2948 emit_ops_insns (buf
, p
- buf
);
2951 /* Implementation of emit_ops method "emit_lt_goto". */
2954 aarch64_emit_lt_goto (int *offset_p
, int *size_p
)
2959 p
+= emit_pop (p
, x1
);
2960 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2961 /* Branch over the next instruction if x0 >= x1. */
2962 p
+= emit_bcond (p
, GE
, 8);
2963 /* The NOP instruction will be patched with an unconditional branch. */
2965 *offset_p
= (p
- buf
) * 4;
2970 emit_ops_insns (buf
, p
- buf
);
2973 /* Implementation of emit_ops method "emit_le_goto". */
2976 aarch64_emit_le_goto (int *offset_p
, int *size_p
)
2981 p
+= emit_pop (p
, x1
);
2982 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2983 /* Branch over the next instruction if x0 > x1. */
2984 p
+= emit_bcond (p
, GT
, 8);
2985 /* The NOP instruction will be patched with an unconditional branch. */
2987 *offset_p
= (p
- buf
) * 4;
2992 emit_ops_insns (buf
, p
- buf
);
2995 /* Implementation of emit_ops method "emit_gt_goto". */
2998 aarch64_emit_gt_goto (int *offset_p
, int *size_p
)
3003 p
+= emit_pop (p
, x1
);
3004 p
+= emit_cmp (p
, x1
, register_operand (x0
));
3005 /* Branch over the next instruction if x0 <= x1. */
3006 p
+= emit_bcond (p
, LE
, 8);
3007 /* The NOP instruction will be patched with an unconditional branch. */
3009 *offset_p
= (p
- buf
) * 4;
3014 emit_ops_insns (buf
, p
- buf
);
3017 /* Implementation of emit_ops method "emit_ge_got". */
3020 aarch64_emit_ge_got (int *offset_p
, int *size_p
)
3025 p
+= emit_pop (p
, x1
);
3026 p
+= emit_cmp (p
, x1
, register_operand (x0
));
3027 /* Branch over the next instruction if x0 <= x1. */
3028 p
+= emit_bcond (p
, LT
, 8);
3029 /* The NOP instruction will be patched with an unconditional branch. */
3031 *offset_p
= (p
- buf
) * 4;
3036 emit_ops_insns (buf
, p
- buf
);
3039 static struct emit_ops aarch64_emit_ops_impl
=
3041 aarch64_emit_prologue
,
3042 aarch64_emit_epilogue
,
3047 aarch64_emit_rsh_signed
,
3048 aarch64_emit_rsh_unsigned
,
3050 aarch64_emit_log_not
,
3051 aarch64_emit_bit_and
,
3052 aarch64_emit_bit_or
,
3053 aarch64_emit_bit_xor
,
3054 aarch64_emit_bit_not
,
3056 aarch64_emit_less_signed
,
3057 aarch64_emit_less_unsigned
,
3059 aarch64_emit_if_goto
,
3061 aarch64_write_goto_address
,
3066 aarch64_emit_stack_flush
,
3067 aarch64_emit_zero_ext
,
3069 aarch64_emit_stack_adjust
,
3070 aarch64_emit_int_call_1
,
3071 aarch64_emit_void_call_2
,
3072 aarch64_emit_eq_goto
,
3073 aarch64_emit_ne_goto
,
3074 aarch64_emit_lt_goto
,
3075 aarch64_emit_le_goto
,
3076 aarch64_emit_gt_goto
,
3077 aarch64_emit_ge_got
,
3080 /* Implementation of linux_target_ops method "emit_ops". */
3082 static struct emit_ops
*
3083 aarch64_emit_ops (void)
3085 return &aarch64_emit_ops_impl
;
/* Implementation of linux_target_ops method
   "get_min_fast_tracepoint_insn_len".  */

static int
aarch64_get_min_fast_tracepoint_insn_len (void)
{
  /* All AArch64 instructions are 4 bytes.  */
  return 4;
}
/* Implementation of linux_target_ops method "supports_range_stepping".  */

static int
aarch64_supports_range_stepping (void)
{
  return 1;
}
3105 /* Implementation of target ops method "sw_breakpoint_from_kind". */
3108 aarch64_target::sw_breakpoint_from_kind (int kind
, int *size
)
3110 if (is_64bit_tdesc ())
3112 *size
= aarch64_breakpoint_len
;
3113 return aarch64_breakpoint
;
3116 return arm_sw_breakpoint_from_kind (kind
, size
);
3119 /* Implementation of target ops method "breakpoint_kind_from_pc". */
3122 aarch64_target::breakpoint_kind_from_pc (CORE_ADDR
*pcptr
)
3124 if (is_64bit_tdesc ())
3125 return aarch64_breakpoint_len
;
3127 return arm_breakpoint_kind_from_pc (pcptr
);
3130 /* Implementation of the target ops method
3131 "breakpoint_kind_from_current_state". */
3134 aarch64_target::breakpoint_kind_from_current_state (CORE_ADDR
*pcptr
)
3136 if (is_64bit_tdesc ())
3137 return aarch64_breakpoint_len
;
3139 return arm_breakpoint_kind_from_current_state (pcptr
);
/* Support for hardware single step.  */

static int
aarch64_supports_hardware_single_step (void)
{
  return 1;
}
3150 struct linux_target_ops the_low_target
=
3152 aarch64_get_thread_area
,
3153 aarch64_install_fast_tracepoint_jump_pad
,
3155 aarch64_get_min_fast_tracepoint_insn_len
,
3156 aarch64_supports_range_stepping
,
3157 aarch64_supports_hardware_single_step
,
3158 aarch64_get_syscall_trapinfo
,
3161 /* The linux target ops object. */
3163 linux_process_target
*the_linux_target
= &the_aarch64_target
;
3166 initialize_low_arch (void)
3168 initialize_low_arch_aarch32 ();
3170 initialize_regsets_info (&aarch64_regsets_info
);
3171 initialize_regsets_info (&aarch64_sve_regsets_info
);