/* GNU/Linux/AArch64 specific low level interface, for the remote server for
   GDB.

   Copyright (C) 2009-2020 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "server.h"
#include "linux-low.h"
#include "nat/aarch64-linux.h"
#include "nat/aarch64-linux-hw-point.h"
#include "arch/aarch64-insn.h"
#include "linux-aarch32-low.h"
#include "elf/common.h"
#include "ax.h"
#include "tracepoint.h"
#include "debug.h"

#include <signal.h>
#include <sys/user.h>
#include "nat/gdb_ptrace.h"
#include <asm/ptrace.h>
#include <inttypes.h>
#include <endian.h>
#include <sys/uio.h>

#include "gdb_proc_service.h"
#include "arch/aarch64.h"
#include "linux-aarch32-tdesc.h"
#include "linux-aarch64-tdesc.h"
#include "nat/aarch64-sve-linux-ptrace.h"
#include <elf.h>
/* Linux target op definitions for the AArch64 architecture.  */

class aarch64_target : public linux_process_target
{
public:

  const regs_info *get_regs_info () override;

  int breakpoint_kind_from_pc (CORE_ADDR *pcptr) override;

  int breakpoint_kind_from_current_state (CORE_ADDR *pcptr) override;

  const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;

  bool supports_z_point_type (char z_type) override;

protected:

  void low_arch_setup () override;

  bool low_cannot_fetch_register (int regno) override;

  bool low_cannot_store_register (int regno) override;

  bool low_supports_breakpoints () override;

  CORE_ADDR low_get_pc (regcache *regcache) override;

  void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;

  bool low_breakpoint_at (CORE_ADDR pc) override;

  int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;
};

/* The singleton target ops object.  */

static aarch64_target the_aarch64_target;
bool
aarch64_target::low_cannot_fetch_register (int regno)
{
  gdb_assert_not_reached ("linux target op low_cannot_fetch_register "
			  "is not implemented by the target");
}

bool
aarch64_target::low_cannot_store_register (int regno)
{
  gdb_assert_not_reached ("linux target op low_cannot_store_register "
			  "is not implemented by the target");
}
/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  /* Hardware breakpoint/watchpoint data.
     The reason for them to be per-process rather than per-thread is
     due to the lack of information in the gdbserver environment;
     gdbserver is not told whether a requested hardware
     breakpoint/watchpoint is thread specific or not, so it has to set
     each hw bp/wp for every thread in the current process.  The
     higher level bp/wp management in gdb will resume a thread if a hw
     bp/wp trap is not expected for it.  Since the hw bp/wp setting is
     the same for each thread, it is reasonable for the data to live
     here.  */
  struct aarch64_debug_reg_state debug_reg_state;
};
/* Return true if the size of register 0 is 8 bytes.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}
/* Return true if the regcache's target description contains the SVE
   feature.  */

static bool
is_sve_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return tdesc_contains_feature (regcache->tdesc, "org.gnu.gdb.aarch64.sve");
}
static void
aarch64_fill_gregset (struct regcache *regcache, void *buf)
{
  struct user_pt_regs *regset = (struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    collect_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  collect_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  collect_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  collect_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}
static void
aarch64_store_gregset (struct regcache *regcache, const void *buf)
{
  const struct user_pt_regs *regset = (const struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    supply_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  supply_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  supply_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  supply_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}
static void
aarch64_fill_fpregset (struct regcache *regcache, void *buf)
{
  struct user_fpsimd_state *regset = (struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    collect_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  collect_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  collect_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}
static void
aarch64_store_fpregset (struct regcache *regcache, const void *buf)
{
  const struct user_fpsimd_state *regset
    = (const struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    supply_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  supply_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  supply_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}
/* Store the pauth registers to regcache.  */

static void
aarch64_store_pauthregset (struct regcache *regcache, const void *buf)
{
  uint64_t *pauth_regset = (uint64_t *) buf;
  int pauth_base = find_regno (regcache->tdesc, "pauth_dmask");

  if (pauth_base == 0)
    return;

  supply_register (regcache, AARCH64_PAUTH_DMASK_REGNUM (pauth_base),
		   &pauth_regset[0]);
  supply_register (regcache, AARCH64_PAUTH_CMASK_REGNUM (pauth_base),
		   &pauth_regset[1]);
}
/* Implementation of linux target ops method "low_supports_breakpoints".  */

bool
aarch64_target::low_supports_breakpoints ()
{
  return true;
}

/* Implementation of linux target ops method "low_get_pc".  */

CORE_ADDR
aarch64_target::low_get_pc (regcache *regcache)
{
  if (register_size (regcache->tdesc, 0) == 8)
    return linux_get_pc_64bit (regcache);
  else
    return linux_get_pc_32bit (regcache);
}
/* Implementation of linux target ops method "low_set_pc".  */

void
aarch64_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
{
  if (register_size (regcache->tdesc, 0) == 8)
    linux_set_pc_64bit (regcache, pc);
  else
    linux_set_pc_32bit (regcache, pc);
}
#define aarch64_breakpoint_len 4

/* AArch64 BRK software debug mode instruction.
   This instruction needs to match gdb/aarch64-tdep.c
   (aarch64_default_breakpoint).  */
static const gdb_byte aarch64_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};

/* Implementation of linux target ops method "low_breakpoint_at".  */

bool
aarch64_target::low_breakpoint_at (CORE_ADDR where)
{
  if (is_64bit_tdesc ())
    {
      gdb_byte insn[aarch64_breakpoint_len];

      read_memory (where, (unsigned char *) &insn, aarch64_breakpoint_len);
      if (memcmp (insn, aarch64_breakpoint, aarch64_breakpoint_len) == 0)
	return true;

      return false;
    }
  else
    return arm_breakpoint_at (where);
}
static void
aarch64_init_debug_reg_state (struct aarch64_debug_reg_state *state)
{
  int i;

  for (i = 0; i < AARCH64_HBP_MAX_NUM; ++i)
    {
      state->dr_addr_bp[i] = 0;
      state->dr_ctrl_bp[i] = 0;
      state->dr_ref_count_bp[i] = 0;
    }

  for (i = 0; i < AARCH64_HWP_MAX_NUM; ++i)
    {
      state->dr_addr_wp[i] = 0;
      state->dr_ctrl_wp[i] = 0;
      state->dr_ref_count_wp[i] = 0;
    }
}
/* Return the pointer to the debug register state structure in the
   current process' arch-specific data area.  */

struct aarch64_debug_reg_state *
aarch64_get_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}
/* Implementation of target ops method "supports_z_point_type".  */

bool
aarch64_target::supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_READ_WP:
    case Z_PACKET_ACCESS_WP:
      return true;
    default:
      return false;
    }
}
/* Implementation of linux target ops method "low_insert_point".

   It actually only records the info of the to-be-inserted bp/wp;
   the actual insertion will happen when threads are resumed.  */

int
aarch64_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
				  int len, raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "insert_point on entry (addr=0x%08lx, len=%d)\n",
	     (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  if (targ_type != hw_execute)
    {
      if (aarch64_linux_region_ok_for_watchpoint (addr, len))
	ret = aarch64_handle_watchpoint (targ_type, addr, len,
					 1 /* is_insert */, state);
      else
	ret = -1;
    }
  else
    {
      if (len == 3)
	{
	  /* A LEN of 3 means the breakpoint is set on a 32-bit Thumb
	     instruction.  Set it to 2 to correctly encode the length
	     bit mask in the hardware breakpoint/watchpoint control
	     register.  */
	  len = 2;
	}
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
				       1 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "insert_point", addr, len,
				  targ_type);

  return ret;
}
/* Implementation of linux target ops method "low_remove_point".

   It actually only records the info of the to-be-removed bp/wp,
   the actual removal will be done when threads are resumed.  */

int
aarch64_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
				  int len, raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "remove_point on entry (addr=0x%08lx, len=%d)\n",
	     (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  /* Set up state pointers.  */
  if (targ_type != hw_execute)
    ret = aarch64_handle_watchpoint (targ_type, addr, len, 0 /* is_insert */,
				     state);
  else
    {
      if (len == 3)
	{
	  /* A LEN of 3 means the breakpoint is set on a 32-bit Thumb
	     instruction.  Set it to 2 to correctly encode the length
	     bit mask in the hardware breakpoint/watchpoint control
	     register.  */
	  len = 2;
	}
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
				       0 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "remove_point", addr, len,
				  targ_type);

  return ret;
}
/* Implementation of linux_target_ops method "stopped_data_address".  */

static CORE_ADDR
aarch64_stopped_data_address (void)
{
  siginfo_t siginfo;
  int pid, i;
  struct aarch64_debug_reg_state *state;

  pid = lwpid_of (current_thread);

  /* Get the siginfo.  */
  if (ptrace (PTRACE_GETSIGINFO, pid, NULL, &siginfo) != 0)
    return (CORE_ADDR) 0;

  /* Need to be a hardware breakpoint/watchpoint trap.  */
  if (siginfo.si_signo != SIGTRAP
      || (siginfo.si_code & 0xffff) != 0x0004 /* TRAP_HWBKPT */)
    return (CORE_ADDR) 0;

  /* Check if the address matches any watched address.  */
  state = aarch64_get_debug_reg_state (pid_of (current_thread));
  for (i = aarch64_num_wp_regs - 1; i >= 0; --i)
    {
      const unsigned int offset
	= aarch64_watchpoint_offset (state->dr_ctrl_wp[i]);
      const unsigned int len = aarch64_watchpoint_length (state->dr_ctrl_wp[i]);
      const CORE_ADDR addr_trap = (CORE_ADDR) siginfo.si_addr;
      const CORE_ADDR addr_watch = state->dr_addr_wp[i] + offset;
      const CORE_ADDR addr_watch_aligned = align_down (state->dr_addr_wp[i], 8);
      const CORE_ADDR addr_orig = state->dr_addr_orig_wp[i];

      if (state->dr_ref_count_wp[i]
	  && DR_CONTROL_ENABLED (state->dr_ctrl_wp[i])
	  && addr_trap >= addr_watch_aligned
	  && addr_trap < addr_watch + len)
	{
	  /* ADDR_TRAP reports the first address of the memory range
	     accessed by the CPU, regardless of what was the memory
	     range watched.  Thus, a large CPU access that straddles
	     the ADDR_WATCH..ADDR_WATCH+LEN range may result in an
	     ADDR_TRAP that is lower than the
	     ADDR_WATCH..ADDR_WATCH+LEN range.  E.g.:

	     addr: |   4   |   5   |   6   |   7   |   8   |
		   |---- range watched ----|
		   |----------- range accessed ------------|

	     In this case, ADDR_TRAP will be 4.

	     To match a watchpoint known to GDB core, we must never
	     report *ADDR_P outside of any ADDR_WATCH..ADDR_WATCH+LEN
	     range.  ADDR_WATCH <= ADDR_TRAP < ADDR_ORIG is a false
	     positive on kernels older than 4.10.  See PR  */
	  return addr_orig;
	}
    }

  return (CORE_ADDR) 0;
}
/* Implementation of linux_target_ops method "stopped_by_watchpoint".  */

static int
aarch64_stopped_by_watchpoint (void)
{
  if (aarch64_stopped_data_address () != 0)
    return 1;
  else
    return 0;
}

/* Fetch the thread-local storage pointer for libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
  return aarch64_ps_get_thread_area (ph, lwpid, idx, base,
				     is_64bit_tdesc ());
}
/* Implementation of linux_target_ops method "siginfo_fixup".  */

static int
aarch64_linux_siginfo_fixup (siginfo_t *native, gdb_byte *inf, int direction)
{
  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    {
      if (direction == 0)
	aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo *) inf,
					     native);
      else
	aarch64_siginfo_from_compat_siginfo (native,
					     (struct compat_siginfo *) inf);

      return 1;
    }

  return 0;
}
/* Implementation of linux_target_ops method "new_process".  */

static struct arch_process_info *
aarch64_linux_new_process (void)
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  aarch64_init_debug_reg_state (&info->debug_reg_state);

  return info;
}

/* Implementation of linux_target_ops method "delete_process".  */

static void
aarch64_linux_delete_process (struct arch_process_info *info)
{
  xfree (info);
}

/* Implementation of linux_target_ops method "linux_new_fork".  */

static void
aarch64_linux_new_fork (struct process_info *parent,
			struct process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
	      && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
	      && child->priv->arch_private != NULL);

  /* Linux kernels before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     will inherit hardware debug registers from the parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug register mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug register mirrors will become zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}
/* Matches HWCAP_PACA in kernel header arch/arm64/include/uapi/asm/hwcap.h.  */
#define AARCH64_HWCAP_PACA (1 << 30)

/* Implementation of linux target ops method "low_arch_setup".  */

void
aarch64_target::low_arch_setup ()
{
  unsigned int machine;
  int is_elf64;
  int tid;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (is_elf64)
    {
      uint64_t vq = aarch64_sve_get_vq (tid);
      unsigned long hwcap = linux_get_hwcap (8);
      bool pauth_p = hwcap & AARCH64_HWCAP_PACA;

      current_process ()->tdesc = aarch64_linux_read_description (vq, pauth_p);
    }
  else
    current_process ()->tdesc = aarch32_linux_read_description ();

  aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread));
}
/* Wrapper for aarch64_sve_regs_copy_to_reg_buf.  */

static void
aarch64_sve_regs_copy_to_regcache (struct regcache *regcache, const void *buf)
{
  return aarch64_sve_regs_copy_to_reg_buf (regcache, buf);
}

/* Wrapper for aarch64_sve_regs_copy_from_reg_buf.  */

static void
aarch64_sve_regs_copy_from_regcache (struct regcache *regcache, void *buf)
{
  return aarch64_sve_regs_copy_from_reg_buf (regcache, buf);
}
static struct regset_info aarch64_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_FPREGSET,
    sizeof (struct user_fpsimd_state), FP_REGS,
    aarch64_fill_fpregset, aarch64_store_fpregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
    AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
    NULL, aarch64_store_pauthregset },
  NULL_REGSET
};

static struct regsets_info aarch64_regsets_info =
  {
    aarch64_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

static struct regs_info regs_info_aarch64 =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs */
    &aarch64_regsets_info,
  };

static struct regset_info aarch64_sve_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_SVE,
    SVE_PT_SIZE (AARCH64_MAX_SVE_VQ, SVE_PT_REGS_SVE), EXTENDED_REGS,
    aarch64_sve_regs_copy_from_regcache, aarch64_sve_regs_copy_to_regcache },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
    AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
    NULL, aarch64_store_pauthregset },
  NULL_REGSET
};

static struct regsets_info aarch64_sve_regsets_info =
  {
    aarch64_sve_regsets, /* regsets.  */
    0, /* num_regsets.  */
    NULL, /* disabled_regsets.  */
  };

static struct regs_info regs_info_aarch64_sve =
  {
    NULL, /* regset_bitmap.  */
    NULL, /* usrregs.  */
    &aarch64_sve_regsets_info,
  };
/* Implementation of linux target ops method "get_regs_info".  */

const regs_info *
aarch64_target::get_regs_info ()
{
  if (!is_64bit_tdesc ())
    return &regs_info_aarch32;

  if (is_sve_tdesc ())
    return &regs_info_aarch64_sve;

  return &regs_info_aarch64;
}
/* Implementation of linux_target_ops method "supports_tracepoints".  */

static int
aarch64_supports_tracepoints (void)
{
  if (current_thread == NULL)
    return 1;
  else
    {
      /* We don't support tracepoints on aarch32 now.  */
      return is_64bit_tdesc ();
    }
}
/* Implementation of linux_target_ops method "get_thread_area".  */

static int
aarch64_get_thread_area (int lwpid, CORE_ADDR *addrp)
{
  struct iovec iovec;
  uint64_t reg;

  iovec.iov_base = &reg;
  iovec.iov_len = sizeof (reg);

  if (ptrace (PTRACE_GETREGSET, lwpid, NT_ARM_TLS, &iovec) != 0)
    return -1;

  *addrp = reg;

  return 0;
}
/* Implementation of linux_target_ops method "get_syscall_trapinfo".  */

static void
aarch64_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      collect_register_by_name (regcache, "x8", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "r7", sysno);
}
/* List of condition codes that we need.  */

enum aarch64_condition_codes
{
  EQ = 0x0,
  NE = 0x1,
  LO = 0x3,
  GE = 0xa,
  LT = 0xb,
  GT = 0xc,
  LE = 0xd,
};

enum aarch64_operand_type
{
  OPERAND_IMMEDIATE,
  OPERAND_REGISTER,
};

/* Representation of an operand.  At this time, it only supports register
   and immediate types.  */

struct aarch64_operand
{
  /* Type of the operand.  */
  enum aarch64_operand_type type;

  /* Value of the operand according to the type.  */
  union
    {
      uint32_t imm;
      struct aarch64_register reg;
    };
};
/* List of registers that we are currently using, we can add more here as
   we need to use them.  */

/* General purpose scratch registers (64 bit).  */
static const struct aarch64_register x0 = { 0, 1 };
static const struct aarch64_register x1 = { 1, 1 };
static const struct aarch64_register x2 = { 2, 1 };
static const struct aarch64_register x3 = { 3, 1 };
static const struct aarch64_register x4 = { 4, 1 };

/* General purpose scratch registers (32 bit).  */
static const struct aarch64_register w0 = { 0, 0 };
static const struct aarch64_register w2 = { 2, 0 };

/* Intra-procedure scratch registers.  */
static const struct aarch64_register ip0 = { 16, 1 };

/* Special purpose registers.  */
static const struct aarch64_register fp = { 29, 1 };
static const struct aarch64_register lr = { 30, 1 };
static const struct aarch64_register sp = { 31, 1 };
static const struct aarch64_register xzr = { 31, 1 };
/* Dynamically allocate a new register.  If we know the register
   statically, we should make it a global as above instead of using this
   helper function.  */

static struct aarch64_register
aarch64_register (unsigned num, int is64)
{
  return (struct aarch64_register) { num, is64 };
}
/* Helper function to create a register operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, register_operand (x1));  */

static struct aarch64_operand
register_operand (struct aarch64_register reg)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_REGISTER;
  operand.reg = reg;

  return operand;
}
/* Helper function to create an immediate operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, immediate_operand (12));  */

static struct aarch64_operand
immediate_operand (uint32_t imm)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_IMMEDIATE;
  operand.imm = imm;

  return operand;
}
/* Helper function to create an offset memory operand.

   For example:
   p += emit_ldr (p, x0, sp, offset_memory_operand (16));  */

static struct aarch64_memory_operand
offset_memory_operand (int32_t offset)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_OFFSET, offset };
}

/* Helper function to create a pre-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, preindex_memory_operand (16));  */

static struct aarch64_memory_operand
preindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_PREINDEX, index };
}

/* Helper function to create a post-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, postindex_memory_operand (16));  */

static struct aarch64_memory_operand
postindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_POSTINDEX, index };
}
/* System control registers.  These special registers can be written and
   read with the MRS and MSR instructions.

   - NZCV: Condition flags.  GDB refers to this register under the CPSR
     name.
   - FPSR: Floating-point status register.
   - FPCR: Floating-point control registers.
   - TPIDR_EL0: Software thread ID register.  */

enum aarch64_system_control_registers
{
  /*          op0           op1           crn          crm          op2  */
  NZCV =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
  FPSR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
  FPCR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
  TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
};
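/* For instance (an illustrative note): the 15-bit values above pack
   the fields o0:op1:CRn:CRm:op2, where o0 encodes op0 - 2.  NZCV is
   op0=3, op1=3, CRn=0x4, CRm=0x2, op2=0x0, which gives
   (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0.
   emit_mrs and emit_msr below place this value at bits [19:5] of the
   instruction, so that, e.g.:

     p += emit_mrs (p, x2, NZCV);

   assembles to "mrs x2, nzcv".  */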
/* Write a BLR instruction into *BUF.

     BLR rn

   RN is the register to branch to.  */

static int
emit_blr (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, BLR | ENCODE (rn.num, 5, 5));
}

/* Write a RET instruction into *BUF.

     RET xn

   RN is the register to branch to.  */

static int
emit_ret (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, RET | ENCODE (rn.num, 5, 5));
}
static int
emit_load_store_pair (uint32_t *buf, enum aarch64_opcodes opcode,
		      struct aarch64_register rt,
		      struct aarch64_register rt2,
		      struct aarch64_register rn,
		      struct aarch64_memory_operand operand)
{
  uint32_t opc;
  uint32_t pre_index;
  uint32_t write_back;

  if (rt.is64)
    opc = ENCODE (2, 2, 30);
  else
    opc = ENCODE (0, 2, 30);

  switch (operand.type)
    {
    case MEMORY_OPERAND_OFFSET:
      {
	pre_index = ENCODE (1, 1, 24);
	write_back = ENCODE (0, 1, 23);
	break;
      }
    case MEMORY_OPERAND_POSTINDEX:
      {
	pre_index = ENCODE (0, 1, 24);
	write_back = ENCODE (1, 1, 23);
	break;
      }
    case MEMORY_OPERAND_PREINDEX:
      {
	pre_index = ENCODE (1, 1, 24);
	write_back = ENCODE (1, 1, 23);
	break;
      }
    default:
      return 0;
    }

  return aarch64_emit_insn (buf, opcode | opc | pre_index | write_back
			    | ENCODE (operand.index >> 3, 7, 15)
			    | ENCODE (rt2.num, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}
/* Write a STP instruction into *BUF.

     STP rt, rt2, [rn, #offset]
     STP rt, rt2, [rn, #index]!
     STP rt, rt2, [rn], #index

   RT and RT2 are the registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_stp (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rt2, struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, STP, rt, rt2, rn, operand);
}
/* Write a LDP instruction into *BUF.

     LDP rt, rt2, [rn, #offset]
     LDP rt, rt2, [rn, #index]!
     LDP rt, rt2, [rn], #index

   RT and RT2 are the registers to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_ldp (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rt2, struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, LDP, rt, rt2, rn, operand);
}
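/* For example (illustrative):

     p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-16));

   assembles to "stp x0, x1, [sp, #-16]!"; emit_load_store_pair scales
   the byte offset by 8 (operand.index >> 3) so it fits the 7-bit
   immediate field of the instruction.  */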
/* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.

     LDP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_ldp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
		   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, LDP_SIMD_VFP | opc | pre_index
			    | ENCODE (offset >> 4, 7, 15)
			    | ENCODE (rt2, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}
/* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.

     STP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_stp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
		   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, STP_SIMD_VFP | opc | pre_index
			    | ENCODE (offset >> 4, 7, 15)
			    | ENCODE (rt2, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}
/* Write a LDRH instruction into *BUF.

     LDRH wt, [xn, #offset]
     LDRH wt, [xn, #index]!
     LDRH wt, [xn], #index

   RT is the register to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_ldrh (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn,
	   struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 1, LDR, rt, rn, operand);
}
/* Write a LDRB instruction into *BUF.

     LDRB wt, [xn, #offset]
     LDRB wt, [xn, #index]!
     LDRB wt, [xn], #index

   RT is the register to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_ldrb (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn,
	   struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 0, LDR, rt, rn, operand);
}
/* Write a STR instruction into *BUF.

     STR rt, [rn, #offset]
     STR rt, [rn, #index]!
     STR rt, [rn], #index

   RT is the register to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_str (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, rt.is64 ? 3 : 2, STR, rt, rn, operand);
}
/* Helper function emitting an exclusive load or store instruction.  */

static int
emit_load_store_exclusive (uint32_t *buf, uint32_t size,
			   enum aarch64_opcodes opcode,
			   struct aarch64_register rs,
			   struct aarch64_register rt,
			   struct aarch64_register rt2,
			   struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, opcode | ENCODE (size, 2, 30)
			    | ENCODE (rs.num, 5, 16) | ENCODE (rt2.num, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}
/* Write a LDAXR instruction into *BUF.

     LDAXR rt, [xn]

   RT is the destination register.
   RN is the base address register.  */

static int
emit_ldaxr (uint32_t *buf, struct aarch64_register rt,
	    struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, LDAXR, xzr, rt,
				    xzr, rn);
}
/* Write a STXR instruction into *BUF.

     STXR ws, rt, [xn]

   RS is the result register, it indicates if the store succeeded or not.
   RT is the destination register.
   RN is the base address register.  */

static int
emit_stxr (uint32_t *buf, struct aarch64_register rs,
	   struct aarch64_register rt, struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STXR, rs, rt,
				    xzr, rn);
}
/* Write a STLR instruction into *BUF.

     STLR rt, [xn]

   RT is the register to store.
   RN is the base address register.  */

static int
emit_stlr (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STLR, xzr, rt,
				    xzr, rn);
}
/* Helper function for data processing instructions with register sources.  */

static int
emit_data_processing_reg (uint32_t *buf, uint32_t opcode,
			  struct aarch64_register rd,
			  struct aarch64_register rn,
			  struct aarch64_register rm)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, opcode | size | ENCODE (rm.num, 5, 16)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rd.num, 5, 0));
}
/* Helper function for data processing instructions taking either a register
   or an immediate.  */

static int
emit_data_processing (uint32_t *buf, enum aarch64_opcodes opcode,
		      struct aarch64_register rd,
		      struct aarch64_register rn,
		      struct aarch64_operand operand)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  /* The opcode is different for register and immediate source operands.  */
  uint32_t operand_opcode;

  if (operand.type == OPERAND_IMMEDIATE)
    {
      /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (8, 4, 25);

      return aarch64_emit_insn (buf, opcode | operand_opcode | size
				| ENCODE (operand.imm, 12, 10)
				| ENCODE (rn.num, 5, 5)
				| ENCODE (rd.num, 5, 0));
    }
  else
    {
      /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (5, 4, 25);

      return emit_data_processing_reg (buf, opcode | operand_opcode, rd,
				       rn, operand.reg);
    }
}
/* Write an ADD instruction into *BUF.

     ADD rd, rn, #imm
     ADD rd, rn, rm

   This function handles both an immediate and register add.

   RD is the destination register.
   RN is the input register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_add (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, ADD, rd, rn, operand);
}
/* Write a SUB instruction into *BUF.

     SUB rd, rn, #imm
     SUB rd, rn, rm

   This function handles both an immediate and register sub.

   RD is the destination register.
   RN is the input register.
   OPERAND is the immediate or register operand to subtract from RN.  */

static int
emit_sub (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUB, rd, rn, operand);
}
/* Write a MOV instruction into *BUF.

     MOV rd, #imm
     MOV rd, rm

   This function handles both a wide immediate move and a register move,
   with the condition that the source register is not xzr.  xzr and the
   stack pointer share the same encoding and this function only supports
   the stack pointer.

   RD is the destination register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_mov (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_operand operand)
{
  if (operand.type == OPERAND_IMMEDIATE)
    {
      uint32_t size = ENCODE (rd.is64, 1, 31);
      /* Do not shift the immediate.  */
      uint32_t shift = ENCODE (0, 2, 21);

      return aarch64_emit_insn (buf, MOV | size | shift
				| ENCODE (operand.imm, 16, 5)
				| ENCODE (rd.num, 5, 0));
    }
  else
    return emit_add (buf, rd, operand.reg, immediate_operand (0));
}
/* Write a MOVK instruction into *BUF.

     MOVK rd, #imm, lsl #shift

   RD is the destination register.
   IMM is the immediate.
   SHIFT is the logical shift left to apply to IMM.  */

static int
emit_movk (uint32_t *buf, struct aarch64_register rd, uint32_t imm,
	   unsigned shift)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, MOVK | size | ENCODE (shift, 2, 21) |
			    ENCODE (imm, 16, 5) | ENCODE (rd.num, 5, 0));
}
/* Write instructions into *BUF in order to move ADDR into a register.
   ADDR can be a 64-bit value.

   This function will emit a series of MOV and MOVK instructions, such as:

     MOV  xd, #(addr)
     MOVK xd, #(addr >> 16), lsl #16
     MOVK xd, #(addr >> 32), lsl #32
     MOVK xd, #(addr >> 48), lsl #48  */

static int
emit_mov_addr (uint32_t *buf, struct aarch64_register rd, CORE_ADDR addr)
{
  uint32_t *p = buf;

  /* The MOV (wide immediate) instruction clears the top bits of the
     destination register.  */
  p += emit_mov (p, rd, immediate_operand (addr & 0xffff));

  if ((addr >> 16) != 0)
    p += emit_movk (p, rd, (addr >> 16) & 0xffff, 1);
  else
    return p - buf;

  if ((addr >> 32) != 0)
    p += emit_movk (p, rd, (addr >> 32) & 0xffff, 2);
  else
    return p - buf;

  if ((addr >> 48) != 0)
    p += emit_movk (p, rd, (addr >> 48) & 0xffff, 3);

  return p - buf;
}
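/* For instance (illustrative), with ADDR = 0x7fb7d01234 this emits

     MOV  xd, #0x1234
     MOVK xd, #0xb7d0, lsl #16
     MOVK xd, #0x7f, lsl #32

   and returns 3, the number of instructions written, since the top 16
   bits of ADDR are zero and the final MOVK is skipped.  */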
/* Write a SUBS instruction into *BUF.

     SUBS rd, rn, rm

   This instruction updates the condition flags.

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_subs (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUBS, rd, rn, operand);
}
/* Write a CMP instruction into *BUF.

     CMP rn, rm

   This instruction is an alias of SUBS xzr, rn, rm.

   RN and RM are the registers to compare.  */

static int
emit_cmp (uint32_t *buf, struct aarch64_register rn,
	  struct aarch64_operand operand)
{
  return emit_subs (buf, xzr, rn, operand);
}
/* Write an AND instruction into *BUF.

     AND rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_and (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, AND, rd, rn, rm);
}
/* Write an ORR instruction into *BUF.

     ORR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orr (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORR, rd, rn, rm);
}
/* Write an ORN instruction into *BUF.

     ORN rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orn (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORN, rd, rn, rm);
}
/* Write an EOR instruction into *BUF.

     EOR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_eor (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, EOR, rd, rn, rm);
}
/* Write a MVN instruction into *BUF.

     MVN rd, rm

   This is an alias for ORN rd, xzr, rm.

   RD is the destination register.
   RM is the source register.  */

static int
emit_mvn (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rm)
{
  return emit_orn (buf, rd, xzr, rm);
}
/* Write a LSLV instruction into *BUF.

     LSLV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lslv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSLV, rd, rn, rm);
}
/* Write a LSRV instruction into *BUF.

     LSRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lsrv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSRV, rd, rn, rm);
}
/* Write an ASRV instruction into *BUF.

     ASRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_asrv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ASRV, rd, rn, rm);
}
/* Write a MUL instruction into *BUF.

     MUL rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_mul (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, MUL, rd, rn, rm);
}
/* Write a MRS instruction into *BUF.  The register size is 64-bit.

     MRS xt, system_reg

   RT is the destination register.
   SYSTEM_REG is the special purpose register to read.  */

static int
emit_mrs (uint32_t *buf, struct aarch64_register rt,
	  enum aarch64_system_control_registers system_reg)
{
  return aarch64_emit_insn (buf, MRS | ENCODE (system_reg, 15, 5)
			    | ENCODE (rt.num, 5, 0));
}
/* Write a MSR instruction into *BUF.  The register size is 64-bit.

     MSR system_reg, xt

   SYSTEM_REG is the special purpose register to write.
   RT is the input register.  */

static int
emit_msr (uint32_t *buf, enum aarch64_system_control_registers system_reg,
	  struct aarch64_register rt)
{
  return aarch64_emit_insn (buf, MSR | ENCODE (system_reg, 15, 5)
			    | ENCODE (rt.num, 5, 0));
}
/* Write a SEVL instruction into *BUF.

   This is a hint instruction telling the hardware to trigger an event.  */

static int
emit_sevl (uint32_t *buf)
{
  return aarch64_emit_insn (buf, SEVL);
}
/* Write a WFE instruction into *BUF.

   This is a hint instruction telling the hardware to wait for an event.  */

static int
emit_wfe (uint32_t *buf)
{
  return aarch64_emit_insn (buf, WFE);
}
/* Write a SBFM instruction into *BUF.

     SBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, sign extending the result.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_sbfm (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, SBFM | size | n | ENCODE (immr, 6, 16)
			    | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}
/* Write a SBFX instruction into *BUF.

     SBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination, sign
   extending the result.  This is an alias for:

     SBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_sbfx (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_sbfm (buf, rd, rn, lsb, lsb + width - 1);
}
/* Write a UBFM instruction into *BUF.

     UBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, extending the result with zeros.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_ubfm (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, UBFM | size | n | ENCODE (immr, 6, 16)
			    | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}
/* Write a UBFX instruction into *BUF.

     UBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination,
   extending the result with zeros.  This is an alias for:

     UBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_ubfx (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_ubfm (buf, rd, rn, lsb, lsb + width - 1);
}
/* Write a CSINC instruction into *BUF.

     CSINC rd, rn, rm, cond

   If the condition is true, RD is set to RN; otherwise RD is set to
   RM + 1.

   RD is the destination register.
   RN and RM are the source registers.
   COND is the encoded condition.  */

static int
emit_csinc (uint32_t *buf, struct aarch64_register rd,
	    struct aarch64_register rn, struct aarch64_register rm,
	    unsigned cond)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, CSINC | size | ENCODE (rm.num, 5, 16)
			    | ENCODE (cond, 4, 12) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}
/* Write a CSET instruction into *BUF.

     CSET rd, cond

   This instruction conditionally writes 1 or 0 in the destination register.
   1 is written if the condition is true.  This is an alias for:

     CSINC rd, xzr, xzr, !cond

   Note that the condition needs to be inverted.

   RD is the destination register.
   COND is the encoded condition.  */

static int
emit_cset (uint32_t *buf, struct aarch64_register rd, unsigned cond)
{
  /* The least significant bit of the condition needs toggling in order to
     invert it.  */
  return emit_csinc (buf, rd, xzr, xzr, cond ^ 0x1);
}
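/* For example (illustrative):

     p += emit_cset (p, x0, EQ);

   emits "csinc x0, xzr, xzr, ne", which writes 1 to x0 if the Z flag
   is set and 0 otherwise.  */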
/* Write LEN instructions from BUF into the inferior memory at *TO.

   Note instructions are always little endian on AArch64, unlike data.  */

static void
append_insns (CORE_ADDR *to, size_t len, const uint32_t *buf)
{
  size_t byte_len = len * sizeof (uint32_t);
#if (__BYTE_ORDER == __BIG_ENDIAN)
  uint32_t *le_buf = (uint32_t *) xmalloc (byte_len);
  size_t i;

  for (i = 0; i < len; i++)
    le_buf[i] = htole32 (buf[i]);

  target_write_memory (*to, (const unsigned char *) le_buf, byte_len);

  xfree (le_buf);
#else
  target_write_memory (*to, (const unsigned char *) buf, byte_len);
#endif

  *to += byte_len;
}
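/* A minimal usage sketch of the emitters above (illustrative only;
   SCRATCH is a hypothetical writable address in the inferior):

     uint32_t buf[2];
     uint32_t *p = buf;
     CORE_ADDR scratch = SCRATCH;

     p += emit_mov (p, x0, immediate_operand (4));
     p += emit_ret (p, lr);
     append_insns (&scratch, p - buf, buf);

   After the call the inferior memory contains "mov x0, #4; ret" and
   SCRATCH has advanced by 8 bytes.  */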
/* Sub-class of struct aarch64_insn_data, which stores information about
   instruction relocation for fast tracepoints.  The visitor can
   relocate an instruction from BASE.INSN_ADDR to NEW_ADDR and save
   the relocated instructions in the buffer pointed to by INSN_PTR.  */

struct aarch64_insn_relocation_data
{
  struct aarch64_insn_data base;

  /* The new address the instruction is relocated to.  */
  CORE_ADDR new_addr;

  /* Pointer to the buffer of relocated instruction(s).  */
  uint32_t *insn_ptr;
};
/* Implementation of aarch64_insn_visitor method "b".  */

static void
aarch64_ftrace_insn_reloc_b (const int is_bl, const int32_t offset,
			     struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 28))
    insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, is_bl, new_offset);
}
/* Implementation of aarch64_insn_visitor method "b_cond".  */

static void
aarch64_ftrace_insn_reloc_b_cond (const unsigned cond, const int32_t offset,
				  struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 21))
    {
      insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond,
					  new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a conditional branch
	 instruction but not for an unconditional branch.  We can use
	 the following instructions instead:

	   B.COND TAKEN    ; If cond is true, then jump to TAKEN.
	   B NOT_TAKEN     ; Else jump over TAKEN and continue.
	 TAKEN:
	   B #(offset - 8)
	 NOT_TAKEN:

       */

      insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
    }
}
/* Implementation of aarch64_insn_visitor method "cb".  */

static void
aarch64_ftrace_insn_reloc_cb (const int32_t offset, const int is_cbnz,
			      const unsigned rn, int is64,
			      struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 21))
    {
      insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
				       aarch64_register (rn, is64), new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a compare and branch
	 instruction but not for an unconditional branch.  We can use
	 the following instructions instead:

	   CBZ xn, TAKEN   ; xn == 0, then jump to TAKEN.
	   B NOT_TAKEN     ; Else jump over TAKEN and continue.
	 TAKEN:
	   B #(offset - 8)
	 NOT_TAKEN:

       */

      insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
				       aarch64_register (rn, is64), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
    }
}
/* Implementation of aarch64_insn_visitor method "tb".  */

static void
aarch64_ftrace_insn_reloc_tb (const int32_t offset, int is_tbnz,
			      const unsigned rt, unsigned bit,
			      struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 16))
    {
      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
				       aarch64_register (rt, 1), new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a test bit and branch
	 instruction but not for an unconditional branch.  We can use
	 the following instructions instead:

	   TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
	   B NOT_TAKEN         ; Else jump over TAKEN and continue.
	 TAKEN:
	   B #(offset - 8)
	 NOT_TAKEN:

       */

      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
				       aarch64_register (rt, 1), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
				      new_offset - 8);
    }
}
/* Implementation of aarch64_insn_visitor method "adr".  */

static void
aarch64_ftrace_insn_reloc_adr (const int32_t offset, const unsigned rd,
			       const int is_adrp,
			       struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  /* We know exactly the address the ADR{P,} instruction will compute.
     We can just write it to the destination register.  */
  CORE_ADDR address = data->insn_addr + offset;

  if (is_adrp)
    {
      /* Clear the lower 12 bits of the offset to get the 4K page.  */
      insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
					     aarch64_register (rd, 1),
					     address & ~0xfff);
    }
  else
    insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
					   aarch64_register (rd, 1), address);
}
/* Implementation of aarch64_insn_visitor method "ldr_literal".  */

static void
aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset, const int is_sw,
				       const unsigned rt, const int is64,
				       struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  CORE_ADDR address = data->insn_addr + offset;

  insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
					 aarch64_register (rt, 1), address);

  /* We know exactly what address to load from, and what register we
     can use:

       MOV xd, #(oldloc + offset)
       MOVK xd, #((oldloc + offset) >> 16), lsl #16
       ...

       LDR xd, [xd] ; or LDRSW xd, [xd]

     */

  if (is_sw)
    insn_reloc->insn_ptr += emit_ldrsw (insn_reloc->insn_ptr,
					aarch64_register (rt, 1),
					aarch64_register (rt, 1),
					offset_memory_operand (0));
  else
    insn_reloc->insn_ptr += emit_ldr (insn_reloc->insn_ptr,
				      aarch64_register (rt, is64),
				      aarch64_register (rt, 1),
				      offset_memory_operand (0));
}
1904 aarch64_ftrace_insn_reloc_others (const uint32_t insn
,
1905 struct aarch64_insn_data
*data
)
1907 struct aarch64_insn_relocation_data
*insn_reloc
1908 = (struct aarch64_insn_relocation_data
*) data
;
1910 /* The instruction is not PC relative. Just re-emit it at the new
1912 insn_reloc
->insn_ptr
+= aarch64_emit_insn (insn_reloc
->insn_ptr
, insn
);
static const struct aarch64_insn_visitor visitor =
{
  aarch64_ftrace_insn_reloc_b,
  aarch64_ftrace_insn_reloc_b_cond,
  aarch64_ftrace_insn_reloc_cb,
  aarch64_ftrace_insn_reloc_tb,
  aarch64_ftrace_insn_reloc_adr,
  aarch64_ftrace_insn_reloc_ldr_literal,
  aarch64_ftrace_insn_reloc_others,
};
1926 /* Implementation of linux_target_ops method
1927 "install_fast_tracepoint_jump_pad". */
1930 aarch64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
,
1932 CORE_ADDR collector
,
1935 CORE_ADDR
*jump_entry
,
1936 CORE_ADDR
*trampoline
,
1937 ULONGEST
*trampoline_size
,
1938 unsigned char *jjump_pad_insn
,
1939 ULONGEST
*jjump_pad_insn_size
,
1940 CORE_ADDR
*adjusted_insn_addr
,
1941 CORE_ADDR
*adjusted_insn_addr_end
,
1949 CORE_ADDR buildaddr
= *jump_entry
;
1950 struct aarch64_insn_relocation_data insn_data
;
1952 /* We need to save the current state on the stack both to restore it
1953 later and to collect register values when the tracepoint is hit.
1955 The saved registers are pushed in a layout that needs to be in sync
1956 with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c). Later on
1957 the supply_fast_tracepoint_registers function will fill in the
1958 register cache from a pointer to saved registers on the stack we build
1961 For simplicity, we set the size of each cell on the stack to 16 bytes.
1962 This way one cell can hold any register type, from system registers
1963 to the 128 bit SIMD&FP registers. Furthermore, the stack pointer
1964 has to be 16 bytes aligned anyway.
1966 Note that the CPSR register does not exist on AArch64. Instead we
1967 can access system bits describing the process state with the
1968 MRS/MSR instructions, namely the condition flags. We save them as
1969 if they are part of a CPSR register because that's how GDB
1970 interprets these system bits. At the moment, only the condition
1971 flags are saved in CPSR (NZCV).
1973 Stack layout, each cell is 16 bytes (descending):
1975 High *-------- SIMD&FP registers from 31 down to 0. --------*
1981 *---- General purpose registers from 30 down to 0. ----*
1987 *------------- Special purpose registers. -------------*
1990 | CPSR (NZCV) | 5 cells
1993 *------------- collecting_t object --------------------*
1994 | TPIDR_EL0 | struct tracepoint * |
1995 Low *------------------------------------------------------*
1997 After this stack is set up, we issue a call to the collector, passing
1998 it the saved registers at (SP + 16). */
2000 /* Push SIMD&FP registers on the stack:
2002 SUB sp, sp, #(32 * 16)
2004 STP q30, q31, [sp, #(30 * 16)]
2009 p
+= emit_sub (p
, sp
, sp
, immediate_operand (32 * 16));
2010 for (i
= 30; i
>= 0; i
-= 2)
2011 p
+= emit_stp_q_offset (p
, i
, i
+ 1, sp
, i
* 16);
2013 /* Push general purpose registers on the stack. Note that we do not need
2014 to push x31 as it represents the xzr register and not the stack
2015 pointer in a STR instruction.
2017 SUB sp, sp, #(31 * 16)
2019 STR x30, [sp, #(30 * 16)]
2024 p
+= emit_sub (p
, sp
, sp
, immediate_operand (31 * 16));
2025 for (i
= 30; i
>= 0; i
-= 1)
2026 p
+= emit_str (p
, aarch64_register (i
, 1), sp
,
2027 offset_memory_operand (i
* 16));
2029 /* Make space for 5 more cells.
2031 SUB sp, sp, #(5 * 16)
2034 p
+= emit_sub (p
, sp
, sp
, immediate_operand (5 * 16));
2039 ADD x4, sp, #((32 + 31 + 5) * 16)
2040 STR x4, [sp, #(4 * 16)]
2043 p
+= emit_add (p
, x4
, sp
, immediate_operand ((32 + 31 + 5) * 16));
2044 p
+= emit_str (p
, x4
, sp
, offset_memory_operand (4 * 16));
2046 /* Save PC (tracepoint address):
2051 STR x3, [sp, #(3 * 16)]
2055 p
+= emit_mov_addr (p
, x3
, tpaddr
);
2056 p
+= emit_str (p
, x3
, sp
, offset_memory_operand (3 * 16));
2058 /* Save CPSR (NZCV), FPSR and FPCR:
2064 STR x2, [sp, #(2 * 16)]
2065 STR x1, [sp, #(1 * 16)]
2066 STR x0, [sp, #(0 * 16)]
2069 p
+= emit_mrs (p
, x2
, NZCV
);
2070 p
+= emit_mrs (p
, x1
, FPSR
);
2071 p
+= emit_mrs (p
, x0
, FPCR
);
2072 p
+= emit_str (p
, x2
, sp
, offset_memory_operand (2 * 16));
2073 p
+= emit_str (p
, x1
, sp
, offset_memory_operand (1 * 16));
2074 p
+= emit_str (p
, x0
, sp
, offset_memory_operand (0 * 16));
2076 /* Push the collecting_t object. It consist of the address of the
2077 tracepoint and an ID for the current thread. We get the latter by
2078 reading the tpidr_el0 system register. It corresponds to the
2079 NT_ARM_TLS register accessible with ptrace.
2086 STP x0, x1, [sp, #-16]!
2090 p
+= emit_mov_addr (p
, x0
, tpoint
);
2091 p
+= emit_mrs (p
, x1
, TPIDR_EL0
);
2092 p
+= emit_stp (p
, x0
, x1
, sp
, preindex_memory_operand (-16));
2096 The shared memory for the lock is at lockaddr. It will hold zero
2097 if no-one is holding the lock, otherwise it contains the address of
2098 the collecting_t object on the stack of the thread which acquired it.
2100 At this stage, the stack pointer points to this thread's collecting_t
2103 We use the following registers:
2104 - x0: Address of the lock.
2105 - x1: Pointer to collecting_t object.
2106 - x2: Scratch register.
2112 ; Trigger an event local to this core. So the following WFE
2113 ; instruction is ignored.
2116 ; Wait for an event. The event is triggered by either the SEVL
2117 ; or STLR instructions (store release).
2120 ; Atomically read at lockaddr. This marks the memory location as
2121 ; exclusive. This instruction also has memory constraints which
2122 ; make sure all previous data reads and writes are done before
2126 ; Try again if another thread holds the lock.
2129 ; We can lock it! Write the address of the collecting_t object.
2130 ; This instruction will fail if the memory location is not marked
2131 ; as exclusive anymore. If it succeeds, it will remove the
2132 ; exclusive mark on the memory location. This way, if another
2133 ; thread executes this instruction before us, we will fail and try
2140 p
+= emit_mov_addr (p
, x0
, lockaddr
);
2141 p
+= emit_mov (p
, x1
, register_operand (sp
));
2145 p
+= emit_ldaxr (p
, x2
, x0
);
2146 p
+= emit_cb (p
, 1, w2
, -2 * 4);
2147 p
+= emit_stxr (p
, w2
, x1
, x0
);
2148 p
+= emit_cb (p
, 1, x2
, -4 * 4);
2150 /* Call collector (struct tracepoint *, unsigned char *):
2155 ; Saved registers start after the collecting_t object.
2158 ; We use an intra-procedure-call scratch register.
2159 MOV ip0, #(collector)
2162 ; And call back to C!
2167 p
+= emit_mov_addr (p
, x0
, tpoint
);
2168 p
+= emit_add (p
, x1
, sp
, immediate_operand (16));
2170 p
+= emit_mov_addr (p
, ip0
, collector
);
2171 p
+= emit_blr (p
, ip0
);
2173 /* Release the lock.
2178 ; This instruction is a normal store with memory ordering
2179 ; constraints. Thanks to this we do not have to put a data
2180 ; barrier instruction to make sure all data read and writes are done
2181 ; before this instruction is executed. Furthermore, this instruction
2182 ; will trigger an event, letting other threads know they can grab
2187 p
+= emit_mov_addr (p
, x0
, lockaddr
);
2188 p
+= emit_stlr (p
, xzr
, x0
);
  /* Free collecting_t object:

       ADD sp, sp, #16

     */
  p += emit_add (p, sp, sp, immediate_operand (16));

  /* Restore CPSR (NZCV), FPSR and FPCR.  And free all special purpose
     registers from the stack.

       LDR x2, [sp, #(2 * 16)]
       LDR x1, [sp, #(1 * 16)]
       LDR x0, [sp, #(0 * 16)]

       MSR NZCV, x2
       MSR FPSR, x1
       MSR FPCR, x0

       ADD sp, sp, #(5 * 16)

     */
  p += emit_ldr (p, x2, sp, offset_memory_operand (2 * 16));
  p += emit_ldr (p, x1, sp, offset_memory_operand (1 * 16));
  p += emit_ldr (p, x0, sp, offset_memory_operand (0 * 16));
  p += emit_msr (p, NZCV, x2);
  p += emit_msr (p, FPSR, x1);
  p += emit_msr (p, FPCR, x0);
  p += emit_add (p, sp, sp, immediate_operand (5 * 16));
  /* Pop general purpose registers:

       LDR x0, [sp, #(0 * 16)]
       ...
       LDR x30, [sp, #(30 * 16)]

       ADD sp, sp, #(31 * 16)

     */
  for (i = 0; i <= 30; i += 1)
    p += emit_ldr (p, aarch64_register (i, 1), sp,
                   offset_memory_operand (i * 16));
  p += emit_add (p, sp, sp, immediate_operand (31 * 16));

  /* Pop SIMD&FP registers:

       LDP q0, q1, [sp]
       ...
       LDP q30, q31, [sp, #(30 * 16)]

       ADD sp, sp, #(32 * 16)

     */
  for (i = 0; i <= 30; i += 2)
    p += emit_ldp_q_offset (p, i, i + 1, sp, i * 16);
  p += emit_add (p, sp, sp, immediate_operand (32 * 16));
  /* Write the code into the inferior memory.  */
  append_insns (&buildaddr, p - buf, buf);

  /* Now emit the relocated instruction.  */
  *adjusted_insn_addr = buildaddr;
  target_read_uint32 (tpaddr, &insn);

  insn_data.base.insn_addr = tpaddr;
  insn_data.new_addr = buildaddr;
  insn_data.insn_ptr = buf;

  aarch64_relocate_instruction (insn, &visitor,
                                (struct aarch64_insn_data *) &insn_data);

  /* We may not have been able to relocate the instruction.  */
  if (insn_data.insn_ptr == buf)
    {
      sprintf (err,
               "E.Could not relocate instruction from %s to %s.",
               core_addr_to_string_nz (tpaddr),
               core_addr_to_string_nz (buildaddr));
      return 1;
    }
  else
    append_insns (&buildaddr, insn_data.insn_ptr - buf, buf);
  *adjusted_insn_addr_end = buildaddr;

  /* Go back to the start of the buffer.  */
  p = buf;

  /* Emit a branch back from the jump pad.  */
  offset = (tpaddr + orig_size - buildaddr);
  if (!can_encode_int32 (offset, 28))
    {
      sprintf (err,
               "E.Jump back from jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
               offset);
      return 1;
    }

  p += emit_b (p, 0, offset);
  append_insns (&buildaddr, p - buf, buf);

  /* Give the caller a branch instruction into the jump pad.  */
  offset = (*jump_entry - tpaddr);
  if (!can_encode_int32 (offset, 28))
    {
      sprintf (err,
               "E.Jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
               offset);
      return 1;
    }

  emit_b ((uint32_t *) jjump_pad_insn, 0, offset);
  *jjump_pad_insn_size = 4;

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
/* Helper function writing LEN instructions from START into
   current_insn_ptr.  */

static void
emit_ops_insns (const uint32_t *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d instructions at %s\n",
                  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}
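/* Every emit_ops method below follows the same shape: build a few
   instructions into a small local buffer, then flush them to the
   inferior with emit_ops_insns.  A sketch of the pattern (emit_something
   is a hypothetical emitter, not a function in this file):

     uint32_t buf[16];
     uint32_t *p = buf;

     p += emit_something (p, ...);
     emit_ops_insns (buf, p - buf);
*/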
/* Pop a register from the stack.  */

static int
emit_pop (uint32_t *buf, struct aarch64_register rt)
{
  return emit_ldr (buf, rt, sp, postindex_memory_operand (1 * 16));
}

/* Push a register on the stack.  */

static int
emit_push (uint32_t *buf, struct aarch64_register rt)
{
  return emit_str (buf, rt, sp, preindex_memory_operand (-1 * 16));
}
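/* Note that each stack slot above is 16 bytes even though a register is
   only 8: the AAPCS64 requires SP to stay 16-byte aligned, and AArch64
   can trap SP-relative accesses when that alignment is broken, so the
   push and pop helpers pad every slot to 16 bytes.  */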
/* Implementation of emit_ops method "emit_prologue".  */

static void
aarch64_emit_prologue (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* This function emits a prologue for the following function prototype:

     enum eval_result_type f (unsigned char *regs,
                              ULONGEST *value);

     The first argument is a buffer of raw registers.  The second
     argument is the result of evaluating the expression, which will be
     set to whatever is on top of the stack at the end.

     The stack set up by the prologue is as such:

     High *------------------------------------------------------*
          | LR                                                   |
          | FP                                                   | <- FP
          | x1  (ULONGEST *value)                                |
          | x0  (unsigned char *regs)                            |
     Low  *------------------------------------------------------*

     As we are implementing a stack machine, each opcode can expand the
     stack so we never know how far we are from the data saved by this
     prologue.  In order to be able to refer to value and regs later, we
     save the current stack pointer in the frame pointer.  This way, it
     is not clobbered when calling C functions.

     Finally, throughout every operation, we are using register x0 as the
     top of the stack, and x1 as a scratch register.  */

  p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-2 * 16));
  p += emit_str (p, lr, sp, offset_memory_operand (3 * 8));
  p += emit_str (p, fp, sp, offset_memory_operand (2 * 8));

  p += emit_add (p, fp, sp, immediate_operand (2 * 8));

  emit_ops_insns (buf, p - buf);
}
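/* With FP left pointing at the saved-FP slot (SP + 16 at the end of the
   prologue above), the two argument slots sit at fixed offsets no matter
   how far the expression stack grows: regs (saved x0) is at [fp, #-16]
   and value (saved x1) is at [fp, #-8].  The epilogue below relies on
   this when it stores the result through *value.  */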
/* Implementation of emit_ops method "emit_epilogue".  */

static void
aarch64_emit_epilogue (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Store the result of the expression (x0) in *value.  */
  p += emit_sub (p, x1, fp, immediate_operand (1 * 8));
  p += emit_ldr (p, x1, x1, offset_memory_operand (0));
  p += emit_str (p, x0, x1, offset_memory_operand (0));

  /* Restore the previous state.  */
  p += emit_add (p, sp, fp, immediate_operand (2 * 8));
  p += emit_ldp (p, fp, lr, fp, offset_memory_operand (0));

  /* Return expr_eval_no_error.  */
  p += emit_mov (p, x0, immediate_operand (expr_eval_no_error));
  p += emit_ret (p, lr);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_add".  */

static void
aarch64_emit_add (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_add (p, x0, x1, register_operand (x0));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_sub".  */

static void
aarch64_emit_sub (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_sub (p, x0, x1, register_operand (x0));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_mul".  */

static void
aarch64_emit_mul (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_mul (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_lsh".  */

static void
aarch64_emit_lsh (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_lslv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_rsh_signed".  */

static void
aarch64_emit_rsh_signed (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_asrv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_rsh_unsigned".  */

static void
aarch64_emit_rsh_unsigned (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_lsrv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ext".  */

static void
aarch64_emit_ext (int arg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_sbfx (p, x0, x0, 0, arg);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_log_not".  */

static void
aarch64_emit_log_not (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* If the top of the stack is 0, replace it with 1.  Else replace it
     with 0.  */
  p += emit_cmp (p, x0, immediate_operand (0));
  p += emit_cset (p, x0, EQ);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_bit_and".  */

static void
aarch64_emit_bit_and (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_and (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_or".  */

static void
aarch64_emit_bit_or (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_orr (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_xor".  */

static void
aarch64_emit_bit_xor (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_eor (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_not".  */

static void
aarch64_emit_bit_not (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mvn (p, x0, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_equal".  */

static void
aarch64_emit_equal (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x0, register_operand (x1));
  p += emit_cset (p, x0, EQ);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_less_signed".  */

static void
aarch64_emit_less_signed (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  p += emit_cset (p, x0, LT);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_less_unsigned".  */

static void
aarch64_emit_less_unsigned (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  p += emit_cset (p, x0, LO);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_ref".  */

static void
aarch64_emit_ref (int size)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  switch (size)
    {
    case 1:
      p += emit_ldrb (p, w0, x0, offset_memory_operand (0));
      break;
    case 2:
      p += emit_ldrh (p, w0, x0, offset_memory_operand (0));
      break;
    case 4:
      p += emit_ldr (p, w0, x0, offset_memory_operand (0));
      break;
    case 8:
      p += emit_ldr (p, x0, x0, offset_memory_operand (0));
      break;
    default:
      /* Unknown size, bail on compilation.  */
      emit_error = 1;
      break;
    }

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_if_goto".  */

static void
aarch64_emit_if_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* The Z flag is set or cleared here.  */
  p += emit_cmp (p, x0, immediate_operand (0));
  /* This instruction must not change the Z flag.  */
  p += emit_pop (p, x0);
  /* Branch over the next instruction if x0 == 0.  */
  p += emit_bcond (p, EQ, 8);

  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
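/* The offset_p/size_p pair reports where the placeholder NOP sits inside
   the sequence just emitted, so the caller can patch it once the goto
   target is known, via the "write_goto_address" method further below.
   A sketch of the caller side, with hypothetical variables:

     int offset, size;

     aarch64_emit_if_goto (&offset, &size);
     ...
     aarch64_write_goto_address (start_addr + offset, dest_addr, size);
*/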
/* Implementation of emit_ops method "emit_goto".  */

static void
aarch64_emit_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = 0;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "write_goto_address".  */

static void
aarch64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  uint32_t insn;

  emit_b (&insn, 0, to - from);
  append_insns (&from, 1, &insn);
}

/* Implementation of emit_ops method "emit_const".  */

static void
aarch64_emit_const (LONGEST num)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov_addr (p, x0, num);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_call".  */

static void
aarch64_emit_call (CORE_ADDR fn)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov_addr (p, ip0, fn);
  p += emit_blr (p, ip0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_reg".  */

static void
aarch64_emit_reg (int reg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Set x0 to unsigned char *regs.  */
  p += emit_sub (p, x0, fp, immediate_operand (2 * 8));
  p += emit_ldr (p, x0, x0, offset_memory_operand (0));
  p += emit_mov (p, x1, immediate_operand (reg));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (get_raw_reg_func_addr ());
}
/* Implementation of emit_ops method "emit_pop".  */

static void
aarch64_emit_pop (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_stack_flush".  */

static void
aarch64_emit_stack_flush (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_push (p, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_zero_ext".  */

static void
aarch64_emit_zero_ext (int arg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_ubfx (p, x0, x0, 0, arg);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_swap".  */

static void
aarch64_emit_swap (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_ldr (p, x1, sp, offset_memory_operand (0 * 16));
  p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
  p += emit_mov (p, x0, register_operand (x1));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_stack_adjust".  */

static void
aarch64_emit_stack_adjust (int n)
{
  /* This is not needed with our design.  */
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_add (p, sp, sp, immediate_operand (n * 16));

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_int_call_1".  */

static void
aarch64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov (p, x0, immediate_operand (arg1));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (fn);
}

/* Implementation of emit_ops method "emit_void_call_2".  */

static void
aarch64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Push x0 on the stack.  */
  aarch64_emit_stack_flush ();

  /* Setup arguments for the function call:

     x0: arg1
     x1: top of the stack

       MOV x1, x0
       MOV x0, #arg1  */

  p += emit_mov (p, x1, register_operand (x0));
  p += emit_mov (p, x0, immediate_operand (arg1));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (fn);

  /* Restore x0.  */
  aarch64_emit_pop ();
}
/* Implementation of emit_ops method "emit_eq_goto".  */

static void
aarch64_emit_eq_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 != x1.  */
  p += emit_bcond (p, NE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ne_goto".  */

static void
aarch64_emit_ne_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 == x1.  */
  p += emit_bcond (p, EQ, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_lt_goto".  */

static void
aarch64_emit_lt_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 >= x0.  */
  p += emit_bcond (p, GE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_le_goto".  */

static void
aarch64_emit_le_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 > x0.  */
  p += emit_bcond (p, GT, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_gt_goto".  */

static void
aarch64_emit_gt_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 <= x0.  */
  p += emit_bcond (p, LE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ge_got".  */

static void
aarch64_emit_ge_got (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 < x0.  */
  p += emit_bcond (p, LT, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
static struct emit_ops aarch64_emit_ops_impl =
{
  aarch64_emit_prologue,
  aarch64_emit_epilogue,
  aarch64_emit_add,
  aarch64_emit_sub,
  aarch64_emit_mul,
  aarch64_emit_lsh,
  aarch64_emit_rsh_signed,
  aarch64_emit_rsh_unsigned,
  aarch64_emit_ext,
  aarch64_emit_log_not,
  aarch64_emit_bit_and,
  aarch64_emit_bit_or,
  aarch64_emit_bit_xor,
  aarch64_emit_bit_not,
  aarch64_emit_equal,
  aarch64_emit_less_signed,
  aarch64_emit_less_unsigned,
  aarch64_emit_ref,
  aarch64_emit_if_goto,
  aarch64_emit_goto,
  aarch64_write_goto_address,
  aarch64_emit_const,
  aarch64_emit_call,
  aarch64_emit_reg,
  aarch64_emit_pop,
  aarch64_emit_stack_flush,
  aarch64_emit_zero_ext,
  aarch64_emit_swap,
  aarch64_emit_stack_adjust,
  aarch64_emit_int_call_1,
  aarch64_emit_void_call_2,
  aarch64_emit_eq_goto,
  aarch64_emit_ne_goto,
  aarch64_emit_lt_goto,
  aarch64_emit_le_goto,
  aarch64_emit_gt_goto,
  aarch64_emit_ge_got,
};

/* Implementation of linux_target_ops method "emit_ops".  */

static struct emit_ops *
aarch64_emit_ops (void)
{
  return &aarch64_emit_ops_impl;
}
/* Implementation of linux_target_ops method
   "get_min_fast_tracepoint_insn_len".  */

static int
aarch64_get_min_fast_tracepoint_insn_len (void)
{
  return 4;
}

/* Implementation of linux_target_ops method "supports_range_stepping".  */

static int
aarch64_supports_range_stepping (void)
{
  return 1;
}
/* Implementation of target ops method "sw_breakpoint_from_kind".  */

const gdb_byte *
aarch64_target::sw_breakpoint_from_kind (int kind, int *size)
{
  if (is_64bit_tdesc ())
    {
      *size = aarch64_breakpoint_len;
      return aarch64_breakpoint;
    }
  else
    return arm_sw_breakpoint_from_kind (kind, size);
}

/* Implementation of target ops method "breakpoint_kind_from_pc".  */

int
aarch64_target::breakpoint_kind_from_pc (CORE_ADDR *pcptr)
{
  if (is_64bit_tdesc ())
    return aarch64_breakpoint_len;
  else
    return arm_breakpoint_kind_from_pc (pcptr);
}

/* Implementation of the target ops method
   "breakpoint_kind_from_current_state".  */

int
aarch64_target::breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
{
  if (is_64bit_tdesc ())
    return aarch64_breakpoint_len;
  else
    return arm_breakpoint_kind_from_current_state (pcptr);
}
/* Support for hardware single step.  */

static int
aarch64_supports_hardware_single_step (void)
{
  return 1;
}
struct linux_target_ops the_low_target =
{
  aarch64_stopped_by_watchpoint,
  aarch64_stopped_data_address,
  NULL, /* collect_ptrace_register */
  NULL, /* supply_ptrace_register */
  aarch64_linux_siginfo_fixup,
  aarch64_linux_new_process,
  aarch64_linux_delete_process,
  aarch64_linux_new_thread,
  aarch64_linux_delete_thread,
  aarch64_linux_new_fork,
  aarch64_linux_prepare_to_resume,
  NULL, /* process_qsupported */
  aarch64_supports_tracepoints,
  aarch64_get_thread_area,
  aarch64_install_fast_tracepoint_jump_pad,
  aarch64_emit_ops,
  aarch64_get_min_fast_tracepoint_insn_len,
  aarch64_supports_range_stepping,
  aarch64_supports_hardware_single_step,
  aarch64_get_syscall_trapinfo,
};
/* The linux target ops object.  */

linux_process_target *the_linux_target = &the_aarch64_target;

void
initialize_low_arch (void)
{
  initialize_low_arch_aarch32 ();

  initialize_regsets_info (&aarch64_regsets_info);
  initialize_regsets_info (&aarch64_sve_regsets_info);
}