/* GNU/Linux/AArch64 specific low level interface, for the remote server for
   GDB.

   Copyright (C) 2009-2020 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
23 #include "linux-low.h"
24 #include "nat/aarch64-linux.h"
25 #include "nat/aarch64-linux-hw-point.h"
26 #include "arch/aarch64-insn.h"
27 #include "linux-aarch32-low.h"
28 #include "elf/common.h"
30 #include "tracepoint.h"
35 #include "nat/gdb_ptrace.h"
36 #include <asm/ptrace.h>
41 #include "gdb_proc_service.h"
42 #include "arch/aarch64.h"
43 #include "linux-aarch32-tdesc.h"
44 #include "linux-aarch64-tdesc.h"
45 #include "nat/aarch64-sve-linux-ptrace.h"
/* Linux target op definitions for the AArch64 architecture.  */

class aarch64_target : public linux_process_target
{
public:

  const regs_info *get_regs_info () override;

  int breakpoint_kind_from_pc (CORE_ADDR *pcptr) override;

  int breakpoint_kind_from_current_state (CORE_ADDR *pcptr) override;

  const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;

  bool supports_z_point_type (char z_type) override;

protected:

  void low_arch_setup () override;

  bool low_cannot_fetch_register (int regno) override;

  bool low_cannot_store_register (int regno) override;

  bool low_supports_breakpoints () override;

  CORE_ADDR low_get_pc (regcache *regcache) override;

  void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;

  bool low_breakpoint_at (CORE_ADDR pc) override;

  int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  bool low_stopped_by_watchpoint () override;

  CORE_ADDR low_stopped_data_address () override;

  bool low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
			  int direction) override;
};

/* The singleton target ops object.  */

static aarch64_target the_aarch64_target;
bool
aarch64_target::low_cannot_fetch_register (int regno)
{
  gdb_assert_not_reached ("linux target op low_cannot_fetch_register "
			  "is not implemented by the target");
}

bool
aarch64_target::low_cannot_store_register (int regno)
{
  gdb_assert_not_reached ("linux target op low_cannot_store_register "
			  "is not implemented by the target");
}
/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  /* Hardware breakpoint/watchpoint data.
     The reason for them to be per-process rather than per-thread is
     due to the lack of information in the gdbserver environment;
     gdbserver is not told whether a requested hardware
     breakpoint/watchpoint is thread specific or not, so it has to set
     each hw bp/wp for every thread in the current process.  The
     higher level bp/wp management in gdb will resume a thread if a hw
     bp/wp trap is not expected for it.  Since the hw bp/wp setting is
     the same for each thread, it is reasonable for the data to live
     here.  */
  struct aarch64_debug_reg_state debug_reg_state;
};
/* Return true if the size of register 0 is 8 bytes.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}
/* Return true if the target description contains the SVE feature.  */

static bool
is_sve_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return tdesc_contains_feature (regcache->tdesc, "org.gnu.gdb.aarch64.sve");
}
static void
aarch64_fill_gregset (struct regcache *regcache, void *buf)
{
  struct user_pt_regs *regset = (struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    collect_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  collect_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  collect_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  collect_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}
static void
aarch64_store_gregset (struct regcache *regcache, const void *buf)
{
  const struct user_pt_regs *regset = (const struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    supply_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  supply_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  supply_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  supply_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}
static void
aarch64_fill_fpregset (struct regcache *regcache, void *buf)
{
  struct user_fpsimd_state *regset = (struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    collect_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  collect_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  collect_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}
static void
aarch64_store_fpregset (struct regcache *regcache, const void *buf)
{
  const struct user_fpsimd_state *regset
    = (const struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    supply_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  supply_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  supply_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}
/* Store the pauth registers to regcache.  */

static void
aarch64_store_pauthregset (struct regcache *regcache, const void *buf)
{
  uint64_t *pauth_regset = (uint64_t *) buf;
  int pauth_base = find_regno (regcache->tdesc, "pauth_dmask");

  if (pauth_base == 0)
    return;

  supply_register (regcache, AARCH64_PAUTH_DMASK_REGNUM (pauth_base),
		   &pauth_regset[0]);
  supply_register (regcache, AARCH64_PAUTH_CMASK_REGNUM (pauth_base),
		   &pauth_regset[1]);
}
/* Implementation of linux target ops method "low_supports_breakpoints".  */

bool
aarch64_target::low_supports_breakpoints ()
{
  return true;
}
/* Implementation of linux target ops method "low_get_pc".  */

CORE_ADDR
aarch64_target::low_get_pc (regcache *regcache)
{
  if (register_size (regcache->tdesc, 0) == 8)
    return linux_get_pc_64bit (regcache);
  else
    return linux_get_pc_32bit (regcache);
}
/* Implementation of linux target ops method "low_set_pc".  */

void
aarch64_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
{
  if (register_size (regcache->tdesc, 0) == 8)
    linux_set_pc_64bit (regcache, pc);
  else
    linux_set_pc_32bit (regcache, pc);
}
#define aarch64_breakpoint_len 4

/* AArch64 BRK software debug mode instruction.
   This instruction needs to match gdb/aarch64-tdep.c
   (aarch64_default_breakpoint).  */
static const gdb_byte aarch64_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
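/* Read in little-endian order, these bytes form the 32-bit word
   0xd4200000, which is the encoding of BRK #0.  */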
/* Implementation of linux target ops method "low_breakpoint_at".  */

bool
aarch64_target::low_breakpoint_at (CORE_ADDR where)
{
  if (is_64bit_tdesc ())
    {
      gdb_byte insn[aarch64_breakpoint_len];

      read_memory (where, (unsigned char *) &insn, aarch64_breakpoint_len);
      if (memcmp (insn, aarch64_breakpoint, aarch64_breakpoint_len) == 0)
	return true;

      return false;
    }
  else
    return arm_breakpoint_at (where);
}
static void
aarch64_init_debug_reg_state (struct aarch64_debug_reg_state *state)
{
  int i;

  for (i = 0; i < AARCH64_HBP_MAX_NUM; ++i)
    {
      state->dr_addr_bp[i] = 0;
      state->dr_ctrl_bp[i] = 0;
      state->dr_ref_count_bp[i] = 0;
    }

  for (i = 0; i < AARCH64_HWP_MAX_NUM; ++i)
    {
      state->dr_addr_wp[i] = 0;
      state->dr_ctrl_wp[i] = 0;
      state->dr_ref_count_wp[i] = 0;
    }
}
/* Return the pointer to the debug register state structure in the
   current process' arch-specific data area.  */

struct aarch64_debug_reg_state *
aarch64_get_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}
/* Implementation of target ops method "supports_z_point_type".  */

bool
aarch64_target::supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_READ_WP:
    case Z_PACKET_ACCESS_WP:
      return true;
    default:
      return false;
    }
}
/* Implementation of linux target ops method "low_insert_point".

   It actually only records the info of the to-be-inserted bp/wp;
   the actual insertion will happen when threads are resumed.  */

int
aarch64_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
				  int len, raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "insert_point on entry (addr=0x%08lx, len=%d)\n",
	     (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  if (targ_type != hw_execute)
    {
      if (aarch64_linux_region_ok_for_watchpoint (addr, len))
	ret = aarch64_handle_watchpoint (targ_type, addr, len,
					 1 /* is_insert */, state);
      else
	ret = -1;
    }
  else
    {
      if (len == 3)
	{
	  /* A LEN of 3 means the breakpoint is set on a 32-bit Thumb
	     instruction.  Set it to 2 to correctly encode the length
	     bit mask in the hardware/watchpoint control register.  */
	  len = 2;
	}
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
				       1 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "insert_point", addr, len,
				  targ_type);

  return ret;
}
/* Implementation of linux target ops method "low_remove_point".

   It actually only records the info of the to-be-removed bp/wp;
   the actual removal will be done when threads are resumed.  */

int
aarch64_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
				  int len, raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "remove_point on entry (addr=0x%08lx, len=%d)\n",
	     (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  /* Set up state pointers.  */
  if (targ_type != hw_execute)
    ret = aarch64_handle_watchpoint (targ_type, addr, len,
				     0 /* is_insert */, state);
  else
    {
      if (len == 3)
	{
	  /* A LEN of 3 means the breakpoint is set on a 32-bit Thumb
	     instruction.  Set it to 2 to correctly encode the length
	     bit mask in the hardware/watchpoint control register.  */
	  len = 2;
	}
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
				       0 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "remove_point", addr, len,
				  targ_type);

  return ret;
}
/* Implementation of linux target ops method "low_stopped_data_address".  */

CORE_ADDR
aarch64_target::low_stopped_data_address ()
{
  siginfo_t siginfo;
  int pid, i;
  struct aarch64_debug_reg_state *state;

  pid = lwpid_of (current_thread);

  /* Get the siginfo.  */
  if (ptrace (PTRACE_GETSIGINFO, pid, NULL, &siginfo) != 0)
    return (CORE_ADDR) 0;

  /* Need to be a hardware breakpoint/watchpoint trap.  */
  if (siginfo.si_signo != SIGTRAP
      || (siginfo.si_code & 0xffff) != 0x0004 /* TRAP_HWBKPT */)
    return (CORE_ADDR) 0;

  /* Check if the address matches any watched address.  */
  state = aarch64_get_debug_reg_state (pid_of (current_thread));
  for (i = aarch64_num_wp_regs - 1; i >= 0; --i)
    {
      const unsigned int offset
	= aarch64_watchpoint_offset (state->dr_ctrl_wp[i]);
      const unsigned int len
	= aarch64_watchpoint_length (state->dr_ctrl_wp[i]);
      const CORE_ADDR addr_trap = (CORE_ADDR) siginfo.si_addr;
      const CORE_ADDR addr_watch = state->dr_addr_wp[i] + offset;
      const CORE_ADDR addr_watch_aligned
	= align_down (state->dr_addr_wp[i], 8);
      const CORE_ADDR addr_orig = state->dr_addr_orig_wp[i];

      if (state->dr_ref_count_wp[i]
	  && DR_CONTROL_ENABLED (state->dr_ctrl_wp[i])
	  && addr_trap >= addr_watch_aligned
	  && addr_trap < addr_watch + len)
	{
	  /* ADDR_TRAP reports the first address of the memory range
	     accessed by the CPU, regardless of what was the memory
	     range watched.  Thus, a large CPU access that straddles
	     the ADDR_WATCH..ADDR_WATCH+LEN range may result in an
	     ADDR_TRAP that is lower than the
	     ADDR_WATCH..ADDR_WATCH+LEN range.  E.g.:

	     addr: |   4   |   5   |   6   |   7   |   8   |
				   |---- range watched ----|
		   |----------- range accessed ------------|

	     In this case, ADDR_TRAP will be 4.

	     To match a watchpoint known to GDB core, we must never
	     report *ADDR_P outside of any ADDR_WATCH..ADDR_WATCH+LEN
	     range.  ADDR_WATCH <= ADDR_TRAP < ADDR_ORIG is a false
	     positive on kernels older than 4.10.  See PR  */
	  return addr_orig;
	}
    }

  return (CORE_ADDR) 0;
}
/* Implementation of linux target ops method "low_stopped_by_watchpoint".  */

bool
aarch64_target::low_stopped_by_watchpoint ()
{
  return (low_stopped_data_address () != 0);
}
/* Fetch the thread-local storage pointer for libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
  return aarch64_ps_get_thread_area (ph, lwpid, idx, base,
				     is_64bit_tdesc ());
}
/* Implementation of linux target ops method "low_siginfo_fixup".  */

bool
aarch64_target::low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
				   int direction)
{
  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    {
      if (direction == 0)
	aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo *) inf,
					     native);
      else
	aarch64_siginfo_from_compat_siginfo (native,
					     (struct compat_siginfo *) inf);

      return true;
    }

  return false;
}
/* Implementation of linux_target_ops method "new_process".  */

static struct arch_process_info *
aarch64_linux_new_process (void)
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  aarch64_init_debug_reg_state (&info->debug_reg_state);

  return info;
}
/* Implementation of linux_target_ops method "delete_process".  */

static void
aarch64_linux_delete_process (struct arch_process_info *info)
{
  xfree (info);
}
/* Implementation of linux_target_ops method "linux_new_fork".  */

static void
aarch64_linux_new_fork (struct process_info *parent,
			struct process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
	      && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
	      && child->priv->arch_private != NULL);

  /* Linux kernels before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     will inherit hardware debug registers from the parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug register mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug register mirror will become zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}
/* Matches HWCAP_PACA in kernel header arch/arm64/include/uapi/asm/hwcap.h.  */
#define AARCH64_HWCAP_PACA (1 << 30)

/* Implementation of linux target ops method "low_arch_setup".  */

void
aarch64_target::low_arch_setup ()
{
  unsigned int machine;
  int is_elf64;
  int tid;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (is_elf64)
    {
      uint64_t vq = aarch64_sve_get_vq (tid);
      unsigned long hwcap = linux_get_hwcap (8);
      bool pauth_p = hwcap & AARCH64_HWCAP_PACA;

      current_process ()->tdesc
	= aarch64_linux_read_description (vq, pauth_p);
    }
  else
    current_process ()->tdesc = aarch32_linux_read_description ();

  aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread));
}
/* Wrapper for aarch64_sve_regs_copy_to_reg_buf.  */

static void
aarch64_sve_regs_copy_to_regcache (struct regcache *regcache, const void *buf)
{
  return aarch64_sve_regs_copy_to_reg_buf (regcache, buf);
}

/* Wrapper for aarch64_sve_regs_copy_from_reg_buf.  */

static void
aarch64_sve_regs_copy_from_regcache (struct regcache *regcache, void *buf)
{
  return aarch64_sve_regs_copy_from_reg_buf (regcache, buf);
}
static struct regset_info aarch64_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_FPREGSET,
    sizeof (struct user_fpsimd_state), FP_REGS,
    aarch64_fill_fpregset, aarch64_store_fpregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
    AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
    NULL, aarch64_store_pauthregset },
  NULL_REGSET
};

static struct regsets_info aarch64_regsets_info =
  {
    aarch64_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

static struct regs_info regs_info_aarch64 =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs */
    &aarch64_regsets_info,
  };
static struct regset_info aarch64_sve_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_SVE,
    SVE_PT_SIZE (AARCH64_MAX_SVE_VQ, SVE_PT_REGS_SVE), EXTENDED_REGS,
    aarch64_sve_regs_copy_from_regcache, aarch64_sve_regs_copy_to_regcache },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
    AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
    NULL, aarch64_store_pauthregset },
  NULL_REGSET
};

static struct regsets_info aarch64_sve_regsets_info =
  {
    aarch64_sve_regsets, /* regsets.  */
    0, /* num_regsets.  */
    NULL, /* disabled_regsets.  */
  };

static struct regs_info regs_info_aarch64_sve =
  {
    NULL, /* regset_bitmap.  */
    NULL, /* usrregs.  */
    &aarch64_sve_regsets_info,
  };
/* Implementation of linux target ops method "get_regs_info".  */

const regs_info *
aarch64_target::get_regs_info ()
{
  if (!is_64bit_tdesc ())
    return &regs_info_aarch32;

  if (is_sve_tdesc ())
    return &regs_info_aarch64_sve;

  return &regs_info_aarch64;
}
/* Implementation of linux_target_ops method "supports_tracepoints".  */

static int
aarch64_supports_tracepoints (void)
{
  if (current_thread == NULL)
    return 1;
  else
    {
      /* We don't support tracepoints on aarch32 now.  */
      return is_64bit_tdesc ();
    }
}
/* Implementation of linux_target_ops method "get_thread_area".  */

static int
aarch64_get_thread_area (int lwpid, CORE_ADDR *addrp)
{
  struct iovec iovec;
  uint64_t reg;

  iovec.iov_base = &reg;
  iovec.iov_len = sizeof (reg);

  if (ptrace (PTRACE_GETREGSET, lwpid, NT_ARM_TLS, &iovec) != 0)
    return -1;

  *addrp = reg;

  return 0;
}
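/* On AArch64 the Linux syscall number is passed in X8, while 32-bit
   ARM inferiors use R7 instead; the function below reads whichever
   register matches the inferior's register size.  */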
/* Implementation of linux_target_ops method "get_syscall_trapinfo".  */

static void
aarch64_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      collect_register_by_name (regcache, "x8", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "r7", sysno);
}
/* List of condition codes that we need.  */

enum aarch64_condition_codes
{
  EQ = 0x0,
  NE = 0x1,
  LO = 0x3,
  GE = 0xa,
  LT = 0xb,
  GT = 0xc,
  LE = 0xd,
};

enum aarch64_operand_type
{
  OPERAND_IMMEDIATE,
  OPERAND_REGISTER,
};

/* Representation of an operand.  At this time, it only supports register
   and immediate types.  */

struct aarch64_operand
{
  /* Type of the operand.  */
  enum aarch64_operand_type type;

  /* Value of the operand according to the type.  */
  union
    {
      uint32_t imm;
      struct aarch64_register reg;
    };
};
/* List of registers that we are currently using; we can add more here as
   we need to use them.  */

/* General purpose scratch registers (64 bit).  */
static const struct aarch64_register x0 = { 0, 1 };
static const struct aarch64_register x1 = { 1, 1 };
static const struct aarch64_register x2 = { 2, 1 };
static const struct aarch64_register x3 = { 3, 1 };
static const struct aarch64_register x4 = { 4, 1 };

/* General purpose scratch registers (32 bit).  */
static const struct aarch64_register w0 = { 0, 0 };
static const struct aarch64_register w2 = { 2, 0 };

/* Intra-procedure scratch registers.  */
static const struct aarch64_register ip0 = { 16, 1 };

/* Special purpose registers.  */
static const struct aarch64_register fp = { 29, 1 };
static const struct aarch64_register lr = { 30, 1 };
static const struct aarch64_register sp = { 31, 1 };
static const struct aarch64_register xzr = { 31, 1 };
/* Dynamically allocate a new register.  If we know the register
   statically, we should make it a global as above instead of using this
   function.  */

static struct aarch64_register
aarch64_register (unsigned num, int is64)
{
  return (struct aarch64_register) { num, is64 };
}
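/* For example, aarch64_register (i, 1) denotes the 64-bit register Xi;
   the jump pad code below uses it to iterate over x0 .. x30.  */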
/* Helper function to create a register operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, register_operand (x1));  */

static struct aarch64_operand
register_operand (struct aarch64_register reg)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_REGISTER;
  operand.reg = reg;

  return operand;
}
/* Helper function to create an immediate operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, immediate_operand (12));  */

static struct aarch64_operand
immediate_operand (uint32_t imm)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_IMMEDIATE;
  operand.imm = imm;

  return operand;
}
/* Helper function to create an offset memory operand.

   For example:
   p += emit_ldr (p, x0, sp, offset_memory_operand (16));  */

static struct aarch64_memory_operand
offset_memory_operand (int32_t offset)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_OFFSET, offset };
}
/* Helper function to create a pre-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, preindex_memory_operand (16));  */

static struct aarch64_memory_operand
preindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_PREINDEX, index };
}
/* Helper function to create a post-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, postindex_memory_operand (16));  */

static struct aarch64_memory_operand
postindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_POSTINDEX, index };
}
/* System control registers.  These special registers can be written and
   read with the MRS and MSR instructions.

   - NZCV: Condition flags.  GDB refers to this register under the CPSR
     name.
   - FPSR: Floating-point status register.
   - FPCR: Floating-point control register.
   - TPIDR_EL0: Software thread ID register.  */

enum aarch64_system_control_registers
{
  /*          op0           op1           crn          crm          op2  */
  NZCV =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
  FPSR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
  FPCR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
  TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
};
/* Write a BLR instruction into *BUF.

   BLR rn

   RN is the register to branch to.  */

static int
emit_blr (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, BLR | ENCODE (rn.num, 5, 5));
}
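/* ENCODE (val, size, offset), from arch/aarch64-insn.h, masks VAL down
   to SIZE bits and shifts it to bit position OFFSET within the
   instruction word; above, ENCODE (rn.num, 5, 5) places the register
   number in bits 5..9 of the BLR encoding.  */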
/* Write a RET instruction into *BUF.

   RET rn

   RN is the register to branch to.  */

static int
emit_ret (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, RET | ENCODE (rn.num, 5, 5));
}
static int
emit_load_store_pair (uint32_t *buf, enum aarch64_opcodes opcode,
		      struct aarch64_register rt,
		      struct aarch64_register rt2,
		      struct aarch64_register rn,
		      struct aarch64_memory_operand operand)
{
  uint32_t opc;
  uint32_t pre_index;
  uint32_t write_back;

  if (rt.is64)
    opc = ENCODE (2, 2, 30);
  else
    opc = ENCODE (0, 2, 30);

  switch (operand.type)
    {
    case MEMORY_OPERAND_OFFSET:
      {
	pre_index = ENCODE (1, 1, 24);
	write_back = ENCODE (0, 1, 23);
	break;
      }
    case MEMORY_OPERAND_POSTINDEX:
      {
	pre_index = ENCODE (0, 1, 24);
	write_back = ENCODE (1, 1, 23);
	break;
      }
    case MEMORY_OPERAND_PREINDEX:
      {
	pre_index = ENCODE (1, 1, 24);
	write_back = ENCODE (1, 1, 23);
	break;
      }
    default:
      return 0;
    }

  return aarch64_emit_insn (buf, opcode | opc | pre_index | write_back
			    | ENCODE (operand.index >> 3, 7, 15)
			    | ENCODE (rt2.num, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}
/* Write a STP instruction into *BUF.

   STP rt, rt2, [rn, #offset]
   STP rt, rt2, [rn, #index]!
   STP rt, rt2, [rn], #index

   RT and RT2 are the registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_stp (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rt2, struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, STP, rt, rt2, rn, operand);
}
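/* As a usage example, the jump pad code below emits
   p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-16));
   which assembles to STP x0, x1, [sp, #-16]!.  */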
/* Write a LDP instruction into *BUF.

   LDP rt, rt2, [rn, #offset]
   LDP rt, rt2, [rn, #index]!
   LDP rt, rt2, [rn], #index

   RT and RT2 are the registers to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_ldp (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rt2, struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, LDP, rt, rt2, rn, operand);
}
1000 /* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.
1002 LDP qt, qt2, [rn, #offset]
1004 RT and RT2 are the Q registers to store.
1005 RN is the base address register.
1006 OFFSET is the immediate to add to the base address. It is limited to
1007 -1024 .. 1008 range (7 bits << 4). */
1010 emit_ldp_q_offset (uint32_t *buf
, unsigned rt
, unsigned rt2
,
1011 struct aarch64_register rn
, int32_t offset
)
1013 uint32_t opc
= ENCODE (2, 2, 30);
1014 uint32_t pre_index
= ENCODE (1, 1, 24);
1016 return aarch64_emit_insn (buf
, LDP_SIMD_VFP
| opc
| pre_index
1017 | ENCODE (offset
>> 4, 7, 15)
1018 | ENCODE (rt2
, 5, 10)
1019 | ENCODE (rn
.num
, 5, 5) | ENCODE (rt
, 5, 0));
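/* For example, emit_ldp_q_offset (p, 30, 31, sp, 30 * 16) assembles to
   LDP q30, q31, [sp, #480], as used when popping the SIMD&FP registers
   in the jump pad below.  */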
1022 /* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.
1024 STP qt, qt2, [rn, #offset]
1026 RT and RT2 are the Q registers to store.
1027 RN is the base address register.
1028 OFFSET is the immediate to add to the base address. It is limited to
1029 -1024 .. 1008 range (7 bits << 4). */
1032 emit_stp_q_offset (uint32_t *buf
, unsigned rt
, unsigned rt2
,
1033 struct aarch64_register rn
, int32_t offset
)
1035 uint32_t opc
= ENCODE (2, 2, 30);
1036 uint32_t pre_index
= ENCODE (1, 1, 24);
1038 return aarch64_emit_insn (buf
, STP_SIMD_VFP
| opc
| pre_index
1039 | ENCODE (offset
>> 4, 7, 15)
1040 | ENCODE (rt2
, 5, 10)
1041 | ENCODE (rn
.num
, 5, 5) | ENCODE (rt
, 5, 0));
/* Write a LDRH instruction into *BUF.

   LDRH wt, [xn, #offset]
   LDRH wt, [xn, #index]!
   LDRH wt, [xn], #index

   RT is the register to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   a 0 .. 32760 range (12 bits << 3).  */

static int
emit_ldrh (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn,
	   struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 1, LDR, rt, rn, operand);
}
/* Write a LDRB instruction into *BUF.

   LDRB wt, [xn, #offset]
   LDRB wt, [xn, #index]!
   LDRB wt, [xn], #index

   RT is the register to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   a 0 .. 32760 range (12 bits << 3).  */

static int
emit_ldrb (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn,
	   struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 0, LDR, rt, rn, operand);
}
/* Write a STR instruction into *BUF.

   STR rt, [rn, #offset]
   STR rt, [rn, #index]!
   STR rt, [rn], #index

   RT is the register to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   a 0 .. 32760 range (12 bits << 3).  */

static int
emit_str (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, rt.is64 ? 3 : 2, STR, rt, rn, operand);
}
/* Helper function emitting an exclusive load or store instruction.  */

static int
emit_load_store_exclusive (uint32_t *buf, uint32_t size,
			   enum aarch64_opcodes opcode,
			   struct aarch64_register rs,
			   struct aarch64_register rt,
			   struct aarch64_register rt2,
			   struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, opcode | ENCODE (size, 2, 30)
			    | ENCODE (rs.num, 5, 16) | ENCODE (rt2.num, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}
/* Write a LDAXR instruction into *BUF.

   LDAXR rt, [xn]

   RT is the destination register.
   RN is the base address register.  */

static int
emit_ldaxr (uint32_t *buf, struct aarch64_register rt,
	    struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, LDAXR, xzr, rt,
				    xzr, rn);
}
/* Write a STXR instruction into *BUF.

   STXR ws, rt, [xn]

   RS is the result register; it indicates if the store succeeded or not.
   RT is the destination register.
   RN is the base address register.  */

static int
emit_stxr (uint32_t *buf, struct aarch64_register rs,
	   struct aarch64_register rt, struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STXR, rs, rt,
				    xzr, rn);
}
/* Write a STLR instruction into *BUF.

   STLR rt, [xn]

   RT is the register to store.
   RN is the base address register.  */

static int
emit_stlr (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STLR, xzr, rt,
				    xzr, rn);
}
/* Helper function for data processing instructions with register sources.  */

static int
emit_data_processing_reg (uint32_t *buf, uint32_t opcode,
			  struct aarch64_register rd,
			  struct aarch64_register rn,
			  struct aarch64_register rm)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, opcode | size | ENCODE (rm.num, 5, 16)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rd.num, 5, 0));
}
/* Helper function for data processing instructions taking either a register
   or an immediate as a source operand.  */

static int
emit_data_processing (uint32_t *buf, enum aarch64_opcodes opcode,
		      struct aarch64_register rd,
		      struct aarch64_register rn,
		      struct aarch64_operand operand)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  /* The opcode is different for register and immediate source operands.  */
  uint32_t operand_opcode;

  if (operand.type == OPERAND_IMMEDIATE)
    {
      /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (8, 4, 25);

      return aarch64_emit_insn (buf, opcode | operand_opcode | size
				| ENCODE (operand.imm, 12, 10)
				| ENCODE (rn.num, 5, 5)
				| ENCODE (rd.num, 5, 0));
    }
  else
    {
      /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (5, 4, 25);

      return emit_data_processing_reg (buf, opcode | operand_opcode, rd,
				       rn, operand.reg);
    }
}
/* Write an ADD instruction into *BUF.

   ADD rd, rn, #imm
   ADD rd, rn, rm

   This function handles both an immediate and register add.

   RD is the destination register.
   RN is the input register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_add (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, ADD, rd, rn, operand);
}
/* Write a SUB instruction into *BUF.

   SUB rd, rn, #imm
   SUB rd, rn, rm

   This function handles both an immediate and register sub.

   RD is the destination register.
   RN is the input register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_sub (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUB, rd, rn, operand);
}
/* Write a MOV instruction into *BUF.

   MOV rd, #imm
   MOV rd, rm

   This function handles both a wide immediate move and a register move,
   with the condition that the source register is not xzr.  xzr and the
   stack pointer share the same encoding and this function only supports
   the stack pointer.

   RD is the destination register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_mov (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_operand operand)
{
  if (operand.type == OPERAND_IMMEDIATE)
    {
      uint32_t size = ENCODE (rd.is64, 1, 31);
      /* Do not shift the immediate.  */
      uint32_t shift = ENCODE (0, 2, 21);

      return aarch64_emit_insn (buf, MOV | size | shift
				| ENCODE (operand.imm, 16, 5)
				| ENCODE (rd.num, 5, 0));
    }
  else
    return emit_add (buf, rd, operand.reg, immediate_operand (0));
}
/* Write a MOVK instruction into *BUF.

   MOVK rd, #imm, lsl #shift

   RD is the destination register.
   IMM is the immediate.
   SHIFT is the logical shift left to apply to IMM.  */

static int
emit_movk (uint32_t *buf, struct aarch64_register rd, uint32_t imm,
	   unsigned shift)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, MOVK | size | ENCODE (shift, 2, 21)
			    | ENCODE (imm, 16, 5) | ENCODE (rd.num, 5, 0));
}
/* Write instructions into *BUF in order to move ADDR into a register.
   ADDR can be a 64-bit value.

   This function will emit a series of MOV and MOVK instructions, such as:

     MOV  xd, #(addr)
     MOVK xd, #(addr >> 16), lsl #16
     MOVK xd, #(addr >> 32), lsl #32
     MOVK xd, #(addr >> 48), lsl #48  */

static int
emit_mov_addr (uint32_t *buf, struct aarch64_register rd, CORE_ADDR addr)
{
  uint32_t *p = buf;

  /* The MOV (wide immediate) instruction clears the top bits of the
     destination register.  */
  p += emit_mov (p, rd, immediate_operand (addr & 0xffff));

  if ((addr >> 16) != 0)
    p += emit_movk (p, rd, (addr >> 16) & 0xffff, 1);
  else
    return p - buf;

  if ((addr >> 32) != 0)
    p += emit_movk (p, rd, (addr >> 32) & 0xffff, 2);
  else
    return p - buf;

  if ((addr >> 48) != 0)
    p += emit_movk (p, rd, (addr >> 48) & 0xffff, 3);

  return p - buf;
}
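/* For example, moving ADDR = 0x0000123456789abc into xd emits:

     MOV  xd, #0x9abc
     MOVK xd, #0x5678, lsl #16
     MOVK xd, #0x1234, lsl #32

   The final MOVK is skipped because bits 48..63 of ADDR are zero.  */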
/* Write a SUBS instruction into *BUF.

   SUBS rd, rn, rm

   This instruction updates the condition flags.

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_subs (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUBS, rd, rn, operand);
}
/* Write a CMP instruction into *BUF.

   CMP rn, rm

   This instruction is an alias of SUBS xzr, rn, rm.

   RN and RM are the registers to compare.  */

static int
emit_cmp (uint32_t *buf, struct aarch64_register rn,
	  struct aarch64_operand operand)
{
  return emit_subs (buf, xzr, rn, operand);
}
/* Write an AND instruction into *BUF.

   AND rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_and (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, AND, rd, rn, rm);
}
/* Write an ORR instruction into *BUF.

   ORR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orr (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORR, rd, rn, rm);
}
/* Write an ORN instruction into *BUF.

   ORN rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orn (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORN, rd, rn, rm);
}
/* Write an EOR instruction into *BUF.

   EOR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_eor (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, EOR, rd, rn, rm);
}
/* Write an MVN instruction into *BUF.

   MVN rd, rm

   This is an alias for ORN rd, xzr, rm.

   RD is the destination register.
   RM is the source register.  */

static int
emit_mvn (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rm)
{
  return emit_orn (buf, rd, xzr, rm);
}
/* Write an LSLV instruction into *BUF.

   LSLV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lslv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSLV, rd, rn, rm);
}
/* Write an LSRV instruction into *BUF.

   LSRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lsrv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSRV, rd, rn, rm);
}
/* Write an ASRV instruction into *BUF.

   ASRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_asrv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ASRV, rd, rn, rm);
}
/* Write a MUL instruction into *BUF.

   MUL rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_mul (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, MUL, rd, rn, rm);
}
/* Write an MRS instruction into *BUF.  The register size is 64-bit.

   MRS xt, system_reg

   RT is the destination register.
   SYSTEM_REG is the special purpose register to read.  */

static int
emit_mrs (uint32_t *buf, struct aarch64_register rt,
	  enum aarch64_system_control_registers system_reg)
{
  return aarch64_emit_insn (buf, MRS | ENCODE (system_reg, 15, 5)
			    | ENCODE (rt.num, 5, 0));
}
/* Write an MSR instruction into *BUF.  The register size is 64-bit.

   MSR system_reg, xt

   SYSTEM_REG is the special purpose register to write.
   RT is the input register.  */

static int
emit_msr (uint32_t *buf, enum aarch64_system_control_registers system_reg,
	  struct aarch64_register rt)
{
  return aarch64_emit_insn (buf, MSR | ENCODE (system_reg, 15, 5)
			    | ENCODE (rt.num, 5, 0));
}
/* Write a SEVL instruction into *BUF.

   This is a hint instruction telling the hardware to trigger an event.  */

static int
emit_sevl (uint32_t *buf)
{
  return aarch64_emit_insn (buf, SEVL);
}
/* Write a WFE instruction into *BUF.

   This is a hint instruction telling the hardware to wait for an event.  */

static int
emit_wfe (uint32_t *buf)
{
  return aarch64_emit_insn (buf, WFE);
}
/* Write a SBFM instruction into *BUF.

   SBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, sign extending the result.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_sbfm (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, SBFM | size | n | ENCODE (immr, 6, 16)
			    | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}
/* Write a SBFX instruction into *BUF.

   SBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination, sign
   extending the result.  This is an alias for:

     SBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_sbfx (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_sbfm (buf, rd, rn, lsb, lsb + width - 1);
}
/* Write a UBFM instruction into *BUF.

   UBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, extending the result with zeros.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_ubfm (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, UBFM | size | n | ENCODE (immr, 6, 16)
			    | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}
/* Write a UBFX instruction into *BUF.

   UBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination,
   extending the result with zeros.  This is an alias for:

     UBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_ubfx (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_ubfm (buf, rd, rn, lsb, lsb + width - 1);
}
/* Write a CSINC instruction into *BUF.

   CSINC rd, rn, rm, cond

   This instruction copies rn to rd if the condition is true, and copies
   rm incremented by one to rd otherwise.

   RD is the destination register.
   RN and RM are the source registers.
   COND is the encoded condition.  */

static int
emit_csinc (uint32_t *buf, struct aarch64_register rd,
	    struct aarch64_register rn, struct aarch64_register rm,
	    unsigned cond)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, CSINC | size | ENCODE (rm.num, 5, 16)
			    | ENCODE (cond, 4, 12) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}
/* Write a CSET instruction into *BUF.

   CSET rd, cond

   This instruction conditionally writes 1 or 0 to the destination register:
   1 is written if the condition is true.  This is an alias for:

     CSINC rd, xzr, xzr, !cond

   Note that the condition needs to be inverted.

   RD is the destination register.
   COND is the encoded condition.  */

static int
emit_cset (uint32_t *buf, struct aarch64_register rd, unsigned cond)
{
  /* The least significant bit of the condition needs toggling in order to
     invert it.  */
  return emit_csinc (buf, rd, xzr, xzr, cond ^ 0x1);
}
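/* For example, emit_cset (p, x0, EQ) emits CSINC x0, xzr, xzr, NE
   (toggling EQ, 0x0, yields NE, 0x1), which writes 1 to x0 exactly
   when the Z flag is set.  */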
/* Write LEN instructions from BUF into the inferior memory at *TO.

   Note instructions are always little endian on AArch64, unlike data.  */

static void
append_insns (CORE_ADDR *to, size_t len, const uint32_t *buf)
{
  size_t byte_len = len * sizeof (uint32_t);
#if (__BYTE_ORDER == __BIG_ENDIAN)
  uint32_t *le_buf = (uint32_t *) xmalloc (byte_len);
  size_t i;

  for (i = 0; i < len; i++)
    le_buf[i] = htole32 (buf[i]);

  target_write_memory (*to, (const unsigned char *) le_buf, byte_len);

  xfree (le_buf);
#else
  target_write_memory (*to, (const unsigned char *) buf, byte_len);
#endif

  *to += byte_len;
}
/* Sub-class of struct aarch64_insn_data, storing information on
   instruction relocation for fast tracepoints.  The visitor can
   relocate an instruction from BASE.INSN_ADDR to NEW_ADDR and save
   the relocated instructions in the buffer pointed to by INSN_PTR.  */

struct aarch64_insn_relocation_data
{
  struct aarch64_insn_data base;

  /* The new address the instruction is relocated to.  */
  CORE_ADDR new_addr;

  /* Pointer to the buffer of relocated instruction(s).  */
  uint32_t *insn_ptr;
};
/* Implementation of aarch64_insn_visitor method "b".  */

static void
aarch64_ftrace_insn_reloc_b (const int is_bl, const int32_t offset,
			     struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 28))
    insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, is_bl, new_offset);
}
/* Implementation of aarch64_insn_visitor method "b_cond".  */

static void
aarch64_ftrace_insn_reloc_b_cond (const unsigned cond, const int32_t offset,
				  struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 21))
    {
      insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond,
					  new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a conditional branch
	 instruction but not for an unconditional branch.  We can use
	 the following instructions instead:

	   B.COND TAKEN    ; If cond is true, then jump to TAKEN.
	   B NOT_TAKEN     ; Else jump over TAKEN and continue.
	 TAKEN:
	   B #(new_offset - 8)
	 NOT_TAKEN:
	   ...  */

      insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
				      new_offset - 8);
    }
}
/* Implementation of aarch64_insn_visitor method "cb".  */

static void
aarch64_ftrace_insn_reloc_cb (const int32_t offset, const int is_cbnz,
			      const unsigned rn, int is64,
			      struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 21))
    {
      insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
				       aarch64_register (rn, is64),
				       new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a compare and branch
	 instruction but not for an unconditional branch.  We can use
	 the following instructions instead:

	   CBZ xn, TAKEN   ; xn == 0, then jump to TAKEN.
	   B NOT_TAKEN     ; Else jump over TAKEN and continue.
	 TAKEN:
	   B #(new_offset - 8)
	 NOT_TAKEN:
	   ...  */

      insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
				       aarch64_register (rn, is64), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
				      new_offset - 8);
    }
}
/* Implementation of aarch64_insn_visitor method "tb".  */

static void
aarch64_ftrace_insn_reloc_tb (const int32_t offset, int is_tbnz,
			      const unsigned rt, unsigned bit,
			      struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 16))
    {
      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
				       aarch64_register (rt, 1), new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a test bit and branch
	 instruction but not for an unconditional branch.  We can use
	 the following instructions instead:

	   TBZ xn, #bit, TAKEN   ; xn[bit] == 0, then jump to TAKEN.
	   B NOT_TAKEN           ; Else jump over TAKEN and continue.
	 TAKEN:
	   B #(new_offset - 8)
	 NOT_TAKEN:
	   ...  */

      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
				       aarch64_register (rt, 1), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
				      new_offset - 8);
    }
}
/* Implementation of aarch64_insn_visitor method "adr".  */

static void
aarch64_ftrace_insn_reloc_adr (const int32_t offset, const unsigned rd,
			       const int is_adrp,
			       struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  /* We know exactly the address the ADR{P,} instruction will compute.
     We can just write it to the destination register.  */
  CORE_ADDR address = data->insn_addr + offset;

  if (is_adrp)
    {
      /* Clear the lower 12 bits of the offset to get the 4K page.  */
      insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
					     aarch64_register (rd, 1),
					     address & ~0xfff);
    }
  else
    insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
					   aarch64_register (rd, 1), address);
}
/* Implementation of aarch64_insn_visitor method "ldr_literal".  */

static void
aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset, const int is_sw,
				       const unsigned rt, const int is64,
				       struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  CORE_ADDR address = data->insn_addr + offset;

  insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
					 aarch64_register (rt, 1), address);

  /* We know exactly what address to load from, and what register we
     can use:

       MOV xd, #(oldloc + offset)
       MOVK xd, #((oldloc + offset) >> 16), lsl #16
       ...

       LDR xd, [xd] ; or LDRSW xd, [xd]

     */

  if (is_sw)
    insn_reloc->insn_ptr += emit_ldrsw (insn_reloc->insn_ptr,
					aarch64_register (rt, 1),
					aarch64_register (rt, 1),
					offset_memory_operand (0));
  else
    insn_reloc->insn_ptr += emit_ldr (insn_reloc->insn_ptr,
				      aarch64_register (rt, is64),
				      aarch64_register (rt, 1),
				      offset_memory_operand (0));
}
/* Implementation of aarch64_insn_visitor method "others".  */

static void
aarch64_ftrace_insn_reloc_others (const uint32_t insn,
				  struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;

  /* The instruction is not PC relative.  Just re-emit it at the new
     location.  */
  insn_reloc->insn_ptr += aarch64_emit_insn (insn_reloc->insn_ptr, insn);
}
static const struct aarch64_insn_visitor visitor =
{
  aarch64_ftrace_insn_reloc_b,
  aarch64_ftrace_insn_reloc_b_cond,
  aarch64_ftrace_insn_reloc_cb,
  aarch64_ftrace_insn_reloc_tb,
  aarch64_ftrace_insn_reloc_adr,
  aarch64_ftrace_insn_reloc_ldr_literal,
  aarch64_ftrace_insn_reloc_others,
};
/* Implementation of linux_target_ops method
   "install_fast_tracepoint_jump_pad".  */

static int
aarch64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
					  CORE_ADDR tpaddr,
					  CORE_ADDR collector,
					  CORE_ADDR lockaddr,
					  ULONGEST orig_size,
					  CORE_ADDR *jump_entry,
					  CORE_ADDR *trampoline,
					  ULONGEST *trampoline_size,
					  unsigned char *jjump_pad_insn,
					  ULONGEST *jjump_pad_insn_size,
					  CORE_ADDR *adjusted_insn_addr,
					  CORE_ADDR *adjusted_insn_addr_end,
					  char *err)
{
  uint32_t buf[256];
  uint32_t *p = buf;
  int64_t offset;
  int i;
  uint32_t insn;
  CORE_ADDR buildaddr = *jump_entry;
  struct aarch64_insn_relocation_data insn_data;

  /* We need to save the current state on the stack both to restore it
     later and to collect register values when the tracepoint is hit.

     The saved registers are pushed in a layout that needs to be in sync
     with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c).  Later on
     the supply_fast_tracepoint_registers function will fill in the
     register cache from a pointer to saved registers on the stack we build
     here.

     For simplicity, we set the size of each cell on the stack to 16 bytes.
     This way one cell can hold any register type, from system registers
     to the 128 bit SIMD&FP registers.  Furthermore, the stack pointer
     has to be 16 bytes aligned anyway.

     Note that the CPSR register does not exist on AArch64.  Instead we
     can access system bits describing the process state with the
     MRS/MSR instructions, namely the condition flags.  We save them as
     if they are part of a CPSR register because that's how GDB
     interprets these system bits.  At the moment, only the condition
     flags are saved in CPSR (NZCV).

     Stack layout, each cell is 16 bytes (descending):

     High *-------- SIMD&FP registers from 31 down to 0. --------*
	  | q31                                                  |
	  .                                                      .
	  .                                                      .
	  | q0                                                   |
	  *---- General purpose registers from 30 down to 0. ----*
	  | x30                                                  |
	  .                                                      .
	  .                                                      .
	  | x0                                                   |
	  *------------- Special purpose registers. -------------*
	  | SP                                                   |
	  | PC                                                   |
	  | CPSR (NZCV)                                          | 5 cells
	  | FPSR                                                 |
	  | FPCR                                                 |
	  *------------- collecting_t object --------------------*
	  | TPIDR_EL0               | struct tracepoint *        |
     Low  *------------------------------------------------------*

     After this stack is set up, we issue a call to the collector, passing
     it the saved registers at (SP + 16).  */
2005 /* Push SIMD&FP registers on the stack:
2007 SUB sp, sp, #(32 * 16)
2009 STP q30, q31, [sp, #(30 * 16)]
2014 p
+= emit_sub (p
, sp
, sp
, immediate_operand (32 * 16));
2015 for (i
= 30; i
>= 0; i
-= 2)
2016 p
+= emit_stp_q_offset (p
, i
, i
+ 1, sp
, i
* 16);
2018 /* Push general purpose registers on the stack. Note that we do not need
2019 to push x31 as it represents the xzr register and not the stack
2020 pointer in a STR instruction.
2022 SUB sp, sp, #(31 * 16)
2024 STR x30, [sp, #(30 * 16)]
2029 p
+= emit_sub (p
, sp
, sp
, immediate_operand (31 * 16));
2030 for (i
= 30; i
>= 0; i
-= 1)
2031 p
+= emit_str (p
, aarch64_register (i
, 1), sp
,
2032 offset_memory_operand (i
* 16));
2034 /* Make space for 5 more cells.
2036 SUB sp, sp, #(5 * 16)
2039 p
+= emit_sub (p
, sp
, sp
, immediate_operand (5 * 16));
2044 ADD x4, sp, #((32 + 31 + 5) * 16)
2045 STR x4, [sp, #(4 * 16)]
2048 p
+= emit_add (p
, x4
, sp
, immediate_operand ((32 + 31 + 5) * 16));
2049 p
+= emit_str (p
, x4
, sp
, offset_memory_operand (4 * 16));
2051 /* Save PC (tracepoint address):
2056 STR x3, [sp, #(3 * 16)]
2060 p
+= emit_mov_addr (p
, x3
, tpaddr
);
2061 p
+= emit_str (p
, x3
, sp
, offset_memory_operand (3 * 16));
2063 /* Save CPSR (NZCV), FPSR and FPCR:
2069 STR x2, [sp, #(2 * 16)]
2070 STR x1, [sp, #(1 * 16)]
2071 STR x0, [sp, #(0 * 16)]
2074 p
+= emit_mrs (p
, x2
, NZCV
);
2075 p
+= emit_mrs (p
, x1
, FPSR
);
2076 p
+= emit_mrs (p
, x0
, FPCR
);
2077 p
+= emit_str (p
, x2
, sp
, offset_memory_operand (2 * 16));
2078 p
+= emit_str (p
, x1
, sp
, offset_memory_operand (1 * 16));
2079 p
+= emit_str (p
, x0
, sp
, offset_memory_operand (0 * 16));
2081 /* Push the collecting_t object. It consist of the address of the
2082 tracepoint and an ID for the current thread. We get the latter by
2083 reading the tpidr_el0 system register. It corresponds to the
2084 NT_ARM_TLS register accessible with ptrace.
2091 STP x0, x1, [sp, #-16]!
2095 p
+= emit_mov_addr (p
, x0
, tpoint
);
2096 p
+= emit_mrs (p
, x1
, TPIDR_EL0
);
2097 p
+= emit_stp (p
, x0
, x1
, sp
, preindex_memory_operand (-16));
  /* Spin-lock:

     The shared memory for the lock is at lockaddr.  It will hold zero
     if no-one is holding the lock, otherwise it contains the address of
     the collecting_t object on the stack of the thread which acquired it.

     At this stage, the stack pointer points to this thread's collecting_t
     object.

     We use the following registers:
     - x0: Address of the lock.
     - x1: Pointer to collecting_t object.
     - x2: Scratch register.

       MOV x0, #(lockaddr)
       ...
       MOV x1, sp

       ; Trigger an event local to this core.  So the following WFE
       ; instruction is ignored.
       SEVL
     again:
       ; Wait for an event.  The event is triggered by either the SEVL
       ; or STLR instructions (store release).
       WFE

       ; Atomically read at lockaddr.  This marks the memory location as
       ; exclusive.  This instruction also has memory constraints which
       ; make sure all previous data reads and writes are done before
       ; executing it.
       LDAXR x2, [x0]

       ; Try again if another thread holds the lock.
       CBNZ x2, again

       ; We can lock it!  Write the address of the collecting_t object.
       ; This instruction will fail if the memory location is not marked
       ; as exclusive anymore.  If it succeeds, it will remove the
       ; exclusive mark on the memory location.  This way, if another
       ; thread executes this instruction before us, we will fail and try
       ; all over again.
       STXR w2, x1, [x0]
       CBNZ w2, again

     */

  p += emit_mov_addr (p, x0, lockaddr);
  p += emit_mov (p, x1, register_operand (sp));

  p += emit_sevl (p);
  p += emit_wfe (p);
  p += emit_ldaxr (p, x2, x0);
  p += emit_cb (p, 1, w2, -2 * 4);
  p += emit_stxr (p, w2, x1, x0);
  p += emit_cb (p, 1, x2, -4 * 4);
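  /* The branch offsets above are in bytes: -2 * 4 branches back two
     instructions (to the WFE) when the lock is held, and -4 * 4
     branches back four instructions (again to the WFE) when the
     exclusive store fails.  */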
2155 /* Call collector (struct tracepoint *, unsigned char *):
2160 ; Saved registers start after the collecting_t object.
2163 ; We use an intra-procedure-call scratch register.
2164 MOV ip0, #(collector)
2167 ; And call back to C!
2172 p
+= emit_mov_addr (p
, x0
, tpoint
);
2173 p
+= emit_add (p
, x1
, sp
, immediate_operand (16));
2175 p
+= emit_mov_addr (p
, ip0
, collector
);
2176 p
+= emit_blr (p
, ip0
);
2178 /* Release the lock.
2183 ; This instruction is a normal store with memory ordering
2184 ; constraints. Thanks to this we do not have to put a data
2185 ; barrier instruction to make sure all data read and writes are done
2186 ; before this instruction is executed. Furthermore, this instruction
2187 ; will trigger an event, letting other threads know they can grab
2192 p
+= emit_mov_addr (p
, x0
, lockaddr
);
2193 p
+= emit_stlr (p
, xzr
, x0
);
2195 /* Free collecting_t object:
2200 p
+= emit_add (p
, sp
, sp
, immediate_operand (16));
  /* Restore CPSR (NZCV), FPSR and FPCR.  And free all special purpose
     registers from the stack.

     LDR x2, [sp, #(2 * 16)]
     LDR x1, [sp, #(1 * 16)]
     LDR x0, [sp, #(0 * 16)]

     MSR NZCV, x2
     MSR FPSR, x1
     MSR FPCR, x0

     ADD sp, sp, #(5 * 16)

     */
  p += emit_ldr (p, x2, sp, offset_memory_operand (2 * 16));
  p += emit_ldr (p, x1, sp, offset_memory_operand (1 * 16));
  p += emit_ldr (p, x0, sp, offset_memory_operand (0 * 16));
  p += emit_msr (p, NZCV, x2);
  p += emit_msr (p, FPSR, x1);
  p += emit_msr (p, FPCR, x0);
  p += emit_add (p, sp, sp, immediate_operand (5 * 16));
  /* Pop general purpose registers:

     LDR x0, [sp]
     ...
     LDR x30, [sp, #(30 * 16)]

     ADD sp, sp, #(31 * 16)

     */
  for (i = 0; i <= 30; i += 1)
    p += emit_ldr (p, aarch64_register (i, 1), sp,
		   offset_memory_operand (i * 16));
  p += emit_add (p, sp, sp, immediate_operand (31 * 16));
  /* Pop SIMD&FP registers:

     LDP q0, q1, [sp]
     ...
     LDP q30, q31, [sp, #(30 * 16)]

     ADD sp, sp, #(32 * 16)

     */
  for (i = 0; i <= 30; i += 2)
    p += emit_ldp_q_offset (p, i, i + 1, sp, i * 16);
  p += emit_add (p, sp, sp, immediate_operand (32 * 16));
  /* Write the code into the inferior memory.  */
  append_insns (&buildaddr, p - buf, buf);

  /* Now emit the relocated instruction.  */
  *adjusted_insn_addr = buildaddr;
  target_read_uint32 (tpaddr, &insn);

  insn_data.base.insn_addr = tpaddr;
  insn_data.new_addr = buildaddr;
  insn_data.insn_ptr = buf;

  aarch64_relocate_instruction (insn, &visitor,
				(struct aarch64_insn_data *) &insn_data);

  /* We may not have been able to relocate the instruction.  */
  if (insn_data.insn_ptr == buf)
    {
      sprintf (err,
	       "E.Could not relocate instruction from %s to %s.",
	       core_addr_to_string_nz (tpaddr),
	       core_addr_to_string_nz (buildaddr));
      return 1;
    }

  append_insns (&buildaddr, insn_data.insn_ptr - buf, buf);
  *adjusted_insn_addr_end = buildaddr;

  /* Go back to the start of the buffer.  */
  p = buf;

  /* Emit a branch back from the jump pad.  */
  offset = (tpaddr + orig_size - buildaddr);
  if (!can_encode_int32 (offset, 28))
    {
      sprintf (err,
	       "E.Jump back from jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
	       offset);
      return 1;
    }

  p += emit_b (p, 0, offset);
  append_insns (&buildaddr, p - buf, buf);

  /* Give the caller a branch instruction into the jump pad.  */
  offset = (*jump_entry - tpaddr);
  if (!can_encode_int32 (offset, 28))
    {
      sprintf (err,
	       "E.Jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
	       offset);
      return 1;
    }

  emit_b ((uint32_t *) jjump_pad_insn, 0, offset);
  *jjump_pad_insn_size = 4;

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
/* Helper function writing LEN instructions from START into
   current_insn_ptr.  */

static void
emit_ops_insns (const uint32_t *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d instructions at %s\n",
		  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}
/* Pop a register from the stack.  */

static int
emit_pop (uint32_t *buf, struct aarch64_register rt)
{
  return emit_ldr (buf, rt, sp, postindex_memory_operand (1 * 16));
}

/* Push a register on the stack.  */

static int
emit_push (uint32_t *buf, struct aarch64_register rt)
{
  return emit_str (buf, rt, sp, preindex_memory_operand (-1 * 16));
}
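
/* Note that each stack cell is 16 bytes even though a register only
   needs 8: AArch64 requires SP to stay 16-byte aligned when it is used
   as a base register.  The emitted instructions are:

     LDR rt, [sp], #16		; emit_pop (post-index)
     STR rt, [sp, #-16]!	; emit_push (pre-index)  */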
/* Implementation of emit_ops method "emit_prologue".  */

static void
aarch64_emit_prologue (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* This function emits a prologue for the following function prototype:

     enum eval_result_type f (unsigned char *regs,
			      ULONGEST *value);

     The first argument is a buffer of raw registers.  The second
     argument is a pointer through which the result of evaluating the
     expression is returned; it is set to whatever is on top of the
     stack at the end.

     The stack set up by the prologue is as such:

     High *------------------------------------------------------*
	  | LR                                                   |
	  | FP                                                   | <- FP
	  | x1  (ULONGEST *value)                                |
	  | x0  (unsigned char *regs)                            |
     Low  *------------------------------------------------------*

     As we are implementing a stack machine, each opcode can expand the
     stack so we never know how far we are from the data saved by this
     prologue.  In order to be able to refer to value and regs later, we
     save the current stack pointer in the frame pointer.  This way, it
     is not clobbered when calling C functions.

     Finally, throughout every operation, we are using register x0 as the
     top of the stack, and x1 as a scratch register.  */

  p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-2 * 16));
  p += emit_str (p, lr, sp, offset_memory_operand (3 * 8));
  p += emit_str (p, fp, sp, offset_memory_operand (2 * 8));

  p += emit_add (p, fp, sp, immediate_operand (2 * 8));

  emit_ops_insns (buf, p - buf);
}
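
/* For reference, the prologue above assembles to roughly (a sketch,
   offsets per the operands used above):

     STP x0, x1, [sp, #-32]!	; save the regs and value arguments
     STR x30, [sp, #24]		; save LR
     STR x29, [sp, #16]		; save FP
     ADD x29, sp, #16		; FP now anchors the frame  */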
/* Implementation of emit_ops method "emit_epilogue".  */

static void
aarch64_emit_epilogue (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Store the result of the expression (x0) in *value.  */
  p += emit_sub (p, x1, fp, immediate_operand (1 * 8));
  p += emit_ldr (p, x1, x1, offset_memory_operand (0));
  p += emit_str (p, x0, x1, offset_memory_operand (0));

  /* Restore the previous state.  */
  p += emit_add (p, sp, fp, immediate_operand (2 * 8));
  p += emit_ldp (p, fp, lr, fp, offset_memory_operand (0));

  /* Return expr_eval_no_error.  */
  p += emit_mov (p, x0, immediate_operand (expr_eval_no_error));
  p += emit_ret (p, lr);

  emit_ops_insns (buf, p - buf);
}
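
/* For reference, the epilogue above assembles to roughly (a sketch):

     SUB x1, x29, #8		; x1 = address of the saved value pointer
     LDR x1, [x1]		; x1 = value
     STR x0, [x1]		; *value = top of the stack
     ADD sp, x29, #16		; drop everything the prologue pushed
     LDP x29, x30, [x29]	; restore FP and LR
     MOV x0, #(expr_eval_no_error)
     RET  */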
/* Implementation of emit_ops method "emit_add".  */

static void
aarch64_emit_add (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_add (p, x0, x1, register_operand (x0));

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_sub".  */

static void
aarch64_emit_sub (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_sub (p, x0, x1, register_operand (x0));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_mul".  */

static void
aarch64_emit_mul (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_mul (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_lsh".  */

static void
aarch64_emit_lsh (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_lslv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_rsh_signed".  */

static void
aarch64_emit_rsh_signed (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_asrv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_rsh_unsigned".  */

static void
aarch64_emit_rsh_unsigned (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_lsrv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_ext".  */

static void
aarch64_emit_ext (int arg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_sbfx (p, x0, x0, 0, arg);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_log_not".  */

static void
aarch64_emit_log_not (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* If the top of the stack is 0, replace it with 1.  Else replace it
     with 0.  */

  p += emit_cmp (p, x0, immediate_operand (0));
  p += emit_cset (p, x0, EQ);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_bit_and".  */

static void
aarch64_emit_bit_and (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_and (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_or".  */

static void
aarch64_emit_bit_or (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_orr (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_xor".  */

static void
aarch64_emit_bit_xor (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_eor (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_not".  */

static void
aarch64_emit_bit_not (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mvn (p, x0, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_equal".  */

static void
aarch64_emit_equal (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x0, register_operand (x1));
  p += emit_cset (p, x0, EQ);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_less_signed".  */

static void
aarch64_emit_less_signed (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  p += emit_cset (p, x0, LT);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_less_unsigned".  */

static void
aarch64_emit_less_unsigned (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  p += emit_cset (p, x0, LO);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_ref".  */

static void
aarch64_emit_ref (int size)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  switch (size)
    {
    case 1:
      p += emit_ldrb (p, w0, x0, offset_memory_operand (0));
      break;
    case 2:
      p += emit_ldrh (p, w0, x0, offset_memory_operand (0));
      break;
    case 4:
      p += emit_ldr (p, w0, x0, offset_memory_operand (0));
      break;
    case 8:
      p += emit_ldr (p, x0, x0, offset_memory_operand (0));
      break;
    default:
      /* Unknown size, bail on compilation.  */
      emit_error = 1;
      break;
    }

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_if_goto".  */

static void
aarch64_emit_if_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* The Z flag is set or cleared here.  */
  p += emit_cmp (p, x0, immediate_operand (0));
  /* This instruction must not change the Z flag.  */
  p += emit_pop (p, x0);
  /* Branch over the next instruction if x0 == 0.  */
  p += emit_bcond (p, EQ, 8);

  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_goto".  */

static void
aarch64_emit_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = 0;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "write_goto_address".  */

static void
aarch64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  uint32_t insn;

  emit_b (&insn, 0, to - from);
  append_insns (&from, 1, &insn);
}
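
/* To sketch the patching flow: aarch64_emit_goto and the conditional
   *_goto emitters record in *offset_p/*size_p where their placeholder
   NOP lives; once the branch target is known, the expression compiler
   calls back here and the NOP at FROM is overwritten with a single
   "B (to - from)" instruction, which is why SIZE is always 4 and goes
   unused above.  */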
/* Implementation of emit_ops method "emit_const".  */

static void
aarch64_emit_const (LONGEST num)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov_addr (p, x0, num);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_call".  */

static void
aarch64_emit_call (CORE_ADDR fn)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov_addr (p, ip0, fn);
  p += emit_blr (p, ip0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_reg".  */

static void
aarch64_emit_reg (int reg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Set x0 to unsigned char *regs.  */
  p += emit_sub (p, x0, fp, immediate_operand (2 * 8));
  p += emit_ldr (p, x0, x0, offset_memory_operand (0));
  p += emit_mov (p, x1, immediate_operand (reg));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (get_raw_reg_func_addr ());
}
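
/* The callee here is the in-process agent's raw-register accessor; its
   prototype is along the lines of (a sketch, see get_raw_reg in
   gdbserver's tracepoint code):

     ULONGEST get_raw_reg (const unsigned char *raw_regs, int regnum);

   x0 carries raw_regs, x1 carries regnum, and the result comes back in
   x0, which is exactly our top-of-stack register.  */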
/* Implementation of emit_ops method "emit_pop".  */

static void
aarch64_emit_pop (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_stack_flush".  */

static void
aarch64_emit_stack_flush (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_push (p, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_zero_ext".  */

static void
aarch64_emit_zero_ext (int arg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_ubfx (p, x0, x0, 0, arg);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_swap".  */

static void
aarch64_emit_swap (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_ldr (p, x1, sp, offset_memory_operand (0 * 16));
  p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
  p += emit_mov (p, x0, register_operand (x1));

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_stack_adjust".  */

static void
aarch64_emit_stack_adjust (int n)
{
  /* This is not needed with our design.  */
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_add (p, sp, sp, immediate_operand (n * 16));

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_int_call_1".  */

static void
aarch64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov (p, x0, immediate_operand (arg1));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (fn);
}

/* Implementation of emit_ops method "emit_void_call_2".  */

static void
aarch64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Push x0 on the stack.  */
  aarch64_emit_stack_flush ();

  /* Setup arguments for the function call:

     x0: arg1
     x1: top of the stack

     MOV x1, x0
     MOV x0, #arg1  */

  p += emit_mov (p, x1, register_operand (x0));
  p += emit_mov (p, x0, immediate_operand (arg1));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (fn);

  /* Restore x0.  */
  aarch64_emit_pop ();
}
/* Implementation of emit_ops method "emit_eq_goto".  */

static void
aarch64_emit_eq_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 != x0.  */
  p += emit_bcond (p, NE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ne_goto".  */

static void
aarch64_emit_ne_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 == x0.  */
  p += emit_bcond (p, EQ, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_lt_goto".  */

static void
aarch64_emit_lt_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 >= x0.  */
  p += emit_bcond (p, GE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_le_goto".  */

static void
aarch64_emit_le_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 > x0.  */
  p += emit_bcond (p, GT, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_gt_goto".  */

static void
aarch64_emit_gt_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 <= x0.  */
  p += emit_bcond (p, LE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ge_goto".  */

static void
aarch64_emit_ge_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 < x0.  */
  p += emit_bcond (p, LT, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
static struct emit_ops aarch64_emit_ops_impl =
{
  aarch64_emit_prologue,
  aarch64_emit_epilogue,
  aarch64_emit_add,
  aarch64_emit_sub,
  aarch64_emit_mul,
  aarch64_emit_lsh,
  aarch64_emit_rsh_signed,
  aarch64_emit_rsh_unsigned,
  aarch64_emit_ext,
  aarch64_emit_log_not,
  aarch64_emit_bit_and,
  aarch64_emit_bit_or,
  aarch64_emit_bit_xor,
  aarch64_emit_bit_not,
  aarch64_emit_equal,
  aarch64_emit_less_signed,
  aarch64_emit_less_unsigned,
  aarch64_emit_ref,
  aarch64_emit_if_goto,
  aarch64_emit_goto,
  aarch64_write_goto_address,
  aarch64_emit_const,
  aarch64_emit_call,
  aarch64_emit_reg,
  aarch64_emit_pop,
  aarch64_emit_stack_flush,
  aarch64_emit_zero_ext,
  aarch64_emit_swap,
  aarch64_emit_stack_adjust,
  aarch64_emit_int_call_1,
  aarch64_emit_void_call_2,
  aarch64_emit_eq_goto,
  aarch64_emit_ne_goto,
  aarch64_emit_lt_goto,
  aarch64_emit_le_goto,
  aarch64_emit_gt_goto,
  aarch64_emit_ge_goto,
};
/* Implementation of linux_target_ops method "emit_ops".  */

static struct emit_ops *
aarch64_emit_ops (void)
{
  return &aarch64_emit_ops_impl;
}

/* Implementation of linux_target_ops method
   "get_min_fast_tracepoint_insn_len".  */

static int
aarch64_get_min_fast_tracepoint_insn_len (void)
{
  return 4;
}

/* Implementation of linux_target_ops method "supports_range_stepping".  */

static int
aarch64_supports_range_stepping (void)
{
  return 1;
}
/* Implementation of target ops method "sw_breakpoint_from_kind".  */

const gdb_byte *
aarch64_target::sw_breakpoint_from_kind (int kind, int *size)
{
  if (is_64bit_tdesc ())
    {
      *size = aarch64_breakpoint_len;
      return aarch64_breakpoint;
    }

  return arm_sw_breakpoint_from_kind (kind, size);
}
/* Implementation of target ops method "breakpoint_kind_from_pc".  */

int
aarch64_target::breakpoint_kind_from_pc (CORE_ADDR *pcptr)
{
  if (is_64bit_tdesc ())
    return aarch64_breakpoint_len;

  return arm_breakpoint_kind_from_pc (pcptr);
}
/* Implementation of the target ops method
   "breakpoint_kind_from_current_state".  */

int
aarch64_target::breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
{
  if (is_64bit_tdesc ())
    return aarch64_breakpoint_len;

  return arm_breakpoint_kind_from_current_state (pcptr);
}
/* Support for hardware single step.  */

static int
aarch64_supports_hardware_single_step (void)
{
  return 1;
}
struct linux_target_ops the_low_target =
{
  aarch64_linux_new_process,
  aarch64_linux_delete_process,
  aarch64_linux_new_thread,
  aarch64_linux_delete_thread,
  aarch64_linux_new_fork,
  aarch64_linux_prepare_to_resume,
  NULL, /* process_qsupported */
  aarch64_supports_tracepoints,
  aarch64_get_thread_area,
  aarch64_install_fast_tracepoint_jump_pad,
  aarch64_emit_ops,
  aarch64_get_min_fast_tracepoint_insn_len,
  aarch64_supports_range_stepping,
  aarch64_supports_hardware_single_step,
  aarch64_get_syscall_trapinfo,
};

/* The linux target ops object.  */

linux_process_target *the_linux_target = &the_aarch64_target;
void
initialize_low_arch (void)
{
  initialize_low_arch_aarch32 ();

  initialize_regsets_info (&aarch64_regsets_info);
  initialize_regsets_info (&aarch64_sve_regsets_info);
}