1 /* GNU/Linux/AArch64 specific low level interface, for the remote server for
4 Copyright (C) 2009-2020 Free Software Foundation, Inc.
5 Contributed by ARM Ltd.
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
23 #include "linux-low.h"
24 #include "nat/aarch64-linux.h"
25 #include "nat/aarch64-linux-hw-point.h"
26 #include "arch/aarch64-insn.h"
27 #include "linux-aarch32-low.h"
28 #include "elf/common.h"
30 #include "tracepoint.h"
35 #include "nat/gdb_ptrace.h"
36 #include <asm/ptrace.h>
41 #include "gdb_proc_service.h"
42 #include "arch/aarch64.h"
43 #include "linux-aarch32-tdesc.h"
44 #include "linux-aarch64-tdesc.h"
45 #include "nat/aarch64-sve-linux-ptrace.h"
52 /* Linux target op definitions for the AArch64 architecture. */
54 class aarch64_target
: public linux_process_target
58 const regs_info
*get_regs_info () override
;
60 int breakpoint_kind_from_pc (CORE_ADDR
*pcptr
) override
;
62 int breakpoint_kind_from_current_state (CORE_ADDR
*pcptr
) override
;
64 const gdb_byte
*sw_breakpoint_from_kind (int kind
, int *size
) override
;
66 bool supports_z_point_type (char z_type
) override
;
70 void low_arch_setup () override
;
72 bool low_cannot_fetch_register (int regno
) override
;
74 bool low_cannot_store_register (int regno
) override
;
76 bool low_supports_breakpoints () override
;
78 CORE_ADDR
low_get_pc (regcache
*regcache
) override
;
80 void low_set_pc (regcache
*regcache
, CORE_ADDR newpc
) override
;
82 bool low_breakpoint_at (CORE_ADDR pc
) override
;
84 int low_insert_point (raw_bkpt_type type
, CORE_ADDR addr
,
85 int size
, raw_breakpoint
*bp
) override
;
87 int low_remove_point (raw_bkpt_type type
, CORE_ADDR addr
,
88 int size
, raw_breakpoint
*bp
) override
;
90 bool low_stopped_by_watchpoint () override
;
92 CORE_ADDR
low_stopped_data_address () override
;
94 bool low_siginfo_fixup (siginfo_t
*native
, gdb_byte
*inf
,
95 int direction
) override
;
97 arch_process_info
*low_new_process () override
;
99 void low_delete_process (arch_process_info
*info
) override
;
101 void low_new_thread (lwp_info
*) override
;
103 void low_delete_thread (arch_lwp_info
*) override
;
105 void low_new_fork (process_info
*parent
, process_info
*child
) override
;
107 void low_prepare_to_resume (lwp_info
*lwp
) override
;
110 /* The singleton target ops object. */
112 static aarch64_target the_aarch64_target
;
115 aarch64_target::low_cannot_fetch_register (int regno
)
117 gdb_assert_not_reached ("linux target op low_cannot_fetch_register "
118 "is not implemented by the target");
122 aarch64_target::low_cannot_store_register (int regno
)
124 gdb_assert_not_reached ("linux target op low_cannot_store_register "
125 "is not implemented by the target");
129 aarch64_target::low_prepare_to_resume (lwp_info
*lwp
)
131 aarch64_linux_prepare_to_resume (lwp
);
134 /* Per-process arch-specific data we want to keep. */
136 struct arch_process_info
138 /* Hardware breakpoint/watchpoint data.
139 The reason for them to be per-process rather than per-thread is
140 due to the lack of information in the gdbserver environment;
141 gdbserver is not told that whether a requested hardware
142 breakpoint/watchpoint is thread specific or not, so it has to set
143 each hw bp/wp for every thread in the current process. The
144 higher level bp/wp management in gdb will resume a thread if a hw
145 bp/wp trap is not expected for it. Since the hw bp/wp setting is
146 same for each thread, it is reasonable for the data to live here.
148 struct aarch64_debug_reg_state debug_reg_state
;
151 /* Return true if the size of register 0 is 8 byte. */
154 is_64bit_tdesc (void)
156 struct regcache
*regcache
= get_thread_regcache (current_thread
, 0);
158 return register_size (regcache
->tdesc
, 0) == 8;
161 /* Return true if the regcache contains the number of SVE registers. */
166 struct regcache
*regcache
= get_thread_regcache (current_thread
, 0);
168 return tdesc_contains_feature (regcache
->tdesc
, "org.gnu.gdb.aarch64.sve");
172 aarch64_fill_gregset (struct regcache
*regcache
, void *buf
)
174 struct user_pt_regs
*regset
= (struct user_pt_regs
*) buf
;
177 for (i
= 0; i
< AARCH64_X_REGS_NUM
; i
++)
178 collect_register (regcache
, AARCH64_X0_REGNUM
+ i
, ®set
->regs
[i
]);
179 collect_register (regcache
, AARCH64_SP_REGNUM
, ®set
->sp
);
180 collect_register (regcache
, AARCH64_PC_REGNUM
, ®set
->pc
);
181 collect_register (regcache
, AARCH64_CPSR_REGNUM
, ®set
->pstate
);
185 aarch64_store_gregset (struct regcache
*regcache
, const void *buf
)
187 const struct user_pt_regs
*regset
= (const struct user_pt_regs
*) buf
;
190 for (i
= 0; i
< AARCH64_X_REGS_NUM
; i
++)
191 supply_register (regcache
, AARCH64_X0_REGNUM
+ i
, ®set
->regs
[i
]);
192 supply_register (regcache
, AARCH64_SP_REGNUM
, ®set
->sp
);
193 supply_register (regcache
, AARCH64_PC_REGNUM
, ®set
->pc
);
194 supply_register (regcache
, AARCH64_CPSR_REGNUM
, ®set
->pstate
);
198 aarch64_fill_fpregset (struct regcache
*regcache
, void *buf
)
200 struct user_fpsimd_state
*regset
= (struct user_fpsimd_state
*) buf
;
203 for (i
= 0; i
< AARCH64_V_REGS_NUM
; i
++)
204 collect_register (regcache
, AARCH64_V0_REGNUM
+ i
, ®set
->vregs
[i
]);
205 collect_register (regcache
, AARCH64_FPSR_REGNUM
, ®set
->fpsr
);
206 collect_register (regcache
, AARCH64_FPCR_REGNUM
, ®set
->fpcr
);
210 aarch64_store_fpregset (struct regcache
*regcache
, const void *buf
)
212 const struct user_fpsimd_state
*regset
213 = (const struct user_fpsimd_state
*) buf
;
216 for (i
= 0; i
< AARCH64_V_REGS_NUM
; i
++)
217 supply_register (regcache
, AARCH64_V0_REGNUM
+ i
, ®set
->vregs
[i
]);
218 supply_register (regcache
, AARCH64_FPSR_REGNUM
, ®set
->fpsr
);
219 supply_register (regcache
, AARCH64_FPCR_REGNUM
, ®set
->fpcr
);
222 /* Store the pauth registers to regcache. */
225 aarch64_store_pauthregset (struct regcache
*regcache
, const void *buf
)
227 uint64_t *pauth_regset
= (uint64_t *) buf
;
228 int pauth_base
= find_regno (regcache
->tdesc
, "pauth_dmask");
233 supply_register (regcache
, AARCH64_PAUTH_DMASK_REGNUM (pauth_base
),
235 supply_register (regcache
, AARCH64_PAUTH_CMASK_REGNUM (pauth_base
),
240 aarch64_target::low_supports_breakpoints ()
245 /* Implementation of linux target ops method "low_get_pc". */
248 aarch64_target::low_get_pc (regcache
*regcache
)
250 if (register_size (regcache
->tdesc
, 0) == 8)
251 return linux_get_pc_64bit (regcache
);
253 return linux_get_pc_32bit (regcache
);
256 /* Implementation of linux target ops method "low_set_pc". */
259 aarch64_target::low_set_pc (regcache
*regcache
, CORE_ADDR pc
)
261 if (register_size (regcache
->tdesc
, 0) == 8)
262 linux_set_pc_64bit (regcache
, pc
);
264 linux_set_pc_32bit (regcache
, pc
);
267 #define aarch64_breakpoint_len 4
269 /* AArch64 BRK software debug mode instruction.
270 This instruction needs to match gdb/aarch64-tdep.c
271 (aarch64_default_breakpoint). */
272 static const gdb_byte aarch64_breakpoint
[] = {0x00, 0x00, 0x20, 0xd4};
274 /* Implementation of linux target ops method "low_breakpoint_at". */
277 aarch64_target::low_breakpoint_at (CORE_ADDR where
)
279 if (is_64bit_tdesc ())
281 gdb_byte insn
[aarch64_breakpoint_len
];
283 read_memory (where
, (unsigned char *) &insn
, aarch64_breakpoint_len
);
284 if (memcmp (insn
, aarch64_breakpoint
, aarch64_breakpoint_len
) == 0)
290 return arm_breakpoint_at (where
);
294 aarch64_init_debug_reg_state (struct aarch64_debug_reg_state
*state
)
298 for (i
= 0; i
< AARCH64_HBP_MAX_NUM
; ++i
)
300 state
->dr_addr_bp
[i
] = 0;
301 state
->dr_ctrl_bp
[i
] = 0;
302 state
->dr_ref_count_bp
[i
] = 0;
305 for (i
= 0; i
< AARCH64_HWP_MAX_NUM
; ++i
)
307 state
->dr_addr_wp
[i
] = 0;
308 state
->dr_ctrl_wp
[i
] = 0;
309 state
->dr_ref_count_wp
[i
] = 0;
313 /* Return the pointer to the debug register state structure in the
314 current process' arch-specific data area. */
316 struct aarch64_debug_reg_state
*
317 aarch64_get_debug_reg_state (pid_t pid
)
319 struct process_info
*proc
= find_process_pid (pid
);
321 return &proc
->priv
->arch_private
->debug_reg_state
;
324 /* Implementation of target ops method "supports_z_point_type". */
327 aarch64_target::supports_z_point_type (char z_type
)
333 case Z_PACKET_WRITE_WP
:
334 case Z_PACKET_READ_WP
:
335 case Z_PACKET_ACCESS_WP
:
342 /* Implementation of linux target ops method "low_insert_point".
344 It actually only records the info of the to-be-inserted bp/wp;
345 the actual insertion will happen when threads are resumed. */
348 aarch64_target::low_insert_point (raw_bkpt_type type
, CORE_ADDR addr
,
349 int len
, raw_breakpoint
*bp
)
352 enum target_hw_bp_type targ_type
;
353 struct aarch64_debug_reg_state
*state
354 = aarch64_get_debug_reg_state (pid_of (current_thread
));
357 fprintf (stderr
, "insert_point on entry (addr=0x%08lx, len=%d)\n",
358 (unsigned long) addr
, len
);
360 /* Determine the type from the raw breakpoint type. */
361 targ_type
= raw_bkpt_type_to_target_hw_bp_type (type
);
363 if (targ_type
!= hw_execute
)
365 if (aarch64_linux_region_ok_for_watchpoint (addr
, len
))
366 ret
= aarch64_handle_watchpoint (targ_type
, addr
, len
,
367 1 /* is_insert */, state
);
375 /* LEN is 3 means the breakpoint is set on a 32-bit thumb
376 instruction. Set it to 2 to correctly encode length bit
377 mask in hardware/watchpoint control register. */
380 ret
= aarch64_handle_breakpoint (targ_type
, addr
, len
,
381 1 /* is_insert */, state
);
385 aarch64_show_debug_reg_state (state
, "insert_point", addr
, len
,
391 /* Implementation of linux target ops method "low_remove_point".
393 It actually only records the info of the to-be-removed bp/wp,
394 the actual removal will be done when threads are resumed. */
397 aarch64_target::low_remove_point (raw_bkpt_type type
, CORE_ADDR addr
,
398 int len
, raw_breakpoint
*bp
)
401 enum target_hw_bp_type targ_type
;
402 struct aarch64_debug_reg_state
*state
403 = aarch64_get_debug_reg_state (pid_of (current_thread
));
406 fprintf (stderr
, "remove_point on entry (addr=0x%08lx, len=%d)\n",
407 (unsigned long) addr
, len
);
409 /* Determine the type from the raw breakpoint type. */
410 targ_type
= raw_bkpt_type_to_target_hw_bp_type (type
);
412 /* Set up state pointers. */
413 if (targ_type
!= hw_execute
)
415 aarch64_handle_watchpoint (targ_type
, addr
, len
, 0 /* is_insert */,
421 /* LEN is 3 means the breakpoint is set on a 32-bit thumb
422 instruction. Set it to 2 to correctly encode length bit
423 mask in hardware/watchpoint control register. */
426 ret
= aarch64_handle_breakpoint (targ_type
, addr
, len
,
427 0 /* is_insert */, state
);
431 aarch64_show_debug_reg_state (state
, "remove_point", addr
, len
,
437 /* Implementation of linux target ops method "low_stopped_data_address". */
440 aarch64_target::low_stopped_data_address ()
444 struct aarch64_debug_reg_state
*state
;
446 pid
= lwpid_of (current_thread
);
448 /* Get the siginfo. */
449 if (ptrace (PTRACE_GETSIGINFO
, pid
, NULL
, &siginfo
) != 0)
450 return (CORE_ADDR
) 0;
452 /* Need to be a hardware breakpoint/watchpoint trap. */
453 if (siginfo
.si_signo
!= SIGTRAP
454 || (siginfo
.si_code
& 0xffff) != 0x0004 /* TRAP_HWBKPT */)
455 return (CORE_ADDR
) 0;
457 /* Check if the address matches any watched address. */
458 state
= aarch64_get_debug_reg_state (pid_of (current_thread
));
459 for (i
= aarch64_num_wp_regs
- 1; i
>= 0; --i
)
461 const unsigned int offset
462 = aarch64_watchpoint_offset (state
->dr_ctrl_wp
[i
]);
463 const unsigned int len
= aarch64_watchpoint_length (state
->dr_ctrl_wp
[i
]);
464 const CORE_ADDR addr_trap
= (CORE_ADDR
) siginfo
.si_addr
;
465 const CORE_ADDR addr_watch
= state
->dr_addr_wp
[i
] + offset
;
466 const CORE_ADDR addr_watch_aligned
= align_down (state
->dr_addr_wp
[i
], 8);
467 const CORE_ADDR addr_orig
= state
->dr_addr_orig_wp
[i
];
469 if (state
->dr_ref_count_wp
[i
]
470 && DR_CONTROL_ENABLED (state
->dr_ctrl_wp
[i
])
471 && addr_trap
>= addr_watch_aligned
472 && addr_trap
< addr_watch
+ len
)
474 /* ADDR_TRAP reports the first address of the memory range
475 accessed by the CPU, regardless of what was the memory
476 range watched. Thus, a large CPU access that straddles
477 the ADDR_WATCH..ADDR_WATCH+LEN range may result in an
478 ADDR_TRAP that is lower than the
479 ADDR_WATCH..ADDR_WATCH+LEN range. E.g.:
481 addr: | 4 | 5 | 6 | 7 | 8 |
482 |---- range watched ----|
483 |----------- range accessed ------------|
485 In this case, ADDR_TRAP will be 4.
487 To match a watchpoint known to GDB core, we must never
488 report *ADDR_P outside of any ADDR_WATCH..ADDR_WATCH+LEN
489 range. ADDR_WATCH <= ADDR_TRAP < ADDR_ORIG is a false
490 positive on kernels older than 4.10. See PR
496 return (CORE_ADDR
) 0;
499 /* Implementation of linux target ops method "low_stopped_by_watchpoint". */
502 aarch64_target::low_stopped_by_watchpoint ()
504 return (low_stopped_data_address () != 0);
507 /* Fetch the thread-local storage pointer for libthread_db. */
510 ps_get_thread_area (struct ps_prochandle
*ph
,
511 lwpid_t lwpid
, int idx
, void **base
)
513 return aarch64_ps_get_thread_area (ph
, lwpid
, idx
, base
,
517 /* Implementation of linux target ops method "low_siginfo_fixup". */
520 aarch64_target::low_siginfo_fixup (siginfo_t
*native
, gdb_byte
*inf
,
523 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
524 if (!is_64bit_tdesc ())
527 aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo
*) inf
,
530 aarch64_siginfo_from_compat_siginfo (native
,
531 (struct compat_siginfo
*) inf
);
539 /* Implementation of linux target ops method "low_new_process". */
542 aarch64_target::low_new_process ()
544 struct arch_process_info
*info
= XCNEW (struct arch_process_info
);
546 aarch64_init_debug_reg_state (&info
->debug_reg_state
);
551 /* Implementation of linux target ops method "low_delete_process". */
554 aarch64_target::low_delete_process (arch_process_info
*info
)
560 aarch64_target::low_new_thread (lwp_info
*lwp
)
562 aarch64_linux_new_thread (lwp
);
566 aarch64_target::low_delete_thread (arch_lwp_info
*arch_lwp
)
568 aarch64_linux_delete_thread (arch_lwp
);
571 /* Implementation of linux target ops method "low_new_fork". */
574 aarch64_target::low_new_fork (process_info
*parent
,
577 /* These are allocated by linux_add_process. */
578 gdb_assert (parent
->priv
!= NULL
579 && parent
->priv
->arch_private
!= NULL
);
580 gdb_assert (child
->priv
!= NULL
581 && child
->priv
->arch_private
!= NULL
);
583 /* Linux kernel before 2.6.33 commit
584 72f674d203cd230426437cdcf7dd6f681dad8b0d
585 will inherit hardware debug registers from parent
586 on fork/vfork/clone. Newer Linux kernels create such tasks with
587 zeroed debug registers.
589 GDB core assumes the child inherits the watchpoints/hw
590 breakpoints of the parent, and will remove them all from the
591 forked off process. Copy the debug registers mirrors into the
592 new process so that all breakpoints and watchpoints can be
593 removed together. The debug registers mirror will become zeroed
594 in the end before detaching the forked off process, thus making
595 this compatible with older Linux kernels too. */
597 *child
->priv
->arch_private
= *parent
->priv
->arch_private
;
600 /* Matches HWCAP_PACA in kernel header arch/arm64/include/uapi/asm/hwcap.h. */
601 #define AARCH64_HWCAP_PACA (1 << 30)
603 /* Implementation of linux target ops method "low_arch_setup". */
606 aarch64_target::low_arch_setup ()
608 unsigned int machine
;
612 tid
= lwpid_of (current_thread
);
614 is_elf64
= linux_pid_exe_is_elf_64_file (tid
, &machine
);
618 uint64_t vq
= aarch64_sve_get_vq (tid
);
619 unsigned long hwcap
= linux_get_hwcap (8);
620 bool pauth_p
= hwcap
& AARCH64_HWCAP_PACA
;
622 current_process ()->tdesc
= aarch64_linux_read_description (vq
, pauth_p
);
625 current_process ()->tdesc
= aarch32_linux_read_description ();
627 aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread
));
630 /* Wrapper for aarch64_sve_regs_copy_to_reg_buf. */
633 aarch64_sve_regs_copy_to_regcache (struct regcache
*regcache
, const void *buf
)
635 return aarch64_sve_regs_copy_to_reg_buf (regcache
, buf
);
638 /* Wrapper for aarch64_sve_regs_copy_from_reg_buf. */
641 aarch64_sve_regs_copy_from_regcache (struct regcache
*regcache
, void *buf
)
643 return aarch64_sve_regs_copy_from_reg_buf (regcache
, buf
);
646 static struct regset_info aarch64_regsets
[] =
648 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_PRSTATUS
,
649 sizeof (struct user_pt_regs
), GENERAL_REGS
,
650 aarch64_fill_gregset
, aarch64_store_gregset
},
651 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_FPREGSET
,
652 sizeof (struct user_fpsimd_state
), FP_REGS
,
653 aarch64_fill_fpregset
, aarch64_store_fpregset
655 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_ARM_PAC_MASK
,
656 AARCH64_PAUTH_REGS_SIZE
, OPTIONAL_REGS
,
657 NULL
, aarch64_store_pauthregset
},
661 static struct regsets_info aarch64_regsets_info
=
663 aarch64_regsets
, /* regsets */
665 NULL
, /* disabled_regsets */
668 static struct regs_info regs_info_aarch64
=
670 NULL
, /* regset_bitmap */
672 &aarch64_regsets_info
,
675 static struct regset_info aarch64_sve_regsets
[] =
677 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_PRSTATUS
,
678 sizeof (struct user_pt_regs
), GENERAL_REGS
,
679 aarch64_fill_gregset
, aarch64_store_gregset
},
680 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_ARM_SVE
,
681 SVE_PT_SIZE (AARCH64_MAX_SVE_VQ
, SVE_PT_REGS_SVE
), EXTENDED_REGS
,
682 aarch64_sve_regs_copy_from_regcache
, aarch64_sve_regs_copy_to_regcache
684 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_ARM_PAC_MASK
,
685 AARCH64_PAUTH_REGS_SIZE
, OPTIONAL_REGS
,
686 NULL
, aarch64_store_pauthregset
},
690 static struct regsets_info aarch64_sve_regsets_info
=
692 aarch64_sve_regsets
, /* regsets. */
693 0, /* num_regsets. */
694 NULL
, /* disabled_regsets. */
697 static struct regs_info regs_info_aarch64_sve
=
699 NULL
, /* regset_bitmap. */
701 &aarch64_sve_regsets_info
,
704 /* Implementation of linux target ops method "get_regs_info". */
707 aarch64_target::get_regs_info ()
709 if (!is_64bit_tdesc ())
710 return ®s_info_aarch32
;
713 return ®s_info_aarch64_sve
;
715 return ®s_info_aarch64
;
718 /* Implementation of linux_target_ops method "supports_tracepoints". */
721 aarch64_supports_tracepoints (void)
723 if (current_thread
== NULL
)
727 /* We don't support tracepoints on aarch32 now. */
728 return is_64bit_tdesc ();
732 /* Implementation of linux_target_ops method "get_thread_area". */
735 aarch64_get_thread_area (int lwpid
, CORE_ADDR
*addrp
)
740 iovec
.iov_base
= ®
;
741 iovec
.iov_len
= sizeof (reg
);
743 if (ptrace (PTRACE_GETREGSET
, lwpid
, NT_ARM_TLS
, &iovec
) != 0)
751 /* Implementation of linux_target_ops method "get_syscall_trapinfo". */
754 aarch64_get_syscall_trapinfo (struct regcache
*regcache
, int *sysno
)
756 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
762 collect_register_by_name (regcache
, "x8", &l_sysno
);
763 *sysno
= (int) l_sysno
;
766 collect_register_by_name (regcache
, "r7", sysno
);
769 /* List of condition codes that we need. */
771 enum aarch64_condition_codes
782 enum aarch64_operand_type
788 /* Representation of an operand. At this time, it only supports register
789 and immediate types. */
791 struct aarch64_operand
793 /* Type of the operand. */
794 enum aarch64_operand_type type
;
796 /* Value of the operand according to the type. */
800 struct aarch64_register reg
;
804 /* List of registers that we are currently using, we can add more here as
805 we need to use them. */
807 /* General purpose scratch registers (64 bit). */
808 static const struct aarch64_register x0
= { 0, 1 };
809 static const struct aarch64_register x1
= { 1, 1 };
810 static const struct aarch64_register x2
= { 2, 1 };
811 static const struct aarch64_register x3
= { 3, 1 };
812 static const struct aarch64_register x4
= { 4, 1 };
814 /* General purpose scratch registers (32 bit). */
815 static const struct aarch64_register w0
= { 0, 0 };
816 static const struct aarch64_register w2
= { 2, 0 };
818 /* Intra-procedure scratch registers. */
819 static const struct aarch64_register ip0
= { 16, 1 };
821 /* Special purpose registers. */
822 static const struct aarch64_register fp
= { 29, 1 };
823 static const struct aarch64_register lr
= { 30, 1 };
824 static const struct aarch64_register sp
= { 31, 1 };
825 static const struct aarch64_register xzr
= { 31, 1 };
827 /* Dynamically allocate a new register. If we know the register
828 statically, we should make it a global as above instead of using this
831 static struct aarch64_register
832 aarch64_register (unsigned num
, int is64
)
834 return (struct aarch64_register
) { num
, is64
};
837 /* Helper function to create a register operand, for instructions with
838 different types of operands.
841 p += emit_mov (p, x0, register_operand (x1)); */
843 static struct aarch64_operand
844 register_operand (struct aarch64_register reg
)
846 struct aarch64_operand operand
;
848 operand
.type
= OPERAND_REGISTER
;
854 /* Helper function to create an immediate operand, for instructions with
855 different types of operands.
858 p += emit_mov (p, x0, immediate_operand (12)); */
860 static struct aarch64_operand
861 immediate_operand (uint32_t imm
)
863 struct aarch64_operand operand
;
865 operand
.type
= OPERAND_IMMEDIATE
;
871 /* Helper function to create an offset memory operand.
874 p += emit_ldr (p, x0, sp, offset_memory_operand (16)); */
876 static struct aarch64_memory_operand
877 offset_memory_operand (int32_t offset
)
879 return (struct aarch64_memory_operand
) { MEMORY_OPERAND_OFFSET
, offset
};
882 /* Helper function to create a pre-index memory operand.
885 p += emit_ldr (p, x0, sp, preindex_memory_operand (16)); */
887 static struct aarch64_memory_operand
888 preindex_memory_operand (int32_t index
)
890 return (struct aarch64_memory_operand
) { MEMORY_OPERAND_PREINDEX
, index
};
893 /* Helper function to create a post-index memory operand.
896 p += emit_ldr (p, x0, sp, postindex_memory_operand (16)); */
898 static struct aarch64_memory_operand
899 postindex_memory_operand (int32_t index
)
901 return (struct aarch64_memory_operand
) { MEMORY_OPERAND_POSTINDEX
, index
};
904 /* System control registers. These special registers can be written and
905 read with the MRS and MSR instructions.
907 - NZCV: Condition flags. GDB refers to this register under the CPSR
909 - FPSR: Floating-point status register.
910 - FPCR: Floating-point control registers.
911 - TPIDR_EL0: Software thread ID register. */
913 enum aarch64_system_control_registers
915 /* op0 op1 crn crm op2 */
916 NZCV
= (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
917 FPSR
= (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
918 FPCR
= (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
919 TPIDR_EL0
= (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
922 /* Write a BLR instruction into *BUF.
926 RN is the register to branch to. */
929 emit_blr (uint32_t *buf
, struct aarch64_register rn
)
931 return aarch64_emit_insn (buf
, BLR
| ENCODE (rn
.num
, 5, 5));
934 /* Write a RET instruction into *BUF.
938 RN is the register to branch to. */
941 emit_ret (uint32_t *buf
, struct aarch64_register rn
)
943 return aarch64_emit_insn (buf
, RET
| ENCODE (rn
.num
, 5, 5));
947 emit_load_store_pair (uint32_t *buf
, enum aarch64_opcodes opcode
,
948 struct aarch64_register rt
,
949 struct aarch64_register rt2
,
950 struct aarch64_register rn
,
951 struct aarch64_memory_operand operand
)
958 opc
= ENCODE (2, 2, 30);
960 opc
= ENCODE (0, 2, 30);
962 switch (operand
.type
)
964 case MEMORY_OPERAND_OFFSET
:
966 pre_index
= ENCODE (1, 1, 24);
967 write_back
= ENCODE (0, 1, 23);
970 case MEMORY_OPERAND_POSTINDEX
:
972 pre_index
= ENCODE (0, 1, 24);
973 write_back
= ENCODE (1, 1, 23);
976 case MEMORY_OPERAND_PREINDEX
:
978 pre_index
= ENCODE (1, 1, 24);
979 write_back
= ENCODE (1, 1, 23);
986 return aarch64_emit_insn (buf
, opcode
| opc
| pre_index
| write_back
987 | ENCODE (operand
.index
>> 3, 7, 15)
988 | ENCODE (rt2
.num
, 5, 10)
989 | ENCODE (rn
.num
, 5, 5) | ENCODE (rt
.num
, 5, 0));
992 /* Write a STP instruction into *BUF.
994 STP rt, rt2, [rn, #offset]
995 STP rt, rt2, [rn, #index]!
996 STP rt, rt2, [rn], #index
998 RT and RT2 are the registers to store.
999 RN is the base address register.
1000 OFFSET is the immediate to add to the base address. It is limited to a
1001 -512 .. 504 range (7 bits << 3). */
1004 emit_stp (uint32_t *buf
, struct aarch64_register rt
,
1005 struct aarch64_register rt2
, struct aarch64_register rn
,
1006 struct aarch64_memory_operand operand
)
1008 return emit_load_store_pair (buf
, STP
, rt
, rt2
, rn
, operand
);
1011 /* Write a LDP instruction into *BUF.
1013 LDP rt, rt2, [rn, #offset]
1014 LDP rt, rt2, [rn, #index]!
1015 LDP rt, rt2, [rn], #index
1017 RT and RT2 are the registers to store.
1018 RN is the base address register.
1019 OFFSET is the immediate to add to the base address. It is limited to a
1020 -512 .. 504 range (7 bits << 3). */
1023 emit_ldp (uint32_t *buf
, struct aarch64_register rt
,
1024 struct aarch64_register rt2
, struct aarch64_register rn
,
1025 struct aarch64_memory_operand operand
)
1027 return emit_load_store_pair (buf
, LDP
, rt
, rt2
, rn
, operand
);
1030 /* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.
1032 LDP qt, qt2, [rn, #offset]
1034 RT and RT2 are the Q registers to store.
1035 RN is the base address register.
1036 OFFSET is the immediate to add to the base address. It is limited to
1037 -1024 .. 1008 range (7 bits << 4). */
1040 emit_ldp_q_offset (uint32_t *buf
, unsigned rt
, unsigned rt2
,
1041 struct aarch64_register rn
, int32_t offset
)
1043 uint32_t opc
= ENCODE (2, 2, 30);
1044 uint32_t pre_index
= ENCODE (1, 1, 24);
1046 return aarch64_emit_insn (buf
, LDP_SIMD_VFP
| opc
| pre_index
1047 | ENCODE (offset
>> 4, 7, 15)
1048 | ENCODE (rt2
, 5, 10)
1049 | ENCODE (rn
.num
, 5, 5) | ENCODE (rt
, 5, 0));
1052 /* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.
1054 STP qt, qt2, [rn, #offset]
1056 RT and RT2 are the Q registers to store.
1057 RN is the base address register.
1058 OFFSET is the immediate to add to the base address. It is limited to
1059 -1024 .. 1008 range (7 bits << 4). */
1062 emit_stp_q_offset (uint32_t *buf
, unsigned rt
, unsigned rt2
,
1063 struct aarch64_register rn
, int32_t offset
)
1065 uint32_t opc
= ENCODE (2, 2, 30);
1066 uint32_t pre_index
= ENCODE (1, 1, 24);
1068 return aarch64_emit_insn (buf
, STP_SIMD_VFP
| opc
| pre_index
1069 | ENCODE (offset
>> 4, 7, 15)
1070 | ENCODE (rt2
, 5, 10)
1071 | ENCODE (rn
.num
, 5, 5) | ENCODE (rt
, 5, 0));
1074 /* Write a LDRH instruction into *BUF.
1076 LDRH wt, [xn, #offset]
1077 LDRH wt, [xn, #index]!
1078 LDRH wt, [xn], #index
1080 RT is the register to store.
1081 RN is the base address register.
1082 OFFSET is the immediate to add to the base address. It is limited to
1083 0 .. 32760 range (12 bits << 3). */
1086 emit_ldrh (uint32_t *buf
, struct aarch64_register rt
,
1087 struct aarch64_register rn
,
1088 struct aarch64_memory_operand operand
)
1090 return aarch64_emit_load_store (buf
, 1, LDR
, rt
, rn
, operand
);
1093 /* Write a LDRB instruction into *BUF.
1095 LDRB wt, [xn, #offset]
1096 LDRB wt, [xn, #index]!
1097 LDRB wt, [xn], #index
1099 RT is the register to store.
1100 RN is the base address register.
1101 OFFSET is the immediate to add to the base address. It is limited to
1102 0 .. 32760 range (12 bits << 3). */
1105 emit_ldrb (uint32_t *buf
, struct aarch64_register rt
,
1106 struct aarch64_register rn
,
1107 struct aarch64_memory_operand operand
)
1109 return aarch64_emit_load_store (buf
, 0, LDR
, rt
, rn
, operand
);
1114 /* Write a STR instruction into *BUF.
1116 STR rt, [rn, #offset]
1117 STR rt, [rn, #index]!
1118 STR rt, [rn], #index
1120 RT is the register to store.
1121 RN is the base address register.
1122 OFFSET is the immediate to add to the base address. It is limited to
1123 0 .. 32760 range (12 bits << 3). */
1126 emit_str (uint32_t *buf
, struct aarch64_register rt
,
1127 struct aarch64_register rn
,
1128 struct aarch64_memory_operand operand
)
1130 return aarch64_emit_load_store (buf
, rt
.is64
? 3 : 2, STR
, rt
, rn
, operand
);
1133 /* Helper function emitting an exclusive load or store instruction. */
1136 emit_load_store_exclusive (uint32_t *buf
, uint32_t size
,
1137 enum aarch64_opcodes opcode
,
1138 struct aarch64_register rs
,
1139 struct aarch64_register rt
,
1140 struct aarch64_register rt2
,
1141 struct aarch64_register rn
)
1143 return aarch64_emit_insn (buf
, opcode
| ENCODE (size
, 2, 30)
1144 | ENCODE (rs
.num
, 5, 16) | ENCODE (rt2
.num
, 5, 10)
1145 | ENCODE (rn
.num
, 5, 5) | ENCODE (rt
.num
, 5, 0));
1148 /* Write a LAXR instruction into *BUF.
1152 RT is the destination register.
1153 RN is the base address register. */
1156 emit_ldaxr (uint32_t *buf
, struct aarch64_register rt
,
1157 struct aarch64_register rn
)
1159 return emit_load_store_exclusive (buf
, rt
.is64
? 3 : 2, LDAXR
, xzr
, rt
,
1163 /* Write a STXR instruction into *BUF.
1167 RS is the result register, it indicates if the store succeeded or not.
1168 RT is the destination register.
1169 RN is the base address register. */
1172 emit_stxr (uint32_t *buf
, struct aarch64_register rs
,
1173 struct aarch64_register rt
, struct aarch64_register rn
)
1175 return emit_load_store_exclusive (buf
, rt
.is64
? 3 : 2, STXR
, rs
, rt
,
1179 /* Write a STLR instruction into *BUF.
1183 RT is the register to store.
1184 RN is the base address register. */
1187 emit_stlr (uint32_t *buf
, struct aarch64_register rt
,
1188 struct aarch64_register rn
)
1190 return emit_load_store_exclusive (buf
, rt
.is64
? 3 : 2, STLR
, xzr
, rt
,
1194 /* Helper function for data processing instructions with register sources. */
1197 emit_data_processing_reg (uint32_t *buf
, uint32_t opcode
,
1198 struct aarch64_register rd
,
1199 struct aarch64_register rn
,
1200 struct aarch64_register rm
)
1202 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1204 return aarch64_emit_insn (buf
, opcode
| size
| ENCODE (rm
.num
, 5, 16)
1205 | ENCODE (rn
.num
, 5, 5) | ENCODE (rd
.num
, 5, 0));
1208 /* Helper function for data processing instructions taking either a register
1212 emit_data_processing (uint32_t *buf
, enum aarch64_opcodes opcode
,
1213 struct aarch64_register rd
,
1214 struct aarch64_register rn
,
1215 struct aarch64_operand operand
)
1217 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1218 /* The opcode is different for register and immediate source operands. */
1219 uint32_t operand_opcode
;
1221 if (operand
.type
== OPERAND_IMMEDIATE
)
1223 /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
1224 operand_opcode
= ENCODE (8, 4, 25);
1226 return aarch64_emit_insn (buf
, opcode
| operand_opcode
| size
1227 | ENCODE (operand
.imm
, 12, 10)
1228 | ENCODE (rn
.num
, 5, 5)
1229 | ENCODE (rd
.num
, 5, 0));
1233 /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
1234 operand_opcode
= ENCODE (5, 4, 25);
1236 return emit_data_processing_reg (buf
, opcode
| operand_opcode
, rd
,
1241 /* Write an ADD instruction into *BUF.
1246 This function handles both an immediate and register add.
1248 RD is the destination register.
1249 RN is the input register.
1250 OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
1251 OPERAND_REGISTER. */
1254 emit_add (uint32_t *buf
, struct aarch64_register rd
,
1255 struct aarch64_register rn
, struct aarch64_operand operand
)
1257 return emit_data_processing (buf
, ADD
, rd
, rn
, operand
);
1260 /* Write a SUB instruction into *BUF.
1265 This function handles both an immediate and register sub.
1267 RD is the destination register.
1268 RN is the input register.
1269 IMM is the immediate to substract to RN. */
1272 emit_sub (uint32_t *buf
, struct aarch64_register rd
,
1273 struct aarch64_register rn
, struct aarch64_operand operand
)
1275 return emit_data_processing (buf
, SUB
, rd
, rn
, operand
);
1278 /* Write a MOV instruction into *BUF.
1283 This function handles both a wide immediate move and a register move,
1284 with the condition that the source register is not xzr. xzr and the
1285 stack pointer share the same encoding and this function only supports
1288 RD is the destination register.
1289 OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
1290 OPERAND_REGISTER. */
1293 emit_mov (uint32_t *buf
, struct aarch64_register rd
,
1294 struct aarch64_operand operand
)
1296 if (operand
.type
== OPERAND_IMMEDIATE
)
1298 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1299 /* Do not shift the immediate. */
1300 uint32_t shift
= ENCODE (0, 2, 21);
1302 return aarch64_emit_insn (buf
, MOV
| size
| shift
1303 | ENCODE (operand
.imm
, 16, 5)
1304 | ENCODE (rd
.num
, 5, 0));
1307 return emit_add (buf
, rd
, operand
.reg
, immediate_operand (0));
1310 /* Write a MOVK instruction into *BUF.
1312 MOVK rd, #imm, lsl #shift
1314 RD is the destination register.
1315 IMM is the immediate.
1316 SHIFT is the logical shift left to apply to IMM. */
1319 emit_movk (uint32_t *buf
, struct aarch64_register rd
, uint32_t imm
,
1322 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1324 return aarch64_emit_insn (buf
, MOVK
| size
| ENCODE (shift
, 2, 21) |
1325 ENCODE (imm
, 16, 5) | ENCODE (rd
.num
, 5, 0));
1328 /* Write instructions into *BUF in order to move ADDR into a register.
1329 ADDR can be a 64-bit value.
1331 This function will emit a series of MOV and MOVK instructions, such as:
1334 MOVK xd, #(addr >> 16), lsl #16
1335 MOVK xd, #(addr >> 32), lsl #32
1336 MOVK xd, #(addr >> 48), lsl #48 */
1339 emit_mov_addr (uint32_t *buf
, struct aarch64_register rd
, CORE_ADDR addr
)
1343 /* The MOV (wide immediate) instruction clears to top bits of the
1345 p
+= emit_mov (p
, rd
, immediate_operand (addr
& 0xffff));
1347 if ((addr
>> 16) != 0)
1348 p
+= emit_movk (p
, rd
, (addr
>> 16) & 0xffff, 1);
1352 if ((addr
>> 32) != 0)
1353 p
+= emit_movk (p
, rd
, (addr
>> 32) & 0xffff, 2);
1357 if ((addr
>> 48) != 0)
1358 p
+= emit_movk (p
, rd
, (addr
>> 48) & 0xffff, 3);
1363 /* Write a SUBS instruction into *BUF.
1367 This instruction update the condition flags.
1369 RD is the destination register.
1370 RN and RM are the source registers. */
1373 emit_subs (uint32_t *buf
, struct aarch64_register rd
,
1374 struct aarch64_register rn
, struct aarch64_operand operand
)
1376 return emit_data_processing (buf
, SUBS
, rd
, rn
, operand
);
1379 /* Write a CMP instruction into *BUF.
1383 This instruction is an alias of SUBS xzr, rn, rm.
1385 RN and RM are the registers to compare. */
1388 emit_cmp (uint32_t *buf
, struct aarch64_register rn
,
1389 struct aarch64_operand operand
)
1391 return emit_subs (buf
, xzr
, rn
, operand
);
1394 /* Write a AND instruction into *BUF.
1398 RD is the destination register.
1399 RN and RM are the source registers. */
1402 emit_and (uint32_t *buf
, struct aarch64_register rd
,
1403 struct aarch64_register rn
, struct aarch64_register rm
)
1405 return emit_data_processing_reg (buf
, AND
, rd
, rn
, rm
);
1408 /* Write a ORR instruction into *BUF.
1412 RD is the destination register.
1413 RN and RM are the source registers. */
1416 emit_orr (uint32_t *buf
, struct aarch64_register rd
,
1417 struct aarch64_register rn
, struct aarch64_register rm
)
1419 return emit_data_processing_reg (buf
, ORR
, rd
, rn
, rm
);
1422 /* Write a ORN instruction into *BUF.
1426 RD is the destination register.
1427 RN and RM are the source registers. */
1430 emit_orn (uint32_t *buf
, struct aarch64_register rd
,
1431 struct aarch64_register rn
, struct aarch64_register rm
)
1433 return emit_data_processing_reg (buf
, ORN
, rd
, rn
, rm
);
1436 /* Write a EOR instruction into *BUF.
1440 RD is the destination register.
1441 RN and RM are the source registers. */
1444 emit_eor (uint32_t *buf
, struct aarch64_register rd
,
1445 struct aarch64_register rn
, struct aarch64_register rm
)
1447 return emit_data_processing_reg (buf
, EOR
, rd
, rn
, rm
);
1450 /* Write a MVN instruction into *BUF.
1454 This is an alias for ORN rd, xzr, rm.
1456 RD is the destination register.
1457 RM is the source register. */
1460 emit_mvn (uint32_t *buf
, struct aarch64_register rd
,
1461 struct aarch64_register rm
)
1463 return emit_orn (buf
, rd
, xzr
, rm
);
1466 /* Write a LSLV instruction into *BUF.
1470 RD is the destination register.
1471 RN and RM are the source registers. */
1474 emit_lslv (uint32_t *buf
, struct aarch64_register rd
,
1475 struct aarch64_register rn
, struct aarch64_register rm
)
1477 return emit_data_processing_reg (buf
, LSLV
, rd
, rn
, rm
);
1480 /* Write a LSRV instruction into *BUF.
1484 RD is the destination register.
1485 RN and RM are the source registers. */
1488 emit_lsrv (uint32_t *buf
, struct aarch64_register rd
,
1489 struct aarch64_register rn
, struct aarch64_register rm
)
1491 return emit_data_processing_reg (buf
, LSRV
, rd
, rn
, rm
);
1494 /* Write a ASRV instruction into *BUF.
1498 RD is the destination register.
1499 RN and RM are the source registers. */
1502 emit_asrv (uint32_t *buf
, struct aarch64_register rd
,
1503 struct aarch64_register rn
, struct aarch64_register rm
)
1505 return emit_data_processing_reg (buf
, ASRV
, rd
, rn
, rm
);
1508 /* Write a MUL instruction into *BUF.
1512 RD is the destination register.
1513 RN and RM are the source registers. */
1516 emit_mul (uint32_t *buf
, struct aarch64_register rd
,
1517 struct aarch64_register rn
, struct aarch64_register rm
)
1519 return emit_data_processing_reg (buf
, MUL
, rd
, rn
, rm
);
1522 /* Write a MRS instruction into *BUF. The register size is 64-bit.
1526 RT is the destination register.
1527 SYSTEM_REG is special purpose register to read. */
1530 emit_mrs (uint32_t *buf
, struct aarch64_register rt
,
1531 enum aarch64_system_control_registers system_reg
)
1533 return aarch64_emit_insn (buf
, MRS
| ENCODE (system_reg
, 15, 5)
1534 | ENCODE (rt
.num
, 5, 0));
1537 /* Write a MSR instruction into *BUF. The register size is 64-bit.
1541 SYSTEM_REG is special purpose register to write.
1542 RT is the input register. */
1545 emit_msr (uint32_t *buf
, enum aarch64_system_control_registers system_reg
,
1546 struct aarch64_register rt
)
1548 return aarch64_emit_insn (buf
, MSR
| ENCODE (system_reg
, 15, 5)
1549 | ENCODE (rt
.num
, 5, 0));
1552 /* Write a SEVL instruction into *BUF.
1554 This is a hint instruction telling the hardware to trigger an event. */
1557 emit_sevl (uint32_t *buf
)
1559 return aarch64_emit_insn (buf
, SEVL
);
1562 /* Write a WFE instruction into *BUF.
1564 This is a hint instruction telling the hardware to wait for an event. */
1567 emit_wfe (uint32_t *buf
)
1569 return aarch64_emit_insn (buf
, WFE
);
1572 /* Write a SBFM instruction into *BUF.
1574 SBFM rd, rn, #immr, #imms
1576 This instruction moves the bits from #immr to #imms into the
1577 destination, sign extending the result.
1579 RD is the destination register.
1580 RN is the source register.
1581 IMMR is the bit number to start at (least significant bit).
1582 IMMS is the bit number to stop at (most significant bit). */
1585 emit_sbfm (uint32_t *buf
, struct aarch64_register rd
,
1586 struct aarch64_register rn
, uint32_t immr
, uint32_t imms
)
1588 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1589 uint32_t n
= ENCODE (rd
.is64
, 1, 22);
1591 return aarch64_emit_insn (buf
, SBFM
| size
| n
| ENCODE (immr
, 6, 16)
1592 | ENCODE (imms
, 6, 10) | ENCODE (rn
.num
, 5, 5)
1593 | ENCODE (rd
.num
, 5, 0));
1596 /* Write a SBFX instruction into *BUF.
1598 SBFX rd, rn, #lsb, #width
1600 This instruction moves #width bits from #lsb into the destination, sign
1601 extending the result. This is an alias for:
1603 SBFM rd, rn, #lsb, #(lsb + width - 1)
1605 RD is the destination register.
1606 RN is the source register.
1607 LSB is the bit number to start at (least significant bit).
1608 WIDTH is the number of bits to move. */
1611 emit_sbfx (uint32_t *buf
, struct aarch64_register rd
,
1612 struct aarch64_register rn
, uint32_t lsb
, uint32_t width
)
1614 return emit_sbfm (buf
, rd
, rn
, lsb
, lsb
+ width
- 1);
1617 /* Write a UBFM instruction into *BUF.
1619 UBFM rd, rn, #immr, #imms
1621 This instruction moves the bits from #immr to #imms into the
1622 destination, extending the result with zeros.
1624 RD is the destination register.
1625 RN is the source register.
1626 IMMR is the bit number to start at (least significant bit).
1627 IMMS is the bit number to stop at (most significant bit). */
1630 emit_ubfm (uint32_t *buf
, struct aarch64_register rd
,
1631 struct aarch64_register rn
, uint32_t immr
, uint32_t imms
)
1633 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1634 uint32_t n
= ENCODE (rd
.is64
, 1, 22);
1636 return aarch64_emit_insn (buf
, UBFM
| size
| n
| ENCODE (immr
, 6, 16)
1637 | ENCODE (imms
, 6, 10) | ENCODE (rn
.num
, 5, 5)
1638 | ENCODE (rd
.num
, 5, 0));
1641 /* Write a UBFX instruction into *BUF.
1643 UBFX rd, rn, #lsb, #width
1645 This instruction moves #width bits from #lsb into the destination,
1646 extending the result with zeros. This is an alias for:
1648 UBFM rd, rn, #lsb, #(lsb + width - 1)
1650 RD is the destination register.
1651 RN is the source register.
1652 LSB is the bit number to start at (least significant bit).
1653 WIDTH is the number of bits to move. */
1656 emit_ubfx (uint32_t *buf
, struct aarch64_register rd
,
1657 struct aarch64_register rn
, uint32_t lsb
, uint32_t width
)
1659 return emit_ubfm (buf
, rd
, rn
, lsb
, lsb
+ width
- 1);
1662 /* Write a CSINC instruction into *BUF.
1664 CSINC rd, rn, rm, cond
1666 This instruction conditionally increments rn or rm and places the result
1667 in rd. rn is chosen is the condition is true.
1669 RD is the destination register.
1670 RN and RM are the source registers.
1671 COND is the encoded condition. */
1674 emit_csinc (uint32_t *buf
, struct aarch64_register rd
,
1675 struct aarch64_register rn
, struct aarch64_register rm
,
1678 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1680 return aarch64_emit_insn (buf
, CSINC
| size
| ENCODE (rm
.num
, 5, 16)
1681 | ENCODE (cond
, 4, 12) | ENCODE (rn
.num
, 5, 5)
1682 | ENCODE (rd
.num
, 5, 0));
1685 /* Write a CSET instruction into *BUF.
1689 This instruction conditionally write 1 or 0 in the destination register.
1690 1 is written if the condition is true. This is an alias for:
1692 CSINC rd, xzr, xzr, !cond
1694 Note that the condition needs to be inverted.
1696 RD is the destination register.
1697 RN and RM are the source registers.
1698 COND is the encoded condition. */
1701 emit_cset (uint32_t *buf
, struct aarch64_register rd
, unsigned cond
)
1703 /* The least significant bit of the condition needs toggling in order to
1705 return emit_csinc (buf
, rd
, xzr
, xzr
, cond
^ 0x1);
1708 /* Write LEN instructions from BUF into the inferior memory at *TO.
1710 Note instructions are always little endian on AArch64, unlike data. */
1713 append_insns (CORE_ADDR
*to
, size_t len
, const uint32_t *buf
)
1715 size_t byte_len
= len
* sizeof (uint32_t);
1716 #if (__BYTE_ORDER == __BIG_ENDIAN)
1717 uint32_t *le_buf
= (uint32_t *) xmalloc (byte_len
);
1720 for (i
= 0; i
< len
; i
++)
1721 le_buf
[i
] = htole32 (buf
[i
]);
1723 target_write_memory (*to
, (const unsigned char *) le_buf
, byte_len
);
1727 target_write_memory (*to
, (const unsigned char *) buf
, byte_len
);
1733 /* Sub-class of struct aarch64_insn_data, store information of
1734 instruction relocation for fast tracepoint. Visitor can
1735 relocate an instruction from BASE.INSN_ADDR to NEW_ADDR and save
1736 the relocated instructions in buffer pointed by INSN_PTR. */
1738 struct aarch64_insn_relocation_data
1740 struct aarch64_insn_data base
;
1742 /* The new address the instruction is relocated to. */
1744 /* Pointer to the buffer of relocated instruction(s). */
1748 /* Implementation of aarch64_insn_visitor method "b". */
1751 aarch64_ftrace_insn_reloc_b (const int is_bl
, const int32_t offset
,
1752 struct aarch64_insn_data
*data
)
1754 struct aarch64_insn_relocation_data
*insn_reloc
1755 = (struct aarch64_insn_relocation_data
*) data
;
1757 = insn_reloc
->base
.insn_addr
- insn_reloc
->new_addr
+ offset
;
1759 if (can_encode_int32 (new_offset
, 28))
1760 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, is_bl
, new_offset
);
1763 /* Implementation of aarch64_insn_visitor method "b_cond". */
1766 aarch64_ftrace_insn_reloc_b_cond (const unsigned cond
, const int32_t offset
,
1767 struct aarch64_insn_data
*data
)
1769 struct aarch64_insn_relocation_data
*insn_reloc
1770 = (struct aarch64_insn_relocation_data
*) data
;
1772 = insn_reloc
->base
.insn_addr
- insn_reloc
->new_addr
+ offset
;
1774 if (can_encode_int32 (new_offset
, 21))
1776 insn_reloc
->insn_ptr
+= emit_bcond (insn_reloc
->insn_ptr
, cond
,
1779 else if (can_encode_int32 (new_offset
, 28))
1781 /* The offset is out of range for a conditional branch
1782 instruction but not for a unconditional branch. We can use
1783 the following instructions instead:
1785 B.COND TAKEN ; If cond is true, then jump to TAKEN.
1786 B NOT_TAKEN ; Else jump over TAKEN and continue.
1793 insn_reloc
->insn_ptr
+= emit_bcond (insn_reloc
->insn_ptr
, cond
, 8);
1794 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, 0, 8);
1795 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, 0, new_offset
- 8);
1799 /* Implementation of aarch64_insn_visitor method "cb". */
1802 aarch64_ftrace_insn_reloc_cb (const int32_t offset
, const int is_cbnz
,
1803 const unsigned rn
, int is64
,
1804 struct aarch64_insn_data
*data
)
1806 struct aarch64_insn_relocation_data
*insn_reloc
1807 = (struct aarch64_insn_relocation_data
*) data
;
1809 = insn_reloc
->base
.insn_addr
- insn_reloc
->new_addr
+ offset
;
1811 if (can_encode_int32 (new_offset
, 21))
1813 insn_reloc
->insn_ptr
+= emit_cb (insn_reloc
->insn_ptr
, is_cbnz
,
1814 aarch64_register (rn
, is64
), new_offset
);
1816 else if (can_encode_int32 (new_offset
, 28))
1818 /* The offset is out of range for a compare and branch
1819 instruction but not for a unconditional branch. We can use
1820 the following instructions instead:
1822 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
1823 B NOT_TAKEN ; Else jump over TAKEN and continue.
1829 insn_reloc
->insn_ptr
+= emit_cb (insn_reloc
->insn_ptr
, is_cbnz
,
1830 aarch64_register (rn
, is64
), 8);
1831 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, 0, 8);
1832 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, 0, new_offset
- 8);
1836 /* Implementation of aarch64_insn_visitor method "tb". */
1839 aarch64_ftrace_insn_reloc_tb (const int32_t offset
, int is_tbnz
,
1840 const unsigned rt
, unsigned bit
,
1841 struct aarch64_insn_data
*data
)
1843 struct aarch64_insn_relocation_data
*insn_reloc
1844 = (struct aarch64_insn_relocation_data
*) data
;
1846 = insn_reloc
->base
.insn_addr
- insn_reloc
->new_addr
+ offset
;
1848 if (can_encode_int32 (new_offset
, 16))
1850 insn_reloc
->insn_ptr
+= emit_tb (insn_reloc
->insn_ptr
, is_tbnz
, bit
,
1851 aarch64_register (rt
, 1), new_offset
);
1853 else if (can_encode_int32 (new_offset
, 28))
1855 /* The offset is out of range for a test bit and branch
1856 instruction but not for a unconditional branch. We can use
1857 the following instructions instead:
1859 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
1860 B NOT_TAKEN ; Else jump over TAKEN and continue.
1866 insn_reloc
->insn_ptr
+= emit_tb (insn_reloc
->insn_ptr
, is_tbnz
, bit
,
1867 aarch64_register (rt
, 1), 8);
1868 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, 0, 8);
1869 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, 0,
1874 /* Implementation of aarch64_insn_visitor method "adr". */
1877 aarch64_ftrace_insn_reloc_adr (const int32_t offset
, const unsigned rd
,
1879 struct aarch64_insn_data
*data
)
1881 struct aarch64_insn_relocation_data
*insn_reloc
1882 = (struct aarch64_insn_relocation_data
*) data
;
1883 /* We know exactly the address the ADR{P,} instruction will compute.
1884 We can just write it to the destination register. */
1885 CORE_ADDR address
= data
->insn_addr
+ offset
;
1889 /* Clear the lower 12 bits of the offset to get the 4K page. */
1890 insn_reloc
->insn_ptr
+= emit_mov_addr (insn_reloc
->insn_ptr
,
1891 aarch64_register (rd
, 1),
1895 insn_reloc
->insn_ptr
+= emit_mov_addr (insn_reloc
->insn_ptr
,
1896 aarch64_register (rd
, 1), address
);
1899 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
1902 aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset
, const int is_sw
,
1903 const unsigned rt
, const int is64
,
1904 struct aarch64_insn_data
*data
)
1906 struct aarch64_insn_relocation_data
*insn_reloc
1907 = (struct aarch64_insn_relocation_data
*) data
;
1908 CORE_ADDR address
= data
->insn_addr
+ offset
;
1910 insn_reloc
->insn_ptr
+= emit_mov_addr (insn_reloc
->insn_ptr
,
1911 aarch64_register (rt
, 1), address
);
1913 /* We know exactly what address to load from, and what register we
1916 MOV xd, #(oldloc + offset)
1917 MOVK xd, #((oldloc + offset) >> 16), lsl #16
1920 LDR xd, [xd] ; or LDRSW xd, [xd]
1925 insn_reloc
->insn_ptr
+= emit_ldrsw (insn_reloc
->insn_ptr
,
1926 aarch64_register (rt
, 1),
1927 aarch64_register (rt
, 1),
1928 offset_memory_operand (0));
1930 insn_reloc
->insn_ptr
+= emit_ldr (insn_reloc
->insn_ptr
,
1931 aarch64_register (rt
, is64
),
1932 aarch64_register (rt
, 1),
1933 offset_memory_operand (0));
1936 /* Implementation of aarch64_insn_visitor method "others". */
1939 aarch64_ftrace_insn_reloc_others (const uint32_t insn
,
1940 struct aarch64_insn_data
*data
)
1942 struct aarch64_insn_relocation_data
*insn_reloc
1943 = (struct aarch64_insn_relocation_data
*) data
;
1945 /* The instruction is not PC relative. Just re-emit it at the new
1947 insn_reloc
->insn_ptr
+= aarch64_emit_insn (insn_reloc
->insn_ptr
, insn
);
1950 static const struct aarch64_insn_visitor visitor
=
1952 aarch64_ftrace_insn_reloc_b
,
1953 aarch64_ftrace_insn_reloc_b_cond
,
1954 aarch64_ftrace_insn_reloc_cb
,
1955 aarch64_ftrace_insn_reloc_tb
,
1956 aarch64_ftrace_insn_reloc_adr
,
1957 aarch64_ftrace_insn_reloc_ldr_literal
,
1958 aarch64_ftrace_insn_reloc_others
,
1961 /* Implementation of linux_target_ops method
1962 "install_fast_tracepoint_jump_pad". */
1965 aarch64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
,
1967 CORE_ADDR collector
,
1970 CORE_ADDR
*jump_entry
,
1971 CORE_ADDR
*trampoline
,
1972 ULONGEST
*trampoline_size
,
1973 unsigned char *jjump_pad_insn
,
1974 ULONGEST
*jjump_pad_insn_size
,
1975 CORE_ADDR
*adjusted_insn_addr
,
1976 CORE_ADDR
*adjusted_insn_addr_end
,
1984 CORE_ADDR buildaddr
= *jump_entry
;
1985 struct aarch64_insn_relocation_data insn_data
;
1987 /* We need to save the current state on the stack both to restore it
1988 later and to collect register values when the tracepoint is hit.
1990 The saved registers are pushed in a layout that needs to be in sync
1991 with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c). Later on
1992 the supply_fast_tracepoint_registers function will fill in the
1993 register cache from a pointer to saved registers on the stack we build
1996 For simplicity, we set the size of each cell on the stack to 16 bytes.
1997 This way one cell can hold any register type, from system registers
1998 to the 128 bit SIMD&FP registers. Furthermore, the stack pointer
1999 has to be 16 bytes aligned anyway.
2001 Note that the CPSR register does not exist on AArch64. Instead we
2002 can access system bits describing the process state with the
2003 MRS/MSR instructions, namely the condition flags. We save them as
2004 if they are part of a CPSR register because that's how GDB
2005 interprets these system bits. At the moment, only the condition
2006 flags are saved in CPSR (NZCV).
2008 Stack layout, each cell is 16 bytes (descending):
2010 High *-------- SIMD&FP registers from 31 down to 0. --------*
2016 *---- General purpose registers from 30 down to 0. ----*
2022 *------------- Special purpose registers. -------------*
2025 | CPSR (NZCV) | 5 cells
2028 *------------- collecting_t object --------------------*
2029 | TPIDR_EL0 | struct tracepoint * |
2030 Low *------------------------------------------------------*
2032 After this stack is set up, we issue a call to the collector, passing
2033 it the saved registers at (SP + 16). */
2035 /* Push SIMD&FP registers on the stack:
2037 SUB sp, sp, #(32 * 16)
2039 STP q30, q31, [sp, #(30 * 16)]
2044 p
+= emit_sub (p
, sp
, sp
, immediate_operand (32 * 16));
2045 for (i
= 30; i
>= 0; i
-= 2)
2046 p
+= emit_stp_q_offset (p
, i
, i
+ 1, sp
, i
* 16);
2048 /* Push general purpose registers on the stack. Note that we do not need
2049 to push x31 as it represents the xzr register and not the stack
2050 pointer in a STR instruction.
2052 SUB sp, sp, #(31 * 16)
2054 STR x30, [sp, #(30 * 16)]
2059 p
+= emit_sub (p
, sp
, sp
, immediate_operand (31 * 16));
2060 for (i
= 30; i
>= 0; i
-= 1)
2061 p
+= emit_str (p
, aarch64_register (i
, 1), sp
,
2062 offset_memory_operand (i
* 16));
2064 /* Make space for 5 more cells.
2066 SUB sp, sp, #(5 * 16)
2069 p
+= emit_sub (p
, sp
, sp
, immediate_operand (5 * 16));
2074 ADD x4, sp, #((32 + 31 + 5) * 16)
2075 STR x4, [sp, #(4 * 16)]
2078 p
+= emit_add (p
, x4
, sp
, immediate_operand ((32 + 31 + 5) * 16));
2079 p
+= emit_str (p
, x4
, sp
, offset_memory_operand (4 * 16));
2081 /* Save PC (tracepoint address):
2086 STR x3, [sp, #(3 * 16)]
2090 p
+= emit_mov_addr (p
, x3
, tpaddr
);
2091 p
+= emit_str (p
, x3
, sp
, offset_memory_operand (3 * 16));
2093 /* Save CPSR (NZCV), FPSR and FPCR:
2099 STR x2, [sp, #(2 * 16)]
2100 STR x1, [sp, #(1 * 16)]
2101 STR x0, [sp, #(0 * 16)]
2104 p
+= emit_mrs (p
, x2
, NZCV
);
2105 p
+= emit_mrs (p
, x1
, FPSR
);
2106 p
+= emit_mrs (p
, x0
, FPCR
);
2107 p
+= emit_str (p
, x2
, sp
, offset_memory_operand (2 * 16));
2108 p
+= emit_str (p
, x1
, sp
, offset_memory_operand (1 * 16));
2109 p
+= emit_str (p
, x0
, sp
, offset_memory_operand (0 * 16));
2111 /* Push the collecting_t object. It consist of the address of the
2112 tracepoint and an ID for the current thread. We get the latter by
2113 reading the tpidr_el0 system register. It corresponds to the
2114 NT_ARM_TLS register accessible with ptrace.
2121 STP x0, x1, [sp, #-16]!
2125 p
+= emit_mov_addr (p
, x0
, tpoint
);
2126 p
+= emit_mrs (p
, x1
, TPIDR_EL0
);
2127 p
+= emit_stp (p
, x0
, x1
, sp
, preindex_memory_operand (-16));
2131 The shared memory for the lock is at lockaddr. It will hold zero
2132 if no-one is holding the lock, otherwise it contains the address of
2133 the collecting_t object on the stack of the thread which acquired it.
2135 At this stage, the stack pointer points to this thread's collecting_t
2138 We use the following registers:
2139 - x0: Address of the lock.
2140 - x1: Pointer to collecting_t object.
2141 - x2: Scratch register.
2147 ; Trigger an event local to this core. So the following WFE
2148 ; instruction is ignored.
2151 ; Wait for an event. The event is triggered by either the SEVL
2152 ; or STLR instructions (store release).
2155 ; Atomically read at lockaddr. This marks the memory location as
2156 ; exclusive. This instruction also has memory constraints which
2157 ; make sure all previous data reads and writes are done before
2161 ; Try again if another thread holds the lock.
2164 ; We can lock it! Write the address of the collecting_t object.
2165 ; This instruction will fail if the memory location is not marked
2166 ; as exclusive anymore. If it succeeds, it will remove the
2167 ; exclusive mark on the memory location. This way, if another
2168 ; thread executes this instruction before us, we will fail and try
2175 p
+= emit_mov_addr (p
, x0
, lockaddr
);
2176 p
+= emit_mov (p
, x1
, register_operand (sp
));
2180 p
+= emit_ldaxr (p
, x2
, x0
);
2181 p
+= emit_cb (p
, 1, w2
, -2 * 4);
2182 p
+= emit_stxr (p
, w2
, x1
, x0
);
2183 p
+= emit_cb (p
, 1, x2
, -4 * 4);

  /* Call collector (struct tracepoint *, unsigned char *):

       MOV x0, #(tpoint)
       ...

       ; Saved registers start after the collecting_t object.
       ADD x1, sp, #16

       ; We use an intra-procedure-call scratch register.
       MOV ip0, #(collector)
       ...

       ; And call back to C!
       BLR ip0

     */

  p += emit_mov_addr (p, x0, tpoint);
  p += emit_add (p, x1, sp, immediate_operand (16));

  p += emit_mov_addr (p, ip0, collector);
  p += emit_blr (p, ip0);

  /* Release the lock.

       MOV x0, #(lockaddr)
       ...

       ; This instruction is a normal store with memory ordering
       ; constraints.  Thanks to this we do not have to put a data
       ; barrier instruction to make sure all data reads and writes are
       ; done before this instruction is executed.  Furthermore, this
       ; instruction will trigger an event, letting other threads know
       ; they can grab the lock.
       STLR xzr, [x0]

     */

  p += emit_mov_addr (p, x0, lockaddr);
  p += emit_stlr (p, xzr, x0);

  /* Free collecting_t object:

       ADD sp, sp, #16

     */
  p += emit_add (p, sp, sp, immediate_operand (16));

  /* Restore CPSR (NZCV), FPSR and FPCR.  And free all special purpose
     registers from the stack.

       LDR x2, [sp, #(2 * 16)]
       LDR x1, [sp, #(1 * 16)]
       LDR x0, [sp, #(0 * 16)]

       MSR NZCV, x2
       MSR FPSR, x1
       MSR FPCR, x0

       ADD sp, sp, #(5 * 16)

     */
  p += emit_ldr (p, x2, sp, offset_memory_operand (2 * 16));
  p += emit_ldr (p, x1, sp, offset_memory_operand (1 * 16));
  p += emit_ldr (p, x0, sp, offset_memory_operand (0 * 16));
  p += emit_msr (p, NZCV, x2);
  p += emit_msr (p, FPSR, x1);
  p += emit_msr (p, FPCR, x0);
  p += emit_add (p, sp, sp, immediate_operand (5 * 16));

  /* Pop general purpose registers:

       LDR x0, [sp]
       ...
       LDR x30, [sp, #(30 * 16)]

       ADD sp, sp, #(31 * 16)

     */
  for (i = 0; i <= 30; i += 1)
    p += emit_ldr (p, aarch64_register (i, 1), sp,
                   offset_memory_operand (i * 16));
  p += emit_add (p, sp, sp, immediate_operand (31 * 16));

  /* Pop SIMD&FP registers:

       LDP q0, q1, [sp]
       ...
       LDP q30, q31, [sp, #(30 * 16)]

       ADD sp, sp, #(32 * 16)

     */
  for (i = 0; i <= 30; i += 2)
    p += emit_ldp_q_offset (p, i, i + 1, sp, i * 16);
  p += emit_add (p, sp, sp, immediate_operand (32 * 16));

  /* Write the code into the inferior memory.  */
  append_insns (&buildaddr, p - buf, buf);

  /* Now emit the relocated instruction.  */
  *adjusted_insn_addr = buildaddr;
  target_read_uint32 (tpaddr, &insn);

  insn_data.base.insn_addr = tpaddr;
  insn_data.new_addr = buildaddr;
  insn_data.insn_ptr = buf;

  aarch64_relocate_instruction (insn, &visitor,
                                (struct aarch64_insn_data *) &insn_data);

  /* We may not have been able to relocate the instruction.  */
  if (insn_data.insn_ptr == buf)
    {
      sprintf (err,
               "E.Could not relocate instruction from %s to %s.",
               core_addr_to_string_nz (tpaddr),
               core_addr_to_string_nz (buildaddr));
      return 1;
    }
  else
    append_insns (&buildaddr, insn_data.insn_ptr - buf, buf);
  *adjusted_insn_addr_end = buildaddr;

  /* Go back to the start of the buffer.  */
  p = buf;

  /* Emit a branch back from the jump pad.  */
  offset = (tpaddr + orig_size - buildaddr);
  if (!can_encode_int32 (offset, 28))
    {
      sprintf (err,
               "E.Jump back from jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
               offset);
      return 1;
    }

  p += emit_b (p, 0, offset);
  append_insns (&buildaddr, p - buf, buf);

  /* Give the caller a branch instruction into the jump pad.  */
  offset = (*jump_entry - tpaddr);
  if (!can_encode_int32 (offset, 28))
    {
      sprintf (err,
               "E.Jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
               offset);
      return 1;
    }

  emit_b ((uint32_t *) jjump_pad_insn, 0, offset);
  *jjump_pad_insn_size = 4;

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
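
/* Both can_encode_int32 (offset, 28) checks above guard the range of an
   unconditional B instruction: its immediate field is 26 bits, scaled by
   4 bytes, so the reachable byte offset is a signed 28-bit value (about
   +/-128 MiB).  As an illustration only (the helper name below is made
   up for this sketch), the check amounts to:  */
#if 0
static int
fits_in_signed_28_bits (int64_t offset)
{
  return offset >= -(INT64_C (1) << 27) && offset < (INT64_C (1) << 27);
}
#endif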

/* Helper function writing LEN instructions from START into
   current_insn_ptr.  */

static void
emit_ops_insns (const uint32_t *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d instructions at %s\n",
                  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}

/* Pop a register from the stack.  */

static int
emit_pop (uint32_t *buf, struct aarch64_register rt)
{
  return emit_ldr (buf, rt, sp, postindex_memory_operand (1 * 16));
}

/* Push a register on the stack.  */

static int
emit_push (uint32_t *buf, struct aarch64_register rt)
{
  return emit_str (buf, rt, sp, preindex_memory_operand (-1 * 16));
}

/* Implementation of emit_ops method "emit_prologue".  */

static void
aarch64_emit_prologue (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* This function emits a prologue for the following function prototype:

     enum eval_result_type f (unsigned char *regs,
                              ULONGEST *value);

     The first argument is a buffer of raw registers.  The second
     argument is the result of evaluating the expression, which will be
     set to whatever is on top of the stack at the end.

     The stack set up by the prologue is as such:

     High *------------------------------------------------------*
          | LR                                                   |
          | FP                                                   | <- FP
          | x1  (ULONGEST *value)                                |
          | x0  (unsigned char *regs)                            |
     Low  *------------------------------------------------------*

     As we are implementing a stack machine, each opcode can expand the
     stack so we never know how far we are from the data saved by this
     prologue.  In order to be able to refer to value and regs later, we
     save the current stack pointer in the frame pointer.  This way, it
     is not clobbered when calling C functions.

     Finally, throughout every operation, we are using register x0 as the
     top of the stack, and x1 as a scratch register.  */

  p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-2 * 16));
  p += emit_str (p, lr, sp, offset_memory_operand (3 * 8));
  p += emit_str (p, fp, sp, offset_memory_operand (2 * 8));

  p += emit_add (p, fp, sp, immediate_operand (2 * 8));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_epilogue".  */

static void
aarch64_emit_epilogue (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Store the result of the expression (x0) in *value.  */
  p += emit_sub (p, x1, fp, immediate_operand (1 * 8));
  p += emit_ldr (p, x1, x1, offset_memory_operand (0));
  p += emit_str (p, x0, x1, offset_memory_operand (0));

  /* Restore the previous state.  */
  p += emit_add (p, sp, fp, immediate_operand (2 * 8));
  p += emit_ldp (p, fp, lr, fp, offset_memory_operand (0));

  /* Return expr_eval_no_error.  */
  p += emit_mov (p, x0, immediate_operand (expr_eval_no_error));
  p += emit_ret (p, lr);

  emit_ops_insns (buf, p - buf);
}
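
/* aarch64_emit_prologue and aarch64_emit_epilogue bracket the code produced
   by the other emit_ops callbacks, so the whole compiled expression can be
   called as the function described in the prologue comment.  As an
   illustration only (the function and variable names below are made up),
   the generated code behaves like:  */
#if 0
static enum eval_result_type
compiled_expression_sketch (unsigned char *regs, ULONGEST *value)
{
  ULONGEST top = 0;             /* Lives in x0 while the expression runs.  */

  /* ... bodies emitted by the other emit_ops callbacks go here ...  */

  *value = top;                 /* Stored by aarch64_emit_epilogue.  */
  return expr_eval_no_error;    /* Returned by aarch64_emit_epilogue.  */
}
#endif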

/* Implementation of emit_ops method "emit_add".  */

static void
aarch64_emit_add (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_add (p, x0, x1, register_operand (x0));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_sub".  */

static void
aarch64_emit_sub (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_sub (p, x0, x1, register_operand (x0));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_mul".  */

static void
aarch64_emit_mul (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_mul (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_lsh".  */

static void
aarch64_emit_lsh (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_lslv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_rsh_signed".  */

static void
aarch64_emit_rsh_signed (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_asrv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_rsh_unsigned".  */

static void
aarch64_emit_rsh_unsigned (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_lsrv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ext".  */

static void
aarch64_emit_ext (int arg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_sbfx (p, x0, x0, 0, arg);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_log_not".  */

static void
aarch64_emit_log_not (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* If the top of the stack is 0, replace it with 1.  Else replace it
     with 0.  */
  p += emit_cmp (p, x0, immediate_operand (0));
  p += emit_cset (p, x0, EQ);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_and".  */

static void
aarch64_emit_bit_and (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_and (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_or".  */

static void
aarch64_emit_bit_or (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_orr (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_xor".  */

static void
aarch64_emit_bit_xor (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_eor (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_not".  */

static void
aarch64_emit_bit_not (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mvn (p, x0, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_equal".  */

static void
aarch64_emit_equal (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x0, register_operand (x1));
  p += emit_cset (p, x0, EQ);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_less_signed".  */

static void
aarch64_emit_less_signed (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  p += emit_cset (p, x0, LT);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_less_unsigned".  */

static void
aarch64_emit_less_unsigned (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  p += emit_cset (p, x0, LO);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ref".  */

static void
aarch64_emit_ref (int size)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  switch (size)
    {
    case 1:
      p += emit_ldrb (p, w0, x0, offset_memory_operand (0));
      break;
    case 2:
      p += emit_ldrh (p, w0, x0, offset_memory_operand (0));
      break;
    case 4:
      p += emit_ldr (p, w0, x0, offset_memory_operand (0));
      break;
    case 8:
      p += emit_ldr (p, x0, x0, offset_memory_operand (0));
      break;
    default:
      /* Unknown size, bail on compilation.  */
      break;
    }

  emit_ops_insns (buf, p - buf);
}
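
/* aarch64_emit_ref replaces the address held in x0 (the top of the stack)
   with the SIZE-byte value it points at, zero-extending narrower loads.
   As an illustration only (the helper name is made up for this sketch),
   its effect is roughly:  */
#if 0
static ULONGEST
emit_ref_sketch (ULONGEST top, int size)
{
  switch (size)
    {
    case 1: return *(uint8_t *) (uintptr_t) top;
    case 2: return *(uint16_t *) (uintptr_t) top;
    case 4: return *(uint32_t *) (uintptr_t) top;
    case 8: return *(uint64_t *) (uintptr_t) top;
    default: return top;        /* Unknown size: nothing is emitted.  */
    }
}
#endif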

/* Implementation of emit_ops method "emit_if_goto".  */

static void
aarch64_emit_if_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* The Z flag is set or cleared here.  */
  p += emit_cmp (p, x0, immediate_operand (0));
  /* This instruction must not change the Z flag.  */
  p += emit_pop (p, x0);
  /* Branch over the next instruction if x0 == 0.  */
  p += emit_bcond (p, EQ, 8);

  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_goto".  */

static void
aarch64_emit_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = 0;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "write_goto_address".  */

static void
aarch64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  uint32_t insn;

  emit_b (&insn, 0, to - from);
  append_insns (&from, 1, &insn);
}
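
/* The three hooks above cooperate: aarch64_emit_if_goto and
   aarch64_emit_goto leave a NOP placeholder and report its byte offset
   within the emitted block; once the destination address is known, the
   bytecode compiler calls aarch64_write_goto_address to overwrite that
   NOP with a B instruction.  As an illustration only (the variable names
   are made up for this sketch), the expected use is roughly:  */
#if 0
  {
    int offset = 0, size = 0;
    CORE_ADDR start = current_insn_ptr;

    aarch64_emit_goto (&offset, &size); /* Emits the NOP placeholder.  */
    /* ... more code is emitted and `destination' becomes known ...  */
    aarch64_write_goto_address (start + offset, destination, size);
  }
#endif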

/* Implementation of emit_ops method "emit_const".  */

static void
aarch64_emit_const (LONGEST num)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov_addr (p, x0, num);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_call".  */

static void
aarch64_emit_call (CORE_ADDR fn)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov_addr (p, ip0, fn);
  p += emit_blr (p, ip0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_reg".  */

static void
aarch64_emit_reg (int reg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Set x0 to unsigned char *regs.  */
  p += emit_sub (p, x0, fp, immediate_operand (2 * 8));
  p += emit_ldr (p, x0, x0, offset_memory_operand (0));
  p += emit_mov (p, x1, immediate_operand (reg));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (get_raw_reg_func_addr ());
}

/* Implementation of emit_ops method "emit_pop".  */

static void
aarch64_emit_pop (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_stack_flush".  */

static void
aarch64_emit_stack_flush (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_push (p, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_zero_ext".  */

static void
aarch64_emit_zero_ext (int arg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_ubfx (p, x0, x0, 0, arg);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_swap".  */

static void
aarch64_emit_swap (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_ldr (p, x1, sp, offset_memory_operand (0 * 16));
  p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
  p += emit_mov (p, x0, register_operand (x1));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_stack_adjust".  */

static void
aarch64_emit_stack_adjust (int n)
{
  /* This is not needed with our design.  */
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_add (p, sp, sp, immediate_operand (n * 16));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_int_call_1".  */

static void
aarch64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov (p, x0, immediate_operand (arg1));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (fn);
}

/* Implementation of emit_ops method "emit_void_call_2".  */

static void
aarch64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Push x0 on the stack.  */
  aarch64_emit_stack_flush ();

  /* Setup arguments for the function call:

     x0: arg1
     x1: top of the stack

       MOV x1, x0
       MOV x0, #arg1  */

  p += emit_mov (p, x1, register_operand (x0));
  p += emit_mov (p, x0, immediate_operand (arg1));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (fn);

  /* Restore x0.  */
  aarch64_emit_pop ();
}

/* Implementation of emit_ops method "emit_eq_goto".  */

static void
aarch64_emit_eq_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 != x1.  */
  p += emit_bcond (p, NE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ne_goto".  */

static void
aarch64_emit_ne_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 == x1.  */
  p += emit_bcond (p, EQ, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_lt_goto".  */

static void
aarch64_emit_lt_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 >= x1.  */
  p += emit_bcond (p, GE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_le_goto".  */

static void
aarch64_emit_le_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 > x1.  */
  p += emit_bcond (p, GT, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_gt_goto".  */

static void
aarch64_emit_gt_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 <= x1.  */
  p += emit_bcond (p, LE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ge_got".  */

static void
aarch64_emit_ge_got (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 < x1.  */
  p += emit_bcond (p, LT, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

static struct emit_ops aarch64_emit_ops_impl =
{
  aarch64_emit_prologue,
  aarch64_emit_epilogue,
  aarch64_emit_add,
  aarch64_emit_sub,
  aarch64_emit_mul,
  aarch64_emit_lsh,
  aarch64_emit_rsh_signed,
  aarch64_emit_rsh_unsigned,
  aarch64_emit_ext,
  aarch64_emit_log_not,
  aarch64_emit_bit_and,
  aarch64_emit_bit_or,
  aarch64_emit_bit_xor,
  aarch64_emit_bit_not,
  aarch64_emit_equal,
  aarch64_emit_less_signed,
  aarch64_emit_less_unsigned,
  aarch64_emit_ref,
  aarch64_emit_if_goto,
  aarch64_emit_goto,
  aarch64_write_goto_address,
  aarch64_emit_const,
  aarch64_emit_call,
  aarch64_emit_reg,
  aarch64_emit_pop,
  aarch64_emit_stack_flush,
  aarch64_emit_zero_ext,
  aarch64_emit_swap,
  aarch64_emit_stack_adjust,
  aarch64_emit_int_call_1,
  aarch64_emit_void_call_2,
  aarch64_emit_eq_goto,
  aarch64_emit_ne_goto,
  aarch64_emit_lt_goto,
  aarch64_emit_le_goto,
  aarch64_emit_gt_goto,
  aarch64_emit_ge_got,
};

/* Implementation of linux_target_ops method "emit_ops".  */

static struct emit_ops *
aarch64_emit_ops (void)
{
  return &aarch64_emit_ops_impl;
}

/* Implementation of linux_target_ops method
   "get_min_fast_tracepoint_insn_len".  */

static int
aarch64_get_min_fast_tracepoint_insn_len (void)
{
  return 4;
}

/* Implementation of linux_target_ops method "supports_range_stepping".  */

static int
aarch64_supports_range_stepping (void)
{
  return 1;
}

/* Implementation of target ops method "sw_breakpoint_from_kind".  */

const gdb_byte *
aarch64_target::sw_breakpoint_from_kind (int kind, int *size)
{
  if (is_64bit_tdesc ())
    {
      *size = aarch64_breakpoint_len;
      return aarch64_breakpoint;
    }
  else
    return arm_sw_breakpoint_from_kind (kind, size);
}

/* Implementation of target ops method "breakpoint_kind_from_pc".  */

int
aarch64_target::breakpoint_kind_from_pc (CORE_ADDR *pcptr)
{
  if (is_64bit_tdesc ())
    return aarch64_breakpoint_len;
  else
    return arm_breakpoint_kind_from_pc (pcptr);
}

/* Implementation of the target ops method
   "breakpoint_kind_from_current_state".  */

int
aarch64_target::breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
{
  if (is_64bit_tdesc ())
    return aarch64_breakpoint_len;
  else
    return arm_breakpoint_kind_from_current_state (pcptr);
}

/* Support for hardware single step.  */

static int
aarch64_supports_hardware_single_step (void)
{
  return 1;
}

struct linux_target_ops the_low_target =
{
  aarch64_supports_tracepoints,
  aarch64_get_thread_area,
  aarch64_install_fast_tracepoint_jump_pad,
  aarch64_emit_ops,
  aarch64_get_min_fast_tracepoint_insn_len,
  aarch64_supports_range_stepping,
  aarch64_supports_hardware_single_step,
  aarch64_get_syscall_trapinfo,
};

/* The linux target ops object.  */

linux_process_target *the_linux_target = &the_aarch64_target;

void
initialize_low_arch (void)
{
  initialize_low_arch_aarch32 ();

  initialize_regsets_info (&aarch64_regsets_info);
  initialize_regsets_info (&aarch64_sve_regsets_info);
}