1 /* GNU/Linux/AArch64 specific low level interface, for the remote server for
4 Copyright (C) 2009-2020 Free Software Foundation, Inc.
5 Contributed by ARM Ltd.
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
23 #include "linux-low.h"
24 #include "nat/aarch64-linux.h"
25 #include "nat/aarch64-linux-hw-point.h"
26 #include "arch/aarch64-insn.h"
27 #include "linux-aarch32-low.h"
28 #include "elf/common.h"
30 #include "tracepoint.h"
35 #include "nat/gdb_ptrace.h"
36 #include <asm/ptrace.h>
41 #include "gdb_proc_service.h"
42 #include "arch/aarch64.h"
43 #include "linux-aarch32-tdesc.h"
44 #include "linux-aarch64-tdesc.h"
45 #include "nat/aarch64-sve-linux-ptrace.h"
52 /* Linux target op definitions for the AArch64 architecture. */
54 class aarch64_target
: public linux_process_target
58 const regs_info
*get_regs_info () override
;
60 int breakpoint_kind_from_pc (CORE_ADDR
*pcptr
) override
;
62 int breakpoint_kind_from_current_state (CORE_ADDR
*pcptr
) override
;
66 void low_arch_setup () override
;
68 bool low_cannot_fetch_register (int regno
) override
;
70 bool low_cannot_store_register (int regno
) override
;
72 bool low_supports_breakpoints () override
;
74 CORE_ADDR
low_get_pc (regcache
*regcache
) override
;
76 void low_set_pc (regcache
*regcache
, CORE_ADDR newpc
) override
;
79 /* The singleton target ops object. */
81 static aarch64_target the_aarch64_target
;
84 aarch64_target::low_cannot_fetch_register (int regno
)
86 gdb_assert_not_reached ("linux target op low_cannot_fetch_register "
87 "is not implemented by the target");
91 aarch64_target::low_cannot_store_register (int regno
)
93 gdb_assert_not_reached ("linux target op low_cannot_store_register "
94 "is not implemented by the target");
97 /* Per-process arch-specific data we want to keep. */
99 struct arch_process_info
101 /* Hardware breakpoint/watchpoint data.
102 The reason for them to be per-process rather than per-thread is
103 due to the lack of information in the gdbserver environment;
104 gdbserver is not told that whether a requested hardware
105 breakpoint/watchpoint is thread specific or not, so it has to set
106 each hw bp/wp for every thread in the current process. The
107 higher level bp/wp management in gdb will resume a thread if a hw
108 bp/wp trap is not expected for it. Since the hw bp/wp setting is
109 same for each thread, it is reasonable for the data to live here.
111 struct aarch64_debug_reg_state debug_reg_state
;
114 /* Return true if the size of register 0 is 8 byte. */
117 is_64bit_tdesc (void)
119 struct regcache
*regcache
= get_thread_regcache (current_thread
, 0);
121 return register_size (regcache
->tdesc
, 0) == 8;
124 /* Return true if the regcache contains the number of SVE registers. */
129 struct regcache
*regcache
= get_thread_regcache (current_thread
, 0);
131 return tdesc_contains_feature (regcache
->tdesc
, "org.gnu.gdb.aarch64.sve");
135 aarch64_fill_gregset (struct regcache
*regcache
, void *buf
)
137 struct user_pt_regs
*regset
= (struct user_pt_regs
*) buf
;
140 for (i
= 0; i
< AARCH64_X_REGS_NUM
; i
++)
141 collect_register (regcache
, AARCH64_X0_REGNUM
+ i
, ®set
->regs
[i
]);
142 collect_register (regcache
, AARCH64_SP_REGNUM
, ®set
->sp
);
143 collect_register (regcache
, AARCH64_PC_REGNUM
, ®set
->pc
);
144 collect_register (regcache
, AARCH64_CPSR_REGNUM
, ®set
->pstate
);
148 aarch64_store_gregset (struct regcache
*regcache
, const void *buf
)
150 const struct user_pt_regs
*regset
= (const struct user_pt_regs
*) buf
;
153 for (i
= 0; i
< AARCH64_X_REGS_NUM
; i
++)
154 supply_register (regcache
, AARCH64_X0_REGNUM
+ i
, ®set
->regs
[i
]);
155 supply_register (regcache
, AARCH64_SP_REGNUM
, ®set
->sp
);
156 supply_register (regcache
, AARCH64_PC_REGNUM
, ®set
->pc
);
157 supply_register (regcache
, AARCH64_CPSR_REGNUM
, ®set
->pstate
);
161 aarch64_fill_fpregset (struct regcache
*regcache
, void *buf
)
163 struct user_fpsimd_state
*regset
= (struct user_fpsimd_state
*) buf
;
166 for (i
= 0; i
< AARCH64_V_REGS_NUM
; i
++)
167 collect_register (regcache
, AARCH64_V0_REGNUM
+ i
, ®set
->vregs
[i
]);
168 collect_register (regcache
, AARCH64_FPSR_REGNUM
, ®set
->fpsr
);
169 collect_register (regcache
, AARCH64_FPCR_REGNUM
, ®set
->fpcr
);
173 aarch64_store_fpregset (struct regcache
*regcache
, const void *buf
)
175 const struct user_fpsimd_state
*regset
176 = (const struct user_fpsimd_state
*) buf
;
179 for (i
= 0; i
< AARCH64_V_REGS_NUM
; i
++)
180 supply_register (regcache
, AARCH64_V0_REGNUM
+ i
, ®set
->vregs
[i
]);
181 supply_register (regcache
, AARCH64_FPSR_REGNUM
, ®set
->fpsr
);
182 supply_register (regcache
, AARCH64_FPCR_REGNUM
, ®set
->fpcr
);
185 /* Store the pauth registers to regcache. */
188 aarch64_store_pauthregset (struct regcache
*regcache
, const void *buf
)
190 uint64_t *pauth_regset
= (uint64_t *) buf
;
191 int pauth_base
= find_regno (regcache
->tdesc
, "pauth_dmask");
196 supply_register (regcache
, AARCH64_PAUTH_DMASK_REGNUM (pauth_base
),
198 supply_register (regcache
, AARCH64_PAUTH_CMASK_REGNUM (pauth_base
),
203 aarch64_target::low_supports_breakpoints ()
208 /* Implementation of linux target ops method "low_get_pc". */
211 aarch64_target::low_get_pc (regcache
*regcache
)
213 if (register_size (regcache
->tdesc
, 0) == 8)
214 return linux_get_pc_64bit (regcache
);
216 return linux_get_pc_32bit (regcache
);
219 /* Implementation of linux target ops method "low_set_pc". */
222 aarch64_target::low_set_pc (regcache
*regcache
, CORE_ADDR pc
)
224 if (register_size (regcache
->tdesc
, 0) == 8)
225 linux_set_pc_64bit (regcache
, pc
);
227 linux_set_pc_32bit (regcache
, pc
);
230 #define aarch64_breakpoint_len 4
232 /* AArch64 BRK software debug mode instruction.
233 This instruction needs to match gdb/aarch64-tdep.c
234 (aarch64_default_breakpoint). */
235 static const gdb_byte aarch64_breakpoint
[] = {0x00, 0x00, 0x20, 0xd4};
237 /* Implementation of linux_target_ops method "breakpoint_at". */
240 aarch64_breakpoint_at (CORE_ADDR where
)
242 if (is_64bit_tdesc ())
244 gdb_byte insn
[aarch64_breakpoint_len
];
246 the_target
->read_memory (where
, (unsigned char *) &insn
,
247 aarch64_breakpoint_len
);
248 if (memcmp (insn
, aarch64_breakpoint
, aarch64_breakpoint_len
) == 0)
254 return arm_breakpoint_at (where
);
258 aarch64_init_debug_reg_state (struct aarch64_debug_reg_state
*state
)
262 for (i
= 0; i
< AARCH64_HBP_MAX_NUM
; ++i
)
264 state
->dr_addr_bp
[i
] = 0;
265 state
->dr_ctrl_bp
[i
] = 0;
266 state
->dr_ref_count_bp
[i
] = 0;
269 for (i
= 0; i
< AARCH64_HWP_MAX_NUM
; ++i
)
271 state
->dr_addr_wp
[i
] = 0;
272 state
->dr_ctrl_wp
[i
] = 0;
273 state
->dr_ref_count_wp
[i
] = 0;
277 /* Return the pointer to the debug register state structure in the
278 current process' arch-specific data area. */
280 struct aarch64_debug_reg_state
*
281 aarch64_get_debug_reg_state (pid_t pid
)
283 struct process_info
*proc
= find_process_pid (pid
);
285 return &proc
->priv
->arch_private
->debug_reg_state
;
288 /* Implementation of linux_target_ops method "supports_z_point_type". */
291 aarch64_supports_z_point_type (char z_type
)
297 case Z_PACKET_WRITE_WP
:
298 case Z_PACKET_READ_WP
:
299 case Z_PACKET_ACCESS_WP
:
306 /* Implementation of linux_target_ops method "insert_point".
308 It actually only records the info of the to-be-inserted bp/wp;
309 the actual insertion will happen when threads are resumed. */
312 aarch64_insert_point (enum raw_bkpt_type type
, CORE_ADDR addr
,
313 int len
, struct raw_breakpoint
*bp
)
316 enum target_hw_bp_type targ_type
;
317 struct aarch64_debug_reg_state
*state
318 = aarch64_get_debug_reg_state (pid_of (current_thread
));
321 fprintf (stderr
, "insert_point on entry (addr=0x%08lx, len=%d)\n",
322 (unsigned long) addr
, len
);
324 /* Determine the type from the raw breakpoint type. */
325 targ_type
= raw_bkpt_type_to_target_hw_bp_type (type
);
327 if (targ_type
!= hw_execute
)
329 if (aarch64_linux_region_ok_for_watchpoint (addr
, len
))
330 ret
= aarch64_handle_watchpoint (targ_type
, addr
, len
,
331 1 /* is_insert */, state
);
339 /* LEN is 3 means the breakpoint is set on a 32-bit thumb
340 instruction. Set it to 2 to correctly encode length bit
341 mask in hardware/watchpoint control register. */
344 ret
= aarch64_handle_breakpoint (targ_type
, addr
, len
,
345 1 /* is_insert */, state
);
349 aarch64_show_debug_reg_state (state
, "insert_point", addr
, len
,
355 /* Implementation of linux_target_ops method "remove_point".
357 It actually only records the info of the to-be-removed bp/wp,
358 the actual removal will be done when threads are resumed. */
361 aarch64_remove_point (enum raw_bkpt_type type
, CORE_ADDR addr
,
362 int len
, struct raw_breakpoint
*bp
)
365 enum target_hw_bp_type targ_type
;
366 struct aarch64_debug_reg_state
*state
367 = aarch64_get_debug_reg_state (pid_of (current_thread
));
370 fprintf (stderr
, "remove_point on entry (addr=0x%08lx, len=%d)\n",
371 (unsigned long) addr
, len
);
373 /* Determine the type from the raw breakpoint type. */
374 targ_type
= raw_bkpt_type_to_target_hw_bp_type (type
);
376 /* Set up state pointers. */
377 if (targ_type
!= hw_execute
)
379 aarch64_handle_watchpoint (targ_type
, addr
, len
, 0 /* is_insert */,
385 /* LEN is 3 means the breakpoint is set on a 32-bit thumb
386 instruction. Set it to 2 to correctly encode length bit
387 mask in hardware/watchpoint control register. */
390 ret
= aarch64_handle_breakpoint (targ_type
, addr
, len
,
391 0 /* is_insert */, state
);
395 aarch64_show_debug_reg_state (state
, "remove_point", addr
, len
,
401 /* Implementation of linux_target_ops method "stopped_data_address". */
404 aarch64_stopped_data_address (void)
408 struct aarch64_debug_reg_state
*state
;
410 pid
= lwpid_of (current_thread
);
412 /* Get the siginfo. */
413 if (ptrace (PTRACE_GETSIGINFO
, pid
, NULL
, &siginfo
) != 0)
414 return (CORE_ADDR
) 0;
416 /* Need to be a hardware breakpoint/watchpoint trap. */
417 if (siginfo
.si_signo
!= SIGTRAP
418 || (siginfo
.si_code
& 0xffff) != 0x0004 /* TRAP_HWBKPT */)
419 return (CORE_ADDR
) 0;
421 /* Check if the address matches any watched address. */
422 state
= aarch64_get_debug_reg_state (pid_of (current_thread
));
423 for (i
= aarch64_num_wp_regs
- 1; i
>= 0; --i
)
425 const unsigned int offset
426 = aarch64_watchpoint_offset (state
->dr_ctrl_wp
[i
]);
427 const unsigned int len
= aarch64_watchpoint_length (state
->dr_ctrl_wp
[i
]);
428 const CORE_ADDR addr_trap
= (CORE_ADDR
) siginfo
.si_addr
;
429 const CORE_ADDR addr_watch
= state
->dr_addr_wp
[i
] + offset
;
430 const CORE_ADDR addr_watch_aligned
= align_down (state
->dr_addr_wp
[i
], 8);
431 const CORE_ADDR addr_orig
= state
->dr_addr_orig_wp
[i
];
433 if (state
->dr_ref_count_wp
[i
]
434 && DR_CONTROL_ENABLED (state
->dr_ctrl_wp
[i
])
435 && addr_trap
>= addr_watch_aligned
436 && addr_trap
< addr_watch
+ len
)
438 /* ADDR_TRAP reports the first address of the memory range
439 accessed by the CPU, regardless of what was the memory
440 range watched. Thus, a large CPU access that straddles
441 the ADDR_WATCH..ADDR_WATCH+LEN range may result in an
442 ADDR_TRAP that is lower than the
443 ADDR_WATCH..ADDR_WATCH+LEN range. E.g.:
445 addr: | 4 | 5 | 6 | 7 | 8 |
446 |---- range watched ----|
447 |----------- range accessed ------------|
449 In this case, ADDR_TRAP will be 4.
451 To match a watchpoint known to GDB core, we must never
452 report *ADDR_P outside of any ADDR_WATCH..ADDR_WATCH+LEN
453 range. ADDR_WATCH <= ADDR_TRAP < ADDR_ORIG is a false
454 positive on kernels older than 4.10. See PR
460 return (CORE_ADDR
) 0;
463 /* Implementation of linux_target_ops method "stopped_by_watchpoint". */
466 aarch64_stopped_by_watchpoint (void)
468 if (aarch64_stopped_data_address () != 0)
474 /* Fetch the thread-local storage pointer for libthread_db. */
477 ps_get_thread_area (struct ps_prochandle
*ph
,
478 lwpid_t lwpid
, int idx
, void **base
)
480 return aarch64_ps_get_thread_area (ph
, lwpid
, idx
, base
,
484 /* Implementation of linux_target_ops method "siginfo_fixup". */
487 aarch64_linux_siginfo_fixup (siginfo_t
*native
, gdb_byte
*inf
, int direction
)
489 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
490 if (!is_64bit_tdesc ())
493 aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo
*) inf
,
496 aarch64_siginfo_from_compat_siginfo (native
,
497 (struct compat_siginfo
*) inf
);
505 /* Implementation of linux_target_ops method "new_process". */
507 static struct arch_process_info
*
508 aarch64_linux_new_process (void)
510 struct arch_process_info
*info
= XCNEW (struct arch_process_info
);
512 aarch64_init_debug_reg_state (&info
->debug_reg_state
);
517 /* Implementation of linux_target_ops method "delete_process". */
520 aarch64_linux_delete_process (struct arch_process_info
*info
)
525 /* Implementation of linux_target_ops method "linux_new_fork". */
528 aarch64_linux_new_fork (struct process_info
*parent
,
529 struct process_info
*child
)
531 /* These are allocated by linux_add_process. */
532 gdb_assert (parent
->priv
!= NULL
533 && parent
->priv
->arch_private
!= NULL
);
534 gdb_assert (child
->priv
!= NULL
535 && child
->priv
->arch_private
!= NULL
);
537 /* Linux kernel before 2.6.33 commit
538 72f674d203cd230426437cdcf7dd6f681dad8b0d
539 will inherit hardware debug registers from parent
540 on fork/vfork/clone. Newer Linux kernels create such tasks with
541 zeroed debug registers.
543 GDB core assumes the child inherits the watchpoints/hw
544 breakpoints of the parent, and will remove them all from the
545 forked off process. Copy the debug registers mirrors into the
546 new process so that all breakpoints and watchpoints can be
547 removed together. The debug registers mirror will become zeroed
548 in the end before detaching the forked off process, thus making
549 this compatible with older Linux kernels too. */
551 *child
->priv
->arch_private
= *parent
->priv
->arch_private
;
554 /* Matches HWCAP_PACA in kernel header arch/arm64/include/uapi/asm/hwcap.h. */
555 #define AARCH64_HWCAP_PACA (1 << 30)
557 /* Implementation of linux target ops method "low_arch_setup". */
560 aarch64_target::low_arch_setup ()
562 unsigned int machine
;
566 tid
= lwpid_of (current_thread
);
568 is_elf64
= linux_pid_exe_is_elf_64_file (tid
, &machine
);
572 uint64_t vq
= aarch64_sve_get_vq (tid
);
573 unsigned long hwcap
= linux_get_hwcap (8);
574 bool pauth_p
= hwcap
& AARCH64_HWCAP_PACA
;
576 current_process ()->tdesc
= aarch64_linux_read_description (vq
, pauth_p
);
579 current_process ()->tdesc
= aarch32_linux_read_description ();
581 aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread
));
584 /* Wrapper for aarch64_sve_regs_copy_to_reg_buf. */
587 aarch64_sve_regs_copy_to_regcache (struct regcache
*regcache
, const void *buf
)
589 return aarch64_sve_regs_copy_to_reg_buf (regcache
, buf
);
592 /* Wrapper for aarch64_sve_regs_copy_from_reg_buf. */
595 aarch64_sve_regs_copy_from_regcache (struct regcache
*regcache
, void *buf
)
597 return aarch64_sve_regs_copy_from_reg_buf (regcache
, buf
);
600 static struct regset_info aarch64_regsets
[] =
602 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_PRSTATUS
,
603 sizeof (struct user_pt_regs
), GENERAL_REGS
,
604 aarch64_fill_gregset
, aarch64_store_gregset
},
605 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_FPREGSET
,
606 sizeof (struct user_fpsimd_state
), FP_REGS
,
607 aarch64_fill_fpregset
, aarch64_store_fpregset
609 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_ARM_PAC_MASK
,
610 AARCH64_PAUTH_REGS_SIZE
, OPTIONAL_REGS
,
611 NULL
, aarch64_store_pauthregset
},
615 static struct regsets_info aarch64_regsets_info
=
617 aarch64_regsets
, /* regsets */
619 NULL
, /* disabled_regsets */
622 static struct regs_info regs_info_aarch64
=
624 NULL
, /* regset_bitmap */
626 &aarch64_regsets_info
,
629 static struct regset_info aarch64_sve_regsets
[] =
631 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_PRSTATUS
,
632 sizeof (struct user_pt_regs
), GENERAL_REGS
,
633 aarch64_fill_gregset
, aarch64_store_gregset
},
634 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_ARM_SVE
,
635 SVE_PT_SIZE (AARCH64_MAX_SVE_VQ
, SVE_PT_REGS_SVE
), EXTENDED_REGS
,
636 aarch64_sve_regs_copy_from_regcache
, aarch64_sve_regs_copy_to_regcache
638 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_ARM_PAC_MASK
,
639 AARCH64_PAUTH_REGS_SIZE
, OPTIONAL_REGS
,
640 NULL
, aarch64_store_pauthregset
},
644 static struct regsets_info aarch64_sve_regsets_info
=
646 aarch64_sve_regsets
, /* regsets. */
647 0, /* num_regsets. */
648 NULL
, /* disabled_regsets. */
651 static struct regs_info regs_info_aarch64_sve
=
653 NULL
, /* regset_bitmap. */
655 &aarch64_sve_regsets_info
,
658 /* Implementation of linux target ops method "get_regs_info". */
661 aarch64_target::get_regs_info ()
663 if (!is_64bit_tdesc ())
664 return ®s_info_aarch32
;
667 return ®s_info_aarch64_sve
;
669 return ®s_info_aarch64
;
672 /* Implementation of linux_target_ops method "supports_tracepoints". */
675 aarch64_supports_tracepoints (void)
677 if (current_thread
== NULL
)
681 /* We don't support tracepoints on aarch32 now. */
682 return is_64bit_tdesc ();
686 /* Implementation of linux_target_ops method "get_thread_area". */
689 aarch64_get_thread_area (int lwpid
, CORE_ADDR
*addrp
)
694 iovec
.iov_base
= ®
;
695 iovec
.iov_len
= sizeof (reg
);
697 if (ptrace (PTRACE_GETREGSET
, lwpid
, NT_ARM_TLS
, &iovec
) != 0)
705 /* Implementation of linux_target_ops method "get_syscall_trapinfo". */
708 aarch64_get_syscall_trapinfo (struct regcache
*regcache
, int *sysno
)
710 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
716 collect_register_by_name (regcache
, "x8", &l_sysno
);
717 *sysno
= (int) l_sysno
;
720 collect_register_by_name (regcache
, "r7", sysno
);
723 /* List of condition codes that we need. */
725 enum aarch64_condition_codes
736 enum aarch64_operand_type
742 /* Representation of an operand. At this time, it only supports register
743 and immediate types. */
745 struct aarch64_operand
747 /* Type of the operand. */
748 enum aarch64_operand_type type
;
750 /* Value of the operand according to the type. */
754 struct aarch64_register reg
;
758 /* List of registers that we are currently using, we can add more here as
759 we need to use them. */
761 /* General purpose scratch registers (64 bit). */
762 static const struct aarch64_register x0
= { 0, 1 };
763 static const struct aarch64_register x1
= { 1, 1 };
764 static const struct aarch64_register x2
= { 2, 1 };
765 static const struct aarch64_register x3
= { 3, 1 };
766 static const struct aarch64_register x4
= { 4, 1 };
768 /* General purpose scratch registers (32 bit). */
769 static const struct aarch64_register w0
= { 0, 0 };
770 static const struct aarch64_register w2
= { 2, 0 };
772 /* Intra-procedure scratch registers. */
773 static const struct aarch64_register ip0
= { 16, 1 };
775 /* Special purpose registers. */
776 static const struct aarch64_register fp
= { 29, 1 };
777 static const struct aarch64_register lr
= { 30, 1 };
778 static const struct aarch64_register sp
= { 31, 1 };
779 static const struct aarch64_register xzr
= { 31, 1 };
781 /* Dynamically allocate a new register. If we know the register
782 statically, we should make it a global as above instead of using this
785 static struct aarch64_register
786 aarch64_register (unsigned num
, int is64
)
788 return (struct aarch64_register
) { num
, is64
};
791 /* Helper function to create a register operand, for instructions with
792 different types of operands.
795 p += emit_mov (p, x0, register_operand (x1)); */
797 static struct aarch64_operand
798 register_operand (struct aarch64_register reg
)
800 struct aarch64_operand operand
;
802 operand
.type
= OPERAND_REGISTER
;
808 /* Helper function to create an immediate operand, for instructions with
809 different types of operands.
812 p += emit_mov (p, x0, immediate_operand (12)); */
814 static struct aarch64_operand
815 immediate_operand (uint32_t imm
)
817 struct aarch64_operand operand
;
819 operand
.type
= OPERAND_IMMEDIATE
;
825 /* Helper function to create an offset memory operand.
828 p += emit_ldr (p, x0, sp, offset_memory_operand (16)); */
830 static struct aarch64_memory_operand
831 offset_memory_operand (int32_t offset
)
833 return (struct aarch64_memory_operand
) { MEMORY_OPERAND_OFFSET
, offset
};
836 /* Helper function to create a pre-index memory operand.
839 p += emit_ldr (p, x0, sp, preindex_memory_operand (16)); */
841 static struct aarch64_memory_operand
842 preindex_memory_operand (int32_t index
)
844 return (struct aarch64_memory_operand
) { MEMORY_OPERAND_PREINDEX
, index
};
847 /* Helper function to create a post-index memory operand.
850 p += emit_ldr (p, x0, sp, postindex_memory_operand (16)); */
852 static struct aarch64_memory_operand
853 postindex_memory_operand (int32_t index
)
855 return (struct aarch64_memory_operand
) { MEMORY_OPERAND_POSTINDEX
, index
};
/* System control registers.  These special registers can be written and
   read with the MRS and MSR instructions.

   - NZCV: Condition flags.  GDB refers to this register under the CPSR
     name.
   - FPSR: Floating-point status register.
   - FPCR: Floating-point control registers.
   - TPIDR_EL0: Software thread ID register.  */

enum aarch64_system_control_registers
{
  /*          op0           op1           crn          crm          op2  */
  NZCV =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
  FPSR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
  FPCR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
  TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
};
876 /* Write a BLR instruction into *BUF.
880 RN is the register to branch to. */
883 emit_blr (uint32_t *buf
, struct aarch64_register rn
)
885 return aarch64_emit_insn (buf
, BLR
| ENCODE (rn
.num
, 5, 5));
888 /* Write a RET instruction into *BUF.
892 RN is the register to branch to. */
895 emit_ret (uint32_t *buf
, struct aarch64_register rn
)
897 return aarch64_emit_insn (buf
, RET
| ENCODE (rn
.num
, 5, 5));
901 emit_load_store_pair (uint32_t *buf
, enum aarch64_opcodes opcode
,
902 struct aarch64_register rt
,
903 struct aarch64_register rt2
,
904 struct aarch64_register rn
,
905 struct aarch64_memory_operand operand
)
912 opc
= ENCODE (2, 2, 30);
914 opc
= ENCODE (0, 2, 30);
916 switch (operand
.type
)
918 case MEMORY_OPERAND_OFFSET
:
920 pre_index
= ENCODE (1, 1, 24);
921 write_back
= ENCODE (0, 1, 23);
924 case MEMORY_OPERAND_POSTINDEX
:
926 pre_index
= ENCODE (0, 1, 24);
927 write_back
= ENCODE (1, 1, 23);
930 case MEMORY_OPERAND_PREINDEX
:
932 pre_index
= ENCODE (1, 1, 24);
933 write_back
= ENCODE (1, 1, 23);
940 return aarch64_emit_insn (buf
, opcode
| opc
| pre_index
| write_back
941 | ENCODE (operand
.index
>> 3, 7, 15)
942 | ENCODE (rt2
.num
, 5, 10)
943 | ENCODE (rn
.num
, 5, 5) | ENCODE (rt
.num
, 5, 0));
946 /* Write a STP instruction into *BUF.
948 STP rt, rt2, [rn, #offset]
949 STP rt, rt2, [rn, #index]!
950 STP rt, rt2, [rn], #index
952 RT and RT2 are the registers to store.
953 RN is the base address register.
954 OFFSET is the immediate to add to the base address. It is limited to a
955 -512 .. 504 range (7 bits << 3). */
958 emit_stp (uint32_t *buf
, struct aarch64_register rt
,
959 struct aarch64_register rt2
, struct aarch64_register rn
,
960 struct aarch64_memory_operand operand
)
962 return emit_load_store_pair (buf
, STP
, rt
, rt2
, rn
, operand
);
965 /* Write a LDP instruction into *BUF.
967 LDP rt, rt2, [rn, #offset]
968 LDP rt, rt2, [rn, #index]!
969 LDP rt, rt2, [rn], #index
971 RT and RT2 are the registers to store.
972 RN is the base address register.
973 OFFSET is the immediate to add to the base address. It is limited to a
974 -512 .. 504 range (7 bits << 3). */
977 emit_ldp (uint32_t *buf
, struct aarch64_register rt
,
978 struct aarch64_register rt2
, struct aarch64_register rn
,
979 struct aarch64_memory_operand operand
)
981 return emit_load_store_pair (buf
, LDP
, rt
, rt2
, rn
, operand
);
984 /* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.
986 LDP qt, qt2, [rn, #offset]
988 RT and RT2 are the Q registers to store.
989 RN is the base address register.
990 OFFSET is the immediate to add to the base address. It is limited to
991 -1024 .. 1008 range (7 bits << 4). */
994 emit_ldp_q_offset (uint32_t *buf
, unsigned rt
, unsigned rt2
,
995 struct aarch64_register rn
, int32_t offset
)
997 uint32_t opc
= ENCODE (2, 2, 30);
998 uint32_t pre_index
= ENCODE (1, 1, 24);
1000 return aarch64_emit_insn (buf
, LDP_SIMD_VFP
| opc
| pre_index
1001 | ENCODE (offset
>> 4, 7, 15)
1002 | ENCODE (rt2
, 5, 10)
1003 | ENCODE (rn
.num
, 5, 5) | ENCODE (rt
, 5, 0));
1006 /* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.
1008 STP qt, qt2, [rn, #offset]
1010 RT and RT2 are the Q registers to store.
1011 RN is the base address register.
1012 OFFSET is the immediate to add to the base address. It is limited to
1013 -1024 .. 1008 range (7 bits << 4). */
1016 emit_stp_q_offset (uint32_t *buf
, unsigned rt
, unsigned rt2
,
1017 struct aarch64_register rn
, int32_t offset
)
1019 uint32_t opc
= ENCODE (2, 2, 30);
1020 uint32_t pre_index
= ENCODE (1, 1, 24);
1022 return aarch64_emit_insn (buf
, STP_SIMD_VFP
| opc
| pre_index
1023 | ENCODE (offset
>> 4, 7, 15)
1024 | ENCODE (rt2
, 5, 10)
1025 | ENCODE (rn
.num
, 5, 5) | ENCODE (rt
, 5, 0));
1028 /* Write a LDRH instruction into *BUF.
1030 LDRH wt, [xn, #offset]
1031 LDRH wt, [xn, #index]!
1032 LDRH wt, [xn], #index
1034 RT is the register to store.
1035 RN is the base address register.
1036 OFFSET is the immediate to add to the base address. It is limited to
1037 0 .. 32760 range (12 bits << 3). */
1040 emit_ldrh (uint32_t *buf
, struct aarch64_register rt
,
1041 struct aarch64_register rn
,
1042 struct aarch64_memory_operand operand
)
1044 return aarch64_emit_load_store (buf
, 1, LDR
, rt
, rn
, operand
);
1047 /* Write a LDRB instruction into *BUF.
1049 LDRB wt, [xn, #offset]
1050 LDRB wt, [xn, #index]!
1051 LDRB wt, [xn], #index
1053 RT is the register to store.
1054 RN is the base address register.
1055 OFFSET is the immediate to add to the base address. It is limited to
1056 0 .. 32760 range (12 bits << 3). */
1059 emit_ldrb (uint32_t *buf
, struct aarch64_register rt
,
1060 struct aarch64_register rn
,
1061 struct aarch64_memory_operand operand
)
1063 return aarch64_emit_load_store (buf
, 0, LDR
, rt
, rn
, operand
);
1068 /* Write a STR instruction into *BUF.
1070 STR rt, [rn, #offset]
1071 STR rt, [rn, #index]!
1072 STR rt, [rn], #index
1074 RT is the register to store.
1075 RN is the base address register.
1076 OFFSET is the immediate to add to the base address. It is limited to
1077 0 .. 32760 range (12 bits << 3). */
1080 emit_str (uint32_t *buf
, struct aarch64_register rt
,
1081 struct aarch64_register rn
,
1082 struct aarch64_memory_operand operand
)
1084 return aarch64_emit_load_store (buf
, rt
.is64
? 3 : 2, STR
, rt
, rn
, operand
);
1087 /* Helper function emitting an exclusive load or store instruction. */
1090 emit_load_store_exclusive (uint32_t *buf
, uint32_t size
,
1091 enum aarch64_opcodes opcode
,
1092 struct aarch64_register rs
,
1093 struct aarch64_register rt
,
1094 struct aarch64_register rt2
,
1095 struct aarch64_register rn
)
1097 return aarch64_emit_insn (buf
, opcode
| ENCODE (size
, 2, 30)
1098 | ENCODE (rs
.num
, 5, 16) | ENCODE (rt2
.num
, 5, 10)
1099 | ENCODE (rn
.num
, 5, 5) | ENCODE (rt
.num
, 5, 0));
1102 /* Write a LAXR instruction into *BUF.
1106 RT is the destination register.
1107 RN is the base address register. */
1110 emit_ldaxr (uint32_t *buf
, struct aarch64_register rt
,
1111 struct aarch64_register rn
)
1113 return emit_load_store_exclusive (buf
, rt
.is64
? 3 : 2, LDAXR
, xzr
, rt
,
1117 /* Write a STXR instruction into *BUF.
1121 RS is the result register, it indicates if the store succeeded or not.
1122 RT is the destination register.
1123 RN is the base address register. */
1126 emit_stxr (uint32_t *buf
, struct aarch64_register rs
,
1127 struct aarch64_register rt
, struct aarch64_register rn
)
1129 return emit_load_store_exclusive (buf
, rt
.is64
? 3 : 2, STXR
, rs
, rt
,
1133 /* Write a STLR instruction into *BUF.
1137 RT is the register to store.
1138 RN is the base address register. */
1141 emit_stlr (uint32_t *buf
, struct aarch64_register rt
,
1142 struct aarch64_register rn
)
1144 return emit_load_store_exclusive (buf
, rt
.is64
? 3 : 2, STLR
, xzr
, rt
,
1148 /* Helper function for data processing instructions with register sources. */
1151 emit_data_processing_reg (uint32_t *buf
, uint32_t opcode
,
1152 struct aarch64_register rd
,
1153 struct aarch64_register rn
,
1154 struct aarch64_register rm
)
1156 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1158 return aarch64_emit_insn (buf
, opcode
| size
| ENCODE (rm
.num
, 5, 16)
1159 | ENCODE (rn
.num
, 5, 5) | ENCODE (rd
.num
, 5, 0));
1162 /* Helper function for data processing instructions taking either a register
1166 emit_data_processing (uint32_t *buf
, enum aarch64_opcodes opcode
,
1167 struct aarch64_register rd
,
1168 struct aarch64_register rn
,
1169 struct aarch64_operand operand
)
1171 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1172 /* The opcode is different for register and immediate source operands. */
1173 uint32_t operand_opcode
;
1175 if (operand
.type
== OPERAND_IMMEDIATE
)
1177 /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
1178 operand_opcode
= ENCODE (8, 4, 25);
1180 return aarch64_emit_insn (buf
, opcode
| operand_opcode
| size
1181 | ENCODE (operand
.imm
, 12, 10)
1182 | ENCODE (rn
.num
, 5, 5)
1183 | ENCODE (rd
.num
, 5, 0));
1187 /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
1188 operand_opcode
= ENCODE (5, 4, 25);
1190 return emit_data_processing_reg (buf
, opcode
| operand_opcode
, rd
,
1195 /* Write an ADD instruction into *BUF.
1200 This function handles both an immediate and register add.
1202 RD is the destination register.
1203 RN is the input register.
1204 OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
1205 OPERAND_REGISTER. */
1208 emit_add (uint32_t *buf
, struct aarch64_register rd
,
1209 struct aarch64_register rn
, struct aarch64_operand operand
)
1211 return emit_data_processing (buf
, ADD
, rd
, rn
, operand
);
1214 /* Write a SUB instruction into *BUF.
1219 This function handles both an immediate and register sub.
1221 RD is the destination register.
1222 RN is the input register.
1223 IMM is the immediate to substract to RN. */
1226 emit_sub (uint32_t *buf
, struct aarch64_register rd
,
1227 struct aarch64_register rn
, struct aarch64_operand operand
)
1229 return emit_data_processing (buf
, SUB
, rd
, rn
, operand
);
/* Write a MOV instruction into *BUF.

     MOV rd, #imm
     MOV rd, rm

   This function handles both a wide immediate move and a register move,
   with the condition that the source register is not xzr.  xzr and the
   stack pointer share the same encoding and this function only supports
   the stack pointer.

   RD is the destination register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.

   Returns the number of instructions written into BUF.  */

static int
emit_mov (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_operand operand)
{
  if (operand.type == OPERAND_IMMEDIATE)
    {
      uint32_t size = ENCODE (rd.is64, 1, 31);
      /* Do not shift the immediate.  */
      uint32_t shift = ENCODE (0, 2, 21);

      return aarch64_emit_insn (buf, MOV | size | shift
                                | ENCODE (operand.imm, 16, 5)
                                | ENCODE (rd.num, 5, 0));
    }
  else
    /* A register move is encoded as an ADD of #0 to the source.  */
    return emit_add (buf, rd, operand.reg, immediate_operand (0));
}
/* Write a MOVK instruction into *BUF.

     MOVK rd, #imm, lsl #shift

   RD is the destination register.
   IMM is the immediate.
   SHIFT is the logical shift left to apply to IMM, in units of 16 bits
   (encoded in the "hw" field of the instruction).

   Returns the number of instructions written into BUF.  */

static int
emit_movk (uint32_t *buf, struct aarch64_register rd, uint32_t imm,
           unsigned shift)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, MOVK | size | ENCODE (shift, 2, 21) |
                            ENCODE (imm, 16, 5) | ENCODE (rd.num, 5, 0));
}
/* Write instructions into *BUF in order to move ADDR into a register.
   ADDR can be a 64-bit value.

   This function will emit a series of MOV and MOVK instructions, such as:

     MOV  xd, #(addr)
     MOVK xd, #(addr >> 16), lsl #16
     MOVK xd, #(addr >> 32), lsl #32
     MOVK xd, #(addr >> 48), lsl #48

   Only as many MOVKs as needed are emitted: we stop as soon as the
   remaining high bits of ADDR are all zero.

   Returns the number of instructions written into BUF.  */

static int
emit_mov_addr (uint32_t *buf, struct aarch64_register rd, CORE_ADDR addr)
{
  uint32_t *p = buf;

  /* The MOV (wide immediate) instruction clears the top bits of the
     destination register, so the low 16 bits can go in first.  */
  p += emit_mov (p, rd, immediate_operand (addr & 0xffff));

  if ((addr >> 16) != 0)
    p += emit_movk (p, rd, (addr >> 16) & 0xffff, 1);
  else
    return p - buf;

  if ((addr >> 32) != 0)
    p += emit_movk (p, rd, (addr >> 32) & 0xffff, 2);
  else
    return p - buf;

  if ((addr >> 48) != 0)
    p += emit_movk (p, rd, (addr >> 48) & 0xffff, 3);

  return p - buf;
}
/* Write a SUBS instruction into *BUF.

     SUBS rd, rn, rm

   This instruction updates the condition flags.

   RD is the destination register.
   RN and RM are the source registers.

   Returns the number of instructions written into BUF.  */

static int
emit_subs (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUBS, rd, rn, operand);
}
/* Write a CMP instruction into *BUF.

     CMP rn, rm

   This instruction is an alias of SUBS xzr, rn, rm (it discards the
   result and only sets the condition flags).

   RN and OPERAND are the values to compare.

   Returns the number of instructions written into BUF.  */

static int
emit_cmp (uint32_t *buf, struct aarch64_register rn,
          struct aarch64_operand operand)
{
  return emit_subs (buf, xzr, rn, operand);
}
/* Write an AND instruction into *BUF.

     AND rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.

   Returns the number of instructions written into BUF.  */

static int
emit_and (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, AND, rd, rn, rm);
}
/* Write an ORR instruction into *BUF.

     ORR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.

   Returns the number of instructions written into BUF.  */

static int
emit_orr (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORR, rd, rn, rm);
}
/* Write an ORN instruction into *BUF.

     ORN rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.

   Returns the number of instructions written into BUF.  */

static int
emit_orn (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORN, rd, rn, rm);
}
/* Write an EOR instruction into *BUF.

     EOR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.

   Returns the number of instructions written into BUF.  */

static int
emit_eor (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, EOR, rd, rn, rm);
}
/* Write a MVN instruction into *BUF.

     MVN rd, rm

   This is an alias for ORN rd, xzr, rm (bitwise NOT).

   RD is the destination register.
   RM is the source register.

   Returns the number of instructions written into BUF.  */

static int
emit_mvn (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rm)
{
  return emit_orn (buf, rd, xzr, rm);
}
/* Write a LSLV instruction into *BUF.

     LSLV rd, rn, rm

   Logical shift left, shift amount taken from register RM.

   RD is the destination register.
   RN and RM are the source registers.

   Returns the number of instructions written into BUF.  */

static int
emit_lslv (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSLV, rd, rn, rm);
}
/* Write a LSRV instruction into *BUF.

     LSRV rd, rn, rm

   Logical (unsigned) shift right, shift amount taken from register RM.

   RD is the destination register.
   RN and RM are the source registers.

   Returns the number of instructions written into BUF.  */

static int
emit_lsrv (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSRV, rd, rn, rm);
}
/* Write an ASRV instruction into *BUF.

     ASRV rd, rn, rm

   Arithmetic (signed) shift right, shift amount taken from register RM.

   RD is the destination register.
   RN and RM are the source registers.

   Returns the number of instructions written into BUF.  */

static int
emit_asrv (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ASRV, rd, rn, rm);
}
/* Write a MUL instruction into *BUF.

     MUL rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.

   Returns the number of instructions written into BUF.  */

static int
emit_mul (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, MUL, rd, rn, rm);
}
/* Write a MRS instruction into *BUF.  The register size is 64-bit.

     MRS xt, system_reg

   RT is the destination register.
   SYSTEM_REG is the special purpose register to read.

   Returns the number of instructions written into BUF.  */

static int
emit_mrs (uint32_t *buf, struct aarch64_register rt,
          enum aarch64_system_control_registers system_reg)
{
  return aarch64_emit_insn (buf, MRS | ENCODE (system_reg, 15, 5)
                            | ENCODE (rt.num, 5, 0));
}
/* Write a MSR instruction into *BUF.  The register size is 64-bit.

     MSR system_reg, xt

   SYSTEM_REG is the special purpose register to write.
   RT is the input register.

   Returns the number of instructions written into BUF.  */

static int
emit_msr (uint32_t *buf, enum aarch64_system_control_registers system_reg,
          struct aarch64_register rt)
{
  return aarch64_emit_insn (buf, MSR | ENCODE (system_reg, 15, 5)
                            | ENCODE (rt.num, 5, 0));
}
/* Write a SEVL instruction into *BUF.

   This is a hint instruction telling the hardware to trigger an event
   local to this core (pairs with a following WFE).

   Returns the number of instructions written into BUF.  */

static int
emit_sevl (uint32_t *buf)
{
  return aarch64_emit_insn (buf, SEVL);
}
/* Write a WFE instruction into *BUF.

   This is a hint instruction telling the hardware to wait for an event.

   Returns the number of instructions written into BUF.  */

static int
emit_wfe (uint32_t *buf)
{
  return aarch64_emit_insn (buf, WFE);
}
/* Write a SBFM instruction into *BUF.

     SBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, sign extending the result.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).

   Returns the number of instructions written into BUF.  */

static int
emit_sbfm (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  /* The N bit must match the register width (1 for 64-bit).  */
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, SBFM | size | n | ENCODE (immr, 6, 16)
                            | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
                            | ENCODE (rd.num, 5, 0));
}
/* Write a SBFX instruction into *BUF.

     SBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination, sign
   extending the result.  This is an alias for:

     SBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.

   Returns the number of instructions written into BUF.  */

static int
emit_sbfx (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_sbfm (buf, rd, rn, lsb, lsb + width - 1);
}
/* Write a UBFM instruction into *BUF.

     UBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, extending the result with zeros.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).

   Returns the number of instructions written into BUF.  */

static int
emit_ubfm (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  /* The N bit must match the register width (1 for 64-bit).  */
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, UBFM | size | n | ENCODE (immr, 6, 16)
                            | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
                            | ENCODE (rd.num, 5, 0));
}
/* Write a UBFX instruction into *BUF.

     UBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination,
   extending the result with zeros.  This is an alias for:

     UBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.

   Returns the number of instructions written into BUF.  */

static int
emit_ubfx (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_ubfm (buf, rd, rn, lsb, lsb + width - 1);
}
/* Write a CSINC instruction into *BUF.

     CSINC rd, rn, rm, cond

   This instruction conditionally increments rn or rm and places the result
   in rd.  rn is chosen if the condition is true.

   RD is the destination register.
   RN and RM are the source registers.
   COND is the encoded condition.

   Returns the number of instructions written into BUF.  */

static int
emit_csinc (uint32_t *buf, struct aarch64_register rd,
            struct aarch64_register rn, struct aarch64_register rm,
            unsigned cond)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, CSINC | size | ENCODE (rm.num, 5, 16)
                            | ENCODE (cond, 4, 12) | ENCODE (rn.num, 5, 5)
                            | ENCODE (rd.num, 5, 0));
}
/* Write a CSET instruction into *BUF.

     CSET rd, cond

   This instruction conditionally writes 1 or 0 in the destination register.
   1 is written if the condition is true.  This is an alias for:

     CSINC rd, xzr, xzr, !cond

   Note that the condition needs to be inverted.

   RD is the destination register.
   COND is the encoded condition.

   Returns the number of instructions written into BUF.  */

static int
emit_cset (uint32_t *buf, struct aarch64_register rd, unsigned cond)
{
  /* The least significant bit of the condition needs toggling in order to
     invert it.  */
  return emit_csinc (buf, rd, xzr, xzr, cond ^ 0x1);
}
/* Write LEN instructions from BUF into the inferior memory at *TO,
   advancing *TO past what was written.

   Note instructions are always little endian on AArch64, unlike data.
   On a big-endian host the buffer is byte-swapped into a temporary
   before being written out.  */

static void
append_insns (CORE_ADDR *to, size_t len, const uint32_t *buf)
{
  size_t byte_len = len * sizeof (uint32_t);
#if (__BYTE_ORDER == __BIG_ENDIAN)
  uint32_t *le_buf = (uint32_t *) xmalloc (byte_len);
  size_t i;

  for (i = 0; i < len; i++)
    le_buf[i] = htole32 (buf[i]);

  target_write_memory (*to, (const unsigned char *) le_buf, byte_len);

  free (le_buf);
#else
  target_write_memory (*to, (const unsigned char *) buf, byte_len);
#endif

  *to += byte_len;
}
/* Sub-class of struct aarch64_insn_data, store information of
   instruction relocation for fast tracepoint.  Visitor can
   relocate an instruction from BASE.INSN_ADDR to NEW_ADDR and save
   the relocated instructions in buffer pointed by INSN_PTR.  */

struct aarch64_insn_relocation_data
{
  struct aarch64_insn_data base;

  /* The new address the instruction is relocated to.  */
  CORE_ADDR new_addr;

  /* Pointer to the buffer of relocated instruction(s).  */
  uint32_t *insn_ptr;
};
/* Implementation of aarch64_insn_visitor method "b".

   Re-emit an (optionally linking) unconditional branch with its offset
   adjusted for the move from the original address to the jump pad.  If
   the adjusted offset does not fit in the 28-bit branch range, nothing
   is emitted and the caller detects the failure by the unchanged
   INSN_PTR.  */

static void
aarch64_ftrace_insn_reloc_b (const int is_bl, const int32_t offset,
                             struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 28))
    insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, is_bl, new_offset);
}
/* Implementation of aarch64_insn_visitor method "b_cond".

   Re-emit a conditional branch with its offset adjusted for the move to
   the jump pad, falling back to a B.cond/B/B trampoline when the new
   offset exceeds the 21-bit conditional-branch range but still fits the
   28-bit unconditional range.  */

static void
aarch64_ftrace_insn_reloc_b_cond (const unsigned cond, const int32_t offset,
                                  struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 21))
    {
      insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond,
                                          new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a conditional branch
         instruction but not for a unconditional branch.  We can use
         the following instructions instead:

           B.COND TAKEN    ; If cond is true, then jump to TAKEN.
           B NOT_TAKEN     ; Else jump over TAKEN and continue.
         TAKEN:
           B #(offset - 8)
         NOT_TAKEN:

         */
      insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
                                      new_offset - 8);
    }
}
/* Implementation of aarch64_insn_visitor method "cb".

   Re-emit a CBZ/CBNZ with its offset adjusted for the move to the jump
   pad, using a CB/B/B trampoline when the new offset exceeds the 21-bit
   compare-and-branch range but still fits the 28-bit unconditional
   range.  */

static void
aarch64_ftrace_insn_reloc_cb (const int32_t offset, const int is_cbnz,
                              const unsigned rn, int is64,
                              struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 21))
    {
      insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
                                       aarch64_register (rn, is64),
                                       new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a compare and branch
         instruction but not for a unconditional branch.  We can use
         the following instructions instead:

           CBZ xn, TAKEN   ; xn == 0, then jump to TAKEN.
           B NOT_TAKEN     ; Else jump over TAKEN and continue.
         TAKEN:
           B #(offset - 8)
         NOT_TAKEN:

         */
      insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
                                       aarch64_register (rn, is64), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
                                      new_offset - 8);
    }
}
/* Implementation of aarch64_insn_visitor method "tb".

   Re-emit a TBZ/TBNZ with its offset adjusted for the move to the jump
   pad, using a TB/B/B trampoline when the new offset exceeds the 16-bit
   test-bit-and-branch range but still fits the 28-bit unconditional
   range.  */

static void
aarch64_ftrace_insn_reloc_tb (const int32_t offset, int is_tbnz,
                              const unsigned rt, unsigned bit,
                              struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 16))
    {
      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
                                       aarch64_register (rt, 1), new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a test bit and branch
         instruction but not for a unconditional branch.  We can use
         the following instructions instead:

           TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
           B NOT_TAKEN         ; Else jump over TAKEN and continue.
         TAKEN:
           B #(offset - 8)
         NOT_TAKEN:

         */
      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
                                       aarch64_register (rt, 1), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
                                      new_offset - 8);
    }
}
/* Implementation of aarch64_insn_visitor method "adr".

   ADR/ADRP compute a PC-relative address.  Since we know the address the
   original instruction would have produced, materialize it directly into
   the destination register with a MOV/MOVK sequence instead of keeping a
   PC-relative form.

   NOTE(review): the IS_ADRP parameter and branch structure below are
   reconstructed from the surrounding comments ("Clear the lower 12 bits"
   only applies to ADRP) — confirm against upstream before committing.  */

static void
aarch64_ftrace_insn_reloc_adr (const int32_t offset, const unsigned rd,
                               const int is_adrp,
                               struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  /* We know exactly the address the ADR{P,} instruction will compute.
     We can just write it to the destination register.  */
  CORE_ADDR address = data->insn_addr + offset;

  if (is_adrp)
    {
      /* Clear the lower 12 bits of the offset to get the 4K page.  */
      insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
                                             aarch64_register (rd, 1),
                                             address & ~0xfff);
    }
  else
    insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
                                           aarch64_register (rd, 1),
                                           address);
}
/* Implementation of aarch64_insn_visitor method "ldr_literal".

   A PC-relative literal load is replaced by materializing the literal's
   absolute address into RT and then loading through RT.  */

static void
aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset, const int is_sw,
                                       const unsigned rt, const int is64,
                                       struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  CORE_ADDR address = data->insn_addr + offset;

  insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
                                         aarch64_register (rt, 1), address);

  /* We know exactly what address to load from, and what register we
     can use:

       MOV xd, #(oldloc + offset)
       MOVK xd, #((oldloc + offset) >> 16), lsl #16
       ...

       LDR xd, [xd] ; or LDRSW xd, [xd]

     */

  if (is_sw)
    insn_reloc->insn_ptr += emit_ldrsw (insn_reloc->insn_ptr,
                                        aarch64_register (rt, 1),
                                        aarch64_register (rt, 1),
                                        offset_memory_operand (0));
  else
    insn_reloc->insn_ptr += emit_ldr (insn_reloc->insn_ptr,
                                      aarch64_register (rt, is64),
                                      aarch64_register (rt, 1),
                                      offset_memory_operand (0));
}
/* Implementation of aarch64_insn_visitor method "others".

   The instruction is not PC relative.  Just re-emit it verbatim at the
   new location.  */

static void
aarch64_ftrace_insn_reloc_others (const uint32_t insn,
                                  struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;

  insn_reloc->insn_ptr += aarch64_emit_insn (insn_reloc->insn_ptr, insn);
}
/* Visitor table binding each AArch64 instruction class to its fast
   tracepoint relocation handler; passed to
   aarch64_relocate_instruction.  */

static const struct aarch64_insn_visitor visitor =
{
  aarch64_ftrace_insn_reloc_b,
  aarch64_ftrace_insn_reloc_b_cond,
  aarch64_ftrace_insn_reloc_cb,
  aarch64_ftrace_insn_reloc_tb,
  aarch64_ftrace_insn_reloc_adr,
  aarch64_ftrace_insn_reloc_ldr_literal,
  aarch64_ftrace_insn_reloc_others,
};
/* Implementation of linux_target_ops method
   "install_fast_tracepoint_jump_pad".

   Build, at *JUMP_ENTRY in the inferior, the jump pad code that saves
   the full register state, acquires the collection lock at LOCKADDR,
   calls COLLECTOR with TPOINT and a pointer to the saved registers,
   releases the lock, restores the state, re-executes the relocated
   original instruction from TPADDR and branches back after it.
   Returns 0 on success; on failure returns 1 with a message in ERR.  */

static int
aarch64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
                                          CORE_ADDR tpaddr,
                                          CORE_ADDR collector,
                                          CORE_ADDR lockaddr,
                                          ULONGEST orig_size,
                                          CORE_ADDR *jump_entry,
                                          CORE_ADDR *trampoline,
                                          ULONGEST *trampoline_size,
                                          unsigned char *jjump_pad_insn,
                                          ULONGEST *jjump_pad_insn_size,
                                          CORE_ADDR *adjusted_insn_addr,
                                          CORE_ADDR *adjusted_insn_addr_end,
                                          char *err)
{
  uint32_t buf[256];
  uint32_t *p = buf;
  int64_t offset;
  int i;
  uint32_t insn;
  CORE_ADDR buildaddr = *jump_entry;
  struct aarch64_insn_relocation_data insn_data;

  /* We need to save the current state on the stack both to restore it
     later and to collect register values when the tracepoint is hit.

     The saved registers are pushed in a layout that needs to be in sync
     with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c).  Later on
     the supply_fast_tracepoint_registers function will fill in the
     register cache from a pointer to saved registers on the stack we build
     here.

     For simplicity, we set the size of each cell on the stack to 16 bytes.
     This way one cell can hold any register type, from system registers
     to the 128 bit SIMD&FP registers.  Furthermore, the stack pointer
     has to be 16 bytes aligned anyway.

     Note that the CPSR register does not exist on AArch64.  Instead we
     can access system bits describing the process state with the
     MRS/MSR instructions, namely the condition flags.  We save them as
     if they are part of a CPSR register because that's how GDB
     interprets these system bits.  At the moment, only the condition
     flags are saved in CPSR (NZCV).

     Stack layout, each cell is 16 bytes (descending):

     High *-------- SIMD&FP registers from 31 down to 0. --------*
          | q31 ... q0                                           | 32 cells
          *---- General purpose registers from 30 down to 0. ----*
          | x30 ... x0                                           | 31 cells
          *------------- Special purpose registers. -------------*
          | SP | PC | CPSR (NZCV) | FPSR | FPCR                  | 5 cells
          *------------- collecting_t object --------------------*
          | TPIDR_EL0               | struct tracepoint *        |
     Low  *------------------------------------------------------*

     After this stack is set up, we issue a call to the collector, passing
     it the saved registers at (SP + 16).  */

  /* Push SIMD&FP registers on the stack:

       SUB sp, sp, #(32 * 16)

       STP q30, q31, [sp, #(30 * 16)]
       ...
       STP q0, q1, [sp]

     */
  p += emit_sub (p, sp, sp, immediate_operand (32 * 16));
  for (i = 30; i >= 0; i -= 2)
    p += emit_stp_q_offset (p, i, i + 1, sp, i * 16);

  /* Push general purpose registers on the stack.  Note that we do not need
     to push x31 as it represents the xzr register and not the stack
     pointer in a STR instruction.

       SUB sp, sp, #(31 * 16)

       STR x30, [sp, #(30 * 16)]
       ...
       STR x0, [sp]

     */
  p += emit_sub (p, sp, sp, immediate_operand (31 * 16));
  for (i = 30; i >= 0; i -= 1)
    p += emit_str (p, aarch64_register (i, 1), sp,
                   offset_memory_operand (i * 16));

  /* Make space for 5 more cells.

       SUB sp, sp, #(5 * 16)

     */
  p += emit_sub (p, sp, sp, immediate_operand (5 * 16));

  /* Save SP:

       ADD x4, sp, #((32 + 31 + 5) * 16)
       STR x4, [sp, #(4 * 16)]

     */
  p += emit_add (p, x4, sp, immediate_operand ((32 + 31 + 5) * 16));
  p += emit_str (p, x4, sp, offset_memory_operand (4 * 16));

  /* Save PC (tracepoint address):

       MOV x3, #(tpaddr)
       ...
       STR x3, [sp, #(3 * 16)]

     */
  p += emit_mov_addr (p, x3, tpaddr);
  p += emit_str (p, x3, sp, offset_memory_operand (3 * 16));

  /* Save CPSR (NZCV), FPSR and FPCR:

       MRS x2, nzcv
       MRS x1, fpsr
       MRS x0, fpcr

       STR x2, [sp, #(2 * 16)]
       STR x1, [sp, #(1 * 16)]
       STR x0, [sp, #(0 * 16)]

     */
  p += emit_mrs (p, x2, NZCV);
  p += emit_mrs (p, x1, FPSR);
  p += emit_mrs (p, x0, FPCR);
  p += emit_str (p, x2, sp, offset_memory_operand (2 * 16));
  p += emit_str (p, x1, sp, offset_memory_operand (1 * 16));
  p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));

  /* Push the collecting_t object.  It consists of the address of the
     tracepoint and an ID for the current thread.  We get the latter by
     reading the tpidr_el0 system register.  It corresponds to the
     NT_ARM_TLS register accessible with ptrace.

       MOV x0, #(tpoint)
       ...
       MRS x1, tpidr_el0

       STP x0, x1, [sp, #-16]!

     */
  p += emit_mov_addr (p, x0, tpoint);
  p += emit_mrs (p, x1, TPIDR_EL0);
  p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-16));

  /* Spin-lock:

     The shared memory for the lock is at lockaddr.  It will hold zero
     if no-one is holding the lock, otherwise it contains the address of
     the collecting_t object on the stack of the thread which acquired it.

     At this stage, the stack pointer points to this thread's collecting_t
     object.

     We use the following registers:
     - x0: Address of the lock.
     - x1: Pointer to collecting_t object.
     - x2: Scratch register.

       MOV x0, #(lockaddr)
       ...
       MOV x1, sp

       ; Trigger an event local to this core.  So the following WFE
       ; instruction is ignored.
       SEVL
     again:
       ; Wait for an event.  The event is triggered by either the SEVL
       ; or STLR instructions (store release).
       WFE

       ; Atomically read at lockaddr.  This marks the memory location as
       ; exclusive.  This instruction also has memory constraints which
       ; make sure all previous data reads and writes are done before
       ; executing it.
       LDAXR x2, [x0]

       ; Try again if another thread holds the lock.
       CBNZ w2, again

       ; We can lock it!  Write the address of the collecting_t object.
       ; This instruction will fail if the memory location is not marked
       ; as exclusive anymore.  If it succeeds, it will remove the
       ; exclusive mark on the memory location.  This way, if another
       ; thread executes this instruction before us, we will fail and try
       ; all over again.
       STXR w2, x1, [x0]
       CBNZ w2, again

     */
  p += emit_mov_addr (p, x0, lockaddr);
  p += emit_mov (p, x1, register_operand (sp));
  p += emit_sevl (p);
  p += emit_wfe (p);
  p += emit_ldaxr (p, x2, x0);
  p += emit_cb (p, 1, w2, -2 * 4);
  p += emit_stxr (p, w2, x1, x0);
  p += emit_cb (p, 1, x2, -4 * 4);

  /* Call collector (struct tracepoint *, unsigned char *):

       MOV x0, #(tpoint)
       ...

       ; Saved registers start after the collecting_t object.
       ADD x1, sp, #16

       ; We use an intra-procedure-call scratch register.
       MOV ip0, #(collector)
       ...

       ; And call back to C!
       BLR ip0

     */
  p += emit_mov_addr (p, x0, tpoint);
  p += emit_add (p, x1, sp, immediate_operand (16));
  p += emit_mov_addr (p, ip0, collector);
  p += emit_blr (p, ip0);

  /* Release the lock.

       MOV x0, #(lockaddr)
       ...

       ; This instruction is a normal store with memory ordering
       ; constraints.  Thanks to this we do not have to put a data
       ; barrier instruction to make sure all data read and writes are done
       ; before this instruction is executed.  Furthermore, this instruction
       ; will trigger an event, letting other threads know they can grab
       ; the lock.
       STLR xzr, [x0]

     */
  p += emit_mov_addr (p, x0, lockaddr);
  p += emit_stlr (p, xzr, x0);

  /* Free collecting_t object:

       ADD sp, sp, #16

     */
  p += emit_add (p, sp, sp, immediate_operand (16));

  /* Restore CPSR (NZCV), FPSR and FPCR.  And free all special purpose
     registers from the stack.

       LDR x2, [sp, #(2 * 16)]
       LDR x1, [sp, #(1 * 16)]
       LDR x0, [sp, #(0 * 16)]

       MSR NZCV, x2
       MSR FPSR, x1
       MSR FPCR, x0

       ADD sp, sp #(5 * 16)

     */
  p += emit_ldr (p, x2, sp, offset_memory_operand (2 * 16));
  p += emit_ldr (p, x1, sp, offset_memory_operand (1 * 16));
  p += emit_ldr (p, x0, sp, offset_memory_operand (0 * 16));
  p += emit_msr (p, NZCV, x2);
  p += emit_msr (p, FPSR, x1);
  p += emit_msr (p, FPCR, x0);
  p += emit_add (p, sp, sp, immediate_operand (5 * 16));

  /* Pop general purpose registers:

       LDR x0, [sp]
       ...
       LDR x30, [sp, #(30 * 16)]

       ADD sp, sp, #(31 * 16)

     */
  for (i = 0; i <= 30; i += 1)
    p += emit_ldr (p, aarch64_register (i, 1), sp,
                   offset_memory_operand (i * 16));
  p += emit_add (p, sp, sp, immediate_operand (31 * 16));

  /* Pop SIMD&FP registers:

       LDP q0, q1, [sp]
       ...
       LDP q30, q31, [sp, #(30 * 16)]

       ADD sp, sp, #(32 * 16)

     */
  for (i = 0; i <= 30; i += 2)
    p += emit_ldp_q_offset (p, i, i + 1, sp, i * 16);
  p += emit_add (p, sp, sp, immediate_operand (32 * 16));

  /* Write the code into the inferior memory.  */
  append_insns (&buildaddr, p - buf, buf);

  /* Now emit the relocated instruction.  */
  *adjusted_insn_addr = buildaddr;
  target_read_uint32 (tpaddr, &insn);

  insn_data.base.insn_addr = tpaddr;
  insn_data.new_addr = buildaddr;
  insn_data.insn_ptr = buf;

  aarch64_relocate_instruction (insn, &visitor,
                                (struct aarch64_insn_data *) &insn_data);

  /* We may not have been able to relocate the instruction.  */
  if (insn_data.insn_ptr == buf)
    {
      sprintf (err,
               "E.Could not relocate instruction from %s to %s.",
               core_addr_to_string_nz (tpaddr),
               core_addr_to_string_nz (buildaddr));
      return 1;
    }
  else
    {
      append_insns (&buildaddr, insn_data.insn_ptr - buf, buf);
      *adjusted_insn_addr_end = buildaddr;
    }

  /* Go back to the start of the buffer.  */
  p = buf;

  /* Emit a branch back from the jump pad.  */
  offset = (tpaddr + orig_size - buildaddr);
  if (!can_encode_int32 (offset, 28))
    {
      sprintf (err,
               "E.Jump back from jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
               offset);
      return 1;
    }

  p += emit_b (p, 0, offset);
  append_insns (&buildaddr, p - buf, buf);

  /* Give the caller a branch instruction into the jump pad.  */
  offset = (*jump_entry - tpaddr);
  if (!can_encode_int32 (offset, 28))
    {
      sprintf (err,
               "E.Jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
               offset);
      return 1;
    }

  emit_b ((uint32_t *) jjump_pad_insn, 0, offset);
  *jjump_pad_insn_size = 4;

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
2300 /* Helper function writing LEN instructions from START into
2301 current_insn_ptr. */
2304 emit_ops_insns (const uint32_t *start
, int len
)
2306 CORE_ADDR buildaddr
= current_insn_ptr
;
2309 debug_printf ("Adding %d instrucions at %s\n",
2310 len
, paddress (buildaddr
));
2312 append_insns (&buildaddr
, len
, start
);
2313 current_insn_ptr
= buildaddr
;
/* Pop a register from the stack used by the compiled-expression
   bytecode machine (one 16-byte cell per value, post-indexed load).  */

static int
emit_pop (uint32_t *buf, struct aarch64_register rt)
{
  return emit_ldr (buf, rt, sp, postindex_memory_operand (1 * 16));
}
/* Push a register on the stack used by the compiled-expression
   bytecode machine (one 16-byte cell per value, pre-indexed store).  */

static int
emit_push (uint32_t *buf, struct aarch64_register rt)
{
  return emit_str (buf, rt, sp, preindex_memory_operand (-1 * 16));
}
/* Implementation of emit_ops method "emit_prologue".  */

static void
aarch64_emit_prologue (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* This function emit a prologue for the following function prototype:

     enum eval_result_type f (unsigned char *regs,
                              ULONGEST *value);

     The first argument is a buffer of raw registers.  The second
     argument is the result of
     evaluating the expression, which will be set to whatever is on top of
     the stack at the end.

     The stack set up by the prologue is as such:

     High *------------------------------------------------------*
          | LR                                                   |
          | FP                                                   |
          | x1 (ULONGEST *value)                                 |
          | x0 (unsigned char *regs)                             |
     Low  *------------------------------------------------------*

     As we are implementing a stack machine, each opcode can expand the
     stack so we never know how far we are from the data saved by this
     prologue.  In order to be able refer to value and regs later, we save
     the current stack pointer in the frame pointer.  This way, it is not
     clobbered when calling C functions.

     Finally, throughout every operation, we are using register x0 as the
     top of the stack, and x1 as a scratch register.  */

  p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-2 * 16));
  p += emit_str (p, lr, sp, offset_memory_operand (3 * 8));
  p += emit_str (p, fp, sp, offset_memory_operand (2 * 8));
  p += emit_add (p, fp, sp, immediate_operand (2 * 8));

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_epilogue".

   Stores the expression result (x0) through the saved VALUE pointer,
   unwinds the frame built by the prologue, and returns
   expr_eval_no_error.  */

static void
aarch64_emit_epilogue (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Store the result of the expression (x0) in *value.  */
  p += emit_sub (p, x1, fp, immediate_operand (1 * 8));
  p += emit_ldr (p, x1, x1, offset_memory_operand (0));
  p += emit_str (p, x0, x1, offset_memory_operand (0));

  /* Restore the previous state.  */
  p += emit_add (p, sp, fp, immediate_operand (2 * 8));
  p += emit_ldp (p, fp, lr, fp, offset_memory_operand (0));

  /* Return expr_eval_no_error.  */
  p += emit_mov (p, x0, immediate_operand (expr_eval_no_error));
  p += emit_ret (p, lr);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_add".

   Pops the next stack entry into x1 and adds it to the top of stack
   (x0), leaving the result in x0.  */

static void
aarch64_emit_add (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_add (p, x0, x1, register_operand (x0));

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_sub".

   Pops the next stack entry into x1 and computes x1 - x0, leaving the
   result in x0.  */

static void
aarch64_emit_sub (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_sub (p, x0, x1, register_operand (x0));

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_mul".

   Pops the next stack entry into x1 and multiplies it with the top of
   stack (x0), leaving the result in x0.  */

static void
aarch64_emit_mul (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_mul (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_lsh".

   Pops the next stack entry into x1 and shifts it left by the top of
   stack (x0), leaving the result in x0.  */

static void
aarch64_emit_lsh (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_lslv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_rsh_signed".

   Pops the next stack entry into x1 and arithmetic-shifts it right by
   the top of stack (x0), leaving the result in x0.  */

static void
aarch64_emit_rsh_signed (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_asrv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_rsh_unsigned".

   Pops the next stack entry into x1 and logical-shifts it right by the
   top of stack (x0), leaving the result in x0.  */

static void
aarch64_emit_rsh_unsigned (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_lsrv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_ext".

   Sign-extends the low ARG bits of the top of stack (x0) in place.  */

static void
aarch64_emit_ext (int arg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_sbfx (p, x0, x0, 0, arg);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_log_not".  */

static void
aarch64_emit_log_not (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* If the top of the stack is 0, replace it with 1.  Else replace it with
     0.  */
  p += emit_cmp (p, x0, immediate_operand (0));
  p += emit_cset (p, x0, EQ);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_bit_and".

   Pops the next stack entry into x1 and ANDs it with the top of stack
   (x0), leaving the result in x0.  */

static void
aarch64_emit_bit_and (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_and (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_bit_or".

   Pops the next stack entry into x1 and ORs it with the top of stack
   (x0), leaving the result in x0.  */

static void
aarch64_emit_bit_or (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_orr (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_bit_xor".

   Pops the next stack entry into x1 and XORs it with the top of stack
   (x0), leaving the result in x0.  */

static void
aarch64_emit_bit_xor (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_eor (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_bit_not".

   Bitwise-inverts the top of stack (x0) in place.  */

static void
aarch64_emit_bit_not (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mvn (p, x0, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_equal".

   Pop x1 and set x0 to 1 if x0 == x1, else 0 (CSET on the EQ
   condition after the compare).  */

static void
aarch64_emit_equal (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x0, register_operand (x1));
  p += emit_cset (p, x0, EQ);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_less_signed".

   Pop x1 (left operand) and set x0 to 1 if x1 < x0 as signed values,
   else 0.  Note the operand order: the compare is x1 against x0.  */

static void
aarch64_emit_less_signed (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  p += emit_cset (p, x0, LT);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_less_unsigned".

   Pop x1 (left operand) and set x0 to 1 if x1 < x0 as unsigned values
   (LO = unsigned lower), else 0.  */

static void
aarch64_emit_less_unsigned (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  p += emit_cset (p, x0, LO);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_ref".

   Dereference the address in x0, loading SIZE bytes into x0.  Loads
   narrower than 8 bytes go through w0 and are therefore
   zero-extended.  An unsupported SIZE aborts the compilation.  */

static void
aarch64_emit_ref (int size)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  switch (size)
    {
    case 1:
      p += emit_ldrb (p, w0, x0, offset_memory_operand (0));
      break;
    case 2:
      p += emit_ldrh (p, w0, x0, offset_memory_operand (0));
      break;
    case 4:
      p += emit_ldr (p, w0, x0, offset_memory_operand (0));
      break;
    case 8:
      p += emit_ldr (p, x0, x0, offset_memory_operand (0));
      break;
    default:
      /* Unknown size, bail on compilation.  */
      emit_error = 1;
      break;
    }

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_if_goto".

   Emit a conditional jump taken when the top of the stack is nonzero.
   The jump target is unknown at this point, so a NOP is emitted and
   its offset/size are reported back for later patching by
   "write_goto_address".  */

static void
aarch64_emit_if_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* The Z flag is set or cleared here.  */
  p += emit_cmp (p, x0, immediate_operand (0));
  /* This instruction must not change the Z flag.  */
  p += emit_pop (p, x0);
  /* Branch over the next instruction if x0 == 0.  */
  p += emit_bcond (p, EQ, 8);

  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_goto".

   Emit a placeholder for an unconditional jump: a NOP at offset 0 of
   this insn group, to be patched later by "write_goto_address".  */

static void
aarch64_emit_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = 0;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "write_goto_address".

   Patch the previously emitted NOP at FROM with an unconditional
   branch to TO (PC-relative offset TO - FROM).  SIZE is unused: all
   A64 instructions, including B, are 4 bytes.  */

static void
aarch64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  uint32_t insn;

  emit_b (&insn, 0, to - from);
  append_insns (&from, 1, &insn);
}
/* Implementation of emit_ops method "emit_const".

   Materialize the 64-bit constant NUM in x0 (MOV/MOVK sequence via
   emit_mov_addr).  */

static void
aarch64_emit_const (LONGEST num)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov_addr (p, x0, num);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_call".

   Call the function at address FN.  The address goes through ip0
   (the intra-procedure scratch register) so no expression-stack
   register is clobbered; BLR leaves the result in x0 per the AAPCS64
   calling convention.  */

static void
aarch64_emit_call (CORE_ADDR fn)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov_addr (p, ip0, fn);
  p += emit_blr (p, ip0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_reg".

   Fetch the value of raw register REG into x0 by calling the
   get_raw_reg helper with x0 = the saved register buffer (stored two
   slots below the frame pointer by the prologue) and x1 = REG.  */

static void
aarch64_emit_reg (int reg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Set x0 to unsigned char *regs.  */
  p += emit_sub (p, x0, fp, immediate_operand (2 * 8));
  p += emit_ldr (p, x0, x0, offset_memory_operand (0));
  p += emit_mov (p, x1, immediate_operand (reg));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (get_raw_reg_func_addr ());
}
/* Implementation of emit_ops method "emit_pop".

   Pop the top of the expression stack into x0.  */

static void
aarch64_emit_pop (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_stack_flush".

   Push x0 (the cached top of stack) onto the in-memory expression
   stack.  */

static void
aarch64_emit_stack_flush (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_push (p, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_zero_ext".

   Zero-extend the low ARG bits of x0 into the full register, using an
   unsigned bitfield extract starting at bit 0.  */

static void
aarch64_emit_zero_ext (int arg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_ubfx (p, x0, x0, 0, arg);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_swap".

   Exchange x0 (cached top of stack) with the topmost in-memory stack
   slot: load the slot into x1, store x0 there, then move x1 to x0.
   Each stack slot is 16 bytes to keep SP 16-byte aligned.  */

static void
aarch64_emit_swap (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_ldr (p, x1, sp, offset_memory_operand (0 * 16));
  p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
  p += emit_mov (p, x0, register_operand (x1));

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_stack_adjust".

   Discard N expression-stack entries by bumping SP (each slot is 16
   bytes).  */

static void
aarch64_emit_stack_adjust (int n)
{
  /* This is not needed with our design.  */
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_add (p, sp, sp, immediate_operand (n * 16));

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_int_call_1".

   Call FN with the single integer argument ARG1 in x0; the return
   value comes back in x0 and becomes the new top of stack.  */

static void
aarch64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov (p, x0, immediate_operand (arg1));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (fn);
}
/* Implementation of emit_ops method "emit_void_call_2".

   Call the void function FN with two arguments: ARG1 in x0 and the
   current top of stack in x1.  x0 is saved on the stack first and
   restored afterwards, since the callee returns nothing useful.  */

static void
aarch64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Push x0 on the stack.  */
  aarch64_emit_stack_flush ();

  /* Setup arguments for the function call:

     x0: arg1
     x1: top of the stack

     MOV x1, x0
     MOV x0, #arg1  */

  p += emit_mov (p, x1, register_operand (x0));
  p += emit_mov (p, x0, immediate_operand (arg1));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (fn);

  /* Restore x0.  */
  aarch64_emit_pop ();
}
/* Implementation of emit_ops method "emit_eq_goto".

   Fused compare-and-branch: pop x1, and jump when x1 == x0.  The
   jump itself is a NOP to be patched by "write_goto_address"; the
   B.NE skips it when the operands differ.  */

static void
aarch64_emit_eq_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 != x1.  */
  p += emit_bcond (p, NE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_ne_goto".

   Fused compare-and-branch: pop x1, and jump when x1 != x0.  The
   B.EQ skips the patched branch when the operands are equal.  */

static void
aarch64_emit_ne_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 == x1.  */
  p += emit_bcond (p, EQ, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_lt_goto".

   Fused compare-and-branch: pop x1 (left operand), and jump when
   x1 < x0 signed.  The B.GE skips the patched branch when
   x1 >= x0.  */

static void
aarch64_emit_lt_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 >= x0.  */
  p += emit_bcond (p, GE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_le_goto".

   Fused compare-and-branch: pop x1 (left operand), and jump when
   x1 <= x0 signed.  The B.GT skips the patched branch when
   x1 > x0.  */

static void
aarch64_emit_le_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 > x0.  */
  p += emit_bcond (p, GT, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_gt_goto".

   Fused compare-and-branch: pop x1 (left operand), and jump when
   x1 > x0 signed.  The B.LE skips the patched branch when
   x1 <= x0.  */

static void
aarch64_emit_gt_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 <= x0.  */
  p += emit_bcond (p, LE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_ge_goto".  (The function
   name "ge_got" is a historical typo; it is referenced by name in
   aarch64_emit_ops_impl, so renaming it must be done in both
   places.)

   Fused compare-and-branch: pop x1 (left operand), and jump when
   x1 >= x0 signed.  The B.LT skips the patched branch when
   x1 < x0.  */

static void
aarch64_emit_ge_got (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 < x0.  */
  p += emit_bcond (p, LT, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
/* Table of bytecode-to-native compilation callbacks for AArch64.
   Field order is positional and must match struct emit_ops.  */

static struct emit_ops aarch64_emit_ops_impl =
{
  aarch64_emit_prologue,
  aarch64_emit_epilogue,
  aarch64_emit_add,
  aarch64_emit_sub,
  aarch64_emit_mul,
  aarch64_emit_lsh,
  aarch64_emit_rsh_signed,
  aarch64_emit_rsh_unsigned,
  aarch64_emit_ext,
  aarch64_emit_log_not,
  aarch64_emit_bit_and,
  aarch64_emit_bit_or,
  aarch64_emit_bit_xor,
  aarch64_emit_bit_not,
  aarch64_emit_equal,
  aarch64_emit_less_signed,
  aarch64_emit_less_unsigned,
  aarch64_emit_ref,
  aarch64_emit_if_goto,
  aarch64_emit_goto,
  aarch64_write_goto_address,
  aarch64_emit_const,
  aarch64_emit_call,
  aarch64_emit_reg,
  aarch64_emit_pop,
  aarch64_emit_stack_flush,
  aarch64_emit_zero_ext,
  aarch64_emit_swap,
  aarch64_emit_stack_adjust,
  aarch64_emit_int_call_1,
  aarch64_emit_void_call_2,
  aarch64_emit_eq_goto,
  aarch64_emit_ne_goto,
  aarch64_emit_lt_goto,
  aarch64_emit_le_goto,
  aarch64_emit_gt_goto,
  aarch64_emit_ge_got,	/* emit_ge_goto (function name has a typo).  */
};
/* Implementation of linux_target_ops method "emit_ops".

   Return the AArch64 bytecode compilation callbacks.  */

static struct emit_ops *
aarch64_emit_ops (void)
{
  return &aarch64_emit_ops_impl;
}
/* Implementation of linux_target_ops method
   "get_min_fast_tracepoint_insn_len".

   A fast tracepoint is placed over a single 4-byte A64 instruction.  */

static int
aarch64_get_min_fast_tracepoint_insn_len (void)
{
  return 4;
}
/* Implementation of linux_target_ops method "supports_range_stepping".

   Range stepping (vCont;r) is always available on AArch64.  */

static int
aarch64_supports_range_stepping (void)
{
  return 1;
}
3057 /* Implementation of linux_target_ops method "sw_breakpoint_from_kind". */
3059 static const gdb_byte
*
3060 aarch64_sw_breakpoint_from_kind (int kind
, int *size
)
3062 if (is_64bit_tdesc ())
3064 *size
= aarch64_breakpoint_len
;
3065 return aarch64_breakpoint
;
3068 return arm_sw_breakpoint_from_kind (kind
, size
);
3071 /* Implementation of target ops method "breakpoint_kind_from_pc". */
3074 aarch64_target::breakpoint_kind_from_pc (CORE_ADDR
*pcptr
)
3076 if (is_64bit_tdesc ())
3077 return aarch64_breakpoint_len
;
3079 return arm_breakpoint_kind_from_pc (pcptr
);
3082 /* Implementation of the target ops method
3083 "breakpoint_kind_from_current_state". */
3086 aarch64_target::breakpoint_kind_from_current_state (CORE_ADDR
*pcptr
)
3088 if (is_64bit_tdesc ())
3089 return aarch64_breakpoint_len
;
3091 return arm_breakpoint_kind_from_current_state (pcptr
);
/* Support for hardware single step.

   AArch64 always has PTRACE_SINGLESTEP support.  */

static int
aarch64_supports_hardware_single_step (void)
{
  return 1;
}
/* Remaining linux_target_ops callbacks for AArch64; fields are
   positional and must match struct linux_target_ops.  (Earlier slots
   have already been migrated to aarch64_target method overrides.)  */

struct linux_target_ops the_low_target =
{
  aarch64_sw_breakpoint_from_kind,
  NULL, /* get_next_pcs */
  0,    /* decr_pc_after_break */
  aarch64_breakpoint_at,
  aarch64_supports_z_point_type,
  aarch64_insert_point,
  aarch64_remove_point,
  aarch64_stopped_by_watchpoint,
  aarch64_stopped_data_address,
  NULL, /* collect_ptrace_register */
  NULL, /* supply_ptrace_register */
  aarch64_linux_siginfo_fixup,
  aarch64_linux_new_process,
  aarch64_linux_delete_process,
  aarch64_linux_new_thread,
  aarch64_linux_delete_thread,
  aarch64_linux_new_fork,
  aarch64_linux_prepare_to_resume,
  NULL, /* process_qsupported */
  aarch64_supports_tracepoints,
  aarch64_get_thread_area,
  aarch64_install_fast_tracepoint_jump_pad,
  aarch64_emit_ops,
  aarch64_get_min_fast_tracepoint_insn_len,
  aarch64_supports_range_stepping,
  aarch64_supports_hardware_single_step,
  aarch64_get_syscall_trapinfo,
};
/* The linux target ops object.  */

linux_process_target *the_linux_target = &the_aarch64_target;
/* One-time architecture initialization: set up the AArch32
   compatibility layer and register the regset tables (including the
   SVE variants) used for register transfer.  */

void
initialize_low_arch (void)
{
  initialize_low_arch_aarch32 ();

  initialize_regsets_info (&aarch64_regsets_info);
  initialize_regsets_info (&aarch64_sve_regsets_info);
}