/* GNU/Linux/AArch64 specific low level interface, for the remote server for
   GDB.

   Copyright (C) 2009-2020 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/aarch64-linux.h"
#include "nat/aarch64-linux-hw-point.h"
#include "arch/aarch64-insn.h"
#include "linux-aarch32-low.h"
#include "elf/common.h"
#include "ax.h"
#include "tracepoint.h"
#include "debug.h"

#include <signal.h>
#include <sys/user.h>
#include "nat/gdb_ptrace.h"
#include <asm/ptrace.h>
#include <inttypes.h>
#include <endian.h>
#include <sys/uio.h>

#include "gdb_proc_service.h"
#include "arch/aarch64.h"
#include "linux-aarch32-tdesc.h"
#include "linux-aarch64-tdesc.h"
#include "nat/aarch64-sve-linux-ptrace.h"
#include "tdesc.h"

/* Linux target op definitions for the AArch64 architecture.  */

class aarch64_target : public linux_process_target
{
};

/* The singleton target ops object.  */

static aarch64_target the_aarch64_target;

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  /* Hardware breakpoint/watchpoint data.
     The reason for them to be per-process rather than per-thread is
     due to the lack of information in the gdbserver environment;
     gdbserver is not told whether a requested hardware
     breakpoint/watchpoint is thread specific or not, so it has to set
     each hw bp/wp for every thread in the current process.  The
     higher level bp/wp management in gdb will resume a thread if a hw
     bp/wp trap is not expected for it.  Since the hw bp/wp setting is
     the same for each thread, it is reasonable for the data to live
     here.  */
  struct aarch64_debug_reg_state debug_reg_state;
};

/* Return true if the size of register 0 is 8 bytes.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

/* Return true if the target description in the regcache has the SVE
   feature, and thus contains the SVE registers.  */

static bool
is_sve_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return tdesc_contains_feature (regcache->tdesc, "org.gnu.gdb.aarch64.sve");
}
static void
aarch64_fill_gregset (struct regcache *regcache, void *buf)
{
  struct user_pt_regs *regset = (struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    collect_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  collect_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  collect_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  collect_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}

static void
aarch64_store_gregset (struct regcache *regcache, const void *buf)
{
  const struct user_pt_regs *regset = (const struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    supply_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  supply_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  supply_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  supply_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}

static void
aarch64_fill_fpregset (struct regcache *regcache, void *buf)
{
  struct user_fpsimd_state *regset = (struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    collect_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  collect_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  collect_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}

static void
aarch64_store_fpregset (struct regcache *regcache, const void *buf)
{
  const struct user_fpsimd_state *regset
    = (const struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    supply_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  supply_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  supply_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}

/* Store the pauth registers to regcache.  */

static void
aarch64_store_pauthregset (struct regcache *regcache, const void *buf)
{
  uint64_t *pauth_regset = (uint64_t *) buf;
  int pauth_base = find_regno (regcache->tdesc, "pauth_dmask");

  if (pauth_base == 0)
    return;

  supply_register (regcache, AARCH64_PAUTH_DMASK_REGNUM (pauth_base),
                   &pauth_regset[0]);
  supply_register (regcache, AARCH64_PAUTH_CMASK_REGNUM (pauth_base),
                   &pauth_regset[1]);
}

/* Implementation of linux_target_ops method "get_pc".  */

static CORE_ADDR
aarch64_get_pc (struct regcache *regcache)
{
  if (register_size (regcache->tdesc, 0) == 8)
    return linux_get_pc_64bit (regcache);
  else
    return linux_get_pc_32bit (regcache);
}

/* Implementation of linux_target_ops method "set_pc".  */

static void
aarch64_set_pc (struct regcache *regcache, CORE_ADDR pc)
{
  if (register_size (regcache->tdesc, 0) == 8)
    linux_set_pc_64bit (regcache, pc);
  else
    linux_set_pc_32bit (regcache, pc);
}

#define aarch64_breakpoint_len 4

/* AArch64 BRK software debug mode instruction.
   This instruction needs to match gdb/aarch64-tdep.c
   (aarch64_default_breakpoint).  */
static const gdb_byte aarch64_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
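
/* Read as a little-endian 32-bit word, these bytes form 0xd4200000, which
   disassembles as BRK #0.  */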
/* Implementation of linux_target_ops method "breakpoint_at".  */

static int
aarch64_breakpoint_at (CORE_ADDR where)
{
  if (is_64bit_tdesc ())
    {
      gdb_byte insn[aarch64_breakpoint_len];

      the_target->read_memory (where, (unsigned char *) &insn,
                               aarch64_breakpoint_len);
      if (memcmp (insn, aarch64_breakpoint, aarch64_breakpoint_len) == 0)
        return 1;

      return 0;
    }
  else
    return arm_breakpoint_at (where);
}

static void
aarch64_init_debug_reg_state (struct aarch64_debug_reg_state *state)
{
  int i;

  for (i = 0; i < AARCH64_HBP_MAX_NUM; ++i)
    {
      state->dr_addr_bp[i] = 0;
      state->dr_ctrl_bp[i] = 0;
      state->dr_ref_count_bp[i] = 0;
    }

  for (i = 0; i < AARCH64_HWP_MAX_NUM; ++i)
    {
      state->dr_addr_wp[i] = 0;
      state->dr_ctrl_wp[i] = 0;
      state->dr_ref_count_wp[i] = 0;
    }
}

/* Return the pointer to the debug register state structure in the
   current process' arch-specific data area.  */

struct aarch64_debug_reg_state *
aarch64_get_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}

/* Implementation of linux_target_ops method "supports_z_point_type".  */

static int
aarch64_supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_READ_WP:
    case Z_PACKET_ACCESS_WP:
      return 1;
    default:
      return 0;
    }
}

/* Implementation of linux_target_ops method "insert_point".

   It actually only records the info of the to-be-inserted bp/wp;
   the actual insertion will happen when threads are resumed.  */

static int
aarch64_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
                      int len, struct raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "insert_point on entry (addr=0x%08lx, len=%d)\n",
             (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  if (targ_type != hw_execute)
    {
      if (aarch64_linux_region_ok_for_watchpoint (addr, len))
        ret = aarch64_handle_watchpoint (targ_type, addr, len,
                                         1 /* is_insert */, state);
      else
        ret = -1;
    }
  else
    {
      if (len == 3)
        {
          /* A LEN of 3 means the breakpoint is set on a 32-bit Thumb
             instruction.  Set it to 2 to correctly encode the length
             bit mask in the hardware breakpoint/watchpoint control
             register.  */
          len = 2;
        }
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
                                       1 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "insert_point", addr, len,
                                  targ_type);

  return ret;
}

/* Implementation of linux_target_ops method "remove_point".

   It actually only records the info of the to-be-removed bp/wp;
   the actual removal will be done when threads are resumed.  */

static int
aarch64_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
                      int len, struct raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "remove_point on entry (addr=0x%08lx, len=%d)\n",
             (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  /* Set up state pointers.  */
  if (targ_type != hw_execute)
    ret = aarch64_handle_watchpoint (targ_type, addr, len,
                                     0 /* is_insert */, state);
  else
    {
      if (len == 3)
        {
          /* A LEN of 3 means the breakpoint is set on a 32-bit Thumb
             instruction.  Set it to 2 to correctly encode the length
             bit mask in the hardware breakpoint/watchpoint control
             register.  */
          len = 2;
        }
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
                                       0 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "remove_point", addr, len,
                                  targ_type);

  return ret;
}

/* Implementation of linux_target_ops method "stopped_data_address".  */

static CORE_ADDR
aarch64_stopped_data_address (void)
{
  siginfo_t siginfo;
  int pid, i;
  struct aarch64_debug_reg_state *state;

  pid = lwpid_of (current_thread);

  /* Get the siginfo.  */
  if (ptrace (PTRACE_GETSIGINFO, pid, NULL, &siginfo) != 0)
    return (CORE_ADDR) 0;

  /* Need to be a hardware breakpoint/watchpoint trap.  */
  if (siginfo.si_signo != SIGTRAP
      || (siginfo.si_code & 0xffff) != 0x0004 /* TRAP_HWBKPT */)
    return (CORE_ADDR) 0;

  /* Check if the address matches any watched address.  */
  state = aarch64_get_debug_reg_state (pid_of (current_thread));
  for (i = aarch64_num_wp_regs - 1; i >= 0; --i)
    {
      const unsigned int offset
        = aarch64_watchpoint_offset (state->dr_ctrl_wp[i]);
      const unsigned int len = aarch64_watchpoint_length (state->dr_ctrl_wp[i]);
      const CORE_ADDR addr_trap = (CORE_ADDR) siginfo.si_addr;
      const CORE_ADDR addr_watch = state->dr_addr_wp[i] + offset;
      const CORE_ADDR addr_watch_aligned = align_down (state->dr_addr_wp[i], 8);
      const CORE_ADDR addr_orig = state->dr_addr_orig_wp[i];

      if (state->dr_ref_count_wp[i]
          && DR_CONTROL_ENABLED (state->dr_ctrl_wp[i])
          && addr_trap >= addr_watch_aligned
          && addr_trap < addr_watch + len)
        {
          /* ADDR_TRAP reports the first address of the memory range
             accessed by the CPU, regardless of what was the memory
             range watched.  Thus, a large CPU access that straddles
             the ADDR_WATCH..ADDR_WATCH+LEN range may result in an
             ADDR_TRAP that is lower than the
             ADDR_WATCH..ADDR_WATCH+LEN range.  E.g.:

             addr: |   4   |   5   |   6   |   7   |   8   |
                                   |---- range watched ----|
                   |----------- range accessed ------------|

             In this case, ADDR_TRAP will be 4.

             To match a watchpoint known to GDB core, we must never
             report *ADDR_P outside of any ADDR_WATCH..ADDR_WATCH+LEN
             range.  ADDR_WATCH <= ADDR_TRAP < ADDR_ORIG is a false
             positive on kernels older than 4.10.  See PR
             external/20207.  */
          return addr_orig;
        }
    }

  return (CORE_ADDR) 0;
}

/* Implementation of linux_target_ops method "stopped_by_watchpoint".  */

static int
aarch64_stopped_by_watchpoint (void)
{
  if (aarch64_stopped_data_address () != 0)
    return 1;
  else
    return 0;
}

/* Fetch the thread-local storage pointer for libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
                    lwpid_t lwpid, int idx, void **base)
{
  return aarch64_ps_get_thread_area (ph, lwpid, idx, base,
                                     is_64bit_tdesc ());
}

/* Implementation of linux_target_ops method "siginfo_fixup".  */

static int
aarch64_linux_siginfo_fixup (siginfo_t *native, gdb_byte *inf, int direction)
{
  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    {
      if (direction == 0)
        aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo *) inf,
                                             native);
      else
        aarch64_siginfo_from_compat_siginfo (native,
                                             (struct compat_siginfo *) inf);

      return 1;
    }

  return 0;
}

/* Implementation of linux_target_ops method "new_process".  */

static struct arch_process_info *
aarch64_linux_new_process (void)
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  aarch64_init_debug_reg_state (&info->debug_reg_state);

  return info;
}

/* Implementation of linux_target_ops method "delete_process".  */

static void
aarch64_linux_delete_process (struct arch_process_info *info)
{
  xfree (info);
}

/* Implementation of linux_target_ops method "linux_new_fork".  */

static void
aarch64_linux_new_fork (struct process_info *parent,
                        struct process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
              && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
              && child->priv->arch_private != NULL);

  /* Linux kernels before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     inherit hardware debug registers from the parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked-off process.  Copy the debug register mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug register mirrors will be zeroed
     in the end, before detaching the forked-off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}

/* Matches HWCAP_PACA in kernel header arch/arm64/include/uapi/asm/hwcap.h.  */
#define AARCH64_HWCAP_PACA (1 << 30)

/* Implementation of linux_target_ops method "arch_setup".  */

static void
aarch64_arch_setup (void)
{
  unsigned int machine;
  int is_elf64;
  int tid;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (is_elf64)
    {
      uint64_t vq = aarch64_sve_get_vq (tid);
      unsigned long hwcap = linux_get_hwcap (8);
      bool pauth_p = hwcap & AARCH64_HWCAP_PACA;

      current_process ()->tdesc = aarch64_linux_read_description (vq, pauth_p);
    }
  else
    current_process ()->tdesc = aarch32_linux_read_description ();

  aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread));
}

/* Wrapper for aarch64_sve_regs_copy_to_reg_buf.  */

static void
aarch64_sve_regs_copy_to_regcache (struct regcache *regcache, const void *buf)
{
  return aarch64_sve_regs_copy_to_reg_buf (regcache, buf);
}

/* Wrapper for aarch64_sve_regs_copy_from_reg_buf.  */

static void
aarch64_sve_regs_copy_from_regcache (struct regcache *regcache, void *buf)
{
  return aarch64_sve_regs_copy_from_reg_buf (regcache, buf);
}

static struct regset_info aarch64_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_FPREGSET,
    sizeof (struct user_fpsimd_state), FP_REGS,
    aarch64_fill_fpregset, aarch64_store_fpregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
    AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
    NULL, aarch64_store_pauthregset },
  NULL_REGSET
};

static struct regsets_info aarch64_regsets_info =
  {
    aarch64_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

static struct regs_info regs_info_aarch64 =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs */
    &aarch64_regsets_info,
  };

static struct regset_info aarch64_sve_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_SVE,
    SVE_PT_SIZE (AARCH64_MAX_SVE_VQ, SVE_PT_REGS_SVE), EXTENDED_REGS,
    aarch64_sve_regs_copy_from_regcache, aarch64_sve_regs_copy_to_regcache },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
    AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
    NULL, aarch64_store_pauthregset },
  NULL_REGSET
};

static struct regsets_info aarch64_sve_regsets_info =
  {
    aarch64_sve_regsets, /* regsets.  */
    0, /* num_regsets.  */
    NULL, /* disabled_regsets.  */
  };

static struct regs_info regs_info_aarch64_sve =
  {
    NULL, /* regset_bitmap.  */
    NULL, /* usrregs.  */
    &aarch64_sve_regsets_info,
  };

/* Implementation of linux_target_ops method "regs_info".  */

static const struct regs_info *
aarch64_regs_info (void)
{
  if (!is_64bit_tdesc ())
    return &regs_info_aarch32;

  if (is_sve_tdesc ())
    return &regs_info_aarch64_sve;

  return &regs_info_aarch64;
}

/* Implementation of linux_target_ops method "supports_tracepoints".  */

static int
aarch64_supports_tracepoints (void)
{
  if (current_thread == NULL)
    return 1;
  else
    {
      /* We don't support tracepoints on aarch32 now.  */
      return is_64bit_tdesc ();
    }
}

/* Implementation of linux_target_ops method "get_thread_area".  */

static int
aarch64_get_thread_area (int lwpid, CORE_ADDR *addrp)
{
  struct iovec iovec;
  uint64_t reg;

  iovec.iov_base = &reg;
  iovec.iov_len = sizeof (reg);

  if (ptrace (PTRACE_GETREGSET, lwpid, NT_ARM_TLS, &iovec) != 0)
    return -1;

  *addrp = reg;

  return 0;
}

/* Implementation of linux_target_ops method "get_syscall_trapinfo".  */

static void
aarch64_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      collect_register_by_name (regcache, "x8", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "r7", sysno);
}
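
/* Linux passes the syscall number in x8 on AArch64 and in r7 for 32-bit
   ARM (EABI) inferiors, which is why the two register names above
   differ.  */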
/* List of condition codes that we need.  */

enum aarch64_condition_codes
{
  EQ = 0x0,
  NE = 0x1,
  LO = 0x3,
  GE = 0xa,
  LT = 0xb,
  GT = 0xc,
  LE = 0xd,
};

enum aarch64_operand_type
{
  OPERAND_IMMEDIATE,
  OPERAND_REGISTER,
};

/* Representation of an operand.  At this time, it only supports register
   and immediate types.  */

struct aarch64_operand
{
  /* Type of the operand.  */
  enum aarch64_operand_type type;

  /* Value of the operand according to the type.  */
  union
    {
      uint32_t imm;
      struct aarch64_register reg;
    };
};

/* List of registers that we are currently using; we can add more here as
   we need to use them.  */

/* General purpose scratch registers (64 bit).  */
static const struct aarch64_register x0 = { 0, 1 };
static const struct aarch64_register x1 = { 1, 1 };
static const struct aarch64_register x2 = { 2, 1 };
static const struct aarch64_register x3 = { 3, 1 };
static const struct aarch64_register x4 = { 4, 1 };

/* General purpose scratch registers (32 bit).  */
static const struct aarch64_register w0 = { 0, 0 };
static const struct aarch64_register w2 = { 2, 0 };

/* Intra-procedure scratch registers.  */
static const struct aarch64_register ip0 = { 16, 1 };

/* Special purpose registers.  */
static const struct aarch64_register fp = { 29, 1 };
static const struct aarch64_register lr = { 30, 1 };
static const struct aarch64_register sp = { 31, 1 };
static const struct aarch64_register xzr = { 31, 1 };

/* Dynamically allocate a new register.  If we know the register
   statically, we should make it a global as above instead of using this
   function.  */

static struct aarch64_register
aarch64_register (unsigned num, int is64)
{
  return (struct aarch64_register) { num, is64 };
}
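
/* For example, aarch64_register (19, 1) denotes the 64-bit register x19.
   This is handy when the register number is only known at runtime, as in
   the instruction relocation code further below.  */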
/* Helper function to create a register operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, register_operand (x1));  */

static struct aarch64_operand
register_operand (struct aarch64_register reg)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_REGISTER;
  operand.reg = reg;

  return operand;
}

/* Helper function to create an immediate operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, immediate_operand (12));  */

static struct aarch64_operand
immediate_operand (uint32_t imm)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_IMMEDIATE;
  operand.imm = imm;

  return operand;
}

/* Helper function to create an offset memory operand.

   For example:
   p += emit_ldr (p, x0, sp, offset_memory_operand (16));  */

static struct aarch64_memory_operand
offset_memory_operand (int32_t offset)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_OFFSET, offset };
}

/* Helper function to create a pre-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, preindex_memory_operand (16));  */

static struct aarch64_memory_operand
preindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_PREINDEX, index };
}

/* Helper function to create a post-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, postindex_memory_operand (16));  */

static struct aarch64_memory_operand
postindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_POSTINDEX, index };
}
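
/* These three operand kinds correspond to the standard AArch64 addressing
   modes: offset_memory_operand (off) is [rn, #off],
   preindex_memory_operand (idx) is [rn, #idx]!, and
   postindex_memory_operand (idx) is [rn], #idx.  */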
/* System control registers.  These special registers can be written and
   read with the MRS and MSR instructions.

   - NZCV: Condition flags.  GDB refers to this register under the CPSR
     name.
   - FPSR: Floating-point status register.
   - FPCR: Floating-point control register.
   - TPIDR_EL0: Software thread ID register.  */

enum aarch64_system_control_registers
{
  /*          op0           op1           crn          crm          op2  */
  NZCV =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
  FPSR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
  FPCR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
  TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
};

/* Write a BLR instruction into *BUF.

     BLR rn

   RN is the register to branch to.  */

static int
emit_blr (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, BLR | ENCODE (rn.num, 5, 5));
}

/* Write a RET instruction into *BUF.

     RET rn

   RN is the register to branch to.  */

static int
emit_ret (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, RET | ENCODE (rn.num, 5, 5));
}

static int
emit_load_store_pair (uint32_t *buf, enum aarch64_opcodes opcode,
                      struct aarch64_register rt,
                      struct aarch64_register rt2,
                      struct aarch64_register rn,
                      struct aarch64_memory_operand operand)
{
  uint32_t opc;
  uint32_t pre_index;
  uint32_t write_back;

  if (rt.is64)
    opc = ENCODE (2, 2, 30);
  else
    opc = ENCODE (0, 2, 30);

  switch (operand.type)
    {
    case MEMORY_OPERAND_OFFSET:
      {
        pre_index = ENCODE (1, 1, 24);
        write_back = ENCODE (0, 1, 23);
        break;
      }
    case MEMORY_OPERAND_POSTINDEX:
      {
        pre_index = ENCODE (0, 1, 24);
        write_back = ENCODE (1, 1, 23);
        break;
      }
    case MEMORY_OPERAND_PREINDEX:
      {
        pre_index = ENCODE (1, 1, 24);
        write_back = ENCODE (1, 1, 23);
        break;
      }
    default:
      return 0;
    }

  return aarch64_emit_insn (buf, opcode | opc | pre_index | write_back
                            | ENCODE (operand.index >> 3, 7, 15)
                            | ENCODE (rt2.num, 5, 10)
                            | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}
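
/* Note that ENCODE (operand.index >> 3, 7, 15) above scales the immediate
   by 8, so a load/store pair of X registers takes a 7-bit signed offset in
   multiples of 8, i.e. the -512 .. 504 range documented for STP/LDP
   below.  */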
/* Write a STP instruction into *BUF.

     STP rt, rt2, [rn, #offset]
     STP rt, rt2, [rn, #index]!
     STP rt, rt2, [rn], #index

   RT and RT2 are the registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to the
   -512 .. 504 range (7 bits << 3).  */

static int
emit_stp (uint32_t *buf, struct aarch64_register rt,
          struct aarch64_register rt2, struct aarch64_register rn,
          struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, STP, rt, rt2, rn, operand);
}

/* Write a LDP instruction into *BUF.

     LDP rt, rt2, [rn, #offset]
     LDP rt, rt2, [rn, #index]!
     LDP rt, rt2, [rn], #index

   RT and RT2 are the registers to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to the
   -512 .. 504 range (7 bits << 3).  */

static int
emit_ldp (uint32_t *buf, struct aarch64_register rt,
          struct aarch64_register rt2, struct aarch64_register rn,
          struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, LDP, rt, rt2, rn, operand);
}

/* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.

     LDP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to the
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_ldp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
                   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, LDP_SIMD_VFP | opc | pre_index
                            | ENCODE (offset >> 4, 7, 15)
                            | ENCODE (rt2, 5, 10)
                            | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

/* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.

     STP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to the
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_stp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
                   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, STP_SIMD_VFP | opc | pre_index
                            | ENCODE (offset >> 4, 7, 15)
                            | ENCODE (rt2, 5, 10)
                            | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}
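
/* The two Q-register pair variants above scale the 7-bit immediate by 16
   (offset >> 4) instead of 8, hence the wider -1024 .. 1008 range; the
   offset must therefore be a multiple of 16.  */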
/* Write a LDRH instruction into *BUF.

     LDRH wt, [xn, #offset]
     LDRH wt, [xn, #index]!
     LDRH wt, [xn], #index

   RT is the register to load into.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to the
   0 .. 32760 range (12 bits << 3).  */

static int
emit_ldrh (uint32_t *buf, struct aarch64_register rt,
           struct aarch64_register rn,
           struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 1, LDR, rt, rn, operand);
}

/* Write a LDRB instruction into *BUF.

     LDRB wt, [xn, #offset]
     LDRB wt, [xn, #index]!
     LDRB wt, [xn], #index

   RT is the register to load into.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to the
   0 .. 32760 range (12 bits << 3).  */

static int
emit_ldrb (uint32_t *buf, struct aarch64_register rt,
           struct aarch64_register rn,
           struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 0, LDR, rt, rn, operand);
}

/* Write a STR instruction into *BUF.

     STR rt, [rn, #offset]
     STR rt, [rn, #index]!
     STR rt, [rn], #index

   RT is the register to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to the
   0 .. 32760 range (12 bits << 3).  */

static int
emit_str (uint32_t *buf, struct aarch64_register rt,
          struct aarch64_register rn,
          struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, rt.is64 ? 3 : 2, STR, rt, rn, operand);
}

/* Helper function emitting an exclusive load or store instruction.  */

static int
emit_load_store_exclusive (uint32_t *buf, uint32_t size,
                           enum aarch64_opcodes opcode,
                           struct aarch64_register rs,
                           struct aarch64_register rt,
                           struct aarch64_register rt2,
                           struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, opcode | ENCODE (size, 2, 30)
                            | ENCODE (rs.num, 5, 16) | ENCODE (rt2.num, 5, 10)
                            | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}

/* Write a LDAXR instruction into *BUF.

     LDAXR rt, [xn]

   RT is the destination register.
   RN is the base address register.  */

static int
emit_ldaxr (uint32_t *buf, struct aarch64_register rt,
            struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, LDAXR, xzr, rt,
                                    xzr, rn);
}

/* Write a STXR instruction into *BUF.

     STXR ws, rt, [xn]

   RS is the result register; it indicates whether the store succeeded.
   RT is the register to store.
   RN is the base address register.  */

static int
emit_stxr (uint32_t *buf, struct aarch64_register rs,
           struct aarch64_register rt, struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STXR, rs, rt,
                                    xzr, rn);
}

/* Write a STLR instruction into *BUF.

     STLR rt, [xn]

   RT is the register to store.
   RN is the base address register.  */

static int
emit_stlr (uint32_t *buf, struct aarch64_register rt,
           struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STLR, xzr, rt,
                                    xzr, rn);
}

/* Helper function for data processing instructions with register sources.  */

static int
emit_data_processing_reg (uint32_t *buf, uint32_t opcode,
                          struct aarch64_register rd,
                          struct aarch64_register rn,
                          struct aarch64_register rm)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, opcode | size | ENCODE (rm.num, 5, 16)
                            | ENCODE (rn.num, 5, 5) | ENCODE (rd.num, 5, 0));
}

/* Helper function for data processing instructions taking either a register
   or an immediate as their source operand.  */

static int
emit_data_processing (uint32_t *buf, enum aarch64_opcodes opcode,
                      struct aarch64_register rd,
                      struct aarch64_register rn,
                      struct aarch64_operand operand)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  /* The opcode is different for register and immediate source operands.  */
  uint32_t operand_opcode;

  if (operand.type == OPERAND_IMMEDIATE)
    {
      /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (8, 4, 25);

      return aarch64_emit_insn (buf, opcode | operand_opcode | size
                                | ENCODE (operand.imm, 12, 10)
                                | ENCODE (rn.num, 5, 5)
                                | ENCODE (rd.num, 5, 0));
    }
  else
    {
      /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (5, 4, 25);

      return emit_data_processing_reg (buf, opcode | operand_opcode, rd,
                                       rn, operand.reg);
    }
}

/* Write an ADD instruction into *BUF.

     ADD rd, rn, #imm
     ADD rd, rn, rm

   This function handles both an immediate and register add.

   RD is the destination register.
   RN is the input register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_add (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, ADD, rd, rn, operand);
}

/* Write a SUB instruction into *BUF.

     SUB rd, rn, #imm
     SUB rd, rn, rm

   This function handles both an immediate and register sub.

   RD is the destination register.
   RN is the input register.
   OPERAND is the value to subtract from RN, either of type
   OPERAND_IMMEDIATE or OPERAND_REGISTER.  */

static int
emit_sub (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUB, rd, rn, operand);
}

/* Write a MOV instruction into *BUF.

     MOV rd, #imm
     MOV rd, rm

   This function handles both a wide immediate move and a register move,
   with the condition that the source register is not xzr.  xzr and the
   stack pointer share the same encoding and this function only supports
   the stack pointer.

   RD is the destination register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_mov (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_operand operand)
{
  if (operand.type == OPERAND_IMMEDIATE)
    {
      uint32_t size = ENCODE (rd.is64, 1, 31);
      /* Do not shift the immediate.  */
      uint32_t shift = ENCODE (0, 2, 21);

      return aarch64_emit_insn (buf, MOV | size | shift
                                | ENCODE (operand.imm, 16, 5)
                                | ENCODE (rd.num, 5, 0));
    }
  else
    return emit_add (buf, rd, operand.reg, immediate_operand (0));
}

/* Write a MOVK instruction into *BUF.

     MOVK rd, #imm, lsl #shift

   RD is the destination register.
   IMM is the immediate.
   SHIFT is the logical shift left to apply to IMM.  */

static int
emit_movk (uint32_t *buf, struct aarch64_register rd, uint32_t imm,
           unsigned shift)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, MOVK | size | ENCODE (shift, 2, 21) |
                            ENCODE (imm, 16, 5) | ENCODE (rd.num, 5, 0));
}

/* Write instructions into *BUF in order to move ADDR into a register.
   ADDR can be a 64-bit value.

   This function will emit a series of MOV and MOVK instructions, such as:

     MOV  xd, #(addr)
     MOVK xd, #(addr >> 16), lsl #16
     MOVK xd, #(addr >> 32), lsl #32
     MOVK xd, #(addr >> 48), lsl #48  */

static int
emit_mov_addr (uint32_t *buf, struct aarch64_register rd, CORE_ADDR addr)
{
  uint32_t *p = buf;

  /* The MOV (wide immediate) instruction clears the top bits of the
     destination register.  */
  p += emit_mov (p, rd, immediate_operand (addr & 0xffff));

  if ((addr >> 16) != 0)
    p += emit_movk (p, rd, (addr >> 16) & 0xffff, 1);
  else
    return p - buf;

  if ((addr >> 32) != 0)
    p += emit_movk (p, rd, (addr >> 32) & 0xffff, 2);
  else
    return p - buf;

  if ((addr >> 48) != 0)
    p += emit_movk (p, rd, (addr >> 48) & 0xffff, 3);

  return p - buf;
}
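
/* As a worked example, emit_mov_addr (buf, x0, 0x12345678) emits only two
   instructions, because bits 32..63 of the address are zero:

     MOV  x0, #0x5678
     MOVK x0, #0x1234, lsl #16

   and returns 2, the number of instructions written.  */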
/* Write a SUBS instruction into *BUF.

     SUBS rd, rn, rm

   This instruction updates the condition flags.

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_subs (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUBS, rd, rn, operand);
}

/* Write a CMP instruction into *BUF.

     CMP rn, rm

   This instruction is an alias of SUBS xzr, rn, rm.

   RN and RM are the registers to compare.  */

static int
emit_cmp (uint32_t *buf, struct aarch64_register rn,
          struct aarch64_operand operand)
{
  return emit_subs (buf, xzr, rn, operand);
}

/* Write an AND instruction into *BUF.

     AND rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_and (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, AND, rd, rn, rm);
}

/* Write an ORR instruction into *BUF.

     ORR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orr (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORR, rd, rn, rm);
}

/* Write an ORN instruction into *BUF.

     ORN rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orn (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORN, rd, rn, rm);
}

/* Write an EOR instruction into *BUF.

     EOR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_eor (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, EOR, rd, rn, rm);
}

/* Write a MVN instruction into *BUF.

     MVN rd, rm

   This is an alias for ORN rd, xzr, rm.

   RD is the destination register.
   RM is the source register.  */

static int
emit_mvn (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rm)
{
  return emit_orn (buf, rd, xzr, rm);
}

/* Write a LSLV instruction into *BUF.

     LSLV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lslv (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSLV, rd, rn, rm);
}

/* Write a LSRV instruction into *BUF.

     LSRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lsrv (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSRV, rd, rn, rm);
}

/* Write an ASRV instruction into *BUF.

     ASRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_asrv (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ASRV, rd, rn, rm);
}

/* Write a MUL instruction into *BUF.

     MUL rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_mul (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, MUL, rd, rn, rm);
}

/* Write a MRS instruction into *BUF.  The register size is 64-bit.

     MRS xt, system_reg

   RT is the destination register.
   SYSTEM_REG is the special purpose register to read.  */

static int
emit_mrs (uint32_t *buf, struct aarch64_register rt,
          enum aarch64_system_control_registers system_reg)
{
  return aarch64_emit_insn (buf, MRS | ENCODE (system_reg, 15, 5)
                            | ENCODE (rt.num, 5, 0));
}

/* Write a MSR instruction into *BUF.  The register size is 64-bit.

     MSR system_reg, xt

   SYSTEM_REG is the special purpose register to write.
   RT is the input register.  */

static int
emit_msr (uint32_t *buf, enum aarch64_system_control_registers system_reg,
          struct aarch64_register rt)
{
  return aarch64_emit_insn (buf, MSR | ENCODE (system_reg, 15, 5)
                            | ENCODE (rt.num, 5, 0));
}

/* Write a SEVL instruction into *BUF.

   This is a hint instruction telling the hardware to trigger an event.  */

static int
emit_sevl (uint32_t *buf)
{
  return aarch64_emit_insn (buf, SEVL);
}

/* Write a WFE instruction into *BUF.

   This is a hint instruction telling the hardware to wait for an event.  */

static int
emit_wfe (uint32_t *buf)
{
  return aarch64_emit_insn (buf, WFE);
}

/* Write a SBFM instruction into *BUF.

     SBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, sign extending the result.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_sbfm (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, SBFM | size | n | ENCODE (immr, 6, 16)
                            | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
                            | ENCODE (rd.num, 5, 0));
}

/* Write a SBFX instruction into *BUF.

     SBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination, sign
   extending the result.  This is an alias for:

     SBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_sbfx (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_sbfm (buf, rd, rn, lsb, lsb + width - 1);
}

/* Write a UBFM instruction into *BUF.

     UBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, extending the result with zeros.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_ubfm (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, UBFM | size | n | ENCODE (immr, 6, 16)
                            | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
                            | ENCODE (rd.num, 5, 0));
}

/* Write a UBFX instruction into *BUF.

     UBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination,
   extending the result with zeros.  This is an alias for:

     UBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_ubfx (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_ubfm (buf, rd, rn, lsb, lsb + width - 1);
}

/* Write a CSINC instruction into *BUF.

     CSINC rd, rn, rm, cond

   This instruction writes RN to RD if the condition is true, and RM
   incremented by one otherwise.

   RD is the destination register.
   RN and RM are the source registers.
   COND is the encoded condition.  */

static int
emit_csinc (uint32_t *buf, struct aarch64_register rd,
            struct aarch64_register rn, struct aarch64_register rm,
            unsigned cond)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, CSINC | size | ENCODE (rm.num, 5, 16)
                            | ENCODE (cond, 4, 12) | ENCODE (rn.num, 5, 5)
                            | ENCODE (rd.num, 5, 0));
}

/* Write a CSET instruction into *BUF.

     CSET rd, cond

   This instruction conditionally writes 1 or 0 to the destination register.
   1 is written if the condition is true.  This is an alias for:

     CSINC rd, xzr, xzr, !cond

   Note that the condition needs to be inverted.

   RD is the destination register.
   COND is the encoded condition.  */

static int
emit_cset (uint32_t *buf, struct aarch64_register rd, unsigned cond)
{
  /* The least significant bit of the condition needs toggling in order to
     invert it.  */
  return emit_csinc (buf, rd, xzr, xzr, cond ^ 0x1);
}
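
/* For example, with the condition codes defined above, emit_cset (buf, x0,
   EQ) encodes CSINC x0, xzr, xzr, NE, since EQ (0x0) toggled by 0x1 gives
   NE (0x1).  */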
/* Write LEN instructions from BUF into the inferior memory at *TO.

   Note instructions are always little endian on AArch64, unlike data.  */

static void
append_insns (CORE_ADDR *to, size_t len, const uint32_t *buf)
{
  size_t byte_len = len * sizeof (uint32_t);
#if (__BYTE_ORDER == __BIG_ENDIAN)
  uint32_t *le_buf = (uint32_t *) xmalloc (byte_len);
  size_t i;

  for (i = 0; i < len; i++)
    le_buf[i] = htole32 (buf[i]);

  target_write_memory (*to, (const unsigned char *) le_buf, byte_len);

  xfree (le_buf);
#else
  target_write_memory (*to, (const unsigned char *) buf, byte_len);
#endif

  *to += byte_len;
}

/* Sub-class of struct aarch64_insn_data; stores information used for
   instruction relocation by fast tracepoints.  A visitor can relocate
   an instruction from BASE.INSN_ADDR to NEW_ADDR and save the relocated
   instructions in the buffer pointed to by INSN_PTR.  */

struct aarch64_insn_relocation_data
{
  struct aarch64_insn_data base;

  /* The new address the instruction is relocated to.  */
  CORE_ADDR new_addr;
  /* Pointer to the buffer of relocated instruction(s).  */
  uint32_t *insn_ptr;
};

/* Implementation of aarch64_insn_visitor method "b".  */

static void
aarch64_ftrace_insn_reloc_b (const int is_bl, const int32_t offset,
                             struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 28))
    insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, is_bl, new_offset);
}
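
/* The offset arithmetic above preserves the branch target: the relocated
   copy is emitted at NEW_ADDR, so branching by
   INSN_ADDR - NEW_ADDR + OFFSET from there still lands on
   INSN_ADDR + OFFSET, the target of the original branch.  */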
/* Implementation of aarch64_insn_visitor method "b_cond".  */

static void
aarch64_ftrace_insn_reloc_b_cond (const unsigned cond, const int32_t offset,
                                  struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 21))
    {
      insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond,
                                          new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a conditional branch
         instruction but not for an unconditional branch.  We can use
         the following instructions instead:

           B.COND TAKEN    ; If cond is true, then jump to TAKEN.
           B NOT_TAKEN     ; Else jump over TAKEN and continue.
         TAKEN:
           B #(offset - 8)
         NOT_TAKEN:

      */
      insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
    }
}

/* Implementation of aarch64_insn_visitor method "cb".  */

static void
aarch64_ftrace_insn_reloc_cb (const int32_t offset, const int is_cbnz,
                              const unsigned rn, int is64,
                              struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 21))
    {
      insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
                                       aarch64_register (rn, is64),
                                       new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a compare and branch
         instruction but not for an unconditional branch.  We can use
         the following instructions instead:

           CBZ xn, TAKEN   ; xn == 0, then jump to TAKEN.
           B NOT_TAKEN     ; Else jump over TAKEN and continue.
         TAKEN:
           B #(offset - 8)
         NOT_TAKEN:

      */
      insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
                                       aarch64_register (rn, is64), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
    }
}

/* Implementation of aarch64_insn_visitor method "tb".  */

static void
aarch64_ftrace_insn_reloc_tb (const int32_t offset, int is_tbnz,
                              const unsigned rt, unsigned bit,
                              struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 16))
    {
      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
                                       aarch64_register (rt, 1), new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a test bit and branch
         instruction but not for an unconditional branch.  We can use
         the following instructions instead:

           TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
           B NOT_TAKEN         ; Else jump over TAKEN and continue.
         TAKEN:
           B #(offset - 8)
         NOT_TAKEN:

      */
      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
                                       aarch64_register (rt, 1), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
                                      new_offset - 8);
    }
}

/* Implementation of aarch64_insn_visitor method "adr".  */

static void
aarch64_ftrace_insn_reloc_adr (const int32_t offset, const unsigned rd,
                               const int is_adrp,
                               struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  /* We know exactly the address the ADR{P,} instruction will compute.
     We can just write it to the destination register.  */
  CORE_ADDR address = data->insn_addr + offset;

  if (is_adrp)
    {
      /* Clear the lower 12 bits of the offset to get the 4K page.  */
      insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
                                             aarch64_register (rd, 1),
                                             address & ~0xfff);
    }
  else
    insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
                                           aarch64_register (rd, 1), address);
}

/* Implementation of aarch64_insn_visitor method "ldr_literal".  */

static void
aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset, const int is_sw,
                                       const unsigned rt, const int is64,
                                       struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  CORE_ADDR address = data->insn_addr + offset;

  insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
                                         aarch64_register (rt, 1), address);

  /* We know exactly what address to load from, and what register we
     can use:

       MOV xd, #(oldloc + offset)
       MOVK xd, #((oldloc + offset) >> 16), lsl #16
       ...

       LDR xd, [xd] ; or LDRSW xd, [xd]

   */

  if (is_sw)
    insn_reloc->insn_ptr += emit_ldrsw (insn_reloc->insn_ptr,
                                        aarch64_register (rt, 1),
                                        aarch64_register (rt, 1),
                                        offset_memory_operand (0));
  else
    insn_reloc->insn_ptr += emit_ldr (insn_reloc->insn_ptr,
                                      aarch64_register (rt, is64),
                                      aarch64_register (rt, 1),
                                      offset_memory_operand (0));
}

/* Implementation of aarch64_insn_visitor method "others".  */

static void
aarch64_ftrace_insn_reloc_others (const uint32_t insn,
                                  struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;

  /* The instruction is not PC relative.  Just re-emit it at the new
     location.  */
  insn_reloc->insn_ptr += aarch64_emit_insn (insn_reloc->insn_ptr, insn);
}

static const struct aarch64_insn_visitor visitor =
{
  aarch64_ftrace_insn_reloc_b,
  aarch64_ftrace_insn_reloc_b_cond,
  aarch64_ftrace_insn_reloc_cb,
  aarch64_ftrace_insn_reloc_tb,
  aarch64_ftrace_insn_reloc_adr,
  aarch64_ftrace_insn_reloc_ldr_literal,
  aarch64_ftrace_insn_reloc_others,
};
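
/* The initializers above must stay in the same order as the callback
   fields of struct aarch64_insn_visitor, declared in
   arch/aarch64-insn.h.  */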
/* Implementation of linux_target_ops method
   "install_fast_tracepoint_jump_pad".  */

static int
aarch64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
                                          CORE_ADDR tpaddr,
                                          CORE_ADDR collector,
                                          CORE_ADDR lockaddr,
                                          ULONGEST orig_size,
                                          CORE_ADDR *jump_entry,
                                          CORE_ADDR *trampoline,
                                          ULONGEST *trampoline_size,
                                          unsigned char *jjump_pad_insn,
                                          ULONGEST *jjump_pad_insn_size,
                                          CORE_ADDR *adjusted_insn_addr,
                                          CORE_ADDR *adjusted_insn_addr_end,
                                          char *err)
{
  uint32_t buf[256];
  uint32_t *p = buf;
  int64_t offset;
  int i;
  uint32_t insn;
  CORE_ADDR buildaddr = *jump_entry;
  struct aarch64_insn_relocation_data insn_data;

  /* We need to save the current state on the stack both to restore it
     later and to collect register values when the tracepoint is hit.

     The saved registers are pushed in a layout that needs to be in sync
     with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c).  Later on
     the supply_fast_tracepoint_registers function will fill in the
     register cache from a pointer to saved registers on the stack we build
     here.

     For simplicity, we set the size of each cell on the stack to 16 bytes.
     This way one cell can hold any register type, from system registers
     to the 128 bit SIMD&FP registers.  Furthermore, the stack pointer
     has to be 16 bytes aligned anyway.

     Note that the CPSR register does not exist on AArch64.  Instead we
     can access system bits describing the process state with the
     MRS/MSR instructions, namely the condition flags.  We save them as
     if they are part of a CPSR register because that's how GDB
     interprets these system bits.  At the moment, only the condition
     flags are saved in CPSR (NZCV).

     Stack layout, each cell is 16 bytes (descending):

     High *-------- SIMD&FP registers from 31 down to 0. --------*
          | q31                                                  |
          .                                                      .
          .                                                      . 32 cells
          .                                                      .
          | q0                                                   |
          *---- General purpose registers from 30 down to 0. ----*
          | x30                                                  |
          .                                                      .
          .                                                      . 31 cells
          .                                                      .
          | x0                                                   |
          *------------- Special purpose registers. -------------*
          | SP                                                   |
          | PC                                                   |
          | CPSR (NZCV)                                          | 5 cells
          | FPSR                                                 |
          | FPCR                                                 |
          *------------- collecting_t object --------------------*
          | TPIDR_EL0               | struct tracepoint *        |
     Low  *------------------------------------------------------*

     After this stack is set up, we issue a call to the collector, passing
     it the saved registers at (SP + 16).  */

  /* Push SIMD&FP registers on the stack:

       SUB sp, sp, #(32 * 16)

       STP q30, q31, [sp, #(30 * 16)]
       ...
       STP q0, q1, [sp]

     */
  p += emit_sub (p, sp, sp, immediate_operand (32 * 16));
  for (i = 30; i >= 0; i -= 2)
    p += emit_stp_q_offset (p, i, i + 1, sp, i * 16);

  /* Push general purpose registers on the stack.  Note that we do not need
     to push x31 as it represents the xzr register and not the stack
     pointer in a STR instruction.

       SUB sp, sp, #(31 * 16)

       STR x30, [sp, #(30 * 16)]
       ...
       STR x0, [sp]

     */
  p += emit_sub (p, sp, sp, immediate_operand (31 * 16));
  for (i = 30; i >= 0; i -= 1)
    p += emit_str (p, aarch64_register (i, 1), sp,
                   offset_memory_operand (i * 16));

  /* Make space for 5 more cells.

       SUB sp, sp, #(5 * 16)

     */
  p += emit_sub (p, sp, sp, immediate_operand (5 * 16));

  /* Save SP:

       ADD x4, sp, #((32 + 31 + 5) * 16)
       STR x4, [sp, #(4 * 16)]

     */
  p += emit_add (p, x4, sp, immediate_operand ((32 + 31 + 5) * 16));
  p += emit_str (p, x4, sp, offset_memory_operand (4 * 16));

  /* Save PC (tracepoint address):

       MOV  x3, #(tpaddr)
       ...

       STR x3, [sp, #(3 * 16)]

     */
  p += emit_mov_addr (p, x3, tpaddr);
  p += emit_str (p, x3, sp, offset_memory_operand (3 * 16));

  /* Save CPSR (NZCV), FPSR and FPCR:

       MRS x2, nzcv
       MRS x1, fpsr
       MRS x0, fpcr

       STR x2, [sp, #(2 * 16)]
       STR x1, [sp, #(1 * 16)]
       STR x0, [sp, #(0 * 16)]

     */
  p += emit_mrs (p, x2, NZCV);
  p += emit_mrs (p, x1, FPSR);
  p += emit_mrs (p, x0, FPCR);
  p += emit_str (p, x2, sp, offset_memory_operand (2 * 16));
  p += emit_str (p, x1, sp, offset_memory_operand (1 * 16));
  p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));

  /* Push the collecting_t object.  It consists of the address of the
     tracepoint and an ID for the current thread.  We get the latter by
     reading the tpidr_el0 system register.  It corresponds to the
     NT_ARM_TLS register accessible with ptrace.

       MOV x0, #(tpoint)
       ...

       MRS x1, tpidr_el0

       STP x0, x1, [sp, #-16]!

     */
  p += emit_mov_addr (p, x0, tpoint);
  p += emit_mrs (p, x1, TPIDR_EL0);
  p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-16));

  /* Spin-lock:

     The shared memory for the lock is at lockaddr.  It will hold zero
     if no-one is holding the lock, otherwise it contains the address of
     the collecting_t object on the stack of the thread which acquired it.

     At this stage, the stack pointer points to this thread's collecting_t
     object.

     We use the following registers:
     - x0: Address of the lock.
     - x1: Pointer to collecting_t object.
     - x2: Scratch register.

       MOV x0, #(lockaddr)
       ...

       ; Trigger an event local to this core.  So the following WFE
       ; instruction is ignored.
       SEVL
     again:
       ; Wait for an event.  The event is triggered by either the SEVL
       ; or STLR instructions (store release).
       WFE

       ; Atomically read at lockaddr.  This marks the memory location as
       ; exclusive.  This instruction also has memory constraints which
       ; make sure all previous data reads and writes are done before
       ; executing it.
       LDAXR x2, [x0]

       ; Try again if another thread holds the lock.
       CBNZ x2, again

       ; We can lock it!  Write the address of the collecting_t object.
       ; This instruction will fail if the memory location is not marked
       ; as exclusive anymore.  If it succeeds, it will remove the
       ; exclusive mark on the memory location.  This way, if another
       ; thread executes this instruction before us, we will fail and try
       ; all over again.
       STXR w2, x1, [x0]
       CBNZ w2, again

     */

  p += emit_mov_addr (p, x0, lockaddr);
  p += emit_mov (p, x1, register_operand (sp));

  p += emit_sevl (p);
  p += emit_wfe (p);

  p += emit_ldaxr (p, x2, x0);
  p += emit_cb (p, 1, w2, -2 * 4);
  p += emit_stxr (p, w2, x1, x0);
  p += emit_cb (p, 1, x2, -4 * 4);

  /* Call collector (struct tracepoint *, unsigned char *):

       MOV x0, #(tpoint)
       ...

       ; Saved registers start after the collecting_t object.
       ADD x1, sp, #16

       ; We use an intra-procedure-call scratch register.
       MOV ip0, #(collector)
       ...

       ; And call back to C!
       BLR ip0

     */

  p += emit_mov_addr (p, x0, tpoint);
  p += emit_add (p, x1, sp, immediate_operand (16));

  p += emit_mov_addr (p, ip0, collector);
  p += emit_blr (p, ip0);

  /* Release the lock.

       MOV x0, #(lockaddr)
       ...

       ; This instruction is a normal store with memory ordering
       ; constraints.  Thanks to this we do not have to put a data
       ; barrier instruction to make sure all data read and writes are done
       ; before this instruction is executed.  Furthermore, this instruction
       ; will trigger an event, letting other threads know they can grab
       ; the lock.
       STLR xzr, [x0]

     */
  p += emit_mov_addr (p, x0, lockaddr);
  p += emit_stlr (p, xzr, x0);

  /* Free collecting_t object:

       ADD sp, sp, #16

     */
  p += emit_add (p, sp, sp, immediate_operand (16));

  /* Restore CPSR (NZCV), FPSR and FPCR.  And free all special purpose
     registers from the stack.

       LDR x2, [sp, #(2 * 16)]
       LDR x1, [sp, #(1 * 16)]
       LDR x0, [sp, #(0 * 16)]

       MSR NZCV, x2
       MSR FPSR, x1
       MSR FPCR, x0

       ADD sp, sp, #(5 * 16)

     */
  p += emit_ldr (p, x2, sp, offset_memory_operand (2 * 16));
  p += emit_ldr (p, x1, sp, offset_memory_operand (1 * 16));
  p += emit_ldr (p, x0, sp, offset_memory_operand (0 * 16));
  p += emit_msr (p, NZCV, x2);
  p += emit_msr (p, FPSR, x1);
  p += emit_msr (p, FPCR, x0);

  p += emit_add (p, sp, sp, immediate_operand (5 * 16));

  /* Pop general purpose registers:

       LDR x0, [sp]
       ...
       LDR x30, [sp, #(30 * 16)]

       ADD sp, sp, #(31 * 16)

     */
  for (i = 0; i <= 30; i += 1)
    p += emit_ldr (p, aarch64_register (i, 1), sp,
                   offset_memory_operand (i * 16));
  p += emit_add (p, sp, sp, immediate_operand (31 * 16));

  /* Pop SIMD&FP registers:

       LDP q0, q1, [sp]
       ...
       LDP q30, q31, [sp, #(30 * 16)]

       ADD sp, sp, #(32 * 16)

     */
  for (i = 0; i <= 30; i += 2)
    p += emit_ldp_q_offset (p, i, i + 1, sp, i * 16);
  p += emit_add (p, sp, sp, immediate_operand (32 * 16));

  /* Write the code into the inferior memory.  */
  append_insns (&buildaddr, p - buf, buf);

  /* Now emit the relocated instruction.  */
  *adjusted_insn_addr = buildaddr;
  target_read_uint32 (tpaddr, &insn);

  insn_data.base.insn_addr = tpaddr;
  insn_data.new_addr = buildaddr;
  insn_data.insn_ptr = buf;

  aarch64_relocate_instruction (insn, &visitor,
                                (struct aarch64_insn_data *) &insn_data);

  /* We may not have been able to relocate the instruction.  */
  if (insn_data.insn_ptr == buf)
    {
      sprintf (err,
               "E.Could not relocate instruction from %s to %s.",
               core_addr_to_string_nz (tpaddr),
               core_addr_to_string_nz (buildaddr));
      return 1;
    }

  append_insns (&buildaddr, insn_data.insn_ptr - buf, buf);
  *adjusted_insn_addr_end = buildaddr;

  /* Go back to the start of the buffer.  */
  p = buf;

  /* Emit a branch back from the jump pad.  */
  offset = (tpaddr + orig_size - buildaddr);
  if (!can_encode_int32 (offset, 28))
    {
      sprintf (err,
               "E.Jump back from jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
               offset);
      return 1;
    }

  p += emit_b (p, 0, offset);
  append_insns (&buildaddr, p - buf, buf);

  /* Give the caller a branch instruction into the jump pad.  */
  offset = (*jump_entry - tpaddr);
  if (!can_encode_int32 (offset, 28))
    {
      sprintf (err,
               "E.Jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
               offset);
      return 1;
    }

  emit_b ((uint32_t *) jjump_pad_insn, 0, offset);
  *jjump_pad_insn_size = 4;

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

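/* An unconditional B instruction encodes a signed 26-bit word offset,
   i.e. a signed 28-bit byte offset, giving a reach of roughly
   +/-128 MiB; that is what both range checks above enforce.  A minimal
   sketch of such a check (fits_in_signed_bits is a hypothetical name;
   can_encode_int32 is the real helper used here):

     static int
     fits_in_signed_bits (int64_t val, unsigned int bits)
     {
       int64_t lo = -((int64_t) 1 << (bits - 1));
       int64_t hi = ((int64_t) 1 << (bits - 1)) - 1;

       return val >= lo && val <= hi;
     }

   so fits_in_signed_bits (offset, 28) accepts exactly the offsets that
   emit_b can encode.  */
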
/* Helper function writing LEN instructions from START into
   current_insn_ptr.  */

static void
emit_ops_insns (const uint32_t *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d instructions at %s\n",
                  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}

/* Pop a register from the stack.  */

static int
emit_pop (uint32_t *buf, struct aarch64_register rt)
{
  return emit_ldr (buf, rt, sp, postindex_memory_operand (1 * 16));
}

/* Push a register on the stack.  */

static int
emit_push (uint32_t *buf, struct aarch64_register rt)
{
  return emit_str (buf, rt, sp, preindex_memory_operand (-1 * 16));
}

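/* Note that each stack slot is 16 bytes even though only 8 are used:
   AArch64 requires SP to stay 16-byte aligned whenever it is used to
   access memory.  The generated instructions are simply:

     emit_push:  STR <rt>, [sp, #-16]!
     emit_pop:   LDR <rt>, [sp], #16
*/
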
/* Implementation of emit_ops method "emit_prologue".  */

static void
aarch64_emit_prologue (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* This function emits a prologue for the following function prototype:

     enum eval_result_type f (unsigned char *regs,
                              ULONGEST *value);

     The first argument is a buffer of raw registers.  The second
     argument is the result of evaluating the expression, which will be
     set to whatever is on top of the stack at the end.

     The stack set up by the prologue is as such:

     High *------------------------------------------------------*
          | LR                                                   |
          | FP                                                   | <- FP
          | x1  (ULONGEST *value)                                |
          | x0  (unsigned char *regs)                            |
     Low  *------------------------------------------------------*

     As we are implementing a stack machine, each opcode can expand the
     stack so we never know how far we are from the data saved by this
     prologue.  In order to be able to refer to value and regs later,
     we save the current stack pointer in the frame pointer.  This way,
     it is not clobbered when calling C functions.

     Finally, throughout every operation, we are using register x0 as the
     top of the stack, and x1 as a scratch register.  */

  p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-2 * 16));
  p += emit_str (p, lr, sp, offset_memory_operand (3 * 8));
  p += emit_str (p, fp, sp, offset_memory_operand (2 * 8));

  p += emit_add (p, fp, sp, immediate_operand (2 * 8));

  emit_ops_insns (buf, p - buf);
}

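/* For reference, the four emits above produce (x29 = fp, x30 = lr):

     STP x0, x1, [sp, #-32]!
     STR x30, [sp, #24]
     STR x29, [sp, #16]
     ADD x29, sp, #16

   which matches the stack diagram in the comment: x0 at [sp], x1 at
   [sp + 8], then the saved FP and LR, with the new frame pointer
   pointing at the saved-FP slot.  */
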
/* Implementation of emit_ops method "emit_epilogue".  */

static void
aarch64_emit_epilogue (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Store the result of the expression (x0) in *value.  */
  p += emit_sub (p, x1, fp, immediate_operand (1 * 8));
  p += emit_ldr (p, x1, x1, offset_memory_operand (0));
  p += emit_str (p, x0, x1, offset_memory_operand (0));

  /* Restore the previous state.  */
  p += emit_add (p, sp, fp, immediate_operand (2 * 8));
  p += emit_ldp (p, fp, lr, fp, offset_memory_operand (0));

  /* Return expr_eval_no_error.  */
  p += emit_mov (p, x0, immediate_operand (expr_eval_no_error));
  p += emit_ret (p, lr);

  emit_ops_insns (buf, p - buf);
}

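/* Since the prologue set fp to point at the saved-FP slot, fp - 8 is
   the slot holding the original x1, i.e. the ULONGEST *value argument.
   The first three emits above therefore compute x1 = *(fp - 8) and do
   *x1 = x0, the equivalent of:

     *value = top_of_stack;

   before unwinding the frame and returning expr_eval_no_error.  */
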
/* Implementation of emit_ops method "emit_add".  */

static void
aarch64_emit_add (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_add (p, x0, x1, register_operand (x0));

  emit_ops_insns (buf, p - buf);
}

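/* The two-operand opcodes below all follow this same shape: pop the
   second operand into x1, combine it with the top of the stack held in
   x0, and leave the result in x0.  For emit_add the generated code is:

     LDR x1, [sp], #16
     ADD x0, x1, x0
*/
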
/* Implementation of emit_ops method "emit_sub".  */

static void
aarch64_emit_sub (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_sub (p, x0, x1, register_operand (x0));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_mul".  */

static void
aarch64_emit_mul (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_mul (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_lsh".  */

static void
aarch64_emit_lsh (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_lslv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_rsh_signed".  */

static void
aarch64_emit_rsh_signed (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_asrv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_rsh_unsigned".  */

static void
aarch64_emit_rsh_unsigned (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_lsrv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ext".  */

static void
aarch64_emit_ext (int arg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_sbfx (p, x0, x0, 0, arg);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_log_not".  */

static void
aarch64_emit_log_not (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* If the top of the stack is 0, replace it with 1.  Else replace it
     with 0.  */
  p += emit_cmp (p, x0, immediate_operand (0));
  p += emit_cset (p, x0, EQ);

  emit_ops_insns (buf, p - buf);
}

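/* CSET <rd>, EQ writes 1 to <rd> if the Z flag is set and 0 otherwise,
   so the pair of emits above compiles the logical-not opcode to:

     CMP  x0, #0
     CSET x0, EQ
*/
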
/* Implementation of emit_ops method "emit_bit_and".  */

static void
aarch64_emit_bit_and (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_and (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_or".  */

static void
aarch64_emit_bit_or (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_orr (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_xor".  */

static void
aarch64_emit_bit_xor (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_eor (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_not".  */

static void
aarch64_emit_bit_not (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mvn (p, x0, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_equal".  */

static void
aarch64_emit_equal (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x0, register_operand (x1));
  p += emit_cset (p, x0, EQ);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_less_signed".  */

static void
aarch64_emit_less_signed (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  p += emit_cset (p, x0, LT);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_less_unsigned".  */

static void
aarch64_emit_less_unsigned (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  p += emit_cset (p, x0, LO);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ref".  */

static void
aarch64_emit_ref (int size)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  switch (size)
    {
    case 1:
      p += emit_ldrb (p, w0, x0, offset_memory_operand (0));
      break;
    case 2:
      p += emit_ldrh (p, w0, x0, offset_memory_operand (0));
      break;
    case 4:
      p += emit_ldr (p, w0, x0, offset_memory_operand (0));
      break;
    case 8:
      p += emit_ldr (p, x0, x0, offset_memory_operand (0));
      break;
    default:
      /* Unknown size, bail on compilation.  */
      emit_error = 1;
      break;
    }

  emit_ops_insns (buf, p - buf);
}

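/* The switch above dereferences the address on top of the stack with a
   load sized to the request; each variant zero-extends into the full
   64-bit x0:

     size 1:  LDRB w0, [x0]
     size 2:  LDRH w0, [x0]
     size 4:  LDR  w0, [x0]
     size 8:  LDR  x0, [x0]
*/
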
/* Implementation of emit_ops method "emit_if_goto".  */

static void
aarch64_emit_if_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* The Z flag is set or cleared here.  */
  p += emit_cmp (p, x0, immediate_operand (0));
  /* This instruction must not change the Z flag.  */
  p += emit_pop (p, x0);
  /* Branch over the next instruction if x0 == 0.  */
  p += emit_bcond (p, EQ, 8);

  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

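/* The sequence above is emitted before the branch destination is known,
   so a NOP placeholder is produced and its location reported back
   through *offset_p/*size_p.  Once the destination is resolved,
   aarch64_write_goto_address (below) overwrites the NOP in place:

     CMP  x0, #0
     LDR  x0, [sp], #16
     B.EQ . + 8          ; skip the goto when the top of stack was zero
     NOP                 ; later patched to B <destination>
*/
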
/* Implementation of emit_ops method "emit_goto".  */

static void
aarch64_emit_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = 0;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "write_goto_address".  */

static void
aarch64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  uint32_t insn;

  emit_b (&insn, 0, to - from);
  append_insns (&from, 1, &insn);
}

/* Implementation of emit_ops method "emit_const".  */

static void
aarch64_emit_const (LONGEST num)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov_addr (p, x0, num);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_call".  */

static void
aarch64_emit_call (CORE_ADDR fn)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov_addr (p, ip0, fn);
  p += emit_blr (p, ip0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_reg".  */

static void
aarch64_emit_reg (int reg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Set x0 to unsigned char *regs.  */
  p += emit_sub (p, x0, fp, immediate_operand (2 * 8));
  p += emit_ldr (p, x0, x0, offset_memory_operand (0));
  p += emit_mov (p, x1, immediate_operand (reg));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (get_raw_reg_func_addr ());
}

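/* The helper reached through get_raw_reg_func_addr () lives in the
   in-process agent; its C prototype is of the form (a sketch, see the
   tracepoint support code for the exact declaration):

     ULONGEST get_raw_reg (const unsigned char *raw_regs, int regnum);

   so the emits above place the saved-register buffer in x0 and the
   register number in x1, and the call leaves the register value in x0,
   the top of the stack.  */
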
/* Implementation of emit_ops method "emit_pop".  */

static void
aarch64_emit_pop (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_stack_flush".  */

static void
aarch64_emit_stack_flush (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_push (p, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_zero_ext".  */

static void
aarch64_emit_zero_ext (int arg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_ubfx (p, x0, x0, 0, arg);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_swap".  */

static void
aarch64_emit_swap (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_ldr (p, x1, sp, offset_memory_operand (0 * 16));
  p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
  p += emit_mov (p, x0, register_operand (x1));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_stack_adjust".  */

static void
aarch64_emit_stack_adjust (int n)
{
  /* This is not needed with our design.  */
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_add (p, sp, sp, immediate_operand (n * 16));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_int_call_1".  */

static void
aarch64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov (p, x0, immediate_operand (arg1));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (fn);
}

/* Implementation of emit_ops method "emit_void_call_2".  */

static void
aarch64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Push x0 on the stack.  */
  aarch64_emit_stack_flush ();

  /* Setup arguments for the function call:

     x0: arg1
     x1: top of the stack

     MOV x1, x0
     MOV x0, #arg1  */

  p += emit_mov (p, x1, register_operand (x0));
  p += emit_mov (p, x0, immediate_operand (arg1));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (fn);

  /* Restore x0.  */
  aarch64_emit_pop ();
}

/* Implementation of emit_ops method "emit_eq_goto".  */

static void
aarch64_emit_eq_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 != x1.  */
  p += emit_bcond (p, NE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ne_goto".  */

static void
aarch64_emit_ne_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 == x1.  */
  p += emit_bcond (p, EQ, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_lt_goto".  */

static void
aarch64_emit_lt_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 >= x1.  */
  p += emit_bcond (p, GE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_le_goto".  */

static void
aarch64_emit_le_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 > x1.  */
  p += emit_bcond (p, GT, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_gt_goto".  */

static void
aarch64_emit_gt_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 <= x1.  */
  p += emit_bcond (p, LE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ge_goto".  */

static void
aarch64_emit_ge_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 < x1.  */
  p += emit_bcond (p, LT, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

static struct emit_ops aarch64_emit_ops_impl =
{
  aarch64_emit_prologue,
  aarch64_emit_epilogue,
  aarch64_emit_add,
  aarch64_emit_sub,
  aarch64_emit_mul,
  aarch64_emit_lsh,
  aarch64_emit_rsh_signed,
  aarch64_emit_rsh_unsigned,
  aarch64_emit_ext,
  aarch64_emit_log_not,
  aarch64_emit_bit_and,
  aarch64_emit_bit_or,
  aarch64_emit_bit_xor,
  aarch64_emit_bit_not,
  aarch64_emit_equal,
  aarch64_emit_less_signed,
  aarch64_emit_less_unsigned,
  aarch64_emit_ref,
  aarch64_emit_if_goto,
  aarch64_emit_goto,
  aarch64_write_goto_address,
  aarch64_emit_const,
  aarch64_emit_call,
  aarch64_emit_reg,
  aarch64_emit_pop,
  aarch64_emit_stack_flush,
  aarch64_emit_zero_ext,
  aarch64_emit_swap,
  aarch64_emit_stack_adjust,
  aarch64_emit_int_call_1,
  aarch64_emit_void_call_2,
  aarch64_emit_eq_goto,
  aarch64_emit_ne_goto,
  aarch64_emit_lt_goto,
  aarch64_emit_le_goto,
  aarch64_emit_gt_goto,
  aarch64_emit_ge_goto,
};

/* Implementation of linux_target_ops method "emit_ops".  */

static struct emit_ops *
aarch64_emit_ops (void)
{
  return &aarch64_emit_ops_impl;
}

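/* These hooks are driven by the agent-expression compiler when a fast
   tracepoint condition is compiled to native code.  As a rough sketch,
   an expression comparing register 0 with 42 would be lowered through
   calls such as:

     aarch64_emit_prologue ();
     aarch64_emit_reg (0);        // x0 = value of register 0
     aarch64_emit_stack_flush (); // push it
     aarch64_emit_const (42);     // x0 = 42
     aarch64_emit_equal ();       // pop, compare, x0 = equal ? 1 : 0
     aarch64_emit_epilogue ();

   (illustrative only; the exact call sequence is chosen by the bytecode
   compiler in tracepoint.c).  */
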
/* Implementation of linux_target_ops method
   "get_min_fast_tracepoint_insn_len".  */

static int
aarch64_get_min_fast_tracepoint_insn_len (void)
{
  return 4;
}

/* Implementation of linux_target_ops method "supports_range_stepping".  */

static int
aarch64_supports_range_stepping (void)
{
  return 1;
}

/* Implementation of linux_target_ops method "sw_breakpoint_from_kind".  */

static const gdb_byte *
aarch64_sw_breakpoint_from_kind (int kind, int *size)
{
  if (is_64bit_tdesc ())
    {
      *size = aarch64_breakpoint_len;
      return aarch64_breakpoint;
    }
  else
    return arm_sw_breakpoint_from_kind (kind, size);
}

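/* For 64-bit processes, aarch64_breakpoint (defined earlier in this
   file) is the 4-byte BRK instruction, BRK #0 (encoded 0xd4200000),
   which the kernel reports as a SIGTRAP; 32-bit processes fall back to
   the AArch32 breakpoint encodings.  */
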
/* Implementation of linux_target_ops method "breakpoint_kind_from_pc".  */

static int
aarch64_breakpoint_kind_from_pc (CORE_ADDR *pcptr)
{
  if (is_64bit_tdesc ())
    return aarch64_breakpoint_len;
  else
    return arm_breakpoint_kind_from_pc (pcptr);
}

/* Implementation of the linux_target_ops method
   "breakpoint_kind_from_current_state".  */

static int
aarch64_breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
{
  if (is_64bit_tdesc ())
    return aarch64_breakpoint_len;
  else
    return arm_breakpoint_kind_from_current_state (pcptr);
}

/* Support for hardware single step.  */

static int
aarch64_supports_hardware_single_step (void)
{
  return 1;
}

struct linux_target_ops the_low_target =
{
  aarch64_arch_setup,
  aarch64_regs_info,
  NULL, /* cannot_fetch_register */
  NULL, /* cannot_store_register */
  NULL, /* fetch_register */
  aarch64_get_pc,
  aarch64_set_pc,
  aarch64_breakpoint_kind_from_pc,
  aarch64_sw_breakpoint_from_kind,
  NULL, /* get_next_pcs */
  0,    /* decr_pc_after_break */
  aarch64_breakpoint_at,
  aarch64_supports_z_point_type,
  aarch64_insert_point,
  aarch64_remove_point,
  aarch64_stopped_by_watchpoint,
  aarch64_stopped_data_address,
  NULL, /* collect_ptrace_register */
  NULL, /* supply_ptrace_register */
  aarch64_linux_siginfo_fixup,
  aarch64_linux_new_process,
  aarch64_linux_delete_process,
  aarch64_linux_new_thread,
  aarch64_linux_delete_thread,
  aarch64_linux_new_fork,
  aarch64_linux_prepare_to_resume,
  NULL, /* process_qsupported */
  aarch64_supports_tracepoints,
  aarch64_get_thread_area,
  aarch64_install_fast_tracepoint_jump_pad,
  aarch64_emit_ops,
  aarch64_get_min_fast_tracepoint_insn_len,
  aarch64_supports_range_stepping,
  aarch64_breakpoint_kind_from_current_state,
  aarch64_supports_hardware_single_step,
  aarch64_get_syscall_trapinfo,
};

/* The linux target ops object.  */

linux_process_target *the_linux_target = &the_aarch64_target;

void
initialize_low_arch (void)
{
  initialize_low_arch_aarch32 ();

  initialize_regsets_info (&aarch64_regsets_info);
  initialize_regsets_info (&aarch64_sve_regsets_info);
}