/* GNU/Linux/AArch64 specific low level interface, for the remote server for
   GDB.

   Copyright (C) 2009-2020 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/aarch64-linux.h"
#include "nat/aarch64-linux-hw-point.h"
#include "arch/aarch64-insn.h"
#include "linux-aarch32-low.h"
#include "elf/common.h"
#include "ax.h"
#include "tracepoint.h"
#include "debug.h"

#include <signal.h>
#include <sys/user.h>
#include "nat/gdb_ptrace.h"
#include <asm/ptrace.h>
#include <inttypes.h>
#include <endian.h>
#include <sys/uio.h>

#include "gdb_proc_service.h"
#include "arch/aarch64.h"
#include "linux-aarch32-tdesc.h"
#include "linux-aarch64-tdesc.h"
#include "nat/aarch64-sve-linux-ptrace.h"
#include "tdesc.h"

/* Linux target op definitions for the AArch64 architecture.  */

class aarch64_target : public linux_process_target
{
protected:

  const regs_info *get_regs_info () override;

  void low_arch_setup () override;
};

/* The singleton target ops object.  */

static aarch64_target the_aarch64_target;

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  /* Hardware breakpoint/watchpoint data.
     The reason for them to be per-process rather than per-thread is
     due to the lack of information in the gdbserver environment;
     gdbserver is not told whether a requested hardware
     breakpoint/watchpoint is thread specific or not, so it has to set
     each hw bp/wp for every thread in the current process.  The
     higher level bp/wp management in gdb will resume a thread if a hw
     bp/wp trap is not expected for it.  Since the hw bp/wp setting is
     the same for each thread, it is reasonable for the data to live
     here.  */
  struct aarch64_debug_reg_state debug_reg_state;
};

/* Return true if the size of register 0 is 8 bytes.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

/* Return true if the regcache contains the number of SVE registers.  */

static bool
is_sve_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return tdesc_contains_feature (regcache->tdesc, "org.gnu.gdb.aarch64.sve");
}

static void
aarch64_fill_gregset (struct regcache *regcache, void *buf)
{
  struct user_pt_regs *regset = (struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    collect_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  collect_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  collect_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  collect_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}

static void
aarch64_store_gregset (struct regcache *regcache, const void *buf)
{
  const struct user_pt_regs *regset = (const struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    supply_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  supply_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  supply_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  supply_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}

static void
aarch64_fill_fpregset (struct regcache *regcache, void *buf)
{
  struct user_fpsimd_state *regset = (struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    collect_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  collect_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  collect_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}

static void
aarch64_store_fpregset (struct regcache *regcache, const void *buf)
{
  const struct user_fpsimd_state *regset
    = (const struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    supply_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  supply_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  supply_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}

/* Store the pauth registers to regcache.  */

static void
aarch64_store_pauthregset (struct regcache *regcache, const void *buf)
{
  uint64_t *pauth_regset = (uint64_t *) buf;
  int pauth_base = find_regno (regcache->tdesc, "pauth_dmask");

  if (pauth_base == 0)
    return;

  supply_register (regcache, AARCH64_PAUTH_DMASK_REGNUM (pauth_base),
		   &pauth_regset[0]);
  supply_register (regcache, AARCH64_PAUTH_CMASK_REGNUM (pauth_base),
		   &pauth_regset[1]);
}

/* Implementation of linux_target_ops method "get_pc".  */

static CORE_ADDR
aarch64_get_pc (struct regcache *regcache)
{
  if (register_size (regcache->tdesc, 0) == 8)
    return linux_get_pc_64bit (regcache);
  else
    return linux_get_pc_32bit (regcache);
}

/* Implementation of linux_target_ops method "set_pc".  */

static void
aarch64_set_pc (struct regcache *regcache, CORE_ADDR pc)
{
  if (register_size (regcache->tdesc, 0) == 8)
    linux_set_pc_64bit (regcache, pc);
  else
    linux_set_pc_32bit (regcache, pc);
}

#define aarch64_breakpoint_len 4

/* AArch64 BRK software debug mode instruction.
   This instruction needs to match gdb/aarch64-tdep.c
   (aarch64_default_breakpoint).  */
static const gdb_byte aarch64_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};

/* Implementation of linux_target_ops method "breakpoint_at".  */

static int
aarch64_breakpoint_at (CORE_ADDR where)
{
  if (is_64bit_tdesc ())
    {
      gdb_byte insn[aarch64_breakpoint_len];

      the_target->read_memory (where, (unsigned char *) &insn,
			       aarch64_breakpoint_len);
      if (memcmp (insn, aarch64_breakpoint, aarch64_breakpoint_len) == 0)
	return 1;

      return 0;
    }
  else
    return arm_breakpoint_at (where);
}

static void
aarch64_init_debug_reg_state (struct aarch64_debug_reg_state *state)
{
  int i;

  for (i = 0; i < AARCH64_HBP_MAX_NUM; ++i)
    {
      state->dr_addr_bp[i] = 0;
      state->dr_ctrl_bp[i] = 0;
      state->dr_ref_count_bp[i] = 0;
    }

  for (i = 0; i < AARCH64_HWP_MAX_NUM; ++i)
    {
      state->dr_addr_wp[i] = 0;
      state->dr_ctrl_wp[i] = 0;
      state->dr_ref_count_wp[i] = 0;
    }
}

/* Return the pointer to the debug register state structure in the
   current process' arch-specific data area.  */

struct aarch64_debug_reg_state *
aarch64_get_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}

/* Implementation of linux_target_ops method "supports_z_point_type".  */

static int
aarch64_supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_READ_WP:
    case Z_PACKET_ACCESS_WP:
      return 1;
    default:
      return 0;
    }
}

/* Implementation of linux_target_ops method "insert_point".

   It actually only records the info of the to-be-inserted bp/wp;
   the actual insertion will happen when threads are resumed.  */

static int
aarch64_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
		      int len, struct raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "insert_point on entry (addr=0x%08lx, len=%d)\n",
	     (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  if (targ_type != hw_execute)
    {
      if (aarch64_linux_region_ok_for_watchpoint (addr, len))
	ret = aarch64_handle_watchpoint (targ_type, addr, len,
					 1 /* is_insert */, state);
      else
	ret = -1;
    }
  else
    {
      if (len == 3)
	{
	  /* A LEN of 3 means the breakpoint is set on a 32-bit thumb
	     instruction.  Set it to 2 to correctly encode the length bit
	     mask in the hardware/watchpoint control register.  */
	  len = 2;
	}
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
				       1 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "insert_point", addr, len,
				  targ_type);

  return ret;
}

/* Implementation of linux_target_ops method "remove_point".

   It actually only records the info of the to-be-removed bp/wp,
   the actual removal will be done when threads are resumed.  */

static int
aarch64_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
		      int len, struct raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "remove_point on entry (addr=0x%08lx, len=%d)\n",
	     (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  /* Set up state pointers.  */
  if (targ_type != hw_execute)
    ret = aarch64_handle_watchpoint (targ_type, addr, len, 0 /* is_insert */,
				     state);
  else
    {
      if (len == 3)
	{
	  /* A LEN of 3 means the breakpoint is set on a 32-bit thumb
	     instruction.  Set it to 2 to correctly encode the length bit
	     mask in the hardware/watchpoint control register.  */
	  len = 2;
	}
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
				       0 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "remove_point", addr, len,
				  targ_type);

  return ret;
}

/* Implementation of linux_target_ops method "stopped_data_address".  */

static CORE_ADDR
aarch64_stopped_data_address (void)
{
  siginfo_t siginfo;
  int pid, i;
  struct aarch64_debug_reg_state *state;

  pid = lwpid_of (current_thread);

  /* Get the siginfo.  */
  if (ptrace (PTRACE_GETSIGINFO, pid, NULL, &siginfo) != 0)
    return (CORE_ADDR) 0;

  /* Need to be a hardware breakpoint/watchpoint trap.  */
  if (siginfo.si_signo != SIGTRAP
      || (siginfo.si_code & 0xffff) != 0x0004 /* TRAP_HWBKPT */)
    return (CORE_ADDR) 0;

  /* Check if the address matches any watched address.  */
  state = aarch64_get_debug_reg_state (pid_of (current_thread));
  for (i = aarch64_num_wp_regs - 1; i >= 0; --i)
    {
      const unsigned int offset
	= aarch64_watchpoint_offset (state->dr_ctrl_wp[i]);
      const unsigned int len = aarch64_watchpoint_length (state->dr_ctrl_wp[i]);
      const CORE_ADDR addr_trap = (CORE_ADDR) siginfo.si_addr;
      const CORE_ADDR addr_watch = state->dr_addr_wp[i] + offset;
      const CORE_ADDR addr_watch_aligned = align_down (state->dr_addr_wp[i], 8);
      const CORE_ADDR addr_orig = state->dr_addr_orig_wp[i];

      if (state->dr_ref_count_wp[i]
	  && DR_CONTROL_ENABLED (state->dr_ctrl_wp[i])
	  && addr_trap >= addr_watch_aligned
	  && addr_trap < addr_watch + len)
	{
	  /* ADDR_TRAP reports the first address of the memory range
	     accessed by the CPU, regardless of what was the memory
	     range watched.  Thus, a large CPU access that straddles
	     the ADDR_WATCH..ADDR_WATCH+LEN range may result in an
	     ADDR_TRAP that is lower than the
	     ADDR_WATCH..ADDR_WATCH+LEN range.  E.g.:

	     addr: |   4   |   5   |   6   |   7   |   8   |
			   |---- range watched ----|
		   |----------- range accessed ------------|

	     In this case, ADDR_TRAP will be 4.

	     To match a watchpoint known to GDB core, we must never
	     report *ADDR_P outside of any ADDR_WATCH..ADDR_WATCH+LEN
	     range.  ADDR_WATCH <= ADDR_TRAP < ADDR_ORIG is a false
	     positive on kernels older than 4.10.  See PR.  */
	  return addr_orig;
	}
    }

  return (CORE_ADDR) 0;
}

/* Implementation of linux_target_ops method "stopped_by_watchpoint".  */

static int
aarch64_stopped_by_watchpoint (void)
{
  if (aarch64_stopped_data_address () != 0)
    return 1;
  else
    return 0;
}

/* Fetch the thread-local storage pointer for libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
  return aarch64_ps_get_thread_area (ph, lwpid, idx, base,
				     is_64bit_tdesc ());
}

/* Implementation of linux_target_ops method "siginfo_fixup".  */

static int
aarch64_linux_siginfo_fixup (siginfo_t *native, gdb_byte *inf, int direction)
{
  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    {
      if (direction == 0)
	aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo *) inf,
					     native);
      else
	aarch64_siginfo_from_compat_siginfo (native,
					     (struct compat_siginfo *) inf);

      return 1;
    }

  return 0;
}

/* Implementation of linux_target_ops method "new_process".  */

static struct arch_process_info *
aarch64_linux_new_process (void)
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  aarch64_init_debug_reg_state (&info->debug_reg_state);

  return info;
}

/* Implementation of linux_target_ops method "delete_process".  */

static void
aarch64_linux_delete_process (struct arch_process_info *info)
{
  xfree (info);
}

/* Implementation of linux_target_ops method "linux_new_fork".  */

static void
aarch64_linux_new_fork (struct process_info *parent,
			struct process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
	      && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
	      && child->priv->arch_private != NULL);

  /* Linux kernel before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     will inherit hardware debug registers from parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug register mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug register mirrors will become zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}

/* Matches HWCAP_PACA in kernel header arch/arm64/include/uapi/asm/hwcap.h.  */
#define AARCH64_HWCAP_PACA (1 << 30)

/* Implementation of linux target ops method "low_arch_setup".  */

void
aarch64_target::low_arch_setup ()
{
  unsigned int machine;
  int is_elf64;
  int tid;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (is_elf64)
    {
      uint64_t vq = aarch64_sve_get_vq (tid);
      unsigned long hwcap = linux_get_hwcap (8);
      bool pauth_p = hwcap & AARCH64_HWCAP_PACA;

      current_process ()->tdesc = aarch64_linux_read_description (vq, pauth_p);
    }
  else
    current_process ()->tdesc = aarch32_linux_read_description ();

  aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread));
}

/* Wrapper for aarch64_sve_regs_copy_to_reg_buf.  */

static void
aarch64_sve_regs_copy_to_regcache (struct regcache *regcache, const void *buf)
{
  return aarch64_sve_regs_copy_to_reg_buf (regcache, buf);
}

/* Wrapper for aarch64_sve_regs_copy_from_reg_buf.  */

static void
aarch64_sve_regs_copy_from_regcache (struct regcache *regcache, void *buf)
{
  return aarch64_sve_regs_copy_from_reg_buf (regcache, buf);
}

static struct regset_info aarch64_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_FPREGSET,
    sizeof (struct user_fpsimd_state), FP_REGS,
    aarch64_fill_fpregset, aarch64_store_fpregset
  },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
    AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
    NULL, aarch64_store_pauthregset },
  NULL_REGSET
};

static struct regsets_info aarch64_regsets_info =
  {
    aarch64_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

static struct regs_info regs_info_aarch64 =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs */
    &aarch64_regsets_info,
  };

static struct regset_info aarch64_sve_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_SVE,
    SVE_PT_SIZE (AARCH64_MAX_SVE_VQ, SVE_PT_REGS_SVE), EXTENDED_REGS,
    aarch64_sve_regs_copy_from_regcache, aarch64_sve_regs_copy_to_regcache
  },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
    AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
    NULL, aarch64_store_pauthregset },
  NULL_REGSET
};

static struct regsets_info aarch64_sve_regsets_info =
  {
    aarch64_sve_regsets, /* regsets.  */
    0, /* num_regsets.  */
    NULL, /* disabled_regsets.  */
  };

static struct regs_info regs_info_aarch64_sve =
  {
    NULL, /* regset_bitmap.  */
    NULL, /* usrregs.  */
    &aarch64_sve_regsets_info,
  };

/* Implementation of linux target ops method "get_regs_info".  */

const regs_info *
aarch64_target::get_regs_info ()
{
  if (!is_64bit_tdesc ())
    return &regs_info_aarch32;

  if (is_sve_tdesc ())
    return &regs_info_aarch64_sve;

  return &regs_info_aarch64;
}

/* Implementation of linux_target_ops method "supports_tracepoints".  */

static int
aarch64_supports_tracepoints (void)
{
  if (current_thread == NULL)
    return 1;
  else
    {
      /* We don't support tracepoints on aarch32 now.  */
      return is_64bit_tdesc ();
    }
}

/* Implementation of linux_target_ops method "get_thread_area".  */

static int
aarch64_get_thread_area (int lwpid, CORE_ADDR *addrp)
{
  struct iovec iovec;
  uint64_t reg;

  iovec.iov_base = &reg;
  iovec.iov_len = sizeof (reg);

  if (ptrace (PTRACE_GETREGSET, lwpid, NT_ARM_TLS, &iovec) != 0)
    return -1;

  *addrp = reg;

  return 0;
}

/* Implementation of linux_target_ops method "get_syscall_trapinfo".  */

static void
aarch64_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      collect_register_by_name (regcache, "x8", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "r7", sysno);
}

/* List of condition codes that we need.  */

enum aarch64_condition_codes
{
  EQ = 0x0,
  NE = 0x1,
  LO = 0x3,
  GE = 0xa,
  LT = 0xb,
  GT = 0xc,
  LE = 0xd,
};

enum aarch64_operand_type
{
  OPERAND_IMMEDIATE,
  OPERAND_REGISTER,
};

/* Representation of an operand.  At this time, it only supports register
   and immediate types.  */

struct aarch64_operand
{
  /* Type of the operand.  */
  enum aarch64_operand_type type;

  /* Value of the operand according to the type.  */
  union
    {
      uint32_t imm;
      struct aarch64_register reg;
    };
};

/* List of registers that we are currently using, we can add more here as
   we need to use them.  */

/* General purpose scratch registers (64 bit).  */
static const struct aarch64_register x0 = { 0, 1 };
static const struct aarch64_register x1 = { 1, 1 };
static const struct aarch64_register x2 = { 2, 1 };
static const struct aarch64_register x3 = { 3, 1 };
static const struct aarch64_register x4 = { 4, 1 };

/* General purpose scratch registers (32 bit).  */
static const struct aarch64_register w0 = { 0, 0 };
static const struct aarch64_register w2 = { 2, 0 };

/* Intra-procedure scratch registers.  */
static const struct aarch64_register ip0 = { 16, 1 };

/* Special purpose registers.  */
static const struct aarch64_register fp = { 29, 1 };
static const struct aarch64_register lr = { 30, 1 };
static const struct aarch64_register sp = { 31, 1 };
static const struct aarch64_register xzr = { 31, 1 };

/* Dynamically allocate a new register.  If we know the register
   statically, we should make it a global as above instead of using this
   function.  */

static struct aarch64_register
aarch64_register (unsigned num, int is64)
{
  return (struct aarch64_register) { num, is64 };
}

/* Helper function to create a register operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, register_operand (x1));  */

static struct aarch64_operand
register_operand (struct aarch64_register reg)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_REGISTER;
  operand.reg = reg;

  return operand;
}

/* Helper function to create an immediate operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, immediate_operand (12));  */

static struct aarch64_operand
immediate_operand (uint32_t imm)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_IMMEDIATE;
  operand.imm = imm;

  return operand;
}

/* Helper function to create an offset memory operand.

   For example:
   p += emit_ldr (p, x0, sp, offset_memory_operand (16));  */

static struct aarch64_memory_operand
offset_memory_operand (int32_t offset)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_OFFSET, offset };
}

/* Helper function to create a pre-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, preindex_memory_operand (16));  */

static struct aarch64_memory_operand
preindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_PREINDEX, index };
}

/* Helper function to create a post-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, postindex_memory_operand (16));  */

static struct aarch64_memory_operand
postindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_POSTINDEX, index };
}

/* System control registers.  These special registers can be written and
   read with the MRS and MSR instructions.

   - NZCV: Condition flags.  GDB refers to this register under the CPSR
     name.
   - FPSR: Floating-point status register.
   - FPCR: Floating-point control registers.
   - TPIDR_EL0: Software thread ID register.  */

enum aarch64_system_control_registers
{
  /*          op0           op1           crn          crm          op2  */
  NZCV =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
  FPSR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
  FPCR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
  TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
};

/* Write a BLR instruction into *BUF.

   BLR rn

   RN is the register to branch to.  */

static int
emit_blr (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, BLR | ENCODE (rn.num, 5, 5));
}

/* Write a RET instruction into *BUF.

   RET xn

   RN is the register to branch to.  */

static int
emit_ret (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, RET | ENCODE (rn.num, 5, 5));
}

static int
emit_load_store_pair (uint32_t *buf, enum aarch64_opcodes opcode,
		      struct aarch64_register rt,
		      struct aarch64_register rt2,
		      struct aarch64_register rn,
		      struct aarch64_memory_operand operand)
{
  uint32_t opc;
  uint32_t pre_index;
  uint32_t write_back;

  if (rt.is64)
    opc = ENCODE (2, 2, 30);
  else
    opc = ENCODE (0, 2, 30);

  switch (operand.type)
    {
    case MEMORY_OPERAND_OFFSET:
      {
	pre_index = ENCODE (1, 1, 24);
	write_back = ENCODE (0, 1, 23);
	break;
      }
    case MEMORY_OPERAND_POSTINDEX:
      {
	pre_index = ENCODE (0, 1, 24);
	write_back = ENCODE (1, 1, 23);
	break;
      }
    case MEMORY_OPERAND_PREINDEX:
      {
	pre_index = ENCODE (1, 1, 24);
	write_back = ENCODE (1, 1, 23);
	break;
      }
    default:
      return 0;
    }

  return aarch64_emit_insn (buf, opcode | opc | pre_index | write_back
			    | ENCODE (operand.index >> 3, 7, 15)
			    | ENCODE (rt2.num, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}

/* Write a STP instruction into *BUF.

   STP rt, rt2, [rn, #offset]
   STP rt, rt2, [rn, #index]!
   STP rt, rt2, [rn], #index

   RT and RT2 are the registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_stp (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rt2, struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, STP, rt, rt2, rn, operand);
}

/* Write an LDP instruction into *BUF.

   LDP rt, rt2, [rn, #offset]
   LDP rt, rt2, [rn, #index]!
   LDP rt, rt2, [rn], #index

   RT and RT2 are the registers to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_ldp (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rt2, struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, LDP, rt, rt2, rn, operand);
}

/* Write an LDP (SIMD&VFP) instruction using Q registers into *BUF.

   LDP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_ldp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
		   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, LDP_SIMD_VFP | opc | pre_index
			    | ENCODE (offset >> 4, 7, 15)
			    | ENCODE (rt2, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

/* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.

   STP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_stp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
		   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, STP_SIMD_VFP | opc | pre_index
			    | ENCODE (offset >> 4, 7, 15)
			    | ENCODE (rt2, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

/* Write an LDRH instruction into *BUF.

   LDRH wt, [xn, #offset]
   LDRH wt, [xn, #index]!
   LDRH wt, [xn], #index

   RT is the destination register.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_ldrh (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn,
	   struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 1, LDR, rt, rn, operand);
}

/* Write an LDRB instruction into *BUF.

   LDRB wt, [xn, #offset]
   LDRB wt, [xn, #index]!
   LDRB wt, [xn], #index

   RT is the destination register.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_ldrb (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn,
	   struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 0, LDR, rt, rn, operand);
}

/* Write a STR instruction into *BUF.

   STR rt, [rn, #offset]
   STR rt, [rn, #index]!
   STR rt, [rn], #index

   RT is the register to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_str (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, rt.is64 ? 3 : 2, STR, rt, rn, operand);
}

/* Helper function emitting an exclusive load or store instruction.  */

static int
emit_load_store_exclusive (uint32_t *buf, uint32_t size,
			   enum aarch64_opcodes opcode,
			   struct aarch64_register rs,
			   struct aarch64_register rt,
			   struct aarch64_register rt2,
			   struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, opcode | ENCODE (size, 2, 30)
			    | ENCODE (rs.num, 5, 16) | ENCODE (rt2.num, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}

/* Write an LDAXR instruction into *BUF.

   LDAXR rt, [xn]

   RT is the destination register.
   RN is the base address register.  */

static int
emit_ldaxr (uint32_t *buf, struct aarch64_register rt,
	    struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, LDAXR, xzr, rt,
				    xzr, rn);
}

/* Write a STXR instruction into *BUF.

   STXR ws, rt, [xn]

   RS is the result register, it indicates if the store succeeded or not.
   RT is the destination register.
   RN is the base address register.  */

static int
emit_stxr (uint32_t *buf, struct aarch64_register rs,
	   struct aarch64_register rt, struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STXR, rs, rt,
				    xzr, rn);
}

/* Write a STLR instruction into *BUF.

   STLR rt, [xn]

   RT is the register to store.
   RN is the base address register.  */

static int
emit_stlr (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STLR, xzr, rt,
				    xzr, rn);
}

/* Helper function for data processing instructions with register sources.  */

static int
emit_data_processing_reg (uint32_t *buf, uint32_t opcode,
			  struct aarch64_register rd,
			  struct aarch64_register rn,
			  struct aarch64_register rm)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, opcode | size | ENCODE (rm.num, 5, 16)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rd.num, 5, 0));
}

/* Helper function for data processing instructions taking either a register
   or an immediate.  */

static int
emit_data_processing (uint32_t *buf, enum aarch64_opcodes opcode,
		      struct aarch64_register rd,
		      struct aarch64_register rn,
		      struct aarch64_operand operand)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  /* The opcode is different for register and immediate source operands.  */
  uint32_t operand_opcode;

  if (operand.type == OPERAND_IMMEDIATE)
    {
      /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (8, 4, 25);

      return aarch64_emit_insn (buf, opcode | operand_opcode | size
				| ENCODE (operand.imm, 12, 10)
				| ENCODE (rn.num, 5, 5)
				| ENCODE (rd.num, 5, 0));
    }
  else
    {
      /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (5, 4, 25);

      return emit_data_processing_reg (buf, opcode | operand_opcode, rd,
				       rn, operand.reg);
    }
}

/* Write an ADD instruction into *BUF.

   ADD rd, rn, #imm
   ADD rd, rn, rm

   This function handles both an immediate and register add.

   RD is the destination register.
   RN is the input register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_add (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, ADD, rd, rn, operand);
}

/* Write a SUB instruction into *BUF.

   SUB rd, rn, #imm
   SUB rd, rn, rm

   This function handles both an immediate and register sub.

   RD is the destination register.
   RN is the input register.
   IMM is the immediate to subtract from RN.  */

static int
emit_sub (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUB, rd, rn, operand);
}

/* Write a MOV instruction into *BUF.

   MOV rd, #imm
   MOV rd, rm

   This function handles both a wide immediate move and a register move,
   with the condition that the source register is not xzr.  xzr and the
   stack pointer share the same encoding and this function only supports
   the stack pointer.

   RD is the destination register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_mov (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_operand operand)
{
  if (operand.type == OPERAND_IMMEDIATE)
    {
      uint32_t size = ENCODE (rd.is64, 1, 31);
      /* Do not shift the immediate.  */
      uint32_t shift = ENCODE (0, 2, 21);

      return aarch64_emit_insn (buf, MOV | size | shift
				| ENCODE (operand.imm, 16, 5)
				| ENCODE (rd.num, 5, 0));
    }
  else
    return emit_add (buf, rd, operand.reg, immediate_operand (0));
}

/* Write a MOVK instruction into *BUF.

   MOVK rd, #imm, lsl #shift

   RD is the destination register.
   IMM is the immediate.
   SHIFT is the logical shift left to apply to IMM.  */

static int
emit_movk (uint32_t *buf, struct aarch64_register rd, uint32_t imm,
	   unsigned shift)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, MOVK | size | ENCODE (shift, 2, 21) |
			    ENCODE (imm, 16, 5) | ENCODE (rd.num, 5, 0));
}

/* Write instructions into *BUF in order to move ADDR into a register.
   ADDR can be a 64-bit value.

   This function will emit a series of MOV and MOVK instructions, such as:

     MOV  xd, #(addr)
     MOVK xd, #(addr >> 16), lsl #16
     MOVK xd, #(addr >> 32), lsl #32
     MOVK xd, #(addr >> 48), lsl #48  */

static int
emit_mov_addr (uint32_t *buf, struct aarch64_register rd, CORE_ADDR addr)
{
  uint32_t *p = buf;

  /* The MOV (wide immediate) instruction clears the top bits of the
     register.  */
  p += emit_mov (p, rd, immediate_operand (addr & 0xffff));

  if ((addr >> 16) != 0)
    p += emit_movk (p, rd, (addr >> 16) & 0xffff, 1);
  else
    return p - buf;

  if ((addr >> 32) != 0)
    p += emit_movk (p, rd, (addr >> 32) & 0xffff, 2);
  else
    return p - buf;

  if ((addr >> 48) != 0)
    p += emit_movk (p, rd, (addr >> 48) & 0xffff, 3);

  return p - buf;
}

/* Write a SUBS instruction into *BUF.

   SUBS rd, rn, rm

   This instruction updates the condition flags.

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_subs (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUBS, rd, rn, operand);
}

/* Write a CMP instruction into *BUF.

   CMP rn, rm

   This instruction is an alias of SUBS xzr, rn, rm.

   RN and RM are the registers to compare.  */

static int
emit_cmp (uint32_t *buf, struct aarch64_register rn,
	  struct aarch64_operand operand)
{
  return emit_subs (buf, xzr, rn, operand);
}

/* Write an AND instruction into *BUF.

   AND rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_and (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, AND, rd, rn, rm);
}

/* Write an ORR instruction into *BUF.

   ORR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orr (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORR, rd, rn, rm);
}

/* Write an ORN instruction into *BUF.

   ORN rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orn (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORN, rd, rn, rm);
}

/* Write an EOR instruction into *BUF.

   EOR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_eor (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, EOR, rd, rn, rm);
}

/* Write a MVN instruction into *BUF.

   MVN rd, rm

   This is an alias for ORN rd, xzr, rm.

   RD is the destination register.
   RM is the source register.  */

static int
emit_mvn (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rm)
{
  return emit_orn (buf, rd, xzr, rm);
}

/* Write a LSLV instruction into *BUF.

   LSLV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lslv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSLV, rd, rn, rm);
}

/* Write a LSRV instruction into *BUF.

   LSRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lsrv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSRV, rd, rn, rm);
}

/* Write an ASRV instruction into *BUF.

   ASRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_asrv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ASRV, rd, rn, rm);
}

/* Write a MUL instruction into *BUF.

   MUL rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_mul (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, MUL, rd, rn, rm);
}

/* Write an MRS instruction into *BUF.  The register size is 64-bit.

   MRS xt, system_reg

   RT is the destination register.
   SYSTEM_REG is the special purpose register to read.  */

static int
emit_mrs (uint32_t *buf, struct aarch64_register rt,
	  enum aarch64_system_control_registers system_reg)
{
  return aarch64_emit_insn (buf, MRS | ENCODE (system_reg, 15, 5)
			    | ENCODE (rt.num, 5, 0));
}

/* Write an MSR instruction into *BUF.  The register size is 64-bit.

   MSR system_reg, xt

   SYSTEM_REG is the special purpose register to write.
   RT is the input register.  */

static int
emit_msr (uint32_t *buf, enum aarch64_system_control_registers system_reg,
	  struct aarch64_register rt)
{
  return aarch64_emit_insn (buf, MSR | ENCODE (system_reg, 15, 5)
			    | ENCODE (rt.num, 5, 0));
}

/* Write a SEVL instruction into *BUF.

   This is a hint instruction telling the hardware to trigger an event.  */

static int
emit_sevl (uint32_t *buf)
{
  return aarch64_emit_insn (buf, SEVL);
}

/* Write a WFE instruction into *BUF.

   This is a hint instruction telling the hardware to wait for an event.  */

static int
emit_wfe (uint32_t *buf)
{
  return aarch64_emit_insn (buf, WFE);
}

/* Write a SBFM instruction into *BUF.

   SBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, sign extending the result.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_sbfm (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, SBFM | size | n | ENCODE (immr, 6, 16)
			    | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}

/* Write a SBFX instruction into *BUF.

   SBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination, sign
   extending the result.  This is an alias for:

     SBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_sbfx (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_sbfm (buf, rd, rn, lsb, lsb + width - 1);
}

/* Write a UBFM instruction into *BUF.

   UBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, extending the result with zeros.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_ubfm (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, UBFM | size | n | ENCODE (immr, 6, 16)
			    | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}

/* Write a UBFX instruction into *BUF.

   UBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination,
   extending the result with zeros.  This is an alias for:

     UBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_ubfx (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_ubfm (buf, rd, rn, lsb, lsb + width - 1);
}

/* Write a CSINC instruction into *BUF.

   CSINC rd, rn, rm, cond

   This instruction conditionally increments rn or rm and places the result
   in rd.  RN is chosen if the condition is true.

   RD is the destination register.
   RN and RM are the source registers.
   COND is the encoded condition.  */

static int
emit_csinc (uint32_t *buf, struct aarch64_register rd,
	    struct aarch64_register rn, struct aarch64_register rm,
	    unsigned cond)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, CSINC | size | ENCODE (rm.num, 5, 16)
			    | ENCODE (cond, 4, 12) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}

/* Write a CSET instruction into *BUF.

   CSET rd, cond

   This instruction conditionally writes 1 or 0 in the destination register.
   1 is written if the condition is true.  This is an alias for:

     CSINC rd, xzr, xzr, !cond

   Note that the condition needs to be inverted.

   RD is the destination register.
   COND is the encoded condition.  */

static int
emit_cset (uint32_t *buf, struct aarch64_register rd, unsigned cond)
{
  /* The least significant bit of the condition needs toggling in order to
     invert it.  */
  return emit_csinc (buf, rd, xzr, xzr, cond ^ 0x1);
}

/* Write LEN instructions from BUF into the inferior memory at *TO.

   Note instructions are always little endian on AArch64, unlike data.  */

static void
append_insns (CORE_ADDR *to, size_t len, const uint32_t *buf)
{
  size_t byte_len = len * sizeof (uint32_t);
#if (__BYTE_ORDER == __BIG_ENDIAN)
  uint32_t *le_buf = (uint32_t *) xmalloc (byte_len);
  size_t i;

  for (i = 0; i < len; i++)
    le_buf[i] = htole32 (buf[i]);

  target_write_memory (*to, (const unsigned char *) le_buf, byte_len);

  xfree (le_buf);
#else
  target_write_memory (*to, (const unsigned char *) buf, byte_len);
#endif

  *to += byte_len;
}

/* Sub-class of struct aarch64_insn_data, store information of
   instruction relocation for fast tracepoint.  Visitor can
   relocate an instruction from BASE.INSN_ADDR to NEW_ADDR and save
   the relocated instructions in buffer pointed by INSN_PTR.  */

struct aarch64_insn_relocation_data
{
  struct aarch64_insn_data base;

  /* The new address the instruction is relocated to.  */
  CORE_ADDR new_addr;
  /* Pointer to the buffer of relocated instruction(s).  */
  uint32_t *insn_ptr;
};

/* Implementation of aarch64_insn_visitor method "b".  */

static void
aarch64_ftrace_insn_reloc_b (const int is_bl, const int32_t offset,
			     struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 28))
    insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, is_bl, new_offset);
}

1686 aarch64_ftrace_insn_reloc_b_cond (const unsigned cond
, const int32_t offset
,
1687 struct aarch64_insn_data
*data
)
1689 struct aarch64_insn_relocation_data
*insn_reloc
1690 = (struct aarch64_insn_relocation_data
*) data
;
1692 = insn_reloc
->base
.insn_addr
- insn_reloc
->new_addr
+ offset
;
1694 if (can_encode_int32 (new_offset
, 21))
1696 insn_reloc
->insn_ptr
+= emit_bcond (insn_reloc
->insn_ptr
, cond
,
1699 else if (can_encode_int32 (new_offset
, 28))
1701 /* The offset is out of range for a conditional branch
1702 instruction but not for a unconditional branch. We can use
1703 the following instructions instead:
1705 B.COND TAKEN ; If cond is true, then jump to TAKEN.
1706 B NOT_TAKEN ; Else jump over TAKEN and continue.
1713 insn_reloc
->insn_ptr
+= emit_bcond (insn_reloc
->insn_ptr
, cond
, 8);
1714 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, 0, 8);
1715 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, 0, new_offset
- 8);
1719 /* Implementation of aarch64_insn_visitor method "cb". */
1722 aarch64_ftrace_insn_reloc_cb (const int32_t offset
, const int is_cbnz
,
1723 const unsigned rn
, int is64
,
1724 struct aarch64_insn_data
*data
)
1726 struct aarch64_insn_relocation_data
*insn_reloc
1727 = (struct aarch64_insn_relocation_data
*) data
;
1729 = insn_reloc
->base
.insn_addr
- insn_reloc
->new_addr
+ offset
;
1731 if (can_encode_int32 (new_offset
, 21))
1733 insn_reloc
->insn_ptr
+= emit_cb (insn_reloc
->insn_ptr
, is_cbnz
,
1734 aarch64_register (rn
, is64
), new_offset
);
1736 else if (can_encode_int32 (new_offset
, 28))
1738 /* The offset is out of range for a compare and branch
1739 instruction but not for a unconditional branch. We can use
1740 the following instructions instead:
1742 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
1743 B NOT_TAKEN ; Else jump over TAKEN and continue.
1749 insn_reloc
->insn_ptr
+= emit_cb (insn_reloc
->insn_ptr
, is_cbnz
,
1750 aarch64_register (rn
, is64
), 8);
1751 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, 0, 8);
1752 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, 0, new_offset
- 8);
/* Implementation of aarch64_insn_visitor method "tb".  */

static void
aarch64_ftrace_insn_reloc_tb (const int32_t offset, int is_tbnz,
			      const unsigned rt, unsigned bit,
			      struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 16))
    {
      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
				       aarch64_register (rt, 1), new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a test bit and branch
	 instruction but not for an unconditional branch.  We can use
	 the following instructions instead:

	 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
	 B NOT_TAKEN         ; Else jump over TAKEN and continue.
       TAKEN:
	 B #(offset - 8)
       NOT_TAKEN:

       */

      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
				       aarch64_register (rt, 1), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
				      new_offset - 8);
    }
}

1797 aarch64_ftrace_insn_reloc_adr (const int32_t offset
, const unsigned rd
,
1799 struct aarch64_insn_data
*data
)
1801 struct aarch64_insn_relocation_data
*insn_reloc
1802 = (struct aarch64_insn_relocation_data
*) data
;
1803 /* We know exactly the address the ADR{P,} instruction will compute.
1804 We can just write it to the destination register. */
1805 CORE_ADDR address
= data
->insn_addr
+ offset
;
1809 /* Clear the lower 12 bits of the offset to get the 4K page. */
1810 insn_reloc
->insn_ptr
+= emit_mov_addr (insn_reloc
->insn_ptr
,
1811 aarch64_register (rd
, 1),
1815 insn_reloc
->insn_ptr
+= emit_mov_addr (insn_reloc
->insn_ptr
,
1816 aarch64_register (rd
, 1), address
);
1819 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
1822 aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset
, const int is_sw
,
1823 const unsigned rt
, const int is64
,
1824 struct aarch64_insn_data
*data
)
1826 struct aarch64_insn_relocation_data
*insn_reloc
1827 = (struct aarch64_insn_relocation_data
*) data
;
1828 CORE_ADDR address
= data
->insn_addr
+ offset
;
1830 insn_reloc
->insn_ptr
+= emit_mov_addr (insn_reloc
->insn_ptr
,
1831 aarch64_register (rt
, 1), address
);
1833 /* We know exactly what address to load from, and what register we
1836 MOV xd, #(oldloc + offset)
1837 MOVK xd, #((oldloc + offset) >> 16), lsl #16
1840 LDR xd, [xd] ; or LDRSW xd, [xd]
1845 insn_reloc
->insn_ptr
+= emit_ldrsw (insn_reloc
->insn_ptr
,
1846 aarch64_register (rt
, 1),
1847 aarch64_register (rt
, 1),
1848 offset_memory_operand (0));
1850 insn_reloc
->insn_ptr
+= emit_ldr (insn_reloc
->insn_ptr
,
1851 aarch64_register (rt
, is64
),
1852 aarch64_register (rt
, 1),
1853 offset_memory_operand (0));
/* Implementation of aarch64_insn_visitor method "others".  */

static void
aarch64_ftrace_insn_reloc_others (const uint32_t insn,
				  struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;

  /* The instruction is not PC relative.  Just re-emit it at the new
     location.  */
  insn_reloc->insn_ptr += aarch64_emit_insn (insn_reloc->insn_ptr, insn);
}

static const struct aarch64_insn_visitor visitor =
{
  aarch64_ftrace_insn_reloc_b,
  aarch64_ftrace_insn_reloc_b_cond,
  aarch64_ftrace_insn_reloc_cb,
  aarch64_ftrace_insn_reloc_tb,
  aarch64_ftrace_insn_reloc_adr,
  aarch64_ftrace_insn_reloc_ldr_literal,
  aarch64_ftrace_insn_reloc_others,
};

1881 /* Implementation of linux_target_ops method
1882 "install_fast_tracepoint_jump_pad". */
1885 aarch64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
,
1887 CORE_ADDR collector
,
1890 CORE_ADDR
*jump_entry
,
1891 CORE_ADDR
*trampoline
,
1892 ULONGEST
*trampoline_size
,
1893 unsigned char *jjump_pad_insn
,
1894 ULONGEST
*jjump_pad_insn_size
,
1895 CORE_ADDR
*adjusted_insn_addr
,
1896 CORE_ADDR
*adjusted_insn_addr_end
,
1904 CORE_ADDR buildaddr
= *jump_entry
;
1905 struct aarch64_insn_relocation_data insn_data
;
1907 /* We need to save the current state on the stack both to restore it
1908 later and to collect register values when the tracepoint is hit.
1910 The saved registers are pushed in a layout that needs to be in sync
1911 with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c). Later on
1912 the supply_fast_tracepoint_registers function will fill in the
1913 register cache from a pointer to saved registers on the stack we build
1916 For simplicity, we set the size of each cell on the stack to 16 bytes.
1917 This way one cell can hold any register type, from system registers
1918 to the 128 bit SIMD&FP registers. Furthermore, the stack pointer
1919 has to be 16 bytes aligned anyway.
1921 Note that the CPSR register does not exist on AArch64. Instead we
1922 can access system bits describing the process state with the
1923 MRS/MSR instructions, namely the condition flags. We save them as
1924 if they are part of a CPSR register because that's how GDB
1925 interprets these system bits. At the moment, only the condition
1926 flags are saved in CPSR (NZCV).
1928 Stack layout, each cell is 16 bytes (descending):
1930 High *-------- SIMD&FP registers from 31 down to 0. --------*
1936 *---- General purpose registers from 30 down to 0. ----*
1942 *------------- Special purpose registers. -------------*
1945 | CPSR (NZCV) | 5 cells
1948 *------------- collecting_t object --------------------*
1949 | TPIDR_EL0 | struct tracepoint * |
1950 Low *------------------------------------------------------*
1952 After this stack is set up, we issue a call to the collector, passing
1953 it the saved registers at (SP + 16). */
1955 /* Push SIMD&FP registers on the stack:
1957 SUB sp, sp, #(32 * 16)
1959 STP q30, q31, [sp, #(30 * 16)]
1964 p
+= emit_sub (p
, sp
, sp
, immediate_operand (32 * 16));
1965 for (i
= 30; i
>= 0; i
-= 2)
1966 p
+= emit_stp_q_offset (p
, i
, i
+ 1, sp
, i
* 16);
1968 /* Push general purpose registers on the stack. Note that we do not need
1969 to push x31 as it represents the xzr register and not the stack
1970 pointer in a STR instruction.
1972 SUB sp, sp, #(31 * 16)
1974 STR x30, [sp, #(30 * 16)]
1979 p
+= emit_sub (p
, sp
, sp
, immediate_operand (31 * 16));
1980 for (i
= 30; i
>= 0; i
-= 1)
1981 p
+= emit_str (p
, aarch64_register (i
, 1), sp
,
1982 offset_memory_operand (i
* 16));
1984 /* Make space for 5 more cells.
1986 SUB sp, sp, #(5 * 16)
1989 p
+= emit_sub (p
, sp
, sp
, immediate_operand (5 * 16));
1994 ADD x4, sp, #((32 + 31 + 5) * 16)
1995 STR x4, [sp, #(4 * 16)]
1998 p
+= emit_add (p
, x4
, sp
, immediate_operand ((32 + 31 + 5) * 16));
1999 p
+= emit_str (p
, x4
, sp
, offset_memory_operand (4 * 16));
2001 /* Save PC (tracepoint address):
2006 STR x3, [sp, #(3 * 16)]
2010 p
+= emit_mov_addr (p
, x3
, tpaddr
);
2011 p
+= emit_str (p
, x3
, sp
, offset_memory_operand (3 * 16));
2013 /* Save CPSR (NZCV), FPSR and FPCR:
2019 STR x2, [sp, #(2 * 16)]
2020 STR x1, [sp, #(1 * 16)]
2021 STR x0, [sp, #(0 * 16)]
2024 p
+= emit_mrs (p
, x2
, NZCV
);
2025 p
+= emit_mrs (p
, x1
, FPSR
);
2026 p
+= emit_mrs (p
, x0
, FPCR
);
2027 p
+= emit_str (p
, x2
, sp
, offset_memory_operand (2 * 16));
2028 p
+= emit_str (p
, x1
, sp
, offset_memory_operand (1 * 16));
2029 p
+= emit_str (p
, x0
, sp
, offset_memory_operand (0 * 16));
2031 /* Push the collecting_t object. It consist of the address of the
2032 tracepoint and an ID for the current thread. We get the latter by
2033 reading the tpidr_el0 system register. It corresponds to the
2034 NT_ARM_TLS register accessible with ptrace.
2041 STP x0, x1, [sp, #-16]!
2045 p
+= emit_mov_addr (p
, x0
, tpoint
);
2046 p
+= emit_mrs (p
, x1
, TPIDR_EL0
);
2047 p
+= emit_stp (p
, x0
, x1
, sp
, preindex_memory_operand (-16));
2051 The shared memory for the lock is at lockaddr. It will hold zero
2052 if no-one is holding the lock, otherwise it contains the address of
2053 the collecting_t object on the stack of the thread which acquired it.
2055 At this stage, the stack pointer points to this thread's collecting_t
2058 We use the following registers:
2059 - x0: Address of the lock.
2060 - x1: Pointer to collecting_t object.
2061 - x2: Scratch register.
2067 ; Trigger an event local to this core. So the following WFE
2068 ; instruction is ignored.
2071 ; Wait for an event. The event is triggered by either the SEVL
2072 ; or STLR instructions (store release).
2075 ; Atomically read at lockaddr. This marks the memory location as
2076 ; exclusive. This instruction also has memory constraints which
2077 ; make sure all previous data reads and writes are done before
2081 ; Try again if another thread holds the lock.
2084 ; We can lock it! Write the address of the collecting_t object.
2085 ; This instruction will fail if the memory location is not marked
2086 ; as exclusive anymore. If it succeeds, it will remove the
2087 ; exclusive mark on the memory location. This way, if another
2088 ; thread executes this instruction before us, we will fail and try
2095 p
+= emit_mov_addr (p
, x0
, lockaddr
);
2096 p
+= emit_mov (p
, x1
, register_operand (sp
));
2100 p
+= emit_ldaxr (p
, x2
, x0
);
2101 p
+= emit_cb (p
, 1, w2
, -2 * 4);
2102 p
+= emit_stxr (p
, w2
, x1
, x0
);
2103 p
+= emit_cb (p
, 1, x2
, -4 * 4);
2105 /* Call collector (struct tracepoint *, unsigned char *):
2110 ; Saved registers start after the collecting_t object.
2113 ; We use an intra-procedure-call scratch register.
2114 MOV ip0, #(collector)
2117 ; And call back to C!
2122 p
+= emit_mov_addr (p
, x0
, tpoint
);
2123 p
+= emit_add (p
, x1
, sp
, immediate_operand (16));
2125 p
+= emit_mov_addr (p
, ip0
, collector
);
2126 p
+= emit_blr (p
, ip0
);
2128 /* Release the lock.
2133 ; This instruction is a normal store with memory ordering
2134 ; constraints. Thanks to this we do not have to put a data
2135 ; barrier instruction to make sure all data read and writes are done
2136 ; before this instruction is executed. Furthermore, this instruction
2137 ; will trigger an event, letting other threads know they can grab
2142 p
+= emit_mov_addr (p
, x0
, lockaddr
);
2143 p
+= emit_stlr (p
, xzr
, x0
);
2145 /* Free collecting_t object:
2150 p
+= emit_add (p
, sp
, sp
, immediate_operand (16));
2152 /* Restore CPSR (NZCV), FPSR and FPCR. And free all special purpose
2153 registers from the stack.
2155 LDR x2, [sp, #(2 * 16)]
2156 LDR x1, [sp, #(1 * 16)]
2157 LDR x0, [sp, #(0 * 16)]
2163 ADD sp, sp #(5 * 16)
2166 p
+= emit_ldr (p
, x2
, sp
, offset_memory_operand (2 * 16));
2167 p
+= emit_ldr (p
, x1
, sp
, offset_memory_operand (1 * 16));
2168 p
+= emit_ldr (p
, x0
, sp
, offset_memory_operand (0 * 16));
2169 p
+= emit_msr (p
, NZCV
, x2
);
2170 p
+= emit_msr (p
, FPSR
, x1
);
2171 p
+= emit_msr (p
, FPCR
, x0
);
2173 p
+= emit_add (p
, sp
, sp
, immediate_operand (5 * 16));
2175 /* Pop general purpose registers:
2179 LDR x30, [sp, #(30 * 16)]
2181 ADD sp, sp, #(31 * 16)
2184 for (i
= 0; i
<= 30; i
+= 1)
2185 p
+= emit_ldr (p
, aarch64_register (i
, 1), sp
,
2186 offset_memory_operand (i
* 16));
2187 p
+= emit_add (p
, sp
, sp
, immediate_operand (31 * 16));
2189 /* Pop SIMD&FP registers:
2193 LDP q30, q31, [sp, #(30 * 16)]
2195 ADD sp, sp, #(32 * 16)
2198 for (i
= 0; i
<= 30; i
+= 2)
2199 p
+= emit_ldp_q_offset (p
, i
, i
+ 1, sp
, i
* 16);
2200 p
+= emit_add (p
, sp
, sp
, immediate_operand (32 * 16));
  /* Write the code into the inferior memory.  */
  append_insns (&buildaddr, p - buf, buf);

  /* Now emit the relocated instruction.  */
  *adjusted_insn_addr = buildaddr;
  target_read_uint32 (tpaddr, &insn);

  insn_data.base.insn_addr = tpaddr;
  insn_data.new_addr = buildaddr;
  insn_data.insn_ptr = buf;

  aarch64_relocate_instruction (insn, &visitor,
				(struct aarch64_insn_data *) &insn_data);

  /* We may not have been able to relocate the instruction.  */
  if (insn_data.insn_ptr == buf)
    {
      sprintf (err,
	       "E.Could not relocate instruction from %s to %s.",
	       core_addr_to_string_nz (tpaddr),
	       core_addr_to_string_nz (buildaddr));
      return 1;
    }
  else
    {
      append_insns (&buildaddr, insn_data.insn_ptr - buf, buf);
      *adjusted_insn_addr_end = buildaddr;
    }
  /* Go back to the start of the buffer.  */
  p = buf;

  /* Emit a branch back from the jump pad.  */
  offset = (tpaddr + orig_size - buildaddr);
  if (!can_encode_int32 (offset, 28))
    {
      sprintf (err,
	       "E.Jump back from jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
	       offset);
      return 1;
    }

  p += emit_b (p, 0, offset);
  append_insns (&buildaddr, p - buf, buf);

  /* Give the caller a branch instruction into the jump pad.  */
  offset = (*jump_entry - tpaddr);
  if (!can_encode_int32 (offset, 28))
    {
      sprintf (err,
	       "E.Jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
	       offset);
      return 1;
    }

  emit_b ((uint32_t *) jjump_pad_insn, 0, offset);
  *jjump_pad_insn_size = 4;

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
/* Helper function writing LEN instructions from START into
   current_insn_ptr.  */

static void
emit_ops_insns (const uint32_t *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d instructions at %s\n",
		  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}
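/* Note: current_insn_ptr is the bytecode compiler's global "emit
   cursor", maintained by the common tracepoint code; every emit_ops
   method below funnels its generated instructions through this
   helper, which advances the cursor past what it wrote.  */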
/* Pop a register from the stack.  */

static int
emit_pop (uint32_t *buf, struct aarch64_register rt)
{
  return emit_ldr (buf, rt, sp, postindex_memory_operand (1 * 16));
}

/* Push a register on the stack.  */

static int
emit_push (uint32_t *buf, struct aarch64_register rt)
{
  return emit_str (buf, rt, sp, preindex_memory_operand (-1 * 16));
}
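/* Each stack slot is 16 bytes even though only 8 are used, because
   AArch64 requires the stack pointer to stay 16-byte aligned whenever
   it is used as the base register of a load or store.  */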
/* Implementation of emit_ops method "emit_prologue".  */

static void
aarch64_emit_prologue (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* This function emits a prologue for the following function
     prototype:

     enum eval_result_type f (unsigned char *regs,
			      ULONGEST *value);

     The first argument is a buffer of raw registers.  The second
     argument is the result of evaluating the expression, which will
     be set to whatever is on top of the stack at the end.

     The stack set up by the prologue is as such:

     High *------------------------------------------------------*
	  | LR                                                   |
	  | FP                                                   | <- FP
	  | x1  (ULONGEST *value)                                |
	  | x0  (unsigned char *regs)                            |
     Low  *------------------------------------------------------*

     As we are implementing a stack machine, each opcode can expand the
     stack so we never know how far we are from the data saved by this
     prologue.  In order to be able to refer to value and regs later,
     we save the current stack pointer in the frame pointer.  This way,
     it is not clobbered when calling C functions.

     Finally, throughout every operation, we are using register x0 as
     the top of the stack, and x1 as a scratch register.  */
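  /* Illustrative listing of the four instructions emitted below,
     derived from their operands:

       STP x0, x1, [sp, #-32]!
       STR x30, [sp, #24]	; LR
       STR x29, [sp, #16]	; FP
       ADD x29, sp, #16

     which produces exactly the diagram above: x0 lowest, then x1,
     then the saved FP (where the new FP points), then LR.  */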
  p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-2 * 16));
  p += emit_str (p, lr, sp, offset_memory_operand (3 * 8));
  p += emit_str (p, fp, sp, offset_memory_operand (2 * 8));

  p += emit_add (p, fp, sp, immediate_operand (2 * 8));

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_epilogue".  */

static void
aarch64_emit_epilogue (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Store the result of the expression (x0) in *value.  */
  p += emit_sub (p, x1, fp, immediate_operand (1 * 8));
  p += emit_ldr (p, x1, x1, offset_memory_operand (0));
  p += emit_str (p, x0, x1, offset_memory_operand (0));

  /* Restore the previous state.  */
  p += emit_add (p, sp, fp, immediate_operand (2 * 8));
  p += emit_ldp (p, fp, lr, fp, offset_memory_operand (0));

  /* Return expr_eval_no_error.  */
  p += emit_mov (p, x0, immediate_operand (expr_eval_no_error));
  p += emit_ret (p, lr);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_add".  */

static void
aarch64_emit_add (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_add (p, x0, x1, register_operand (x0));

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_sub".  */

static void
aarch64_emit_sub (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_sub (p, x0, x1, register_operand (x0));

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_mul".  */

static void
aarch64_emit_mul (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_mul (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_lsh".  */

static void
aarch64_emit_lsh (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_lslv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_rsh_signed".  */

static void
aarch64_emit_rsh_signed (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_asrv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_rsh_unsigned".  */

static void
aarch64_emit_rsh_unsigned (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_lsrv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_ext".  */

static void
aarch64_emit_ext (int arg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_sbfx (p, x0, x0, 0, arg);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_log_not".  */

static void
aarch64_emit_log_not (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* If the top of the stack is 0, replace it with 1.  Else replace it
     with 0.  */

  p += emit_cmp (p, x0, immediate_operand (0));
  p += emit_cset (p, x0, EQ);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_bit_and".  */

static void
aarch64_emit_bit_and (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_and (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_bit_or".  */

static void
aarch64_emit_bit_or (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_orr (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_bit_xor".  */

static void
aarch64_emit_bit_xor (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_eor (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_bit_not".  */

static void
aarch64_emit_bit_not (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mvn (p, x0, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_equal".  */

static void
aarch64_emit_equal (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x0, register_operand (x1));
  p += emit_cset (p, x0, EQ);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_less_signed".  */

static void
aarch64_emit_less_signed (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  p += emit_cset (p, x0, LT);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_less_unsigned".  */

static void
aarch64_emit_less_unsigned (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  p += emit_cset (p, x0, LO);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_ref".  */

static void
aarch64_emit_ref (int size)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  switch (size)
    {
    case 1:
      p += emit_ldrb (p, w0, x0, offset_memory_operand (0));
      break;
    case 2:
      p += emit_ldrh (p, w0, x0, offset_memory_operand (0));
      break;
    case 4:
      p += emit_ldr (p, w0, x0, offset_memory_operand (0));
      break;
    case 8:
      p += emit_ldr (p, x0, x0, offset_memory_operand (0));
      break;
    default:
      /* Unknown size, bail on compilation.  */
      emit_error = 1;
      return;
    }

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_if_goto".  */

static void
aarch64_emit_if_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* The Z flag is set or cleared here.  */
  p += emit_cmp (p, x0, immediate_operand (0));
  /* This instruction must not change the Z flag.  */
  p += emit_pop (p, x0);
  /* Branch over the next instruction if x0 == 0.  */
  p += emit_bcond (p, EQ, 8);

  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_goto".  */

static void
aarch64_emit_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = 0;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "write_goto_address".  */

static void
aarch64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  uint32_t insn;

  emit_b (&insn, 0, to - from);
  append_insns (&from, 1, &insn);
}
/* Implementation of emit_ops method "emit_const".  */

static void
aarch64_emit_const (LONGEST num)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov_addr (p, x0, num);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_call".  */

static void
aarch64_emit_call (CORE_ADDR fn)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov_addr (p, ip0, fn);
  p += emit_blr (p, ip0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_reg".  */

static void
aarch64_emit_reg (int reg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Set x0 to unsigned char *regs.  */
  p += emit_sub (p, x0, fp, immediate_operand (2 * 8));
  p += emit_ldr (p, x0, x0, offset_memory_operand (0));
  p += emit_mov (p, x1, immediate_operand (reg));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (get_raw_reg_func_addr ());
}
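/* get_raw_reg_func_addr () is expected to resolve to the in-process
   agent's register-fetch helper: the code emitted above hands it the
   raw register buffer in x0 and the register number in x1, and its
   return value in x0 becomes the new top of the expression stack.  */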
/* Implementation of emit_ops method "emit_pop".  */

static void
aarch64_emit_pop (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_stack_flush".  */

static void
aarch64_emit_stack_flush (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_push (p, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_zero_ext".  */

static void
aarch64_emit_zero_ext (int arg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_ubfx (p, x0, x0, 0, arg);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_swap".  */

static void
aarch64_emit_swap (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_ldr (p, x1, sp, offset_memory_operand (0 * 16));
  p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
  p += emit_mov (p, x0, register_operand (x1));

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_stack_adjust".  */

static void
aarch64_emit_stack_adjust (int n)
{
  /* This is not needed with our design.  */
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_add (p, sp, sp, immediate_operand (n * 16));

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_int_call_1".  */

static void
aarch64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov (p, x0, immediate_operand (arg1));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (fn);
}
/* Implementation of emit_ops method "emit_void_call_2".  */

static void
aarch64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Push x0 on the stack.  */
  aarch64_emit_stack_flush ();

  /* Setup arguments for the function call:

     x0: arg1
     x1: top of the stack

     MOV x1, x0
     MOV x0, #arg1  */

  p += emit_mov (p, x1, register_operand (x0));
  p += emit_mov (p, x0, immediate_operand (arg1));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (fn);

  /* Restore x0.  */
  aarch64_emit_pop ();
}
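/* The six comparison gotos below share one pattern: pop the earlier
   operand into x1, compare it against the top of stack in x0, branch
   over one instruction on the negated condition, then emit a NOP.
   The NOP's offset is reported back via *offset_p so that the common
   tracepoint code can later patch it, through "write_goto_address",
   with the real branch once the goto target is known.  */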
/* Implementation of emit_ops method "emit_eq_goto".  */

static void
aarch64_emit_eq_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 != x1.  */
  p += emit_bcond (p, NE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_ne_goto".  */

static void
aarch64_emit_ne_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 == x1.  */
  p += emit_bcond (p, EQ, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_lt_goto".  */

static void
aarch64_emit_lt_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 >= x0.  */
  p += emit_bcond (p, GE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_le_goto".  */

static void
aarch64_emit_le_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 > x0.  */
  p += emit_bcond (p, GT, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_gt_goto".  */

static void
aarch64_emit_gt_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 <= x0.  */
  p += emit_bcond (p, LE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_ge_got".  */

static void
aarch64_emit_ge_got (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 < x0.  */
  p += emit_bcond (p, LT, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
static struct emit_ops aarch64_emit_ops_impl =
{
  aarch64_emit_prologue,
  aarch64_emit_epilogue,
  aarch64_emit_add,
  aarch64_emit_sub,
  aarch64_emit_mul,
  aarch64_emit_lsh,
  aarch64_emit_rsh_signed,
  aarch64_emit_rsh_unsigned,
  aarch64_emit_ext,
  aarch64_emit_log_not,
  aarch64_emit_bit_and,
  aarch64_emit_bit_or,
  aarch64_emit_bit_xor,
  aarch64_emit_bit_not,
  aarch64_emit_equal,
  aarch64_emit_less_signed,
  aarch64_emit_less_unsigned,
  aarch64_emit_ref,
  aarch64_emit_if_goto,
  aarch64_emit_goto,
  aarch64_write_goto_address,
  aarch64_emit_const,
  aarch64_emit_call,
  aarch64_emit_reg,
  aarch64_emit_pop,
  aarch64_emit_stack_flush,
  aarch64_emit_zero_ext,
  aarch64_emit_swap,
  aarch64_emit_stack_adjust,
  aarch64_emit_int_call_1,
  aarch64_emit_void_call_2,
  aarch64_emit_eq_goto,
  aarch64_emit_ne_goto,
  aarch64_emit_lt_goto,
  aarch64_emit_le_goto,
  aarch64_emit_gt_goto,
  aarch64_emit_ge_got,
};

/* Implementation of linux_target_ops method "emit_ops".  */

static struct emit_ops *
aarch64_emit_ops (void)
{
  return &aarch64_emit_ops_impl;
}
/* Implementation of linux_target_ops method
   "get_min_fast_tracepoint_insn_len".  */

static int
aarch64_get_min_fast_tracepoint_insn_len (void)
{
  return 4;
}

/* Implementation of linux_target_ops method "supports_range_stepping".  */

static int
aarch64_supports_range_stepping (void)
{
  return 1;
}
/* Implementation of linux_target_ops method "sw_breakpoint_from_kind".  */

static const gdb_byte *
aarch64_sw_breakpoint_from_kind (int kind, int *size)
{
  if (is_64bit_tdesc ())
    {
      *size = aarch64_breakpoint_len;
      return aarch64_breakpoint;
    }
  else
    return arm_sw_breakpoint_from_kind (kind, size);
}
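/* A process running in 32-bit (AArch32) mode uses an arm target
   description, so this query and the two below defer to the arm
   implementations, which know about ARM/Thumb breakpoint kinds.  */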
/* Implementation of linux_target_ops method "breakpoint_kind_from_pc".  */

static int
aarch64_breakpoint_kind_from_pc (CORE_ADDR *pcptr)
{
  if (is_64bit_tdesc ())
    return aarch64_breakpoint_len;
  else
    return arm_breakpoint_kind_from_pc (pcptr);
}

/* Implementation of the linux_target_ops method
   "breakpoint_kind_from_current_state".  */

static int
aarch64_breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
{
  if (is_64bit_tdesc ())
    return aarch64_breakpoint_len;
  else
    return arm_breakpoint_kind_from_current_state (pcptr);
}
/* Support for hardware single step.  */

static int
aarch64_supports_hardware_single_step (void)
{
  return 1;
}
struct linux_target_ops the_low_target =
{
  NULL, /* cannot_fetch_register */
  NULL, /* cannot_store_register */
  NULL, /* fetch_register */
  aarch64_get_pc,
  aarch64_set_pc,
  aarch64_breakpoint_kind_from_pc,
  aarch64_sw_breakpoint_from_kind,
  NULL, /* get_next_pcs */
  0,    /* decr_pc_after_break */
  aarch64_breakpoint_at,
  aarch64_supports_z_point_type,
  aarch64_insert_point,
  aarch64_remove_point,
  aarch64_stopped_by_watchpoint,
  aarch64_stopped_data_address,
  NULL, /* collect_ptrace_register */
  NULL, /* supply_ptrace_register */
  aarch64_linux_siginfo_fixup,
  aarch64_linux_new_process,
  aarch64_linux_delete_process,
  aarch64_linux_new_thread,
  aarch64_linux_delete_thread,
  aarch64_linux_new_fork,
  aarch64_linux_prepare_to_resume,
  NULL, /* process_qsupported */
  aarch64_supports_tracepoints,
  aarch64_get_thread_area,
  aarch64_install_fast_tracepoint_jump_pad,
  aarch64_emit_ops,
  aarch64_get_min_fast_tracepoint_insn_len,
  aarch64_supports_range_stepping,
  aarch64_breakpoint_kind_from_current_state,
  aarch64_supports_hardware_single_step,
  aarch64_get_syscall_trapinfo,
};

/* The linux target ops object.  */

linux_process_target *the_linux_target = &the_aarch64_target;
void
initialize_low_arch (void)
{
  initialize_low_arch_aarch32 ();

  initialize_regsets_info (&aarch64_regsets_info);
  initialize_regsets_info (&aarch64_sve_regsets_info);
}