/* GNU/Linux/AArch64 specific low level interface, for the remote server for
   GDB.

   Copyright (C) 2009-2021 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "linux-low.h"
#include "nat/aarch64-linux.h"
#include "nat/aarch64-linux-hw-point.h"
#include "arch/aarch64-insn.h"
#include "linux-aarch32-low.h"
#include "elf/common.h"
#include "tracepoint.h"

#include <signal.h>
#include "nat/gdb_ptrace.h"
#include <asm/ptrace.h>
#include <sys/uio.h>
#include <endian.h>

#include "gdb_proc_service.h"
#include "arch/aarch64.h"
#include "linux-aarch32-tdesc.h"
#include "linux-aarch64-tdesc.h"
#include "nat/aarch64-sve-linux-ptrace.h"
/* Linux target op definitions for the AArch64 architecture.  */

class aarch64_target : public linux_process_target
{
public:

  const regs_info *get_regs_info () override;

  int breakpoint_kind_from_pc (CORE_ADDR *pcptr) override;

  int breakpoint_kind_from_current_state (CORE_ADDR *pcptr) override;

  const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;

  bool supports_z_point_type (char z_type) override;

  bool supports_tracepoints () override;

  bool supports_fast_tracepoints () override;

  int install_fast_tracepoint_jump_pad
    (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
     CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
     CORE_ADDR *trampoline, ULONGEST *trampoline_size,
     unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
     CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
     char *err) override;

  int get_min_fast_tracepoint_insn_len () override;

  struct emit_ops *emit_ops () override;

protected:

  void low_arch_setup () override;

  bool low_cannot_fetch_register (int regno) override;

  bool low_cannot_store_register (int regno) override;

  bool low_supports_breakpoints () override;

  CORE_ADDR low_get_pc (regcache *regcache) override;

  void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;

  bool low_breakpoint_at (CORE_ADDR pc) override;

  int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  bool low_stopped_by_watchpoint () override;

  CORE_ADDR low_stopped_data_address () override;

  bool low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
			  int direction) override;

  arch_process_info *low_new_process () override;

  void low_delete_process (arch_process_info *info) override;

  void low_new_thread (lwp_info *) override;

  void low_delete_thread (arch_lwp_info *) override;

  void low_new_fork (process_info *parent, process_info *child) override;

  void low_prepare_to_resume (lwp_info *lwp) override;

  int low_get_thread_area (int lwpid, CORE_ADDR *addrp) override;

  bool low_supports_range_stepping () override;

  bool low_supports_catch_syscall () override;

  void low_get_syscall_trapinfo (regcache *regcache, int *sysno) override;
};

/* The singleton target ops object.  */

static aarch64_target the_aarch64_target;
bool
aarch64_target::low_cannot_fetch_register (int regno)
{
  gdb_assert_not_reached ("linux target op low_cannot_fetch_register "
			  "is not implemented by the target");
}

bool
aarch64_target::low_cannot_store_register (int regno)
{
  gdb_assert_not_reached ("linux target op low_cannot_store_register "
			  "is not implemented by the target");
}

void
aarch64_target::low_prepare_to_resume (lwp_info *lwp)
{
  aarch64_linux_prepare_to_resume (lwp);
}
/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  /* Hardware breakpoint/watchpoint data.
     The reason for them to be per-process rather than per-thread is
     due to the lack of information in the gdbserver environment;
     gdbserver is not told whether a requested hardware
     breakpoint/watchpoint is thread specific or not, so it has to set
     each hw bp/wp for every thread in the current process.  The
     higher level bp/wp management in gdb will resume a thread if a hw
     bp/wp trap is not expected for it.  Since the hw bp/wp setting is
     the same for each thread, it is reasonable for the data to live here.  */
  struct aarch64_debug_reg_state debug_reg_state;
};
/* Return true if the size of register 0 is 8 bytes.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}
/* Return true if the regcache contains the number of SVE registers.  */

static bool
is_sve_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return tdesc_contains_feature (regcache->tdesc, "org.gnu.gdb.aarch64.sve");
}
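/* Illustrative note (not part of the original sources): this is the case,
   for instance, when low_arch_setup below detects a non-zero SVE vector
   quantum (vq) and installs a target description that carries the
   "org.gnu.gdb.aarch64.sve" feature.  */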
static void
aarch64_fill_gregset (struct regcache *regcache, void *buf)
{
  struct user_pt_regs *regset = (struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    collect_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  collect_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  collect_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  collect_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}

static void
aarch64_store_gregset (struct regcache *regcache, const void *buf)
{
  const struct user_pt_regs *regset = (const struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    supply_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  supply_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  supply_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  supply_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}
static void
aarch64_fill_fpregset (struct regcache *regcache, void *buf)
{
  struct user_fpsimd_state *regset = (struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    collect_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  collect_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  collect_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}

static void
aarch64_store_fpregset (struct regcache *regcache, const void *buf)
{
  const struct user_fpsimd_state *regset
    = (const struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    supply_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  supply_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  supply_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}
/* Store the pauth registers to regcache.  */

static void
aarch64_store_pauthregset (struct regcache *regcache, const void *buf)
{
  uint64_t *pauth_regset = (uint64_t *) buf;
  int pauth_base = find_regno (regcache->tdesc, "pauth_dmask");

  supply_register (regcache, AARCH64_PAUTH_DMASK_REGNUM (pauth_base),
		   &pauth_regset[0]);
  supply_register (regcache, AARCH64_PAUTH_CMASK_REGNUM (pauth_base),
		   &pauth_regset[1]);
}
bool
aarch64_target::low_supports_breakpoints ()
{
  return true;
}

/* Implementation of linux target ops method "low_get_pc".  */

CORE_ADDR
aarch64_target::low_get_pc (regcache *regcache)
{
  if (register_size (regcache->tdesc, 0) == 8)
    return linux_get_pc_64bit (regcache);
  else
    return linux_get_pc_32bit (regcache);
}

/* Implementation of linux target ops method "low_set_pc".  */

void
aarch64_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
{
  if (register_size (regcache->tdesc, 0) == 8)
    linux_set_pc_64bit (regcache, pc);
  else
    linux_set_pc_32bit (regcache, pc);
}
#define aarch64_breakpoint_len 4

/* AArch64 BRK software debug mode instruction.
   This instruction needs to match gdb/aarch64-tdep.c
   (aarch64_default_breakpoint).  */
static const gdb_byte aarch64_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};

/* Implementation of linux target ops method "low_breakpoint_at".  */

bool
aarch64_target::low_breakpoint_at (CORE_ADDR where)
{
  if (is_64bit_tdesc ())
    {
      gdb_byte insn[aarch64_breakpoint_len];

      read_memory (where, (unsigned char *) &insn, aarch64_breakpoint_len);
      if (memcmp (insn, aarch64_breakpoint, aarch64_breakpoint_len) == 0)
	return true;

      return false;
    }
  else
    return arm_breakpoint_at (where);
}
static void
aarch64_init_debug_reg_state (struct aarch64_debug_reg_state *state)
{
  int i;

  for (i = 0; i < AARCH64_HBP_MAX_NUM; ++i)
    {
      state->dr_addr_bp[i] = 0;
      state->dr_ctrl_bp[i] = 0;
      state->dr_ref_count_bp[i] = 0;
    }

  for (i = 0; i < AARCH64_HWP_MAX_NUM; ++i)
    {
      state->dr_addr_wp[i] = 0;
      state->dr_ctrl_wp[i] = 0;
      state->dr_ref_count_wp[i] = 0;
    }
}
/* Return the pointer to the debug register state structure in the
   current process' arch-specific data area.  */

struct aarch64_debug_reg_state *
aarch64_get_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}
/* Implementation of target ops method "supports_z_point_type".  */

bool
aarch64_target::supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_READ_WP:
    case Z_PACKET_ACCESS_WP:
      return true;
    default:
      return false;
    }
}
/* Implementation of linux target ops method "low_insert_point".

   It actually only records the info of the to-be-inserted bp/wp;
   the actual insertion will happen when threads are resumed.  */

int
aarch64_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
				  int len, raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "insert_point on entry (addr=0x%08lx, len=%d)\n",
	     (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  if (targ_type != hw_execute)
    {
      if (aarch64_linux_region_ok_for_watchpoint (addr, len))
	ret = aarch64_handle_watchpoint (targ_type, addr, len,
					 1 /* is_insert */, state);
      else
	ret = -1;
    }
  else
    {
      if (len == 3)
	{
	  /* LEN is 3 means the breakpoint is set on a 32-bit thumb
	     instruction.  Set it to 2 to correctly encode length bit
	     mask in hardware/watchpoint control register.  */
	  len = 2;
	}
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
				       1 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "insert_point", addr, len,
				  targ_type);

  return ret;
}
/* Implementation of linux target ops method "low_remove_point".

   It actually only records the info of the to-be-removed bp/wp,
   the actual removal will be done when threads are resumed.  */

int
aarch64_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
				  int len, raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "remove_point on entry (addr=0x%08lx, len=%d)\n",
	     (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  /* Set up state pointers.  */
  if (targ_type != hw_execute)
    ret = aarch64_handle_watchpoint (targ_type, addr, len,
				     0 /* is_insert */, state);
  else
    {
      if (len == 3)
	{
	  /* LEN is 3 means the breakpoint is set on a 32-bit thumb
	     instruction.  Set it to 2 to correctly encode length bit
	     mask in hardware/watchpoint control register.  */
	  len = 2;
	}
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
				       0 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "remove_point", addr, len,
				  targ_type);

  return ret;
}
/* Return the address only having significant bits.  This is used to ignore
   the top byte (TBI).  */

static CORE_ADDR
address_significant (CORE_ADDR addr)
{
  /* Clear insignificant bits of a target address and sign extend resulting
     address.  */
  int addr_bit = 56;
  CORE_ADDR sign = (CORE_ADDR) 1 << (addr_bit - 1);
  addr &= ((CORE_ADDR) 1 << addr_bit) - 1;
  addr = (addr ^ sign) - sign;

  return addr;
}
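/* Worked example (illustrative, not from the original sources), assuming
   the 56 significant address bits used above: a tagged user-space pointer
   such as 0x5600aaaabbbbcccc has its top byte dropped and becomes
   0x0000aaaabbbbcccc, while a tagged kernel-style pointer such as
   0xf8ff8000deadbeef is masked and then sign extended from bit 55,
   yielding 0xffff8000deadbeef.  */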
/* Implementation of linux target ops method "low_stopped_data_address".  */

CORE_ADDR
aarch64_target::low_stopped_data_address ()
{
  siginfo_t siginfo;
  int pid, i;
  struct aarch64_debug_reg_state *state;

  pid = lwpid_of (current_thread);

  /* Get the siginfo.  */
  if (ptrace (PTRACE_GETSIGINFO, pid, NULL, &siginfo) != 0)
    return (CORE_ADDR) 0;

  /* Need to be a hardware breakpoint/watchpoint trap.  */
  if (siginfo.si_signo != SIGTRAP
      || (siginfo.si_code & 0xffff) != 0x0004 /* TRAP_HWBKPT */)
    return (CORE_ADDR) 0;

  /* Make sure to ignore the top byte, otherwise we may not recognize a
     hardware watchpoint hit.  The stopped data addresses coming from the
     kernel can potentially be tagged addresses.  */
  const CORE_ADDR addr_trap
    = address_significant ((CORE_ADDR) siginfo.si_addr);

  /* Check if the address matches any watched address.  */
  state = aarch64_get_debug_reg_state (pid_of (current_thread));
  for (i = aarch64_num_wp_regs - 1; i >= 0; --i)
    {
      const unsigned int offset
	= aarch64_watchpoint_offset (state->dr_ctrl_wp[i]);
      const unsigned int len = aarch64_watchpoint_length (state->dr_ctrl_wp[i]);
      const CORE_ADDR addr_watch = state->dr_addr_wp[i] + offset;
      const CORE_ADDR addr_watch_aligned = align_down (state->dr_addr_wp[i], 8);
      const CORE_ADDR addr_orig = state->dr_addr_orig_wp[i];

      if (state->dr_ref_count_wp[i]
	  && DR_CONTROL_ENABLED (state->dr_ctrl_wp[i])
	  && addr_trap >= addr_watch_aligned
	  && addr_trap < addr_watch + len)
	{
	  /* ADDR_TRAP reports the first address of the memory range
	     accessed by the CPU, regardless of what was the memory
	     range watched.  Thus, a large CPU access that straddles
	     the ADDR_WATCH..ADDR_WATCH+LEN range may result in an
	     ADDR_TRAP that is lower than the
	     ADDR_WATCH..ADDR_WATCH+LEN range.  E.g.:

	     addr: |   4   |   5   |   6   |   7   |   8   |
			   |---- range watched ----|
		   |----------- range accessed ------------|

	     In this case, ADDR_TRAP will be 4.

	     To match a watchpoint known to GDB core, we must never
	     report *ADDR_P outside of any ADDR_WATCH..ADDR_WATCH+LEN
	     range.  ADDR_WATCH <= ADDR_TRAP < ADDR_ORIG is a false
	     positive on kernels older than 4.10.  See PR.  */
	  return addr_orig;
	}
    }

  return (CORE_ADDR) 0;
}
/* Implementation of linux target ops method "low_stopped_by_watchpoint".  */

bool
aarch64_target::low_stopped_by_watchpoint ()
{
  return (low_stopped_data_address () != 0);
}
/* Fetch the thread-local storage pointer for libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
  return aarch64_ps_get_thread_area (ph, lwpid, idx, base,
				     is_64bit_tdesc ());
}
/* Implementation of linux target ops method "low_siginfo_fixup".  */

bool
aarch64_target::low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
				   int direction)
{
  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    {
      if (direction == 0)
	aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo *) inf,
					     native);
      else
	aarch64_siginfo_from_compat_siginfo (native,
					     (struct compat_siginfo *) inf);

      return true;
    }

  return false;
}
/* Implementation of linux target ops method "low_new_process".  */

arch_process_info *
aarch64_target::low_new_process ()
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  aarch64_init_debug_reg_state (&info->debug_reg_state);

  return info;
}

/* Implementation of linux target ops method "low_delete_process".  */

void
aarch64_target::low_delete_process (arch_process_info *info)
{
  xfree (info);
}

void
aarch64_target::low_new_thread (lwp_info *lwp)
{
  aarch64_linux_new_thread (lwp);
}

void
aarch64_target::low_delete_thread (arch_lwp_info *arch_lwp)
{
  aarch64_linux_delete_thread (arch_lwp);
}
/* Implementation of linux target ops method "low_new_fork".  */

void
aarch64_target::low_new_fork (process_info *parent,
			      process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
	      && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
	      && child->priv->arch_private != NULL);

  /* Linux kernels before the 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     inherit hardware debug registers from the parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug register mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug register mirrors will become zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}
/* Matches HWCAP_PACA in kernel header arch/arm64/include/uapi/asm/hwcap.h.  */
#define AARCH64_HWCAP_PACA (1 << 30)

/* Implementation of linux target ops method "low_arch_setup".  */

void
aarch64_target::low_arch_setup ()
{
  unsigned int machine;
  int is_elf64;
  int tid;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (is_elf64)
    {
      uint64_t vq = aarch64_sve_get_vq (tid);
      unsigned long hwcap = linux_get_hwcap (8);
      bool pauth_p = hwcap & AARCH64_HWCAP_PACA;

      current_process ()->tdesc = aarch64_linux_read_description (vq, pauth_p);
    }
  else
    current_process ()->tdesc = aarch32_linux_read_description ();

  aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread));
}
/* Wrapper for aarch64_sve_regs_copy_to_reg_buf.  */

static void
aarch64_sve_regs_copy_to_regcache (struct regcache *regcache, const void *buf)
{
  return aarch64_sve_regs_copy_to_reg_buf (regcache, buf);
}

/* Wrapper for aarch64_sve_regs_copy_from_reg_buf.  */

static void
aarch64_sve_regs_copy_from_regcache (struct regcache *regcache, void *buf)
{
  return aarch64_sve_regs_copy_from_reg_buf (regcache, buf);
}
static struct regset_info aarch64_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_FPREGSET,
    sizeof (struct user_fpsimd_state), FP_REGS,
    aarch64_fill_fpregset, aarch64_store_fpregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
    AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
    NULL, aarch64_store_pauthregset },
  NULL_REGSET
};

static struct regsets_info aarch64_regsets_info =
  {
    aarch64_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

static struct regs_info regs_info_aarch64 =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs */
    &aarch64_regsets_info,
  };
static struct regset_info aarch64_sve_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_SVE,
    SVE_PT_SIZE (AARCH64_MAX_SVE_VQ, SVE_PT_REGS_SVE), EXTENDED_REGS,
    aarch64_sve_regs_copy_from_regcache, aarch64_sve_regs_copy_to_regcache },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
    AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
    NULL, aarch64_store_pauthregset },
  NULL_REGSET
};

static struct regsets_info aarch64_sve_regsets_info =
  {
    aarch64_sve_regsets, /* regsets.  */
    0, /* num_regsets.  */
    NULL, /* disabled_regsets.  */
  };

static struct regs_info regs_info_aarch64_sve =
  {
    NULL, /* regset_bitmap.  */
    NULL, /* usrregs.  */
    &aarch64_sve_regsets_info,
  };
/* Implementation of linux target ops method "get_regs_info".  */

const regs_info *
aarch64_target::get_regs_info ()
{
  if (!is_64bit_tdesc ())
    return &regs_info_aarch32;

  if (is_sve_tdesc ())
    return &regs_info_aarch64_sve;

  return &regs_info_aarch64;
}
/* Implementation of target ops method "supports_tracepoints".  */

bool
aarch64_target::supports_tracepoints ()
{
  if (current_thread == NULL)
    return true;
  else
    {
      /* We don't support tracepoints on aarch32 now.  */
      return is_64bit_tdesc ();
    }
}
/* Implementation of linux target ops method "low_get_thread_area".  */

int
aarch64_target::low_get_thread_area (int lwpid, CORE_ADDR *addrp)
{
  struct iovec iovec;
  uint64_t reg;

  iovec.iov_base = &reg;
  iovec.iov_len = sizeof (reg);

  if (ptrace (PTRACE_GETREGSET, lwpid, NT_ARM_TLS, &iovec) != 0)
    return -1;

  *addrp = reg;

  return 0;
}
bool
aarch64_target::low_supports_catch_syscall ()
{
  return true;
}
/* Implementation of linux target ops method "low_get_syscall_trapinfo".  */

void
aarch64_target::low_get_syscall_trapinfo (regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      ULONGEST l_sysno;

      collect_register_by_name (regcache, "x8", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "r7", sysno);
}
/* List of condition codes that we need.  */

enum aarch64_condition_codes
{
  EQ = 0x0,
  NE = 0x1,
  LO = 0x3,
  GE = 0xa,
  LT = 0xb,
  GT = 0xc,
  LE = 0xd,
};
enum aarch64_operand_type
{
  OPERAND_IMMEDIATE,
  OPERAND_REGISTER,
};

/* Representation of an operand.  At this time, it only supports register
   and immediate types.  */

struct aarch64_operand
{
  /* Type of the operand.  */
  enum aarch64_operand_type type;

  /* Value of the operand according to the type.  */
  union
    {
      uint32_t imm;
      struct aarch64_register reg;
    };
};
/* List of registers that we are currently using; we can add more here as
   we need to use them.  */

/* General purpose scratch registers (64 bit).  */
static const struct aarch64_register x0 = { 0, 1 };
static const struct aarch64_register x1 = { 1, 1 };
static const struct aarch64_register x2 = { 2, 1 };
static const struct aarch64_register x3 = { 3, 1 };
static const struct aarch64_register x4 = { 4, 1 };

/* General purpose scratch registers (32 bit).  */
static const struct aarch64_register w0 = { 0, 0 };
static const struct aarch64_register w2 = { 2, 0 };

/* Intra-procedure scratch registers.  */
static const struct aarch64_register ip0 = { 16, 1 };

/* Special purpose registers.  */
static const struct aarch64_register fp = { 29, 1 };
static const struct aarch64_register lr = { 30, 1 };
static const struct aarch64_register sp = { 31, 1 };
static const struct aarch64_register xzr = { 31, 1 };
/* Dynamically allocate a new register.  If we know the register
   statically, we should make it a global as above instead of using this
   helper function.  */

static struct aarch64_register
aarch64_register (unsigned num, int is64)
{
  return (struct aarch64_register) { num, is64 };
}
/* Helper function to create a register operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, register_operand (x1));  */

static struct aarch64_operand
register_operand (struct aarch64_register reg)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_REGISTER;
  operand.reg = reg;

  return operand;
}

/* Helper function to create an immediate operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, immediate_operand (12));  */

static struct aarch64_operand
immediate_operand (uint32_t imm)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_IMMEDIATE;
  operand.imm = imm;

  return operand;
}
/* Helper function to create an offset memory operand.

   For example:
   p += emit_ldr (p, x0, sp, offset_memory_operand (16));  */

static struct aarch64_memory_operand
offset_memory_operand (int32_t offset)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_OFFSET, offset };
}

/* Helper function to create a pre-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, preindex_memory_operand (16));  */

static struct aarch64_memory_operand
preindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_PREINDEX, index };
}

/* Helper function to create a post-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, postindex_memory_operand (16));  */

static struct aarch64_memory_operand
postindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_POSTINDEX, index };
}
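/* Quick reference (illustrative summary, matching the emit_* comments
   further below): the three helpers above correspond to the AArch64
   addressing forms

     offset_memory_operand (16)     ->  [rn, #16]     base plus offset
     preindex_memory_operand (16)   ->  [rn, #16]!    write-back before access
     postindex_memory_operand (16)  ->  [rn], #16     write-back after access  */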
/* System control registers.  These special registers can be written and
   read with the MRS and MSR instructions.

   - NZCV: Condition flags.  GDB refers to this register under the CPSR
     name.
   - FPSR: Floating-point status register.
   - FPCR: Floating-point control registers.
   - TPIDR_EL0: Software thread ID register.  */

enum aarch64_system_control_registers
{
  /*          op0           op1           crn          crm          op2  */
  NZCV =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
  FPSR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
  FPCR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
  TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
};
/* Write a BLR instruction into *BUF.

   RN is the register to branch to.  */

static int
emit_blr (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, BLR | ENCODE (rn.num, 5, 5));
}

/* Write a RET instruction into *BUF.

   RN is the register to branch to.  */

static int
emit_ret (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, RET | ENCODE (rn.num, 5, 5));
}
static int
emit_load_store_pair (uint32_t *buf, enum aarch64_opcodes opcode,
		      struct aarch64_register rt,
		      struct aarch64_register rt2,
		      struct aarch64_register rn,
		      struct aarch64_memory_operand operand)
{
  uint32_t opc;
  uint32_t pre_index;
  uint32_t write_back;

  if (rt.is64)
    opc = ENCODE (2, 2, 30);
  else
    opc = ENCODE (0, 2, 30);

  switch (operand.type)
    {
    case MEMORY_OPERAND_OFFSET:
      {
	pre_index = ENCODE (1, 1, 24);
	write_back = ENCODE (0, 1, 23);
	break;
      }
    case MEMORY_OPERAND_POSTINDEX:
      {
	pre_index = ENCODE (0, 1, 24);
	write_back = ENCODE (1, 1, 23);
	break;
      }
    case MEMORY_OPERAND_PREINDEX:
      {
	pre_index = ENCODE (1, 1, 24);
	write_back = ENCODE (1, 1, 23);
	break;
      }
    default:
      return 0;
    }

  return aarch64_emit_insn (buf, opcode | opc | pre_index | write_back
			    | ENCODE (operand.index >> 3, 7, 15)
			    | ENCODE (rt2.num, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}
/* Write a STP instruction into *BUF.

   STP rt, rt2, [rn, #offset]
   STP rt, rt2, [rn, #index]!
   STP rt, rt2, [rn], #index

   RT and RT2 are the registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_stp (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rt2, struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, STP, rt, rt2, rn, operand);
}

/* Write a LDP instruction into *BUF.

   LDP rt, rt2, [rn, #offset]
   LDP rt, rt2, [rn, #index]!
   LDP rt, rt2, [rn], #index

   RT and RT2 are the registers to load into.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_ldp (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rt2, struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, LDP, rt, rt2, rn, operand);
}
/* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.

   LDP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to load into.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_ldp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
		   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, LDP_SIMD_VFP | opc | pre_index
			    | ENCODE (offset >> 4, 7, 15)
			    | ENCODE (rt2, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

/* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.

   STP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_stp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
		   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, STP_SIMD_VFP | opc | pre_index
			    | ENCODE (offset >> 4, 7, 15)
			    | ENCODE (rt2, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}
/* Write a LDRH instruction into *BUF.

   LDRH wt, [xn, #offset]
   LDRH wt, [xn, #index]!
   LDRH wt, [xn], #index

   RT is the register to load into.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_ldrh (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn,
	   struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 1, LDR, rt, rn, operand);
}

/* Write a LDRB instruction into *BUF.

   LDRB wt, [xn, #offset]
   LDRB wt, [xn, #index]!
   LDRB wt, [xn], #index

   RT is the register to load into.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_ldrb (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn,
	   struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 0, LDR, rt, rn, operand);
}
/* Write a STR instruction into *BUF.

   STR rt, [rn, #offset]
   STR rt, [rn, #index]!
   STR rt, [rn], #index

   RT is the register to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_str (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, rt.is64 ? 3 : 2, STR, rt, rn, operand);
}
/* Helper function emitting an exclusive load or store instruction.  */

static int
emit_load_store_exclusive (uint32_t *buf, uint32_t size,
			   enum aarch64_opcodes opcode,
			   struct aarch64_register rs,
			   struct aarch64_register rt,
			   struct aarch64_register rt2,
			   struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, opcode | ENCODE (size, 2, 30)
			    | ENCODE (rs.num, 5, 16) | ENCODE (rt2.num, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}
/* Write a LDAXR instruction into *BUF.

   RT is the destination register.
   RN is the base address register.  */

static int
emit_ldaxr (uint32_t *buf, struct aarch64_register rt,
	    struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, LDAXR, xzr, rt,
				    xzr, rn);
}

/* Write a STXR instruction into *BUF.

   RS is the result register; it indicates whether the store succeeded.
   RT is the destination register.
   RN is the base address register.  */

static int
emit_stxr (uint32_t *buf, struct aarch64_register rs,
	   struct aarch64_register rt, struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STXR, rs, rt,
				    xzr, rn);
}

/* Write a STLR instruction into *BUF.

   RT is the register to store.
   RN is the base address register.  */

static int
emit_stlr (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STLR, xzr, rt,
				    xzr, rn);
}
/* Helper function for data processing instructions with register sources.  */

static int
emit_data_processing_reg (uint32_t *buf, uint32_t opcode,
			  struct aarch64_register rd,
			  struct aarch64_register rn,
			  struct aarch64_register rm)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, opcode | size | ENCODE (rm.num, 5, 16)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rd.num, 5, 0));
}
/* Helper function for data processing instructions taking either a register
   or an immediate as source operand.  */

static int
emit_data_processing (uint32_t *buf, enum aarch64_opcodes opcode,
		      struct aarch64_register rd,
		      struct aarch64_register rn,
		      struct aarch64_operand operand)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  /* The opcode is different for register and immediate source operands.  */
  uint32_t operand_opcode;

  if (operand.type == OPERAND_IMMEDIATE)
    {
      /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (8, 4, 25);

      return aarch64_emit_insn (buf, opcode | operand_opcode | size
				| ENCODE (operand.imm, 12, 10)
				| ENCODE (rn.num, 5, 5)
				| ENCODE (rd.num, 5, 0));
    }
  else
    {
      /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (5, 4, 25);

      return emit_data_processing_reg (buf, opcode | operand_opcode, rd,
				       rn, operand.reg);
    }
}
/* Write an ADD instruction into *BUF.

   This function handles both an immediate and register add.

   RD is the destination register.
   RN is the input register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_add (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, ADD, rd, rn, operand);
}

/* Write a SUB instruction into *BUF.

   This function handles both an immediate and register sub.

   RD is the destination register.
   RN is the input register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_sub (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUB, rd, rn, operand);
}
/* Write a MOV instruction into *BUF.

   This function handles both a wide immediate move and a register move,
   with the condition that the source register is not xzr.  xzr and the
   stack pointer share the same encoding and this function only supports
   the stack pointer.

   RD is the destination register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_mov (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_operand operand)
{
  if (operand.type == OPERAND_IMMEDIATE)
    {
      uint32_t size = ENCODE (rd.is64, 1, 31);
      /* Do not shift the immediate.  */
      uint32_t shift = ENCODE (0, 2, 21);

      return aarch64_emit_insn (buf, MOV | size | shift
				| ENCODE (operand.imm, 16, 5)
				| ENCODE (rd.num, 5, 0));
    }
  else
    return emit_add (buf, rd, operand.reg, immediate_operand (0));
}
/* Write a MOVK instruction into *BUF.

   MOVK rd, #imm, lsl #shift

   RD is the destination register.
   IMM is the immediate.
   SHIFT is the logical shift left to apply to IMM.  */

static int
emit_movk (uint32_t *buf, struct aarch64_register rd, uint32_t imm,
	   unsigned shift)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, MOVK | size | ENCODE (shift, 2, 21) |
			    ENCODE (imm, 16, 5) | ENCODE (rd.num, 5, 0));
}
/* Write instructions into *BUF in order to move ADDR into a register.
   ADDR can be a 64-bit value.

   This function will emit a series of MOV and MOVK instructions, such as:

     MOV  xd, #(addr)
     MOVK xd, #(addr >> 16), lsl #16
     MOVK xd, #(addr >> 32), lsl #32
     MOVK xd, #(addr >> 48), lsl #48  */

static int
emit_mov_addr (uint32_t *buf, struct aarch64_register rd, CORE_ADDR addr)
{
  uint32_t *p = buf;

  /* The MOV (wide immediate) instruction clears the top bits of the
     register.  */
  p += emit_mov (p, rd, immediate_operand (addr & 0xffff));

  if ((addr >> 16) != 0)
    p += emit_movk (p, rd, (addr >> 16) & 0xffff, 1);
  else
    return p - buf;

  if ((addr >> 32) != 0)
    p += emit_movk (p, rd, (addr >> 32) & 0xffff, 2);
  else
    return p - buf;

  if ((addr >> 48) != 0)
    p += emit_movk (p, rd, (addr >> 48) & 0xffff, 3);

  return p - buf;
}
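/* Worked example (illustrative, not from the original sources): for
   ADDR == 0x0000aaaade4210c4, emit_mov_addr emits

     MOV  xd, #0x10c4
     MOVK xd, #0xde42, lsl #16
     MOVK xd, #0xaaaa, lsl #32

   and stops there because bits 48..63 of the address are all zero.  */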
/* Write a SUBS instruction into *BUF.

   This instruction updates the condition flags.

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_subs (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUBS, rd, rn, operand);
}

/* Write a CMP instruction into *BUF.

   This instruction is an alias of SUBS xzr, rn, rm.

   RN and RM are the registers to compare.  */

static int
emit_cmp (uint32_t *buf, struct aarch64_register rn,
	  struct aarch64_operand operand)
{
  return emit_subs (buf, xzr, rn, operand);
}
/* Write an AND instruction into *BUF.

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_and (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, AND, rd, rn, rm);
}

/* Write an ORR instruction into *BUF.

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orr (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORR, rd, rn, rm);
}

/* Write an ORN instruction into *BUF.

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orn (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORN, rd, rn, rm);
}

/* Write an EOR instruction into *BUF.

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_eor (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, EOR, rd, rn, rm);
}
/* Write a MVN instruction into *BUF.

   This is an alias for ORN rd, xzr, rm.

   RD is the destination register.
   RM is the source register.  */

static int
emit_mvn (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rm)
{
  return emit_orn (buf, rd, xzr, rm);
}

/* Write a LSLV instruction into *BUF.

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lslv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSLV, rd, rn, rm);
}

/* Write a LSRV instruction into *BUF.

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lsrv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSRV, rd, rn, rm);
}

/* Write a ASRV instruction into *BUF.

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_asrv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ASRV, rd, rn, rm);
}

/* Write a MUL instruction into *BUF.

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_mul (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, MUL, rd, rn, rm);
}
/* Write a MRS instruction into *BUF.  The register size is 64-bit.

   RT is the destination register.
   SYSTEM_REG is the special purpose register to read.  */

static int
emit_mrs (uint32_t *buf, struct aarch64_register rt,
	  enum aarch64_system_control_registers system_reg)
{
  return aarch64_emit_insn (buf, MRS | ENCODE (system_reg, 15, 5)
			    | ENCODE (rt.num, 5, 0));
}

/* Write a MSR instruction into *BUF.  The register size is 64-bit.

   SYSTEM_REG is the special purpose register to write.
   RT is the input register.  */

static int
emit_msr (uint32_t *buf, enum aarch64_system_control_registers system_reg,
	  struct aarch64_register rt)
{
  return aarch64_emit_insn (buf, MSR | ENCODE (system_reg, 15, 5)
			    | ENCODE (rt.num, 5, 0));
}
/* Write a SEVL instruction into *BUF.

   This is a hint instruction telling the hardware to trigger an event.  */

static int
emit_sevl (uint32_t *buf)
{
  return aarch64_emit_insn (buf, SEVL);
}

/* Write a WFE instruction into *BUF.

   This is a hint instruction telling the hardware to wait for an event.  */

static int
emit_wfe (uint32_t *buf)
{
  return aarch64_emit_insn (buf, WFE);
}
/* Write a SBFM instruction into *BUF.

   SBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, sign extending the result.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_sbfm (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, SBFM | size | n | ENCODE (immr, 6, 16)
			    | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}
/* Write a SBFX instruction into *BUF.

   SBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination, sign
   extending the result.  This is an alias for:

     SBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_sbfx (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_sbfm (buf, rd, rn, lsb, lsb + width - 1);
}
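/* Illustrative example (not from the original sources): extracting a
   sign-extended 16-bit field starting at bit 8,

     emit_sbfx (p, x0, x1, 8, 16)

   is equivalent to emit_sbfm (p, x0, x1, 8, 23), i.e. SBFM x0, x1, #8, #23.  */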
/* Write a UBFM instruction into *BUF.

   UBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, extending the result with zeros.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_ubfm (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, UBFM | size | n | ENCODE (immr, 6, 16)
			    | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}
/* Write a UBFX instruction into *BUF.

   UBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination,
   extending the result with zeros.  This is an alias for:

     UBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_ubfx (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_ubfm (buf, rd, rn, lsb, lsb + width - 1);
}
/* Write a CSINC instruction into *BUF.

   CSINC rd, rn, rm, cond

   This instruction conditionally increments rn or rm and places the result
   in rd.  rn is chosen if the condition is true.

   RD is the destination register.
   RN and RM are the source registers.
   COND is the encoded condition.  */

static int
emit_csinc (uint32_t *buf, struct aarch64_register rd,
	    struct aarch64_register rn, struct aarch64_register rm,
	    unsigned cond)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, CSINC | size | ENCODE (rm.num, 5, 16)
			    | ENCODE (cond, 4, 12) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}
/* Write a CSET instruction into *BUF.

   This instruction conditionally writes 1 or 0 in the destination register.
   1 is written if the condition is true.  This is an alias for:

     CSINC rd, xzr, xzr, !cond

   Note that the condition needs to be inverted.

   RD is the destination register.
   COND is the encoded condition.  */

static int
emit_cset (uint32_t *buf, struct aarch64_register rd, unsigned cond)
{
  /* The least significant bit of the condition needs toggling in order to
     invert it.  */
  return emit_csinc (buf, rd, xzr, xzr, cond ^ 0x1);
}
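/* Illustrative example (not from the original sources): emit_cset (p, x0, NE)
   emits CSINC x0, xzr, xzr, EQ, so x0 becomes 1 when the NE condition holds
   and 0 otherwise.  */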
/* Write LEN instructions from BUF into the inferior memory at *TO.

   Note instructions are always little endian on AArch64, unlike data.  */

static void
append_insns (CORE_ADDR *to, size_t len, const uint32_t *buf)
{
  size_t byte_len = len * sizeof (uint32_t);
#if (__BYTE_ORDER == __BIG_ENDIAN)
  uint32_t *le_buf = (uint32_t *) xmalloc (byte_len);
  size_t i;

  for (i = 0; i < len; i++)
    le_buf[i] = htole32 (buf[i]);

  target_write_memory (*to, (const unsigned char *) le_buf, byte_len);

  xfree (le_buf);
#else
  target_write_memory (*to, (const unsigned char *) buf, byte_len);
#endif

  *to += byte_len;
}
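/* Usage note: install_fast_tracepoint_jump_pad below accumulates
   instructions with the emit_* helpers in a local uint32_t buffer and then
   flushes them with append_insns (&buildaddr, p - buf, buf), which also
   advances BUILDADDR past the code just written.  */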
/* Sub-class of struct aarch64_insn_data, storing information used to
   relocate an instruction for fast tracepoints.  The visitor can
   relocate an instruction from BASE.INSN_ADDR to NEW_ADDR and save
   the relocated instructions in the buffer pointed to by INSN_PTR.  */

struct aarch64_insn_relocation_data
{
  struct aarch64_insn_data base;

  /* The new address the instruction is relocated to.  */
  CORE_ADDR new_addr;

  /* Pointer to the buffer of relocated instruction(s).  */
  uint32_t *insn_ptr;
};
/* Implementation of aarch64_insn_visitor method "b".  */

static void
aarch64_ftrace_insn_reloc_b (const int is_bl, const int32_t offset,
			     struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 28))
    insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, is_bl, new_offset);
}
/* Implementation of aarch64_insn_visitor method "b_cond".  */

static void
aarch64_ftrace_insn_reloc_b_cond (const unsigned cond, const int32_t offset,
				  struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 21))
    {
      insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond,
					  new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a conditional branch
	 instruction but not for an unconditional branch.  We can use
	 the following instructions instead:

	 B.COND TAKEN    ; If cond is true, then jump to TAKEN.
	 B NOT_TAKEN     ; Else jump over TAKEN and continue.
	 TAKEN:
	 B #(offset - 8)
	 NOT_TAKEN:
	 */

      insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
    }
}
/* Implementation of aarch64_insn_visitor method "cb".  */

static void
aarch64_ftrace_insn_reloc_cb (const int32_t offset, const int is_cbnz,
			      const unsigned rn, int is64,
			      struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 21))
    {
      insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
				       aarch64_register (rn, is64), new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a compare and branch
	 instruction but not for an unconditional branch.  We can use
	 the following instructions instead:

	 CBZ xn, TAKEN   ; xn == 0, then jump to TAKEN.
	 B NOT_TAKEN     ; Else jump over TAKEN and continue.
	 TAKEN:
	 B #(offset - 8)
	 NOT_TAKEN:
	 */

      insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
				       aarch64_register (rn, is64), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
    }
}
/* Implementation of aarch64_insn_visitor method "tb".  */

static void
aarch64_ftrace_insn_reloc_tb (const int32_t offset, int is_tbnz,
			      const unsigned rt, unsigned bit,
			      struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 16))
    {
      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
				       aarch64_register (rt, 1), new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a test bit and branch
	 instruction but not for an unconditional branch.  We can use
	 the following instructions instead:

	 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
	 B NOT_TAKEN         ; Else jump over TAKEN and continue.
	 TAKEN:
	 B #(offset - 8)
	 NOT_TAKEN:
	 */

      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
				       aarch64_register (rt, 1), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
				      new_offset - 8);
    }
}
/* Implementation of aarch64_insn_visitor method "adr".  */

static void
aarch64_ftrace_insn_reloc_adr (const int32_t offset, const unsigned rd,
			       const int is_adrp,
			       struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  /* We know exactly the address the ADR{P,} instruction will compute.
     We can just write it to the destination register.  */
  CORE_ADDR address = data->insn_addr + offset;

  if (is_adrp)
    {
      /* Clear the lower 12 bits of the offset to get the 4K page.  */
      insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
					     aarch64_register (rd, 1),
					     address & ~0xfff);
    }
  else
    insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
					   aarch64_register (rd, 1), address);
}
/* Implementation of aarch64_insn_visitor method "ldr_literal".  */

static void
aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset, const int is_sw,
				       const unsigned rt, const int is64,
				       struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  CORE_ADDR address = data->insn_addr + offset;

  insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
					 aarch64_register (rt, 1), address);

  /* We know exactly what address to load from, and what register we
     can use:

     MOV xd, #(oldloc + offset)
     MOVK xd, #((oldloc + offset) >> 16), lsl #16
     ...

     LDR xd, [xd] ; or LDRSW xd, [xd]

     */

  if (is_sw)
    insn_reloc->insn_ptr += emit_ldrsw (insn_reloc->insn_ptr,
					aarch64_register (rt, 1),
					aarch64_register (rt, 1),
					offset_memory_operand (0));
  else
    insn_reloc->insn_ptr += emit_ldr (insn_reloc->insn_ptr,
				      aarch64_register (rt, is64),
				      aarch64_register (rt, 1),
				      offset_memory_operand (0));
}
/* Implementation of aarch64_insn_visitor method "others".  */

static void
aarch64_ftrace_insn_reloc_others (const uint32_t insn,
				  struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;

  /* The instruction is not PC relative.  Just re-emit it at the new
     location.  */
  insn_reloc->insn_ptr += aarch64_emit_insn (insn_reloc->insn_ptr, insn);
}
static const struct aarch64_insn_visitor visitor =
{
  aarch64_ftrace_insn_reloc_b,
  aarch64_ftrace_insn_reloc_b_cond,
  aarch64_ftrace_insn_reloc_cb,
  aarch64_ftrace_insn_reloc_tb,
  aarch64_ftrace_insn_reloc_adr,
  aarch64_ftrace_insn_reloc_ldr_literal,
  aarch64_ftrace_insn_reloc_others,
};
bool
aarch64_target::supports_fast_tracepoints ()
{
  return true;
}
/* Implementation of target ops method
   "install_fast_tracepoint_jump_pad".  */

int
aarch64_target::install_fast_tracepoint_jump_pad
  (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
   CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
   CORE_ADDR *trampoline, ULONGEST *trampoline_size,
   unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
   CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
   char *err)
{
  uint32_t buf[256];
  uint32_t *p = buf;
  int i;
  uint32_t insn;
  CORE_ADDR buildaddr = *jump_entry;
  struct aarch64_insn_relocation_data insn_data;

  /* We need to save the current state on the stack both to restore it
     later and to collect register values when the tracepoint is hit.

     The saved registers are pushed in a layout that needs to be in sync
     with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c).  Later on
     the supply_fast_tracepoint_registers function will fill in the
     register cache from a pointer to saved registers on the stack we build
     here.

     For simplicity, we set the size of each cell on the stack to 16 bytes.
     This way one cell can hold any register type, from system registers
     to the 128 bit SIMD&FP registers.  Furthermore, the stack pointer
     has to be 16 bytes aligned anyway.

     Note that the CPSR register does not exist on AArch64.  Instead we
     can access system bits describing the process state with the
     MRS/MSR instructions, namely the condition flags.  We save them as
     if they are part of a CPSR register because that's how GDB
     interprets these system bits.  At the moment, only the condition
     flags are saved in CPSR (NZCV).

     Stack layout, each cell is 16 bytes (descending):

     High *-------- SIMD&FP registers from 31 down to 0. --------*
	  | q31                                                  |
	  .                                                      .
	  .                                                      . 32 cells
	  .                                                      .
	  | q0                                                   |
	  *---- General purpose registers from 30 down to 0. ----*
	  | x30                                                  |
	  .                                                      .
	  .                                                      . 31 cells
	  .                                                      .
	  | x0                                                   |
	  *------------- Special purpose registers. -------------*
	  | SP                                                   |
	  | PC                                                   |
	  | CPSR (NZCV)                                          | 5 cells
	  | FPSR                                                 |
	  | FPCR                                                 |
	  *------------- collecting_t object --------------------*
	  | TPIDR_EL0               | struct tracepoint *        |
     Low  *------------------------------------------------------*

     After this stack is set up, we issue a call to the collector, passing
     it the saved registers at (SP + 16).  */
  /* Push SIMD&FP registers on the stack:

       SUB sp, sp, #(32 * 16)

       STP q30, q31, [sp, #(30 * 16)]
       ...
       STP q0, q1, [sp]

     */
  p += emit_sub (p, sp, sp, immediate_operand (32 * 16));
  for (i = 30; i >= 0; i -= 2)
    p += emit_stp_q_offset (p, i, i + 1, sp, i * 16);

  /* Push general purpose registers on the stack.  Note that we do not need
     to push x31 as it represents the xzr register and not the stack
     pointer in a STR instruction.

       SUB sp, sp, #(31 * 16)

       STR x30, [sp, #(30 * 16)]
       ...
       STR x0, [sp]

     */
  p += emit_sub (p, sp, sp, immediate_operand (31 * 16));
  for (i = 30; i >= 0; i -= 1)
    p += emit_str (p, aarch64_register (i, 1), sp,
		   offset_memory_operand (i * 16));

  /* Make space for 5 more cells.

       SUB sp, sp, #(5 * 16)

     */
  p += emit_sub (p, sp, sp, immediate_operand (5 * 16));

  /* Save SP:

       ADD x4, sp, #((32 + 31 + 5) * 16)
       STR x4, [sp, #(4 * 16)]

     */
  p += emit_add (p, x4, sp, immediate_operand ((32 + 31 + 5) * 16));
  p += emit_str (p, x4, sp, offset_memory_operand (4 * 16));

  /* Save PC (tracepoint address):

       MOV x3, #(tpaddr)
       ...

       STR x3, [sp, #(3 * 16)]

     */
  p += emit_mov_addr (p, x3, tpaddr);
  p += emit_str (p, x3, sp, offset_memory_operand (3 * 16));

  /* Save CPSR (NZCV), FPSR and FPCR:

       MRS x2, nzcv
       MRS x1, fpsr
       MRS x0, fpcr

       STR x2, [sp, #(2 * 16)]
       STR x1, [sp, #(1 * 16)]
       STR x0, [sp, #(0 * 16)]

     */
  p += emit_mrs (p, x2, NZCV);
  p += emit_mrs (p, x1, FPSR);
  p += emit_mrs (p, x0, FPCR);
  p += emit_str (p, x2, sp, offset_memory_operand (2 * 16));
  p += emit_str (p, x1, sp, offset_memory_operand (1 * 16));
  p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
2163 /* Push the collecting_t object. It consist of the address of the
2164 tracepoint and an ID for the current thread. We get the latter by
2165 reading the tpidr_el0 system register. It corresponds to the
2166 NT_ARM_TLS register accessible with ptrace.
2173 STP x0, x1, [sp, #-16]!
2177 p
+= emit_mov_addr (p
, x0
, tpoint
);
2178 p
+= emit_mrs (p
, x1
, TPIDR_EL0
);
2179 p
+= emit_stp (p
, x0
, x1
, sp
, preindex_memory_operand (-16));
  /* Spin-lock:

     The shared memory for the lock is at lockaddr.  It will hold zero
     if no-one is holding the lock, otherwise it contains the address of
     the collecting_t object on the stack of the thread which acquired it.

     At this stage, the stack pointer points to this thread's collecting_t
     object.

     We use the following registers:
     - x0: Address of the lock.
     - x1: Pointer to collecting_t object.
     - x2: Scratch register.

       MOV x0, #(lockaddr)
       ...
       MOV x1, sp

       ; Trigger an event local to this core.  So the following WFE
       ; instruction is ignored.
       SEVL
     again:
       ; Wait for an event.  The event is triggered by either the SEVL
       ; or STLR instructions (store release).
       WFE

       ; Atomically read at lockaddr.  This marks the memory location as
       ; exclusive.  This instruction also has memory constraints which
       ; make sure all previous data reads and writes are done before
       ; executing it.
       LDAXR x2, [x0]

       ; Try again if another thread holds the lock.
       CBNZ x2, again

       ; We can lock it!  Write the address of the collecting_t object.
       ; This instruction will fail if the memory location is not marked
       ; as exclusive anymore.  If it succeeds, it will remove the
       ; exclusive mark on the memory location.  This way, if another
       ; thread executes this instruction before us, we will fail and try
       ; all over again.
       STXR w2, x1, [x0]
       CBNZ w2, again

     */
  p += emit_mov_addr (p, x0, lockaddr);
  p += emit_mov (p, x1, register_operand (sp));

  /* SEVL */
  p += emit_sevl (p);
  /* WFE */
  p += emit_wfe (p);
  /* LDAXR x2, [x0] */
  p += emit_ldaxr (p, x2, x0);
  /* CBNZ x2, again */
  p += emit_cb (p, 1, w2, -2 * 4);
  /* STXR w2, x1, [x0] */
  p += emit_stxr (p, w2, x1, x0);
  /* CBNZ w2, again */
  p += emit_cb (p, 1, x2, -4 * 4);
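
  /* For clarity, the sequence emitted above behaves roughly like the
     following C-style sketch, where load_acquire_exclusive and
     store_exclusive are illustrative stand-ins for LDAXR and STXR:

       for (;;)
         {
           wfe ();                                         // WFE
           if (load_acquire_exclusive (lockaddr) != 0)     // LDAXR + CBNZ
             continue;          // lock already held, try again
           if (store_exclusive (lockaddr, collecting) == 0)  // STXR + CBNZ
             break;             // exclusive store succeeded, lock acquired
         }  */
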
  /* Call collector (struct tracepoint *, unsigned char *):

       MOV x0, #(tpoint)
       ...

       ; Saved registers start after the collecting_t object.
       ADD x1, sp, #16

       ; We use an intra-procedure-call scratch register.
       MOV ip0, #(collector)
       ...

       ; And call back to C!
       BLR ip0

     */
  p += emit_mov_addr (p, x0, tpoint);
  p += emit_add (p, x1, sp, immediate_operand (16));

  p += emit_mov_addr (p, ip0, collector);
  p += emit_blr (p, ip0);
  /* Release the lock.

       MOV x0, #(lockaddr)
       ...

       ; This instruction is a normal store with memory ordering
       ; constraints.  Thanks to this we do not have to put a data
       ; barrier instruction to make sure all data reads and writes are
       ; done before this instruction is executed.  Furthermore, this
       ; instruction will trigger an event, letting other threads know
       ; they can grab the lock.
       STLR xzr, [x0]

     */
  p += emit_mov_addr (p, x0, lockaddr);
  p += emit_stlr (p, xzr, x0);

  /* Free collecting_t object:

       ADD sp, sp, #16

     */
  p += emit_add (p, sp, sp, immediate_operand (16));
  /* Restore CPSR (NZCV), FPSR and FPCR.  And free all special purpose
     registers from the stack.

       LDR x2, [sp, #(2 * 16)]
       LDR x1, [sp, #(1 * 16)]
       LDR x0, [sp, #(0 * 16)]

       MSR NZCV, x2
       MSR FPSR, x1
       MSR FPCR, x0

       ADD sp, sp, #(5 * 16)

     */
  p += emit_ldr (p, x2, sp, offset_memory_operand (2 * 16));
  p += emit_ldr (p, x1, sp, offset_memory_operand (1 * 16));
  p += emit_ldr (p, x0, sp, offset_memory_operand (0 * 16));
  p += emit_msr (p, NZCV, x2);
  p += emit_msr (p, FPSR, x1);
  p += emit_msr (p, FPCR, x0);
  p += emit_add (p, sp, sp, immediate_operand (5 * 16));
  /* Pop general purpose registers:

       LDR x0, [sp]
       ...
       LDR x30, [sp, #(30 * 16)]

       ADD sp, sp, #(31 * 16)

     */
  for (i = 0; i <= 30; i += 1)
    p += emit_ldr (p, aarch64_register (i, 1), sp,
                   offset_memory_operand (i * 16));
  p += emit_add (p, sp, sp, immediate_operand (31 * 16));

  /* Pop SIMD&FP registers:

       LDP q0, q1, [sp]
       ...
       LDP q30, q31, [sp, #(30 * 16)]

       ADD sp, sp, #(32 * 16)

     */
  for (i = 0; i <= 30; i += 2)
    p += emit_ldp_q_offset (p, i, i + 1, sp, i * 16);
  p += emit_add (p, sp, sp, immediate_operand (32 * 16));
  /* Write the code into the inferior memory.  */
  append_insns (&buildaddr, p - buf, buf);

  /* Now emit the relocated instruction.  */
  *adjusted_insn_addr = buildaddr;
  target_read_uint32 (tpaddr, &insn);

  insn_data.base.insn_addr = tpaddr;
  insn_data.new_addr = buildaddr;
  insn_data.insn_ptr = buf;

  aarch64_relocate_instruction (insn, &visitor,
                                (struct aarch64_insn_data *) &insn_data);

  /* We may not have been able to relocate the instruction.  */
  if (insn_data.insn_ptr == buf)
    {
      sprintf (err,
               "E.Could not relocate instruction from %s to %s.",
               core_addr_to_string_nz (tpaddr),
               core_addr_to_string_nz (buildaddr));
      return 1;
    }
  else
    {
      append_insns (&buildaddr, insn_data.insn_ptr - buf, buf);
      *adjusted_insn_addr_end = buildaddr;
    }
  /* Go back to the start of the buffer.  */
  p = buf;

  /* Emit a branch back from the jump pad.  */
  offset = (tpaddr + orig_size - buildaddr);
  if (!can_encode_int32 (offset, 28))
    {
      sprintf (err,
               "E.Jump back from jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
               offset);
      return 1;
    }

  p += emit_b (p, 0, offset);
  append_insns (&buildaddr, p - buf, buf);
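
  /* The 28-bit checks here and below reflect the range of the AArch64 B
     instruction: a signed 26-bit instruction offset scaled by 4, i.e.
     +/-128 MiB around the branch.  */
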
  /* Give the caller a branch instruction into the jump pad.  */
  offset = (*jump_entry - tpaddr);
  if (!can_encode_int32 (offset, 28))
    {
      sprintf (err,
               "E.Jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
               offset);
      return 1;
    }

  emit_b ((uint32_t *) jjump_pad_insn, 0, offset);
  *jjump_pad_insn_size = 4;

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
/* Helper function writing LEN instructions from START into
   current_insn_ptr.  */

static void
emit_ops_insns (const uint32_t *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d instructions at %s\n",
                  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}
/* Pop a register from the stack.  */

static int
emit_pop (uint32_t *buf, struct aarch64_register rt)
{
  return emit_ldr (buf, rt, sp, postindex_memory_operand (1 * 16));
}

/* Push a register on the stack.  */

static int
emit_push (uint32_t *buf, struct aarch64_register rt)
{
  return emit_str (buf, rt, sp, preindex_memory_operand (-1 * 16));
}
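
/* Note that each stack slot is a full 16 bytes even though only one
   64-bit register is stored: this keeps SP 16-byte aligned, as AArch64
   expects whenever SP is used as a base register.  */
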
/* Implementation of emit_ops method "emit_prologue".  */

static void
aarch64_emit_prologue (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* This function emits a prologue for the following function prototype:

     enum eval_result_type f (unsigned char *regs,
                              ULONGEST *value);

     The first argument is a buffer of raw registers.  The second
     argument points to where the result of evaluating the expression is
     stored; it will be set to whatever is on top of the stack at the end.

     The stack set up by the prologue is as such:

     High *------------------------------------------------------*
          | LR                                                   |
          | FP                                                   | <- FP
          | x1  (ULONGEST *value)                                |
          | x0  (unsigned char *regs)                            |
     Low  *------------------------------------------------------*

     As we are implementing a stack machine, each opcode can expand the
     stack so we never know how far we are from the data saved by this
     prologue.  In order to be able to refer to value and regs later, we
     save the current stack pointer in the frame pointer.  This way, it
     is not clobbered when calling C functions.

     Finally, throughout every operation, we are using register x0 as the
     top of the stack, and x1 as a scratch register.  */

  p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-2 * 16));
  p += emit_str (p, lr, sp, offset_memory_operand (3 * 8));
  p += emit_str (p, fp, sp, offset_memory_operand (2 * 8));

  p += emit_add (p, fp, sp, immediate_operand (2 * 8));

  emit_ops_insns (buf, p - buf);
}
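
/* With the prologue above, FP ends up pointing at the saved FP cell, so
   `value' lives at FP - 8 and `regs' at FP - 16; this is what
   aarch64_emit_epilogue and aarch64_emit_reg rely on below.  */
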
/* Implementation of emit_ops method "emit_epilogue".  */

static void
aarch64_emit_epilogue (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Store the result of the expression (x0) in *value.  */
  p += emit_sub (p, x1, fp, immediate_operand (1 * 8));
  p += emit_ldr (p, x1, x1, offset_memory_operand (0));
  p += emit_str (p, x0, x1, offset_memory_operand (0));

  /* Restore the previous state.  */
  p += emit_add (p, sp, fp, immediate_operand (2 * 8));
  p += emit_ldp (p, fp, lr, fp, offset_memory_operand (0));

  /* Return expr_eval_no_error.  */
  p += emit_mov (p, x0, immediate_operand (expr_eval_no_error));
  p += emit_ret (p, lr);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_add".  */

static void
aarch64_emit_add (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_add (p, x0, x1, register_operand (x0));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_sub".  */

static void
aarch64_emit_sub (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_sub (p, x0, x1, register_operand (x0));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_mul".  */

static void
aarch64_emit_mul (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_mul (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_lsh".  */

static void
aarch64_emit_lsh (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_lslv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_rsh_signed".  */

static void
aarch64_emit_rsh_signed (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_asrv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_rsh_unsigned".  */

static void
aarch64_emit_rsh_unsigned (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_lsrv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ext".  */

static void
aarch64_emit_ext (int arg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_sbfx (p, x0, x0, 0, arg);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_log_not".  */

static void
aarch64_emit_log_not (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* If the top of the stack is 0, replace it with 1.  Else replace it
     with 0.  */

  p += emit_cmp (p, x0, immediate_operand (0));
  p += emit_cset (p, x0, EQ);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_and".  */

static void
aarch64_emit_bit_and (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_and (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_or".  */

static void
aarch64_emit_bit_or (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_orr (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_xor".  */

static void
aarch64_emit_bit_xor (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_eor (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_not".  */

static void
aarch64_emit_bit_not (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mvn (p, x0, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_equal".  */

static void
aarch64_emit_equal (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x0, register_operand (x1));
  p += emit_cset (p, x0, EQ);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_less_signed".  */

static void
aarch64_emit_less_signed (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  p += emit_cset (p, x0, LT);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_less_unsigned".  */

static void
aarch64_emit_less_unsigned (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  p += emit_cset (p, x0, LO);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_ref".  */

static void
aarch64_emit_ref (int size)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  switch (size)
    {
    case 1:
      p += emit_ldrb (p, w0, x0, offset_memory_operand (0));
      break;
    case 2:
      p += emit_ldrh (p, w0, x0, offset_memory_operand (0));
      break;
    case 4:
      p += emit_ldr (p, w0, x0, offset_memory_operand (0));
      break;
    case 8:
      p += emit_ldr (p, x0, x0, offset_memory_operand (0));
      break;
    default:
      /* Unknown size, bail on compilation.  */
      emit_error = 1;
      return;
    }

  emit_ops_insns (buf, p - buf);
}
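
/* The byte, halfword and word cases load into w0, and writes to a W
   register zero the upper 32 bits, so every size leaves a clean
   zero-extended value in the top-of-stack register x0.  */
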
/* Implementation of emit_ops method "emit_if_goto".  */

static void
aarch64_emit_if_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* The Z flag is set or cleared here.  */
  p += emit_cmp (p, x0, immediate_operand (0));
  /* This instruction must not change the Z flag.  */
  p += emit_pop (p, x0);
  /* Branch over the next instruction if x0 == 0.  */
  p += emit_bcond (p, EQ, 8);

  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
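
/* The B.EQ above skips 8 bytes, i.e. exactly the NOP that follows it, so
   the goto is only taken when the tested value is non-zero.  *OFFSET_P
   and *SIZE_P record where that NOP sits so the caller can later have
   aarch64_write_goto_address replace it with a real branch once the
   destination is known.  */
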
/* Implementation of emit_ops method "emit_goto".  */

static void
aarch64_emit_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = 0;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "write_goto_address".  */

static void
aarch64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  uint32_t insn;

  emit_b (&insn, 0, to - from);
  append_insns (&from, 1, &insn);
}

/* Implementation of emit_ops method "emit_const".  */

static void
aarch64_emit_const (LONGEST num)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov_addr (p, x0, num);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_call".  */

static void
aarch64_emit_call (CORE_ADDR fn)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov_addr (p, ip0, fn);
  p += emit_blr (p, ip0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_reg".  */

static void
aarch64_emit_reg (int reg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Set x0 to unsigned char *regs.  */
  p += emit_sub (p, x0, fp, immediate_operand (2 * 8));
  p += emit_ldr (p, x0, x0, offset_memory_operand (0));
  p += emit_mov (p, x1, immediate_operand (reg));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (get_raw_reg_func_addr ());
}

/* Implementation of emit_ops method "emit_pop".  */

static void
aarch64_emit_pop (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_stack_flush".  */

static void
aarch64_emit_stack_flush (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_push (p, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_zero_ext".  */

static void
aarch64_emit_zero_ext (int arg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_ubfx (p, x0, x0, 0, arg);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_swap".  */

static void
aarch64_emit_swap (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_ldr (p, x1, sp, offset_memory_operand (0 * 16));
  p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
  p += emit_mov (p, x0, register_operand (x1));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_stack_adjust".  */

static void
aarch64_emit_stack_adjust (int n)
{
  /* This is not needed with our design.  */
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_add (p, sp, sp, immediate_operand (n * 16));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_int_call_1".  */

static void
aarch64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov (p, x0, immediate_operand (arg1));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (fn);
}
/* Implementation of emit_ops method "emit_void_call_2".  */

static void
aarch64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Push x0 on the stack.  */
  aarch64_emit_stack_flush ();

  /* Setup arguments for the function call:

     x0: arg1
     x1: top of the stack

       MOV x1, x0
       MOV x0, #arg1  */

  p += emit_mov (p, x1, register_operand (x0));
  p += emit_mov (p, x0, immediate_operand (arg1));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (fn);

  /* Restore x0.  */
  aarch64_emit_pop ();
}
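
/* Note the ordering above: x0 (the top of the stack) is copied into x1
   before x0 is overwritten with ARG1, and the stack_flush/pop pair
   around the call preserves the top of the stack across it.  */
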
/* Implementation of emit_ops method "emit_eq_goto".  */

static void
aarch64_emit_eq_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 != x1.  */
  p += emit_bcond (p, NE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ne_goto".  */

static void
aarch64_emit_ne_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 == x1.  */
  p += emit_bcond (p, EQ, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_lt_goto".  */

static void
aarch64_emit_lt_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 >= x0.  */
  p += emit_bcond (p, GE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_le_goto".  */

static void
aarch64_emit_le_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 > x0.  */
  p += emit_bcond (p, GT, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_gt_goto".  */

static void
aarch64_emit_gt_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 <= x0.  */
  p += emit_bcond (p, LE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ge_goto".  */

static void
aarch64_emit_ge_got (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x1 < x0.  */
  p += emit_bcond (p, LT, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
static struct emit_ops aarch64_emit_ops_impl =
{
  aarch64_emit_prologue,
  aarch64_emit_epilogue,
  aarch64_emit_add,
  aarch64_emit_sub,
  aarch64_emit_mul,
  aarch64_emit_lsh,
  aarch64_emit_rsh_signed,
  aarch64_emit_rsh_unsigned,
  aarch64_emit_ext,
  aarch64_emit_log_not,
  aarch64_emit_bit_and,
  aarch64_emit_bit_or,
  aarch64_emit_bit_xor,
  aarch64_emit_bit_not,
  aarch64_emit_equal,
  aarch64_emit_less_signed,
  aarch64_emit_less_unsigned,
  aarch64_emit_ref,
  aarch64_emit_if_goto,
  aarch64_emit_goto,
  aarch64_write_goto_address,
  aarch64_emit_const,
  aarch64_emit_call,
  aarch64_emit_reg,
  aarch64_emit_pop,
  aarch64_emit_stack_flush,
  aarch64_emit_zero_ext,
  aarch64_emit_swap,
  aarch64_emit_stack_adjust,
  aarch64_emit_int_call_1,
  aarch64_emit_void_call_2,
  aarch64_emit_eq_goto,
  aarch64_emit_ne_goto,
  aarch64_emit_lt_goto,
  aarch64_emit_le_goto,
  aarch64_emit_gt_goto,
  aarch64_emit_ge_got,
};
/* Implementation of target ops method "emit_ops".  */

emit_ops *
aarch64_target::emit_ops ()
{
  return &aarch64_emit_ops_impl;
}
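
/* These emit_ops callbacks are used by the common tracepoint code to
   compile agent expressions (for instance tracepoint conditions) into
   native AArch64 code, following the stack-machine convention described
   in aarch64_emit_prologue: x0 holds the top of the stack and x1 is a
   scratch register.  */
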
/* Implementation of target ops method
   "get_min_fast_tracepoint_insn_len".  */

int
aarch64_target::get_min_fast_tracepoint_insn_len ()
{
  return 4;
}
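
/* A fast tracepoint is installed by overwriting a single 4-byte B
   instruction at the tracepoint address (see
   install_fast_tracepoint_jump_pad above), so 4 bytes is the minimum
   usable instruction length.  */
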
/* Implementation of linux target ops method
   "low_supports_range_stepping".  */

bool
aarch64_target::low_supports_range_stepping ()
{
  return true;
}
/* Implementation of target ops method "sw_breakpoint_from_kind".  */

const gdb_byte *
aarch64_target::sw_breakpoint_from_kind (int kind, int *size)
{
  if (is_64bit_tdesc ())
    {
      *size = aarch64_breakpoint_len;
      return aarch64_breakpoint;
    }
  else
    return arm_sw_breakpoint_from_kind (kind, size);
}

/* Implementation of target ops method "breakpoint_kind_from_pc".  */

int
aarch64_target::breakpoint_kind_from_pc (CORE_ADDR *pcptr)
{
  if (is_64bit_tdesc ())
    return aarch64_breakpoint_len;
  else
    return arm_breakpoint_kind_from_pc (pcptr);
}

/* Implementation of the target ops method
   "breakpoint_kind_from_current_state".  */

int
aarch64_target::breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
{
  if (is_64bit_tdesc ())
    return aarch64_breakpoint_len;
  else
    return arm_breakpoint_kind_from_current_state (pcptr);
}

/* The linux target ops object.  */

linux_process_target *the_linux_target = &the_aarch64_target;

void
initialize_low_arch (void)
{
  initialize_low_arch_aarch32 ();

  initialize_regsets_info (&aarch64_regsets_info);
  initialize_regsets_info (&aarch64_sve_regsets_info);
}