1 /* GNU/Linux/AArch64 specific low level interface, for the remote server for
4 Copyright (C) 2009-2019 Free Software Foundation, Inc.
5 Contributed by ARM Ltd.
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
23 #include "linux-low.h"
24 #include "nat/aarch64-linux.h"
25 #include "nat/aarch64-linux-hw-point.h"
26 #include "arch/aarch64-insn.h"
27 #include "linux-aarch32-low.h"
28 #include "elf/common.h"
30 #include "tracepoint.h"
34 #include "nat/gdb_ptrace.h"
35 #include <asm/ptrace.h>
40 #include "gdb_proc_service.h"
41 #include "arch/aarch64.h"
42 #include "linux-aarch64-tdesc.h"
43 #include "nat/aarch64-sve-linux-ptrace.h"
50 /* Per-process arch-specific data we want to keep. */
52 struct arch_process_info
54 /* Hardware breakpoint/watchpoint data.
55 The reason for them to be per-process rather than per-thread is
56 due to the lack of information in the gdbserver environment;
57 gdbserver is not told that whether a requested hardware
58 breakpoint/watchpoint is thread specific or not, so it has to set
59 each hw bp/wp for every thread in the current process. The
60 higher level bp/wp management in gdb will resume a thread if a hw
61 bp/wp trap is not expected for it. Since the hw bp/wp setting is
62 same for each thread, it is reasonable for the data to live here.
64 struct aarch64_debug_reg_state debug_reg_state
;
67 /* Return true if the size of register 0 is 8 byte. */
72 struct regcache
*regcache
= get_thread_regcache (current_thread
, 0);
74 return register_size (regcache
->tdesc
, 0) == 8;
77 /* Return true if the regcache contains the number of SVE registers. */
82 struct regcache
*regcache
= get_thread_regcache (current_thread
, 0);
84 return regcache
->tdesc
->reg_defs
.size () == AARCH64_SVE_NUM_REGS
;
88 aarch64_fill_gregset (struct regcache
*regcache
, void *buf
)
90 struct user_pt_regs
*regset
= (struct user_pt_regs
*) buf
;
93 for (i
= 0; i
< AARCH64_X_REGS_NUM
; i
++)
94 collect_register (regcache
, AARCH64_X0_REGNUM
+ i
, ®set
->regs
[i
]);
95 collect_register (regcache
, AARCH64_SP_REGNUM
, ®set
->sp
);
96 collect_register (regcache
, AARCH64_PC_REGNUM
, ®set
->pc
);
97 collect_register (regcache
, AARCH64_CPSR_REGNUM
, ®set
->pstate
);
101 aarch64_store_gregset (struct regcache
*regcache
, const void *buf
)
103 const struct user_pt_regs
*regset
= (const struct user_pt_regs
*) buf
;
106 for (i
= 0; i
< AARCH64_X_REGS_NUM
; i
++)
107 supply_register (regcache
, AARCH64_X0_REGNUM
+ i
, ®set
->regs
[i
]);
108 supply_register (regcache
, AARCH64_SP_REGNUM
, ®set
->sp
);
109 supply_register (regcache
, AARCH64_PC_REGNUM
, ®set
->pc
);
110 supply_register (regcache
, AARCH64_CPSR_REGNUM
, ®set
->pstate
);
114 aarch64_fill_fpregset (struct regcache
*regcache
, void *buf
)
116 struct user_fpsimd_state
*regset
= (struct user_fpsimd_state
*) buf
;
119 for (i
= 0; i
< AARCH64_V_REGS_NUM
; i
++)
120 collect_register (regcache
, AARCH64_V0_REGNUM
+ i
, ®set
->vregs
[i
]);
121 collect_register (regcache
, AARCH64_FPSR_REGNUM
, ®set
->fpsr
);
122 collect_register (regcache
, AARCH64_FPCR_REGNUM
, ®set
->fpcr
);
126 aarch64_store_fpregset (struct regcache
*regcache
, const void *buf
)
128 const struct user_fpsimd_state
*regset
129 = (const struct user_fpsimd_state
*) buf
;
132 for (i
= 0; i
< AARCH64_V_REGS_NUM
; i
++)
133 supply_register (regcache
, AARCH64_V0_REGNUM
+ i
, ®set
->vregs
[i
]);
134 supply_register (regcache
, AARCH64_FPSR_REGNUM
, ®set
->fpsr
);
135 supply_register (regcache
, AARCH64_FPCR_REGNUM
, ®set
->fpcr
);
138 /* Enable miscellaneous debugging output. The name is historical - it
139 was originally used to debug LinuxThreads support. */
140 extern int debug_threads
;
142 /* Implementation of linux_target_ops method "get_pc". */
145 aarch64_get_pc (struct regcache
*regcache
)
147 if (register_size (regcache
->tdesc
, 0) == 8)
148 return linux_get_pc_64bit (regcache
);
150 return linux_get_pc_32bit (regcache
);
153 /* Implementation of linux_target_ops method "set_pc". */
156 aarch64_set_pc (struct regcache
*regcache
, CORE_ADDR pc
)
158 if (register_size (regcache
->tdesc
, 0) == 8)
159 linux_set_pc_64bit (regcache
, pc
);
161 linux_set_pc_32bit (regcache
, pc
);
164 #define aarch64_breakpoint_len 4
166 /* AArch64 BRK software debug mode instruction.
167 This instruction needs to match gdb/aarch64-tdep.c
168 (aarch64_default_breakpoint). */
169 static const gdb_byte aarch64_breakpoint
[] = {0x00, 0x00, 0x20, 0xd4};
171 /* Implementation of linux_target_ops method "breakpoint_at". */
174 aarch64_breakpoint_at (CORE_ADDR where
)
176 if (is_64bit_tdesc ())
178 gdb_byte insn
[aarch64_breakpoint_len
];
180 (*the_target
->read_memory
) (where
, (unsigned char *) &insn
,
181 aarch64_breakpoint_len
);
182 if (memcmp (insn
, aarch64_breakpoint
, aarch64_breakpoint_len
) == 0)
188 return arm_breakpoint_at (where
);
192 aarch64_init_debug_reg_state (struct aarch64_debug_reg_state
*state
)
196 for (i
= 0; i
< AARCH64_HBP_MAX_NUM
; ++i
)
198 state
->dr_addr_bp
[i
] = 0;
199 state
->dr_ctrl_bp
[i
] = 0;
200 state
->dr_ref_count_bp
[i
] = 0;
203 for (i
= 0; i
< AARCH64_HWP_MAX_NUM
; ++i
)
205 state
->dr_addr_wp
[i
] = 0;
206 state
->dr_ctrl_wp
[i
] = 0;
207 state
->dr_ref_count_wp
[i
] = 0;
211 /* Return the pointer to the debug register state structure in the
212 current process' arch-specific data area. */
214 struct aarch64_debug_reg_state
*
215 aarch64_get_debug_reg_state (pid_t pid
)
217 struct process_info
*proc
= find_process_pid (pid
);
219 return &proc
->priv
->arch_private
->debug_reg_state
;
222 /* Implementation of linux_target_ops method "supports_z_point_type". */
225 aarch64_supports_z_point_type (char z_type
)
231 case Z_PACKET_WRITE_WP
:
232 case Z_PACKET_READ_WP
:
233 case Z_PACKET_ACCESS_WP
:
240 /* Implementation of linux_target_ops method "insert_point".
242 It actually only records the info of the to-be-inserted bp/wp;
243 the actual insertion will happen when threads are resumed. */
246 aarch64_insert_point (enum raw_bkpt_type type
, CORE_ADDR addr
,
247 int len
, struct raw_breakpoint
*bp
)
250 enum target_hw_bp_type targ_type
;
251 struct aarch64_debug_reg_state
*state
252 = aarch64_get_debug_reg_state (pid_of (current_thread
));
255 fprintf (stderr
, "insert_point on entry (addr=0x%08lx, len=%d)\n",
256 (unsigned long) addr
, len
);
258 /* Determine the type from the raw breakpoint type. */
259 targ_type
= raw_bkpt_type_to_target_hw_bp_type (type
);
261 if (targ_type
!= hw_execute
)
263 if (aarch64_linux_region_ok_for_watchpoint (addr
, len
))
264 ret
= aarch64_handle_watchpoint (targ_type
, addr
, len
,
265 1 /* is_insert */, state
);
273 /* LEN is 3 means the breakpoint is set on a 32-bit thumb
274 instruction. Set it to 2 to correctly encode length bit
275 mask in hardware/watchpoint control register. */
278 ret
= aarch64_handle_breakpoint (targ_type
, addr
, len
,
279 1 /* is_insert */, state
);
283 aarch64_show_debug_reg_state (state
, "insert_point", addr
, len
,
289 /* Implementation of linux_target_ops method "remove_point".
291 It actually only records the info of the to-be-removed bp/wp,
292 the actual removal will be done when threads are resumed. */
295 aarch64_remove_point (enum raw_bkpt_type type
, CORE_ADDR addr
,
296 int len
, struct raw_breakpoint
*bp
)
299 enum target_hw_bp_type targ_type
;
300 struct aarch64_debug_reg_state
*state
301 = aarch64_get_debug_reg_state (pid_of (current_thread
));
304 fprintf (stderr
, "remove_point on entry (addr=0x%08lx, len=%d)\n",
305 (unsigned long) addr
, len
);
307 /* Determine the type from the raw breakpoint type. */
308 targ_type
= raw_bkpt_type_to_target_hw_bp_type (type
);
310 /* Set up state pointers. */
311 if (targ_type
!= hw_execute
)
313 aarch64_handle_watchpoint (targ_type
, addr
, len
, 0 /* is_insert */,
319 /* LEN is 3 means the breakpoint is set on a 32-bit thumb
320 instruction. Set it to 2 to correctly encode length bit
321 mask in hardware/watchpoint control register. */
324 ret
= aarch64_handle_breakpoint (targ_type
, addr
, len
,
325 0 /* is_insert */, state
);
329 aarch64_show_debug_reg_state (state
, "remove_point", addr
, len
,
335 /* Implementation of linux_target_ops method "stopped_data_address". */
338 aarch64_stopped_data_address (void)
342 struct aarch64_debug_reg_state
*state
;
344 pid
= lwpid_of (current_thread
);
346 /* Get the siginfo. */
347 if (ptrace (PTRACE_GETSIGINFO
, pid
, NULL
, &siginfo
) != 0)
348 return (CORE_ADDR
) 0;
350 /* Need to be a hardware breakpoint/watchpoint trap. */
351 if (siginfo
.si_signo
!= SIGTRAP
352 || (siginfo
.si_code
& 0xffff) != 0x0004 /* TRAP_HWBKPT */)
353 return (CORE_ADDR
) 0;
355 /* Check if the address matches any watched address. */
356 state
= aarch64_get_debug_reg_state (pid_of (current_thread
));
357 for (i
= aarch64_num_wp_regs
- 1; i
>= 0; --i
)
359 const unsigned int offset
360 = aarch64_watchpoint_offset (state
->dr_ctrl_wp
[i
]);
361 const unsigned int len
= aarch64_watchpoint_length (state
->dr_ctrl_wp
[i
]);
362 const CORE_ADDR addr_trap
= (CORE_ADDR
) siginfo
.si_addr
;
363 const CORE_ADDR addr_watch
= state
->dr_addr_wp
[i
] + offset
;
364 const CORE_ADDR addr_watch_aligned
= align_down (state
->dr_addr_wp
[i
], 8);
365 const CORE_ADDR addr_orig
= state
->dr_addr_orig_wp
[i
];
367 if (state
->dr_ref_count_wp
[i
]
368 && DR_CONTROL_ENABLED (state
->dr_ctrl_wp
[i
])
369 && addr_trap
>= addr_watch_aligned
370 && addr_trap
< addr_watch
+ len
)
372 /* ADDR_TRAP reports the first address of the memory range
373 accessed by the CPU, regardless of what was the memory
374 range watched. Thus, a large CPU access that straddles
375 the ADDR_WATCH..ADDR_WATCH+LEN range may result in an
376 ADDR_TRAP that is lower than the
377 ADDR_WATCH..ADDR_WATCH+LEN range. E.g.:
379 addr: | 4 | 5 | 6 | 7 | 8 |
380 |---- range watched ----|
381 |----------- range accessed ------------|
383 In this case, ADDR_TRAP will be 4.
385 To match a watchpoint known to GDB core, we must never
386 report *ADDR_P outside of any ADDR_WATCH..ADDR_WATCH+LEN
387 range. ADDR_WATCH <= ADDR_TRAP < ADDR_ORIG is a false
388 positive on kernels older than 4.10. See PR
394 return (CORE_ADDR
) 0;
/* Implementation of linux_target_ops method "stopped_by_watchpoint".  */

static int
aarch64_stopped_by_watchpoint (void)
{
  if (aarch64_stopped_data_address () != 0)
    return 1;
  else
    return 0;
}
408 /* Fetch the thread-local storage pointer for libthread_db. */
411 ps_get_thread_area (struct ps_prochandle
*ph
,
412 lwpid_t lwpid
, int idx
, void **base
)
414 return aarch64_ps_get_thread_area (ph
, lwpid
, idx
, base
,
418 /* Implementation of linux_target_ops method "siginfo_fixup". */
421 aarch64_linux_siginfo_fixup (siginfo_t
*native
, gdb_byte
*inf
, int direction
)
423 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
424 if (!is_64bit_tdesc ())
427 aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo
*) inf
,
430 aarch64_siginfo_from_compat_siginfo (native
,
431 (struct compat_siginfo
*) inf
);
439 /* Implementation of linux_target_ops method "new_process". */
441 static struct arch_process_info
*
442 aarch64_linux_new_process (void)
444 struct arch_process_info
*info
= XCNEW (struct arch_process_info
);
446 aarch64_init_debug_reg_state (&info
->debug_reg_state
);
/* Implementation of linux_target_ops method "delete_process".  */

static void
aarch64_linux_delete_process (struct arch_process_info *info)
{
  xfree (info);
}
459 /* Implementation of linux_target_ops method "linux_new_fork". */
462 aarch64_linux_new_fork (struct process_info
*parent
,
463 struct process_info
*child
)
465 /* These are allocated by linux_add_process. */
466 gdb_assert (parent
->priv
!= NULL
467 && parent
->priv
->arch_private
!= NULL
);
468 gdb_assert (child
->priv
!= NULL
469 && child
->priv
->arch_private
!= NULL
);
471 /* Linux kernel before 2.6.33 commit
472 72f674d203cd230426437cdcf7dd6f681dad8b0d
473 will inherit hardware debug registers from parent
474 on fork/vfork/clone. Newer Linux kernels create such tasks with
475 zeroed debug registers.
477 GDB core assumes the child inherits the watchpoints/hw
478 breakpoints of the parent, and will remove them all from the
479 forked off process. Copy the debug registers mirrors into the
480 new process so that all breakpoints and watchpoints can be
481 removed together. The debug registers mirror will become zeroed
482 in the end before detaching the forked off process, thus making
483 this compatible with older Linux kernels too. */
485 *child
->priv
->arch_private
= *parent
->priv
->arch_private
;
488 /* Implementation of linux_target_ops method "arch_setup". */
491 aarch64_arch_setup (void)
493 unsigned int machine
;
497 tid
= lwpid_of (current_thread
);
499 is_elf64
= linux_pid_exe_is_elf_64_file (tid
, &machine
);
503 uint64_t vq
= aarch64_sve_get_vq (tid
);
504 /* pauth not yet supported. */
505 current_process ()->tdesc
= aarch64_linux_read_description (vq
, false);
508 current_process ()->tdesc
= tdesc_arm_with_neon
;
510 aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread
));
/* Wrapper for aarch64_sve_regs_copy_to_reg_buf.  */

static void
aarch64_sve_regs_copy_to_regcache (struct regcache *regcache, const void *buf)
{
  return aarch64_sve_regs_copy_to_reg_buf (regcache, buf);
}
/* Wrapper for aarch64_sve_regs_copy_from_reg_buf.  */

static void
aarch64_sve_regs_copy_from_regcache (struct regcache *regcache, void *buf)
{
  return aarch64_sve_regs_copy_from_reg_buf (regcache, buf);
}
529 static struct regset_info aarch64_regsets
[] =
531 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_PRSTATUS
,
532 sizeof (struct user_pt_regs
), GENERAL_REGS
,
533 aarch64_fill_gregset
, aarch64_store_gregset
},
534 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_FPREGSET
,
535 sizeof (struct user_fpsimd_state
), FP_REGS
,
536 aarch64_fill_fpregset
, aarch64_store_fpregset
541 static struct regsets_info aarch64_regsets_info
=
543 aarch64_regsets
, /* regsets */
545 NULL
, /* disabled_regsets */
548 static struct regs_info regs_info_aarch64
=
550 NULL
, /* regset_bitmap */
552 &aarch64_regsets_info
,
555 static struct regset_info aarch64_sve_regsets
[] =
557 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_PRSTATUS
,
558 sizeof (struct user_pt_regs
), GENERAL_REGS
,
559 aarch64_fill_gregset
, aarch64_store_gregset
},
560 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_ARM_SVE
,
561 SVE_PT_SIZE (AARCH64_MAX_SVE_VQ
, SVE_PT_REGS_SVE
), EXTENDED_REGS
,
562 aarch64_sve_regs_copy_from_regcache
, aarch64_sve_regs_copy_to_regcache
567 static struct regsets_info aarch64_sve_regsets_info
=
569 aarch64_sve_regsets
, /* regsets. */
570 0, /* num_regsets. */
571 NULL
, /* disabled_regsets. */
574 static struct regs_info regs_info_aarch64_sve
=
576 NULL
, /* regset_bitmap. */
578 &aarch64_sve_regsets_info
,
581 /* Implementation of linux_target_ops method "regs_info". */
583 static const struct regs_info
*
584 aarch64_regs_info (void)
586 if (!is_64bit_tdesc ())
587 return ®s_info_aarch32
;
590 return ®s_info_aarch64_sve
;
592 return ®s_info_aarch64
;
595 /* Implementation of linux_target_ops method "supports_tracepoints". */
598 aarch64_supports_tracepoints (void)
600 if (current_thread
== NULL
)
604 /* We don't support tracepoints on aarch32 now. */
605 return is_64bit_tdesc ();
609 /* Implementation of linux_target_ops method "get_thread_area". */
612 aarch64_get_thread_area (int lwpid
, CORE_ADDR
*addrp
)
617 iovec
.iov_base
= ®
;
618 iovec
.iov_len
= sizeof (reg
);
620 if (ptrace (PTRACE_GETREGSET
, lwpid
, NT_ARM_TLS
, &iovec
) != 0)
628 /* Implementation of linux_target_ops method "get_syscall_trapinfo". */
631 aarch64_get_syscall_trapinfo (struct regcache
*regcache
, int *sysno
)
633 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
639 collect_register_by_name (regcache
, "x8", &l_sysno
);
640 *sysno
= (int) l_sysno
;
643 collect_register_by_name (regcache
, "r7", sysno
);
646 /* List of condition codes that we need. */
648 enum aarch64_condition_codes
659 enum aarch64_operand_type
665 /* Representation of an operand. At this time, it only supports register
666 and immediate types. */
668 struct aarch64_operand
670 /* Type of the operand. */
671 enum aarch64_operand_type type
;
673 /* Value of the operand according to the type. */
677 struct aarch64_register reg
;
681 /* List of registers that we are currently using, we can add more here as
682 we need to use them. */
684 /* General purpose scratch registers (64 bit). */
685 static const struct aarch64_register x0
= { 0, 1 };
686 static const struct aarch64_register x1
= { 1, 1 };
687 static const struct aarch64_register x2
= { 2, 1 };
688 static const struct aarch64_register x3
= { 3, 1 };
689 static const struct aarch64_register x4
= { 4, 1 };
691 /* General purpose scratch registers (32 bit). */
692 static const struct aarch64_register w0
= { 0, 0 };
693 static const struct aarch64_register w2
= { 2, 0 };
695 /* Intra-procedure scratch registers. */
696 static const struct aarch64_register ip0
= { 16, 1 };
698 /* Special purpose registers. */
699 static const struct aarch64_register fp
= { 29, 1 };
700 static const struct aarch64_register lr
= { 30, 1 };
701 static const struct aarch64_register sp
= { 31, 1 };
702 static const struct aarch64_register xzr
= { 31, 1 };
704 /* Dynamically allocate a new register. If we know the register
705 statically, we should make it a global as above instead of using this
708 static struct aarch64_register
709 aarch64_register (unsigned num
, int is64
)
711 return (struct aarch64_register
) { num
, is64
};
714 /* Helper function to create a register operand, for instructions with
715 different types of operands.
718 p += emit_mov (p, x0, register_operand (x1)); */
720 static struct aarch64_operand
721 register_operand (struct aarch64_register reg
)
723 struct aarch64_operand operand
;
725 operand
.type
= OPERAND_REGISTER
;
731 /* Helper function to create an immediate operand, for instructions with
732 different types of operands.
735 p += emit_mov (p, x0, immediate_operand (12)); */
737 static struct aarch64_operand
738 immediate_operand (uint32_t imm
)
740 struct aarch64_operand operand
;
742 operand
.type
= OPERAND_IMMEDIATE
;
748 /* Helper function to create an offset memory operand.
751 p += emit_ldr (p, x0, sp, offset_memory_operand (16)); */
753 static struct aarch64_memory_operand
754 offset_memory_operand (int32_t offset
)
756 return (struct aarch64_memory_operand
) { MEMORY_OPERAND_OFFSET
, offset
};
759 /* Helper function to create a pre-index memory operand.
762 p += emit_ldr (p, x0, sp, preindex_memory_operand (16)); */
764 static struct aarch64_memory_operand
765 preindex_memory_operand (int32_t index
)
767 return (struct aarch64_memory_operand
) { MEMORY_OPERAND_PREINDEX
, index
};
770 /* Helper function to create a post-index memory operand.
773 p += emit_ldr (p, x0, sp, postindex_memory_operand (16)); */
775 static struct aarch64_memory_operand
776 postindex_memory_operand (int32_t index
)
778 return (struct aarch64_memory_operand
) { MEMORY_OPERAND_POSTINDEX
, index
};
/* System control registers.  These special registers can be written and
   read with the MRS and MSR instructions.

   - NZCV: Condition flags.  GDB refers to this register under the CPSR
     name.
   - FPSR: Floating-point status register.
   - FPCR: Floating-point control registers.
   - TPIDR_EL0: Software thread ID register.  */

enum aarch64_system_control_registers
{
  /*          op0           op1           crn          crm           op2  */
  NZCV =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
  FPSR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
  FPCR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
  TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
};
799 /* Write a BLR instruction into *BUF.
803 RN is the register to branch to. */
806 emit_blr (uint32_t *buf
, struct aarch64_register rn
)
808 return aarch64_emit_insn (buf
, BLR
| ENCODE (rn
.num
, 5, 5));
811 /* Write a RET instruction into *BUF.
815 RN is the register to branch to. */
818 emit_ret (uint32_t *buf
, struct aarch64_register rn
)
820 return aarch64_emit_insn (buf
, RET
| ENCODE (rn
.num
, 5, 5));
824 emit_load_store_pair (uint32_t *buf
, enum aarch64_opcodes opcode
,
825 struct aarch64_register rt
,
826 struct aarch64_register rt2
,
827 struct aarch64_register rn
,
828 struct aarch64_memory_operand operand
)
835 opc
= ENCODE (2, 2, 30);
837 opc
= ENCODE (0, 2, 30);
839 switch (operand
.type
)
841 case MEMORY_OPERAND_OFFSET
:
843 pre_index
= ENCODE (1, 1, 24);
844 write_back
= ENCODE (0, 1, 23);
847 case MEMORY_OPERAND_POSTINDEX
:
849 pre_index
= ENCODE (0, 1, 24);
850 write_back
= ENCODE (1, 1, 23);
853 case MEMORY_OPERAND_PREINDEX
:
855 pre_index
= ENCODE (1, 1, 24);
856 write_back
= ENCODE (1, 1, 23);
863 return aarch64_emit_insn (buf
, opcode
| opc
| pre_index
| write_back
864 | ENCODE (operand
.index
>> 3, 7, 15)
865 | ENCODE (rt2
.num
, 5, 10)
866 | ENCODE (rn
.num
, 5, 5) | ENCODE (rt
.num
, 5, 0));
869 /* Write a STP instruction into *BUF.
871 STP rt, rt2, [rn, #offset]
872 STP rt, rt2, [rn, #index]!
873 STP rt, rt2, [rn], #index
875 RT and RT2 are the registers to store.
876 RN is the base address register.
877 OFFSET is the immediate to add to the base address. It is limited to a
878 -512 .. 504 range (7 bits << 3). */
881 emit_stp (uint32_t *buf
, struct aarch64_register rt
,
882 struct aarch64_register rt2
, struct aarch64_register rn
,
883 struct aarch64_memory_operand operand
)
885 return emit_load_store_pair (buf
, STP
, rt
, rt2
, rn
, operand
);
888 /* Write a LDP instruction into *BUF.
890 LDP rt, rt2, [rn, #offset]
891 LDP rt, rt2, [rn, #index]!
892 LDP rt, rt2, [rn], #index
894 RT and RT2 are the registers to store.
895 RN is the base address register.
896 OFFSET is the immediate to add to the base address. It is limited to a
897 -512 .. 504 range (7 bits << 3). */
900 emit_ldp (uint32_t *buf
, struct aarch64_register rt
,
901 struct aarch64_register rt2
, struct aarch64_register rn
,
902 struct aarch64_memory_operand operand
)
904 return emit_load_store_pair (buf
, LDP
, rt
, rt2
, rn
, operand
);
907 /* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.
909 LDP qt, qt2, [rn, #offset]
911 RT and RT2 are the Q registers to store.
912 RN is the base address register.
913 OFFSET is the immediate to add to the base address. It is limited to
914 -1024 .. 1008 range (7 bits << 4). */
917 emit_ldp_q_offset (uint32_t *buf
, unsigned rt
, unsigned rt2
,
918 struct aarch64_register rn
, int32_t offset
)
920 uint32_t opc
= ENCODE (2, 2, 30);
921 uint32_t pre_index
= ENCODE (1, 1, 24);
923 return aarch64_emit_insn (buf
, LDP_SIMD_VFP
| opc
| pre_index
924 | ENCODE (offset
>> 4, 7, 15)
925 | ENCODE (rt2
, 5, 10)
926 | ENCODE (rn
.num
, 5, 5) | ENCODE (rt
, 5, 0));
929 /* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.
931 STP qt, qt2, [rn, #offset]
933 RT and RT2 are the Q registers to store.
934 RN is the base address register.
935 OFFSET is the immediate to add to the base address. It is limited to
936 -1024 .. 1008 range (7 bits << 4). */
939 emit_stp_q_offset (uint32_t *buf
, unsigned rt
, unsigned rt2
,
940 struct aarch64_register rn
, int32_t offset
)
942 uint32_t opc
= ENCODE (2, 2, 30);
943 uint32_t pre_index
= ENCODE (1, 1, 24);
945 return aarch64_emit_insn (buf
, STP_SIMD_VFP
| opc
| pre_index
946 | ENCODE (offset
>> 4, 7, 15)
947 | ENCODE (rt2
, 5, 10)
948 | ENCODE (rn
.num
, 5, 5) | ENCODE (rt
, 5, 0));
951 /* Write a LDRH instruction into *BUF.
953 LDRH wt, [xn, #offset]
954 LDRH wt, [xn, #index]!
955 LDRH wt, [xn], #index
957 RT is the register to store.
958 RN is the base address register.
959 OFFSET is the immediate to add to the base address. It is limited to
960 0 .. 32760 range (12 bits << 3). */
963 emit_ldrh (uint32_t *buf
, struct aarch64_register rt
,
964 struct aarch64_register rn
,
965 struct aarch64_memory_operand operand
)
967 return aarch64_emit_load_store (buf
, 1, LDR
, rt
, rn
, operand
);
970 /* Write a LDRB instruction into *BUF.
972 LDRB wt, [xn, #offset]
973 LDRB wt, [xn, #index]!
974 LDRB wt, [xn], #index
976 RT is the register to store.
977 RN is the base address register.
978 OFFSET is the immediate to add to the base address. It is limited to
979 0 .. 32760 range (12 bits << 3). */
982 emit_ldrb (uint32_t *buf
, struct aarch64_register rt
,
983 struct aarch64_register rn
,
984 struct aarch64_memory_operand operand
)
986 return aarch64_emit_load_store (buf
, 0, LDR
, rt
, rn
, operand
);
991 /* Write a STR instruction into *BUF.
993 STR rt, [rn, #offset]
994 STR rt, [rn, #index]!
997 RT is the register to store.
998 RN is the base address register.
999 OFFSET is the immediate to add to the base address. It is limited to
1000 0 .. 32760 range (12 bits << 3). */
1003 emit_str (uint32_t *buf
, struct aarch64_register rt
,
1004 struct aarch64_register rn
,
1005 struct aarch64_memory_operand operand
)
1007 return aarch64_emit_load_store (buf
, rt
.is64
? 3 : 2, STR
, rt
, rn
, operand
);
1010 /* Helper function emitting an exclusive load or store instruction. */
1013 emit_load_store_exclusive (uint32_t *buf
, uint32_t size
,
1014 enum aarch64_opcodes opcode
,
1015 struct aarch64_register rs
,
1016 struct aarch64_register rt
,
1017 struct aarch64_register rt2
,
1018 struct aarch64_register rn
)
1020 return aarch64_emit_insn (buf
, opcode
| ENCODE (size
, 2, 30)
1021 | ENCODE (rs
.num
, 5, 16) | ENCODE (rt2
.num
, 5, 10)
1022 | ENCODE (rn
.num
, 5, 5) | ENCODE (rt
.num
, 5, 0));
1025 /* Write a LAXR instruction into *BUF.
1029 RT is the destination register.
1030 RN is the base address register. */
1033 emit_ldaxr (uint32_t *buf
, struct aarch64_register rt
,
1034 struct aarch64_register rn
)
1036 return emit_load_store_exclusive (buf
, rt
.is64
? 3 : 2, LDAXR
, xzr
, rt
,
1040 /* Write a STXR instruction into *BUF.
1044 RS is the result register, it indicates if the store succeeded or not.
1045 RT is the destination register.
1046 RN is the base address register. */
1049 emit_stxr (uint32_t *buf
, struct aarch64_register rs
,
1050 struct aarch64_register rt
, struct aarch64_register rn
)
1052 return emit_load_store_exclusive (buf
, rt
.is64
? 3 : 2, STXR
, rs
, rt
,
1056 /* Write a STLR instruction into *BUF.
1060 RT is the register to store.
1061 RN is the base address register. */
1064 emit_stlr (uint32_t *buf
, struct aarch64_register rt
,
1065 struct aarch64_register rn
)
1067 return emit_load_store_exclusive (buf
, rt
.is64
? 3 : 2, STLR
, xzr
, rt
,
1071 /* Helper function for data processing instructions with register sources. */
1074 emit_data_processing_reg (uint32_t *buf
, uint32_t opcode
,
1075 struct aarch64_register rd
,
1076 struct aarch64_register rn
,
1077 struct aarch64_register rm
)
1079 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1081 return aarch64_emit_insn (buf
, opcode
| size
| ENCODE (rm
.num
, 5, 16)
1082 | ENCODE (rn
.num
, 5, 5) | ENCODE (rd
.num
, 5, 0));
1085 /* Helper function for data processing instructions taking either a register
1089 emit_data_processing (uint32_t *buf
, enum aarch64_opcodes opcode
,
1090 struct aarch64_register rd
,
1091 struct aarch64_register rn
,
1092 struct aarch64_operand operand
)
1094 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1095 /* The opcode is different for register and immediate source operands. */
1096 uint32_t operand_opcode
;
1098 if (operand
.type
== OPERAND_IMMEDIATE
)
1100 /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
1101 operand_opcode
= ENCODE (8, 4, 25);
1103 return aarch64_emit_insn (buf
, opcode
| operand_opcode
| size
1104 | ENCODE (operand
.imm
, 12, 10)
1105 | ENCODE (rn
.num
, 5, 5)
1106 | ENCODE (rd
.num
, 5, 0));
1110 /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
1111 operand_opcode
= ENCODE (5, 4, 25);
1113 return emit_data_processing_reg (buf
, opcode
| operand_opcode
, rd
,
1118 /* Write an ADD instruction into *BUF.
1123 This function handles both an immediate and register add.
1125 RD is the destination register.
1126 RN is the input register.
1127 OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
1128 OPERAND_REGISTER. */
1131 emit_add (uint32_t *buf
, struct aarch64_register rd
,
1132 struct aarch64_register rn
, struct aarch64_operand operand
)
1134 return emit_data_processing (buf
, ADD
, rd
, rn
, operand
);
1137 /* Write a SUB instruction into *BUF.
1142 This function handles both an immediate and register sub.
1144 RD is the destination register.
1145 RN is the input register.
1146 IMM is the immediate to substract to RN. */
1149 emit_sub (uint32_t *buf
, struct aarch64_register rd
,
1150 struct aarch64_register rn
, struct aarch64_operand operand
)
1152 return emit_data_processing (buf
, SUB
, rd
, rn
, operand
);
1155 /* Write a MOV instruction into *BUF.
1160 This function handles both a wide immediate move and a register move,
1161 with the condition that the source register is not xzr. xzr and the
1162 stack pointer share the same encoding and this function only supports
1165 RD is the destination register.
1166 OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
1167 OPERAND_REGISTER. */
1170 emit_mov (uint32_t *buf
, struct aarch64_register rd
,
1171 struct aarch64_operand operand
)
1173 if (operand
.type
== OPERAND_IMMEDIATE
)
1175 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1176 /* Do not shift the immediate. */
1177 uint32_t shift
= ENCODE (0, 2, 21);
1179 return aarch64_emit_insn (buf
, MOV
| size
| shift
1180 | ENCODE (operand
.imm
, 16, 5)
1181 | ENCODE (rd
.num
, 5, 0));
1184 return emit_add (buf
, rd
, operand
.reg
, immediate_operand (0));
1187 /* Write a MOVK instruction into *BUF.
1189 MOVK rd, #imm, lsl #shift
1191 RD is the destination register.
1192 IMM is the immediate.
1193 SHIFT is the logical shift left to apply to IMM. */
1196 emit_movk (uint32_t *buf
, struct aarch64_register rd
, uint32_t imm
,
1199 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1201 return aarch64_emit_insn (buf
, MOVK
| size
| ENCODE (shift
, 2, 21) |
1202 ENCODE (imm
, 16, 5) | ENCODE (rd
.num
, 5, 0));
1205 /* Write instructions into *BUF in order to move ADDR into a register.
1206 ADDR can be a 64-bit value.
1208 This function will emit a series of MOV and MOVK instructions, such as:
1211 MOVK xd, #(addr >> 16), lsl #16
1212 MOVK xd, #(addr >> 32), lsl #32
1213 MOVK xd, #(addr >> 48), lsl #48 */
1216 emit_mov_addr (uint32_t *buf
, struct aarch64_register rd
, CORE_ADDR addr
)
1220 /* The MOV (wide immediate) instruction clears to top bits of the
1222 p
+= emit_mov (p
, rd
, immediate_operand (addr
& 0xffff));
1224 if ((addr
>> 16) != 0)
1225 p
+= emit_movk (p
, rd
, (addr
>> 16) & 0xffff, 1);
1229 if ((addr
>> 32) != 0)
1230 p
+= emit_movk (p
, rd
, (addr
>> 32) & 0xffff, 2);
1234 if ((addr
>> 48) != 0)
1235 p
+= emit_movk (p
, rd
, (addr
>> 48) & 0xffff, 3);
1240 /* Write a SUBS instruction into *BUF.
1244 This instruction update the condition flags.
1246 RD is the destination register.
1247 RN and RM are the source registers. */
1250 emit_subs (uint32_t *buf
, struct aarch64_register rd
,
1251 struct aarch64_register rn
, struct aarch64_operand operand
)
1253 return emit_data_processing (buf
, SUBS
, rd
, rn
, operand
);
1256 /* Write a CMP instruction into *BUF.
1260 This instruction is an alias of SUBS xzr, rn, rm.
1262 RN and RM are the registers to compare. */
1265 emit_cmp (uint32_t *buf
, struct aarch64_register rn
,
1266 struct aarch64_operand operand
)
1268 return emit_subs (buf
, xzr
, rn
, operand
);
1271 /* Write a AND instruction into *BUF.
1275 RD is the destination register.
1276 RN and RM are the source registers. */
1279 emit_and (uint32_t *buf
, struct aarch64_register rd
,
1280 struct aarch64_register rn
, struct aarch64_register rm
)
1282 return emit_data_processing_reg (buf
, AND
, rd
, rn
, rm
);
1285 /* Write a ORR instruction into *BUF.
1289 RD is the destination register.
1290 RN and RM are the source registers. */
1293 emit_orr (uint32_t *buf
, struct aarch64_register rd
,
1294 struct aarch64_register rn
, struct aarch64_register rm
)
1296 return emit_data_processing_reg (buf
, ORR
, rd
, rn
, rm
);
1299 /* Write a ORN instruction into *BUF.
1303 RD is the destination register.
1304 RN and RM are the source registers. */
1307 emit_orn (uint32_t *buf
, struct aarch64_register rd
,
1308 struct aarch64_register rn
, struct aarch64_register rm
)
1310 return emit_data_processing_reg (buf
, ORN
, rd
, rn
, rm
);
1313 /* Write a EOR instruction into *BUF.
1317 RD is the destination register.
1318 RN and RM are the source registers. */
1321 emit_eor (uint32_t *buf
, struct aarch64_register rd
,
1322 struct aarch64_register rn
, struct aarch64_register rm
)
1324 return emit_data_processing_reg (buf
, EOR
, rd
, rn
, rm
);
1327 /* Write a MVN instruction into *BUF.
1331 This is an alias for ORN rd, xzr, rm.
1333 RD is the destination register.
1334 RM is the source register. */
1337 emit_mvn (uint32_t *buf
, struct aarch64_register rd
,
1338 struct aarch64_register rm
)
1340 return emit_orn (buf
, rd
, xzr
, rm
);
1343 /* Write a LSLV instruction into *BUF.
1347 RD is the destination register.
1348 RN and RM are the source registers. */
1351 emit_lslv (uint32_t *buf
, struct aarch64_register rd
,
1352 struct aarch64_register rn
, struct aarch64_register rm
)
1354 return emit_data_processing_reg (buf
, LSLV
, rd
, rn
, rm
);
1357 /* Write a LSRV instruction into *BUF.
1361 RD is the destination register.
1362 RN and RM are the source registers. */
1365 emit_lsrv (uint32_t *buf
, struct aarch64_register rd
,
1366 struct aarch64_register rn
, struct aarch64_register rm
)
1368 return emit_data_processing_reg (buf
, LSRV
, rd
, rn
, rm
);
1371 /* Write a ASRV instruction into *BUF.
1375 RD is the destination register.
1376 RN and RM are the source registers. */
1379 emit_asrv (uint32_t *buf
, struct aarch64_register rd
,
1380 struct aarch64_register rn
, struct aarch64_register rm
)
1382 return emit_data_processing_reg (buf
, ASRV
, rd
, rn
, rm
);
1385 /* Write a MUL instruction into *BUF.
1389 RD is the destination register.
1390 RN and RM are the source registers. */
1393 emit_mul (uint32_t *buf
, struct aarch64_register rd
,
1394 struct aarch64_register rn
, struct aarch64_register rm
)
1396 return emit_data_processing_reg (buf
, MUL
, rd
, rn
, rm
);
1399 /* Write a MRS instruction into *BUF. The register size is 64-bit.
1403 RT is the destination register.
1404 SYSTEM_REG is special purpose register to read. */
1407 emit_mrs (uint32_t *buf
, struct aarch64_register rt
,
1408 enum aarch64_system_control_registers system_reg
)
1410 return aarch64_emit_insn (buf
, MRS
| ENCODE (system_reg
, 15, 5)
1411 | ENCODE (rt
.num
, 5, 0));
1414 /* Write a MSR instruction into *BUF. The register size is 64-bit.
1418 SYSTEM_REG is special purpose register to write.
1419 RT is the input register. */
1422 emit_msr (uint32_t *buf
, enum aarch64_system_control_registers system_reg
,
1423 struct aarch64_register rt
)
1425 return aarch64_emit_insn (buf
, MSR
| ENCODE (system_reg
, 15, 5)
1426 | ENCODE (rt
.num
, 5, 0));
1429 /* Write a SEVL instruction into *BUF.
1431 This is a hint instruction telling the hardware to trigger an event. */
1434 emit_sevl (uint32_t *buf
)
1436 return aarch64_emit_insn (buf
, SEVL
);
1439 /* Write a WFE instruction into *BUF.
1441 This is a hint instruction telling the hardware to wait for an event. */
1444 emit_wfe (uint32_t *buf
)
1446 return aarch64_emit_insn (buf
, WFE
);
1449 /* Write a SBFM instruction into *BUF.
1451 SBFM rd, rn, #immr, #imms
1453 This instruction moves the bits from #immr to #imms into the
1454 destination, sign extending the result.
1456 RD is the destination register.
1457 RN is the source register.
1458 IMMR is the bit number to start at (least significant bit).
1459 IMMS is the bit number to stop at (most significant bit). */
1462 emit_sbfm (uint32_t *buf
, struct aarch64_register rd
,
1463 struct aarch64_register rn
, uint32_t immr
, uint32_t imms
)
1465 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1466 uint32_t n
= ENCODE (rd
.is64
, 1, 22);
1468 return aarch64_emit_insn (buf
, SBFM
| size
| n
| ENCODE (immr
, 6, 16)
1469 | ENCODE (imms
, 6, 10) | ENCODE (rn
.num
, 5, 5)
1470 | ENCODE (rd
.num
, 5, 0));
1473 /* Write a SBFX instruction into *BUF.
1475 SBFX rd, rn, #lsb, #width
1477 This instruction moves #width bits from #lsb into the destination, sign
1478 extending the result. This is an alias for:
1480 SBFM rd, rn, #lsb, #(lsb + width - 1)
1482 RD is the destination register.
1483 RN is the source register.
1484 LSB is the bit number to start at (least significant bit).
1485 WIDTH is the number of bits to move. */
1488 emit_sbfx (uint32_t *buf
, struct aarch64_register rd
,
1489 struct aarch64_register rn
, uint32_t lsb
, uint32_t width
)
1491 return emit_sbfm (buf
, rd
, rn
, lsb
, lsb
+ width
- 1);
1494 /* Write a UBFM instruction into *BUF.
1496 UBFM rd, rn, #immr, #imms
1498 This instruction moves the bits from #immr to #imms into the
1499 destination, extending the result with zeros.
1501 RD is the destination register.
1502 RN is the source register.
1503 IMMR is the bit number to start at (least significant bit).
1504 IMMS is the bit number to stop at (most significant bit). */
1507 emit_ubfm (uint32_t *buf
, struct aarch64_register rd
,
1508 struct aarch64_register rn
, uint32_t immr
, uint32_t imms
)
1510 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1511 uint32_t n
= ENCODE (rd
.is64
, 1, 22);
1513 return aarch64_emit_insn (buf
, UBFM
| size
| n
| ENCODE (immr
, 6, 16)
1514 | ENCODE (imms
, 6, 10) | ENCODE (rn
.num
, 5, 5)
1515 | ENCODE (rd
.num
, 5, 0));
1518 /* Write a UBFX instruction into *BUF.
1520 UBFX rd, rn, #lsb, #width
1522 This instruction moves #width bits from #lsb into the destination,
1523 extending the result with zeros. This is an alias for:
1525 UBFM rd, rn, #lsb, #(lsb + width - 1)
1527 RD is the destination register.
1528 RN is the source register.
1529 LSB is the bit number to start at (least significant bit).
1530 WIDTH is the number of bits to move. */
1533 emit_ubfx (uint32_t *buf
, struct aarch64_register rd
,
1534 struct aarch64_register rn
, uint32_t lsb
, uint32_t width
)
1536 return emit_ubfm (buf
, rd
, rn
, lsb
, lsb
+ width
- 1);
1539 /* Write a CSINC instruction into *BUF.
1541 CSINC rd, rn, rm, cond
1543 This instruction conditionally increments rn or rm and places the result
1544 in rd. rn is chosen is the condition is true.
1546 RD is the destination register.
1547 RN and RM are the source registers.
1548 COND is the encoded condition. */
1551 emit_csinc (uint32_t *buf
, struct aarch64_register rd
,
1552 struct aarch64_register rn
, struct aarch64_register rm
,
1555 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1557 return aarch64_emit_insn (buf
, CSINC
| size
| ENCODE (rm
.num
, 5, 16)
1558 | ENCODE (cond
, 4, 12) | ENCODE (rn
.num
, 5, 5)
1559 | ENCODE (rd
.num
, 5, 0));
1562 /* Write a CSET instruction into *BUF.
1566 This instruction conditionally write 1 or 0 in the destination register.
1567 1 is written if the condition is true. This is an alias for:
1569 CSINC rd, xzr, xzr, !cond
1571 Note that the condition needs to be inverted.
1573 RD is the destination register.
1574 RN and RM are the source registers.
1575 COND is the encoded condition. */
1578 emit_cset (uint32_t *buf
, struct aarch64_register rd
, unsigned cond
)
1580 /* The least significant bit of the condition needs toggling in order to
1582 return emit_csinc (buf
, rd
, xzr
, xzr
, cond
^ 0x1);
1585 /* Write LEN instructions from BUF into the inferior memory at *TO.
1587 Note instructions are always little endian on AArch64, unlike data. */
1590 append_insns (CORE_ADDR
*to
, size_t len
, const uint32_t *buf
)
1592 size_t byte_len
= len
* sizeof (uint32_t);
1593 #if (__BYTE_ORDER == __BIG_ENDIAN)
1594 uint32_t *le_buf
= (uint32_t *) xmalloc (byte_len
);
1597 for (i
= 0; i
< len
; i
++)
1598 le_buf
[i
] = htole32 (buf
[i
]);
1600 write_inferior_memory (*to
, (const unsigned char *) le_buf
, byte_len
);
1604 write_inferior_memory (*to
, (const unsigned char *) buf
, byte_len
);
1610 /* Sub-class of struct aarch64_insn_data, store information of
1611 instruction relocation for fast tracepoint. Visitor can
1612 relocate an instruction from BASE.INSN_ADDR to NEW_ADDR and save
1613 the relocated instructions in buffer pointed by INSN_PTR. */
1615 struct aarch64_insn_relocation_data
1617 struct aarch64_insn_data base
;
1619 /* The new address the instruction is relocated to. */
1621 /* Pointer to the buffer of relocated instruction(s). */
1625 /* Implementation of aarch64_insn_visitor method "b". */
1628 aarch64_ftrace_insn_reloc_b (const int is_bl
, const int32_t offset
,
1629 struct aarch64_insn_data
*data
)
1631 struct aarch64_insn_relocation_data
*insn_reloc
1632 = (struct aarch64_insn_relocation_data
*) data
;
1634 = insn_reloc
->base
.insn_addr
- insn_reloc
->new_addr
+ offset
;
1636 if (can_encode_int32 (new_offset
, 28))
1637 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, is_bl
, new_offset
);
1640 /* Implementation of aarch64_insn_visitor method "b_cond". */
1643 aarch64_ftrace_insn_reloc_b_cond (const unsigned cond
, const int32_t offset
,
1644 struct aarch64_insn_data
*data
)
1646 struct aarch64_insn_relocation_data
*insn_reloc
1647 = (struct aarch64_insn_relocation_data
*) data
;
1649 = insn_reloc
->base
.insn_addr
- insn_reloc
->new_addr
+ offset
;
1651 if (can_encode_int32 (new_offset
, 21))
1653 insn_reloc
->insn_ptr
+= emit_bcond (insn_reloc
->insn_ptr
, cond
,
1656 else if (can_encode_int32 (new_offset
, 28))
1658 /* The offset is out of range for a conditional branch
1659 instruction but not for a unconditional branch. We can use
1660 the following instructions instead:
1662 B.COND TAKEN ; If cond is true, then jump to TAKEN.
1663 B NOT_TAKEN ; Else jump over TAKEN and continue.
1670 insn_reloc
->insn_ptr
+= emit_bcond (insn_reloc
->insn_ptr
, cond
, 8);
1671 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, 0, 8);
1672 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, 0, new_offset
- 8);
1676 /* Implementation of aarch64_insn_visitor method "cb". */
1679 aarch64_ftrace_insn_reloc_cb (const int32_t offset
, const int is_cbnz
,
1680 const unsigned rn
, int is64
,
1681 struct aarch64_insn_data
*data
)
1683 struct aarch64_insn_relocation_data
*insn_reloc
1684 = (struct aarch64_insn_relocation_data
*) data
;
1686 = insn_reloc
->base
.insn_addr
- insn_reloc
->new_addr
+ offset
;
1688 if (can_encode_int32 (new_offset
, 21))
1690 insn_reloc
->insn_ptr
+= emit_cb (insn_reloc
->insn_ptr
, is_cbnz
,
1691 aarch64_register (rn
, is64
), new_offset
);
1693 else if (can_encode_int32 (new_offset
, 28))
1695 /* The offset is out of range for a compare and branch
1696 instruction but not for a unconditional branch. We can use
1697 the following instructions instead:
1699 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
1700 B NOT_TAKEN ; Else jump over TAKEN and continue.
1706 insn_reloc
->insn_ptr
+= emit_cb (insn_reloc
->insn_ptr
, is_cbnz
,
1707 aarch64_register (rn
, is64
), 8);
1708 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, 0, 8);
1709 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, 0, new_offset
- 8);
1713 /* Implementation of aarch64_insn_visitor method "tb". */
1716 aarch64_ftrace_insn_reloc_tb (const int32_t offset
, int is_tbnz
,
1717 const unsigned rt
, unsigned bit
,
1718 struct aarch64_insn_data
*data
)
1720 struct aarch64_insn_relocation_data
*insn_reloc
1721 = (struct aarch64_insn_relocation_data
*) data
;
1723 = insn_reloc
->base
.insn_addr
- insn_reloc
->new_addr
+ offset
;
1725 if (can_encode_int32 (new_offset
, 16))
1727 insn_reloc
->insn_ptr
+= emit_tb (insn_reloc
->insn_ptr
, is_tbnz
, bit
,
1728 aarch64_register (rt
, 1), new_offset
);
1730 else if (can_encode_int32 (new_offset
, 28))
1732 /* The offset is out of range for a test bit and branch
1733 instruction but not for a unconditional branch. We can use
1734 the following instructions instead:
1736 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
1737 B NOT_TAKEN ; Else jump over TAKEN and continue.
1743 insn_reloc
->insn_ptr
+= emit_tb (insn_reloc
->insn_ptr
, is_tbnz
, bit
,
1744 aarch64_register (rt
, 1), 8);
1745 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, 0, 8);
1746 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, 0,
1751 /* Implementation of aarch64_insn_visitor method "adr". */
1754 aarch64_ftrace_insn_reloc_adr (const int32_t offset
, const unsigned rd
,
1756 struct aarch64_insn_data
*data
)
1758 struct aarch64_insn_relocation_data
*insn_reloc
1759 = (struct aarch64_insn_relocation_data
*) data
;
1760 /* We know exactly the address the ADR{P,} instruction will compute.
1761 We can just write it to the destination register. */
1762 CORE_ADDR address
= data
->insn_addr
+ offset
;
1766 /* Clear the lower 12 bits of the offset to get the 4K page. */
1767 insn_reloc
->insn_ptr
+= emit_mov_addr (insn_reloc
->insn_ptr
,
1768 aarch64_register (rd
, 1),
1772 insn_reloc
->insn_ptr
+= emit_mov_addr (insn_reloc
->insn_ptr
,
1773 aarch64_register (rd
, 1), address
);
1776 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
1779 aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset
, const int is_sw
,
1780 const unsigned rt
, const int is64
,
1781 struct aarch64_insn_data
*data
)
1783 struct aarch64_insn_relocation_data
*insn_reloc
1784 = (struct aarch64_insn_relocation_data
*) data
;
1785 CORE_ADDR address
= data
->insn_addr
+ offset
;
1787 insn_reloc
->insn_ptr
+= emit_mov_addr (insn_reloc
->insn_ptr
,
1788 aarch64_register (rt
, 1), address
);
1790 /* We know exactly what address to load from, and what register we
1793 MOV xd, #(oldloc + offset)
1794 MOVK xd, #((oldloc + offset) >> 16), lsl #16
1797 LDR xd, [xd] ; or LDRSW xd, [xd]
1802 insn_reloc
->insn_ptr
+= emit_ldrsw (insn_reloc
->insn_ptr
,
1803 aarch64_register (rt
, 1),
1804 aarch64_register (rt
, 1),
1805 offset_memory_operand (0));
1807 insn_reloc
->insn_ptr
+= emit_ldr (insn_reloc
->insn_ptr
,
1808 aarch64_register (rt
, is64
),
1809 aarch64_register (rt
, 1),
1810 offset_memory_operand (0));
1813 /* Implementation of aarch64_insn_visitor method "others". */
1816 aarch64_ftrace_insn_reloc_others (const uint32_t insn
,
1817 struct aarch64_insn_data
*data
)
1819 struct aarch64_insn_relocation_data
*insn_reloc
1820 = (struct aarch64_insn_relocation_data
*) data
;
1822 /* The instruction is not PC relative. Just re-emit it at the new
1824 insn_reloc
->insn_ptr
+= aarch64_emit_insn (insn_reloc
->insn_ptr
, insn
);
1827 static const struct aarch64_insn_visitor visitor
=
1829 aarch64_ftrace_insn_reloc_b
,
1830 aarch64_ftrace_insn_reloc_b_cond
,
1831 aarch64_ftrace_insn_reloc_cb
,
1832 aarch64_ftrace_insn_reloc_tb
,
1833 aarch64_ftrace_insn_reloc_adr
,
1834 aarch64_ftrace_insn_reloc_ldr_literal
,
1835 aarch64_ftrace_insn_reloc_others
,
1838 /* Implementation of linux_target_ops method
1839 "install_fast_tracepoint_jump_pad". */
1842 aarch64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
,
1844 CORE_ADDR collector
,
1847 CORE_ADDR
*jump_entry
,
1848 CORE_ADDR
*trampoline
,
1849 ULONGEST
*trampoline_size
,
1850 unsigned char *jjump_pad_insn
,
1851 ULONGEST
*jjump_pad_insn_size
,
1852 CORE_ADDR
*adjusted_insn_addr
,
1853 CORE_ADDR
*adjusted_insn_addr_end
,
1861 CORE_ADDR buildaddr
= *jump_entry
;
1862 struct aarch64_insn_relocation_data insn_data
;
1864 /* We need to save the current state on the stack both to restore it
1865 later and to collect register values when the tracepoint is hit.
1867 The saved registers are pushed in a layout that needs to be in sync
1868 with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c). Later on
1869 the supply_fast_tracepoint_registers function will fill in the
1870 register cache from a pointer to saved registers on the stack we build
1873 For simplicity, we set the size of each cell on the stack to 16 bytes.
1874 This way one cell can hold any register type, from system registers
1875 to the 128 bit SIMD&FP registers. Furthermore, the stack pointer
1876 has to be 16 bytes aligned anyway.
1878 Note that the CPSR register does not exist on AArch64. Instead we
1879 can access system bits describing the process state with the
1880 MRS/MSR instructions, namely the condition flags. We save them as
1881 if they are part of a CPSR register because that's how GDB
1882 interprets these system bits. At the moment, only the condition
1883 flags are saved in CPSR (NZCV).
1885 Stack layout, each cell is 16 bytes (descending):
1887 High *-------- SIMD&FP registers from 31 down to 0. --------*
1893 *---- General purpose registers from 30 down to 0. ----*
1899 *------------- Special purpose registers. -------------*
1902 | CPSR (NZCV) | 5 cells
1905 *------------- collecting_t object --------------------*
1906 | TPIDR_EL0 | struct tracepoint * |
1907 Low *------------------------------------------------------*
1909 After this stack is set up, we issue a call to the collector, passing
1910 it the saved registers at (SP + 16). */
1912 /* Push SIMD&FP registers on the stack:
1914 SUB sp, sp, #(32 * 16)
1916 STP q30, q31, [sp, #(30 * 16)]
1921 p
+= emit_sub (p
, sp
, sp
, immediate_operand (32 * 16));
1922 for (i
= 30; i
>= 0; i
-= 2)
1923 p
+= emit_stp_q_offset (p
, i
, i
+ 1, sp
, i
* 16);
1925 /* Push general puspose registers on the stack. Note that we do not need
1926 to push x31 as it represents the xzr register and not the stack
1927 pointer in a STR instruction.
1929 SUB sp, sp, #(31 * 16)
1931 STR x30, [sp, #(30 * 16)]
1936 p
+= emit_sub (p
, sp
, sp
, immediate_operand (31 * 16));
1937 for (i
= 30; i
>= 0; i
-= 1)
1938 p
+= emit_str (p
, aarch64_register (i
, 1), sp
,
1939 offset_memory_operand (i
* 16));
1941 /* Make space for 5 more cells.
1943 SUB sp, sp, #(5 * 16)
1946 p
+= emit_sub (p
, sp
, sp
, immediate_operand (5 * 16));
1951 ADD x4, sp, #((32 + 31 + 5) * 16)
1952 STR x4, [sp, #(4 * 16)]
1955 p
+= emit_add (p
, x4
, sp
, immediate_operand ((32 + 31 + 5) * 16));
1956 p
+= emit_str (p
, x4
, sp
, offset_memory_operand (4 * 16));
1958 /* Save PC (tracepoint address):
1963 STR x3, [sp, #(3 * 16)]
1967 p
+= emit_mov_addr (p
, x3
, tpaddr
);
1968 p
+= emit_str (p
, x3
, sp
, offset_memory_operand (3 * 16));
1970 /* Save CPSR (NZCV), FPSR and FPCR:
1976 STR x2, [sp, #(2 * 16)]
1977 STR x1, [sp, #(1 * 16)]
1978 STR x0, [sp, #(0 * 16)]
1981 p
+= emit_mrs (p
, x2
, NZCV
);
1982 p
+= emit_mrs (p
, x1
, FPSR
);
1983 p
+= emit_mrs (p
, x0
, FPCR
);
1984 p
+= emit_str (p
, x2
, sp
, offset_memory_operand (2 * 16));
1985 p
+= emit_str (p
, x1
, sp
, offset_memory_operand (1 * 16));
1986 p
+= emit_str (p
, x0
, sp
, offset_memory_operand (0 * 16));
1988 /* Push the collecting_t object. It consist of the address of the
1989 tracepoint and an ID for the current thread. We get the latter by
1990 reading the tpidr_el0 system register. It corresponds to the
1991 NT_ARM_TLS register accessible with ptrace.
1998 STP x0, x1, [sp, #-16]!
2002 p
+= emit_mov_addr (p
, x0
, tpoint
);
2003 p
+= emit_mrs (p
, x1
, TPIDR_EL0
);
2004 p
+= emit_stp (p
, x0
, x1
, sp
, preindex_memory_operand (-16));
2008 The shared memory for the lock is at lockaddr. It will hold zero
2009 if no-one is holding the lock, otherwise it contains the address of
2010 the collecting_t object on the stack of the thread which acquired it.
2012 At this stage, the stack pointer points to this thread's collecting_t
2015 We use the following registers:
2016 - x0: Address of the lock.
2017 - x1: Pointer to collecting_t object.
2018 - x2: Scratch register.
2024 ; Trigger an event local to this core. So the following WFE
2025 ; instruction is ignored.
2028 ; Wait for an event. The event is triggered by either the SEVL
2029 ; or STLR instructions (store release).
2032 ; Atomically read at lockaddr. This marks the memory location as
2033 ; exclusive. This instruction also has memory constraints which
2034 ; make sure all previous data reads and writes are done before
2038 ; Try again if another thread holds the lock.
2041 ; We can lock it! Write the address of the collecting_t object.
2042 ; This instruction will fail if the memory location is not marked
2043 ; as exclusive anymore. If it succeeds, it will remove the
2044 ; exclusive mark on the memory location. This way, if another
2045 ; thread executes this instruction before us, we will fail and try
2052 p
+= emit_mov_addr (p
, x0
, lockaddr
);
2053 p
+= emit_mov (p
, x1
, register_operand (sp
));
2057 p
+= emit_ldaxr (p
, x2
, x0
);
2058 p
+= emit_cb (p
, 1, w2
, -2 * 4);
2059 p
+= emit_stxr (p
, w2
, x1
, x0
);
2060 p
+= emit_cb (p
, 1, x2
, -4 * 4);
2062 /* Call collector (struct tracepoint *, unsigned char *):
2067 ; Saved registers start after the collecting_t object.
2070 ; We use an intra-procedure-call scratch register.
2071 MOV ip0, #(collector)
2074 ; And call back to C!
2079 p
+= emit_mov_addr (p
, x0
, tpoint
);
2080 p
+= emit_add (p
, x1
, sp
, immediate_operand (16));
2082 p
+= emit_mov_addr (p
, ip0
, collector
);
2083 p
+= emit_blr (p
, ip0
);
2085 /* Release the lock.
2090 ; This instruction is a normal store with memory ordering
2091 ; constraints. Thanks to this we do not have to put a data
2092 ; barrier instruction to make sure all data read and writes are done
2093 ; before this instruction is executed. Furthermore, this instrucion
2094 ; will trigger an event, letting other threads know they can grab
2099 p
+= emit_mov_addr (p
, x0
, lockaddr
);
2100 p
+= emit_stlr (p
, xzr
, x0
);
2102 /* Free collecting_t object:
2107 p
+= emit_add (p
, sp
, sp
, immediate_operand (16));
2109 /* Restore CPSR (NZCV), FPSR and FPCR. And free all special purpose
2110 registers from the stack.
2112 LDR x2, [sp, #(2 * 16)]
2113 LDR x1, [sp, #(1 * 16)]
2114 LDR x0, [sp, #(0 * 16)]
2120 ADD sp, sp #(5 * 16)
2123 p
+= emit_ldr (p
, x2
, sp
, offset_memory_operand (2 * 16));
2124 p
+= emit_ldr (p
, x1
, sp
, offset_memory_operand (1 * 16));
2125 p
+= emit_ldr (p
, x0
, sp
, offset_memory_operand (0 * 16));
2126 p
+= emit_msr (p
, NZCV
, x2
);
2127 p
+= emit_msr (p
, FPSR
, x1
);
2128 p
+= emit_msr (p
, FPCR
, x0
);
2130 p
+= emit_add (p
, sp
, sp
, immediate_operand (5 * 16));
2132 /* Pop general purpose registers:
2136 LDR x30, [sp, #(30 * 16)]
2138 ADD sp, sp, #(31 * 16)
2141 for (i
= 0; i
<= 30; i
+= 1)
2142 p
+= emit_ldr (p
, aarch64_register (i
, 1), sp
,
2143 offset_memory_operand (i
* 16));
2144 p
+= emit_add (p
, sp
, sp
, immediate_operand (31 * 16));
2146 /* Pop SIMD&FP registers:
2150 LDP q30, q31, [sp, #(30 * 16)]
2152 ADD sp, sp, #(32 * 16)
2155 for (i
= 0; i
<= 30; i
+= 2)
2156 p
+= emit_ldp_q_offset (p
, i
, i
+ 1, sp
, i
* 16);
2157 p
+= emit_add (p
, sp
, sp
, immediate_operand (32 * 16));
2159 /* Write the code into the inferior memory. */
2160 append_insns (&buildaddr
, p
- buf
, buf
);
2162 /* Now emit the relocated instruction. */
2163 *adjusted_insn_addr
= buildaddr
;
2164 target_read_uint32 (tpaddr
, &insn
);
2166 insn_data
.base
.insn_addr
= tpaddr
;
2167 insn_data
.new_addr
= buildaddr
;
2168 insn_data
.insn_ptr
= buf
;
2170 aarch64_relocate_instruction (insn
, &visitor
,
2171 (struct aarch64_insn_data
*) &insn_data
);
2173 /* We may not have been able to relocate the instruction. */
2174 if (insn_data
.insn_ptr
== buf
)
2177 "E.Could not relocate instruction from %s to %s.",
2178 core_addr_to_string_nz (tpaddr
),
2179 core_addr_to_string_nz (buildaddr
));
2183 append_insns (&buildaddr
, insn_data
.insn_ptr
- buf
, buf
);
2184 *adjusted_insn_addr_end
= buildaddr
;
2186 /* Go back to the start of the buffer. */
2189 /* Emit a branch back from the jump pad. */
2190 offset
= (tpaddr
+ orig_size
- buildaddr
);
2191 if (!can_encode_int32 (offset
, 28))
2194 "E.Jump back from jump pad too far from tracepoint "
2195 "(offset 0x%" PRIx64
" cannot be encoded in 28 bits).",
2200 p
+= emit_b (p
, 0, offset
);
2201 append_insns (&buildaddr
, p
- buf
, buf
);
2203 /* Give the caller a branch instruction into the jump pad. */
2204 offset
= (*jump_entry
- tpaddr
);
2205 if (!can_encode_int32 (offset
, 28))
2208 "E.Jump pad too far from tracepoint "
2209 "(offset 0x%" PRIx64
" cannot be encoded in 28 bits).",
2214 emit_b ((uint32_t *) jjump_pad_insn
, 0, offset
);
2215 *jjump_pad_insn_size
= 4;
2217 /* Return the end address of our pad. */
2218 *jump_entry
= buildaddr
;
2223 /* Helper function writing LEN instructions from START into
2224 current_insn_ptr. */
2227 emit_ops_insns (const uint32_t *start
, int len
)
2229 CORE_ADDR buildaddr
= current_insn_ptr
;
2232 debug_printf ("Adding %d instrucions at %s\n",
2233 len
, paddress (buildaddr
));
2235 append_insns (&buildaddr
, len
, start
);
2236 current_insn_ptr
= buildaddr
;
2239 /* Pop a register from the stack. */
2242 emit_pop (uint32_t *buf
, struct aarch64_register rt
)
2244 return emit_ldr (buf
, rt
, sp
, postindex_memory_operand (1 * 16));
2247 /* Push a register on the stack. */
2250 emit_push (uint32_t *buf
, struct aarch64_register rt
)
2252 return emit_str (buf
, rt
, sp
, preindex_memory_operand (-1 * 16));
2255 /* Implementation of emit_ops method "emit_prologue". */
2258 aarch64_emit_prologue (void)
2263 /* This function emit a prologue for the following function prototype:
2265 enum eval_result_type f (unsigned char *regs,
2268 The first argument is a buffer of raw registers. The second
2269 argument is the result of
2270 evaluating the expression, which will be set to whatever is on top of
2271 the stack at the end.
2273 The stack set up by the prologue is as such:
2275 High *------------------------------------------------------*
2278 | x1 (ULONGEST *value) |
2279 | x0 (unsigned char *regs) |
2280 Low *------------------------------------------------------*
2282 As we are implementing a stack machine, each opcode can expand the
2283 stack so we never know how far we are from the data saved by this
2284 prologue. In order to be able refer to value and regs later, we save
2285 the current stack pointer in the frame pointer. This way, it is not
2286 clobbered when calling C functions.
2288 Finally, throughtout every operation, we are using register x0 as the
2289 top of the stack, and x1 as a scratch register. */
2291 p
+= emit_stp (p
, x0
, x1
, sp
, preindex_memory_operand (-2 * 16));
2292 p
+= emit_str (p
, lr
, sp
, offset_memory_operand (3 * 8));
2293 p
+= emit_str (p
, fp
, sp
, offset_memory_operand (2 * 8));
2295 p
+= emit_add (p
, fp
, sp
, immediate_operand (2 * 8));
2298 emit_ops_insns (buf
, p
- buf
);
2301 /* Implementation of emit_ops method "emit_epilogue". */
2304 aarch64_emit_epilogue (void)
2309 /* Store the result of the expression (x0) in *value. */
2310 p
+= emit_sub (p
, x1
, fp
, immediate_operand (1 * 8));
2311 p
+= emit_ldr (p
, x1
, x1
, offset_memory_operand (0));
2312 p
+= emit_str (p
, x0
, x1
, offset_memory_operand (0));
2314 /* Restore the previous state. */
2315 p
+= emit_add (p
, sp
, fp
, immediate_operand (2 * 8));
2316 p
+= emit_ldp (p
, fp
, lr
, fp
, offset_memory_operand (0));
2318 /* Return expr_eval_no_error. */
2319 p
+= emit_mov (p
, x0
, immediate_operand (expr_eval_no_error
));
2320 p
+= emit_ret (p
, lr
);
2322 emit_ops_insns (buf
, p
- buf
);
2325 /* Implementation of emit_ops method "emit_add". */
2328 aarch64_emit_add (void)
2333 p
+= emit_pop (p
, x1
);
2334 p
+= emit_add (p
, x0
, x1
, register_operand (x0
));
2336 emit_ops_insns (buf
, p
- buf
);
2339 /* Implementation of emit_ops method "emit_sub". */
2342 aarch64_emit_sub (void)
2347 p
+= emit_pop (p
, x1
);
2348 p
+= emit_sub (p
, x0
, x1
, register_operand (x0
));
2350 emit_ops_insns (buf
, p
- buf
);
2353 /* Implementation of emit_ops method "emit_mul". */
2356 aarch64_emit_mul (void)
2361 p
+= emit_pop (p
, x1
);
2362 p
+= emit_mul (p
, x0
, x1
, x0
);
2364 emit_ops_insns (buf
, p
- buf
);
2367 /* Implementation of emit_ops method "emit_lsh". */
2370 aarch64_emit_lsh (void)
2375 p
+= emit_pop (p
, x1
);
2376 p
+= emit_lslv (p
, x0
, x1
, x0
);
2378 emit_ops_insns (buf
, p
- buf
);
2381 /* Implementation of emit_ops method "emit_rsh_signed". */
2384 aarch64_emit_rsh_signed (void)
2389 p
+= emit_pop (p
, x1
);
2390 p
+= emit_asrv (p
, x0
, x1
, x0
);
2392 emit_ops_insns (buf
, p
- buf
);
2395 /* Implementation of emit_ops method "emit_rsh_unsigned". */
2398 aarch64_emit_rsh_unsigned (void)
2403 p
+= emit_pop (p
, x1
);
2404 p
+= emit_lsrv (p
, x0
, x1
, x0
);
2406 emit_ops_insns (buf
, p
- buf
);
2409 /* Implementation of emit_ops method "emit_ext". */
2412 aarch64_emit_ext (int arg
)
2417 p
+= emit_sbfx (p
, x0
, x0
, 0, arg
);
2419 emit_ops_insns (buf
, p
- buf
);
2422 /* Implementation of emit_ops method "emit_log_not". */
2425 aarch64_emit_log_not (void)
2430 /* If the top of the stack is 0, replace it with 1. Else replace it with
2433 p
+= emit_cmp (p
, x0
, immediate_operand (0));
2434 p
+= emit_cset (p
, x0
, EQ
);
2436 emit_ops_insns (buf
, p
- buf
);
2439 /* Implementation of emit_ops method "emit_bit_and". */
2442 aarch64_emit_bit_and (void)
2447 p
+= emit_pop (p
, x1
);
2448 p
+= emit_and (p
, x0
, x0
, x1
);
2450 emit_ops_insns (buf
, p
- buf
);
2453 /* Implementation of emit_ops method "emit_bit_or". */
2456 aarch64_emit_bit_or (void)
2461 p
+= emit_pop (p
, x1
);
2462 p
+= emit_orr (p
, x0
, x0
, x1
);
2464 emit_ops_insns (buf
, p
- buf
);
2467 /* Implementation of emit_ops method "emit_bit_xor". */
2470 aarch64_emit_bit_xor (void)
2475 p
+= emit_pop (p
, x1
);
2476 p
+= emit_eor (p
, x0
, x0
, x1
);
2478 emit_ops_insns (buf
, p
- buf
);
2481 /* Implementation of emit_ops method "emit_bit_not". */
2484 aarch64_emit_bit_not (void)
2489 p
+= emit_mvn (p
, x0
, x0
);
2491 emit_ops_insns (buf
, p
- buf
);
2494 /* Implementation of emit_ops method "emit_equal". */
2497 aarch64_emit_equal (void)
2502 p
+= emit_pop (p
, x1
);
2503 p
+= emit_cmp (p
, x0
, register_operand (x1
));
2504 p
+= emit_cset (p
, x0
, EQ
);
2506 emit_ops_insns (buf
, p
- buf
);
2509 /* Implementation of emit_ops method "emit_less_signed". */
2512 aarch64_emit_less_signed (void)
2517 p
+= emit_pop (p
, x1
);
2518 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2519 p
+= emit_cset (p
, x0
, LT
);
2521 emit_ops_insns (buf
, p
- buf
);
2524 /* Implementation of emit_ops method "emit_less_unsigned". */
2527 aarch64_emit_less_unsigned (void)
2532 p
+= emit_pop (p
, x1
);
2533 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2534 p
+= emit_cset (p
, x0
, LO
);
2536 emit_ops_insns (buf
, p
- buf
);
2539 /* Implementation of emit_ops method "emit_ref". */
2542 aarch64_emit_ref (int size
)
2550 p
+= emit_ldrb (p
, w0
, x0
, offset_memory_operand (0));
2553 p
+= emit_ldrh (p
, w0
, x0
, offset_memory_operand (0));
2556 p
+= emit_ldr (p
, w0
, x0
, offset_memory_operand (0));
2559 p
+= emit_ldr (p
, x0
, x0
, offset_memory_operand (0));
2562 /* Unknown size, bail on compilation. */
2567 emit_ops_insns (buf
, p
- buf
);
2570 /* Implementation of emit_ops method "emit_if_goto". */
2573 aarch64_emit_if_goto (int *offset_p
, int *size_p
)
2578 /* The Z flag is set or cleared here. */
2579 p
+= emit_cmp (p
, x0
, immediate_operand (0));
2580 /* This instruction must not change the Z flag. */
2581 p
+= emit_pop (p
, x0
);
2582 /* Branch over the next instruction if x0 == 0. */
2583 p
+= emit_bcond (p
, EQ
, 8);
2585 /* The NOP instruction will be patched with an unconditional branch. */
2587 *offset_p
= (p
- buf
) * 4;
2592 emit_ops_insns (buf
, p
- buf
);
/* Implementation of emit_ops method "emit_goto".  */

static void
aarch64_emit_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = 0;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
2613 /* Implementation of emit_ops method "write_goto_address". */
2616 aarch64_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
2620 emit_b (&insn
, 0, to
- from
);
2621 append_insns (&from
, 1, &insn
);
2624 /* Implementation of emit_ops method "emit_const". */
2627 aarch64_emit_const (LONGEST num
)
2632 p
+= emit_mov_addr (p
, x0
, num
);
2634 emit_ops_insns (buf
, p
- buf
);
2637 /* Implementation of emit_ops method "emit_call". */
2640 aarch64_emit_call (CORE_ADDR fn
)
2645 p
+= emit_mov_addr (p
, ip0
, fn
);
2646 p
+= emit_blr (p
, ip0
);
2648 emit_ops_insns (buf
, p
- buf
);
2651 /* Implementation of emit_ops method "emit_reg". */
2654 aarch64_emit_reg (int reg
)
2659 /* Set x0 to unsigned char *regs. */
2660 p
+= emit_sub (p
, x0
, fp
, immediate_operand (2 * 8));
2661 p
+= emit_ldr (p
, x0
, x0
, offset_memory_operand (0));
2662 p
+= emit_mov (p
, x1
, immediate_operand (reg
));
2664 emit_ops_insns (buf
, p
- buf
);
2666 aarch64_emit_call (get_raw_reg_func_addr ());
2669 /* Implementation of emit_ops method "emit_pop". */
2672 aarch64_emit_pop (void)
2677 p
+= emit_pop (p
, x0
);
2679 emit_ops_insns (buf
, p
- buf
);
2682 /* Implementation of emit_ops method "emit_stack_flush". */
2685 aarch64_emit_stack_flush (void)
2690 p
+= emit_push (p
, x0
);
2692 emit_ops_insns (buf
, p
- buf
);
2695 /* Implementation of emit_ops method "emit_zero_ext". */
2698 aarch64_emit_zero_ext (int arg
)
2703 p
+= emit_ubfx (p
, x0
, x0
, 0, arg
);
2705 emit_ops_insns (buf
, p
- buf
);
2708 /* Implementation of emit_ops method "emit_swap". */
2711 aarch64_emit_swap (void)
2716 p
+= emit_ldr (p
, x1
, sp
, offset_memory_operand (0 * 16));
2717 p
+= emit_str (p
, x0
, sp
, offset_memory_operand (0 * 16));
2718 p
+= emit_mov (p
, x0
, register_operand (x1
));
2720 emit_ops_insns (buf
, p
- buf
);
2723 /* Implementation of emit_ops method "emit_stack_adjust". */
2726 aarch64_emit_stack_adjust (int n
)
2728 /* This is not needed with our design. */
2732 p
+= emit_add (p
, sp
, sp
, immediate_operand (n
* 16));
2734 emit_ops_insns (buf
, p
- buf
);
2737 /* Implementation of emit_ops method "emit_int_call_1". */
2740 aarch64_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
2745 p
+= emit_mov (p
, x0
, immediate_operand (arg1
));
2747 emit_ops_insns (buf
, p
- buf
);
2749 aarch64_emit_call (fn
);
2752 /* Implementation of emit_ops method "emit_void_call_2". */
2755 aarch64_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
2760 /* Push x0 on the stack. */
2761 aarch64_emit_stack_flush ();
2763 /* Setup arguments for the function call:
2766 x1: top of the stack
2771 p
+= emit_mov (p
, x1
, register_operand (x0
));
2772 p
+= emit_mov (p
, x0
, immediate_operand (arg1
));
2774 emit_ops_insns (buf
, p
- buf
);
2776 aarch64_emit_call (fn
);
2779 aarch64_emit_pop ();
2782 /* Implementation of emit_ops method "emit_eq_goto". */
2785 aarch64_emit_eq_goto (int *offset_p
, int *size_p
)
2790 p
+= emit_pop (p
, x1
);
2791 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2792 /* Branch over the next instruction if x0 != x1. */
2793 p
+= emit_bcond (p
, NE
, 8);
2794 /* The NOP instruction will be patched with an unconditional branch. */
2796 *offset_p
= (p
- buf
) * 4;
2801 emit_ops_insns (buf
, p
- buf
);
2804 /* Implementation of emit_ops method "emit_ne_goto". */
2807 aarch64_emit_ne_goto (int *offset_p
, int *size_p
)
2812 p
+= emit_pop (p
, x1
);
2813 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2814 /* Branch over the next instruction if x0 == x1. */
2815 p
+= emit_bcond (p
, EQ
, 8);
2816 /* The NOP instruction will be patched with an unconditional branch. */
2818 *offset_p
= (p
- buf
) * 4;
2823 emit_ops_insns (buf
, p
- buf
);
2826 /* Implementation of emit_ops method "emit_lt_goto". */
2829 aarch64_emit_lt_goto (int *offset_p
, int *size_p
)
2834 p
+= emit_pop (p
, x1
);
2835 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2836 /* Branch over the next instruction if x0 >= x1. */
2837 p
+= emit_bcond (p
, GE
, 8);
2838 /* The NOP instruction will be patched with an unconditional branch. */
2840 *offset_p
= (p
- buf
) * 4;
2845 emit_ops_insns (buf
, p
- buf
);
2848 /* Implementation of emit_ops method "emit_le_goto". */
2851 aarch64_emit_le_goto (int *offset_p
, int *size_p
)
2856 p
+= emit_pop (p
, x1
);
2857 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2858 /* Branch over the next instruction if x0 > x1. */
2859 p
+= emit_bcond (p
, GT
, 8);
2860 /* The NOP instruction will be patched with an unconditional branch. */
2862 *offset_p
= (p
- buf
) * 4;
2867 emit_ops_insns (buf
, p
- buf
);
2870 /* Implementation of emit_ops method "emit_gt_goto". */
2873 aarch64_emit_gt_goto (int *offset_p
, int *size_p
)
2878 p
+= emit_pop (p
, x1
);
2879 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2880 /* Branch over the next instruction if x0 <= x1. */
2881 p
+= emit_bcond (p
, LE
, 8);
2882 /* The NOP instruction will be patched with an unconditional branch. */
2884 *offset_p
= (p
- buf
) * 4;
2889 emit_ops_insns (buf
, p
- buf
);
2892 /* Implementation of emit_ops method "emit_ge_got". */
2895 aarch64_emit_ge_got (int *offset_p
, int *size_p
)
2900 p
+= emit_pop (p
, x1
);
2901 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2902 /* Branch over the next instruction if x0 <= x1. */
2903 p
+= emit_bcond (p
, LT
, 8);
2904 /* The NOP instruction will be patched with an unconditional branch. */
2906 *offset_p
= (p
- buf
) * 4;
2911 emit_ops_insns (buf
, p
- buf
);
2914 static struct emit_ops aarch64_emit_ops_impl
=
2916 aarch64_emit_prologue
,
2917 aarch64_emit_epilogue
,
2922 aarch64_emit_rsh_signed
,
2923 aarch64_emit_rsh_unsigned
,
2925 aarch64_emit_log_not
,
2926 aarch64_emit_bit_and
,
2927 aarch64_emit_bit_or
,
2928 aarch64_emit_bit_xor
,
2929 aarch64_emit_bit_not
,
2931 aarch64_emit_less_signed
,
2932 aarch64_emit_less_unsigned
,
2934 aarch64_emit_if_goto
,
2936 aarch64_write_goto_address
,
2941 aarch64_emit_stack_flush
,
2942 aarch64_emit_zero_ext
,
2944 aarch64_emit_stack_adjust
,
2945 aarch64_emit_int_call_1
,
2946 aarch64_emit_void_call_2
,
2947 aarch64_emit_eq_goto
,
2948 aarch64_emit_ne_goto
,
2949 aarch64_emit_lt_goto
,
2950 aarch64_emit_le_goto
,
2951 aarch64_emit_gt_goto
,
2952 aarch64_emit_ge_got
,
2955 /* Implementation of linux_target_ops method "emit_ops". */
2957 static struct emit_ops
*
2958 aarch64_emit_ops (void)
2960 return &aarch64_emit_ops_impl
;
/* Implementation of linux_target_ops method
   "get_min_fast_tracepoint_insn_len".

   A fast tracepoint needs to replace exactly one instruction, and
   every AArch64 instruction is 4 bytes.  */

static int
aarch64_get_min_fast_tracepoint_insn_len (void)
{
  return 4;
}
/* Implementation of linux_target_ops method "supports_range_stepping".  */

static int
aarch64_supports_range_stepping (void)
{
  return 1;
}
2980 /* Implementation of linux_target_ops method "sw_breakpoint_from_kind". */
2982 static const gdb_byte
*
2983 aarch64_sw_breakpoint_from_kind (int kind
, int *size
)
2985 if (is_64bit_tdesc ())
2987 *size
= aarch64_breakpoint_len
;
2988 return aarch64_breakpoint
;
2991 return arm_sw_breakpoint_from_kind (kind
, size
);
2994 /* Implementation of linux_target_ops method "breakpoint_kind_from_pc". */
2997 aarch64_breakpoint_kind_from_pc (CORE_ADDR
*pcptr
)
2999 if (is_64bit_tdesc ())
3000 return aarch64_breakpoint_len
;
3002 return arm_breakpoint_kind_from_pc (pcptr
);
3005 /* Implementation of the linux_target_ops method
3006 "breakpoint_kind_from_current_state". */
3009 aarch64_breakpoint_kind_from_current_state (CORE_ADDR
*pcptr
)
3011 if (is_64bit_tdesc ())
3012 return aarch64_breakpoint_len
;
3014 return arm_breakpoint_kind_from_current_state (pcptr
);
/* Support for hardware single step.  */

static int
aarch64_supports_hardware_single_step (void)
{
  return 1;
}
3025 struct linux_target_ops the_low_target
=
3029 NULL
, /* cannot_fetch_register */
3030 NULL
, /* cannot_store_register */
3031 NULL
, /* fetch_register */
3034 aarch64_breakpoint_kind_from_pc
,
3035 aarch64_sw_breakpoint_from_kind
,
3036 NULL
, /* get_next_pcs */
3037 0, /* decr_pc_after_break */
3038 aarch64_breakpoint_at
,
3039 aarch64_supports_z_point_type
,
3040 aarch64_insert_point
,
3041 aarch64_remove_point
,
3042 aarch64_stopped_by_watchpoint
,
3043 aarch64_stopped_data_address
,
3044 NULL
, /* collect_ptrace_register */
3045 NULL
, /* supply_ptrace_register */
3046 aarch64_linux_siginfo_fixup
,
3047 aarch64_linux_new_process
,
3048 aarch64_linux_delete_process
,
3049 aarch64_linux_new_thread
,
3050 aarch64_linux_delete_thread
,
3051 aarch64_linux_new_fork
,
3052 aarch64_linux_prepare_to_resume
,
3053 NULL
, /* process_qsupported */
3054 aarch64_supports_tracepoints
,
3055 aarch64_get_thread_area
,
3056 aarch64_install_fast_tracepoint_jump_pad
,
3058 aarch64_get_min_fast_tracepoint_insn_len
,
3059 aarch64_supports_range_stepping
,
3060 aarch64_breakpoint_kind_from_current_state
,
3061 aarch64_supports_hardware_single_step
,
3062 aarch64_get_syscall_trapinfo
,
3066 initialize_low_arch (void)
3068 initialize_low_arch_aarch32 ();
3070 initialize_regsets_info (&aarch64_regsets_info
);
3071 initialize_regsets_info (&aarch64_sve_regsets_info
);
3074 initialize_low_tdesc ();