1 /* GNU/Linux/AArch64 specific low level interface, for the remote server for
4 Copyright (C) 2009-2015 Free Software Foundation, Inc.
5 Contributed by ARM Ltd.
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
23 #include "linux-low.h"
24 #include "nat/aarch64-linux.h"
25 #include "nat/aarch64-linux-hw-point.h"
26 #include "arch/aarch64-insn.h"
27 #include "linux-aarch32-low.h"
28 #include "elf/common.h"
30 #include "tracepoint.h"
34 #include "nat/gdb_ptrace.h"
35 #include <asm/ptrace.h>
40 #include "gdb_proc_service.h"
/* Defined in auto-generated files.  */
void init_registers_aarch64 (void);
extern const struct target_desc *tdesc_aarch64;
/* Register numbering of the AArch64 target description: X0..X30, SP,
   PC, CPSR, V0..V31, then FPSR and FPCR.  */
#define AARCH64_X_REGS_NUM 31
#define AARCH64_V_REGS_NUM 32
#define AARCH64_X0_REGNO    0
#define AARCH64_SP_REGNO   31
#define AARCH64_PC_REGNO   32
#define AARCH64_CPSR_REGNO 33
#define AARCH64_V0_REGNO   34
#define AARCH64_FPSR_REGNO (AARCH64_V0_REGNO + AARCH64_V_REGS_NUM)
#define AARCH64_FPCR_REGNO (AARCH64_V0_REGNO + AARCH64_V_REGS_NUM + 1)

/* Total number of registers in the description.  */
#define AARCH64_NUM_REGS (AARCH64_V0_REGNO + AARCH64_V_REGS_NUM + 2)
62 /* Per-process arch-specific data we want to keep. */
64 struct arch_process_info
66 /* Hardware breakpoint/watchpoint data.
67 The reason for them to be per-process rather than per-thread is
68 due to the lack of information in the gdbserver environment;
69 gdbserver is not told that whether a requested hardware
70 breakpoint/watchpoint is thread specific or not, so it has to set
71 each hw bp/wp for every thread in the current process. The
72 higher level bp/wp management in gdb will resume a thread if a hw
73 bp/wp trap is not expected for it. Since the hw bp/wp setting is
74 same for each thread, it is reasonable for the data to live here.
76 struct aarch64_debug_reg_state debug_reg_state
;
79 /* Return true if the size of register 0 is 8 byte. */
84 struct regcache
*regcache
= get_thread_regcache (current_thread
, 0);
86 return register_size (regcache
->tdesc
, 0) == 8;
89 /* Implementation of linux_target_ops method "cannot_store_register". */
92 aarch64_cannot_store_register (int regno
)
94 return regno
>= AARCH64_NUM_REGS
;
97 /* Implementation of linux_target_ops method "cannot_fetch_register". */
100 aarch64_cannot_fetch_register (int regno
)
102 return regno
>= AARCH64_NUM_REGS
;
106 aarch64_fill_gregset (struct regcache
*regcache
, void *buf
)
108 struct user_pt_regs
*regset
= buf
;
111 for (i
= 0; i
< AARCH64_X_REGS_NUM
; i
++)
112 collect_register (regcache
, AARCH64_X0_REGNO
+ i
, ®set
->regs
[i
]);
113 collect_register (regcache
, AARCH64_SP_REGNO
, ®set
->sp
);
114 collect_register (regcache
, AARCH64_PC_REGNO
, ®set
->pc
);
115 collect_register (regcache
, AARCH64_CPSR_REGNO
, ®set
->pstate
);
119 aarch64_store_gregset (struct regcache
*regcache
, const void *buf
)
121 const struct user_pt_regs
*regset
= buf
;
124 for (i
= 0; i
< AARCH64_X_REGS_NUM
; i
++)
125 supply_register (regcache
, AARCH64_X0_REGNO
+ i
, ®set
->regs
[i
]);
126 supply_register (regcache
, AARCH64_SP_REGNO
, ®set
->sp
);
127 supply_register (regcache
, AARCH64_PC_REGNO
, ®set
->pc
);
128 supply_register (regcache
, AARCH64_CPSR_REGNO
, ®set
->pstate
);
132 aarch64_fill_fpregset (struct regcache
*regcache
, void *buf
)
134 struct user_fpsimd_state
*regset
= buf
;
137 for (i
= 0; i
< AARCH64_V_REGS_NUM
; i
++)
138 collect_register (regcache
, AARCH64_V0_REGNO
+ i
, ®set
->vregs
[i
]);
139 collect_register (regcache
, AARCH64_FPSR_REGNO
, ®set
->fpsr
);
140 collect_register (regcache
, AARCH64_FPCR_REGNO
, ®set
->fpcr
);
144 aarch64_store_fpregset (struct regcache
*regcache
, const void *buf
)
146 const struct user_fpsimd_state
*regset
= buf
;
149 for (i
= 0; i
< AARCH64_V_REGS_NUM
; i
++)
150 supply_register (regcache
, AARCH64_V0_REGNO
+ i
, ®set
->vregs
[i
]);
151 supply_register (regcache
, AARCH64_FPSR_REGNO
, ®set
->fpsr
);
152 supply_register (regcache
, AARCH64_FPCR_REGNO
, ®set
->fpcr
);
/* Enable miscellaneous debugging output.  The name is historical - it
   was originally used to debug LinuxThreads support.  */
extern int debug_threads;
159 /* Implementation of linux_target_ops method "get_pc". */
162 aarch64_get_pc (struct regcache
*regcache
)
164 if (register_size (regcache
->tdesc
, 0) == 8)
168 collect_register_by_name (regcache
, "pc", &pc
);
170 debug_printf ("stop pc is %08lx\n", pc
);
177 collect_register_by_name (regcache
, "pc", &pc
);
179 debug_printf ("stop pc is %04x\n", pc
);
184 /* Implementation of linux_target_ops method "set_pc". */
187 aarch64_set_pc (struct regcache
*regcache
, CORE_ADDR pc
)
189 if (register_size (regcache
->tdesc
, 0) == 8)
191 unsigned long newpc
= pc
;
192 supply_register_by_name (regcache
, "pc", &newpc
);
196 unsigned int newpc
= pc
;
197 supply_register_by_name (regcache
, "pc", &newpc
);
201 #define aarch64_breakpoint_len 4
203 /* AArch64 BRK software debug mode instruction.
204 This instruction needs to match gdb/aarch64-tdep.c
205 (aarch64_default_breakpoint). */
206 static const gdb_byte aarch64_breakpoint
[] = {0x00, 0x00, 0x20, 0xd4};
208 /* Implementation of linux_target_ops method "breakpoint_at". */
211 aarch64_breakpoint_at (CORE_ADDR where
)
213 gdb_byte insn
[aarch64_breakpoint_len
];
215 (*the_target
->read_memory
) (where
, (unsigned char *) &insn
,
216 aarch64_breakpoint_len
);
217 if (memcmp (insn
, aarch64_breakpoint
, aarch64_breakpoint_len
) == 0)
224 aarch64_init_debug_reg_state (struct aarch64_debug_reg_state
*state
)
228 for (i
= 0; i
< AARCH64_HBP_MAX_NUM
; ++i
)
230 state
->dr_addr_bp
[i
] = 0;
231 state
->dr_ctrl_bp
[i
] = 0;
232 state
->dr_ref_count_bp
[i
] = 0;
235 for (i
= 0; i
< AARCH64_HWP_MAX_NUM
; ++i
)
237 state
->dr_addr_wp
[i
] = 0;
238 state
->dr_ctrl_wp
[i
] = 0;
239 state
->dr_ref_count_wp
[i
] = 0;
243 /* Return the pointer to the debug register state structure in the
244 current process' arch-specific data area. */
246 struct aarch64_debug_reg_state
*
247 aarch64_get_debug_reg_state (pid_t pid
)
249 struct process_info
*proc
= find_process_pid (pid
);
251 return &proc
->priv
->arch_private
->debug_reg_state
;
254 /* Implementation of linux_target_ops method "supports_z_point_type". */
257 aarch64_supports_z_point_type (char z_type
)
263 if (!extended_protocol
&& is_64bit_tdesc ())
265 /* Only enable Z0 packet in non-multi-arch debugging. If
266 extended protocol is used, don't enable Z0 packet because
267 GDBserver may attach to 32-bit process. */
272 /* Disable Z0 packet so that GDBserver doesn't have to handle
273 different breakpoint instructions (aarch64, arm, thumb etc)
274 in multi-arch debugging. */
279 case Z_PACKET_WRITE_WP
:
280 case Z_PACKET_READ_WP
:
281 case Z_PACKET_ACCESS_WP
:
288 /* Implementation of linux_target_ops method "insert_point".
290 It actually only records the info of the to-be-inserted bp/wp;
291 the actual insertion will happen when threads are resumed. */
294 aarch64_insert_point (enum raw_bkpt_type type
, CORE_ADDR addr
,
295 int len
, struct raw_breakpoint
*bp
)
298 enum target_hw_bp_type targ_type
;
299 struct aarch64_debug_reg_state
*state
300 = aarch64_get_debug_reg_state (pid_of (current_thread
));
303 fprintf (stderr
, "insert_point on entry (addr=0x%08lx, len=%d)\n",
304 (unsigned long) addr
, len
);
306 /* Determine the type from the raw breakpoint type. */
307 targ_type
= raw_bkpt_type_to_target_hw_bp_type (type
);
309 if (targ_type
!= hw_execute
)
311 if (aarch64_linux_region_ok_for_watchpoint (addr
, len
))
312 ret
= aarch64_handle_watchpoint (targ_type
, addr
, len
,
313 1 /* is_insert */, state
);
319 aarch64_handle_breakpoint (targ_type
, addr
, len
, 1 /* is_insert */,
323 aarch64_show_debug_reg_state (state
, "insert_point", addr
, len
,
329 /* Implementation of linux_target_ops method "remove_point".
331 It actually only records the info of the to-be-removed bp/wp,
332 the actual removal will be done when threads are resumed. */
335 aarch64_remove_point (enum raw_bkpt_type type
, CORE_ADDR addr
,
336 int len
, struct raw_breakpoint
*bp
)
339 enum target_hw_bp_type targ_type
;
340 struct aarch64_debug_reg_state
*state
341 = aarch64_get_debug_reg_state (pid_of (current_thread
));
344 fprintf (stderr
, "remove_point on entry (addr=0x%08lx, len=%d)\n",
345 (unsigned long) addr
, len
);
347 /* Determine the type from the raw breakpoint type. */
348 targ_type
= raw_bkpt_type_to_target_hw_bp_type (type
);
350 /* Set up state pointers. */
351 if (targ_type
!= hw_execute
)
353 aarch64_handle_watchpoint (targ_type
, addr
, len
, 0 /* is_insert */,
357 aarch64_handle_breakpoint (targ_type
, addr
, len
, 0 /* is_insert */,
361 aarch64_show_debug_reg_state (state
, "remove_point", addr
, len
,
367 /* Implementation of linux_target_ops method "stopped_data_address". */
370 aarch64_stopped_data_address (void)
374 struct aarch64_debug_reg_state
*state
;
376 pid
= lwpid_of (current_thread
);
378 /* Get the siginfo. */
379 if (ptrace (PTRACE_GETSIGINFO
, pid
, NULL
, &siginfo
) != 0)
380 return (CORE_ADDR
) 0;
382 /* Need to be a hardware breakpoint/watchpoint trap. */
383 if (siginfo
.si_signo
!= SIGTRAP
384 || (siginfo
.si_code
& 0xffff) != 0x0004 /* TRAP_HWBKPT */)
385 return (CORE_ADDR
) 0;
387 /* Check if the address matches any watched address. */
388 state
= aarch64_get_debug_reg_state (pid_of (current_thread
));
389 for (i
= aarch64_num_wp_regs
- 1; i
>= 0; --i
)
391 const unsigned int len
= aarch64_watchpoint_length (state
->dr_ctrl_wp
[i
]);
392 const CORE_ADDR addr_trap
= (CORE_ADDR
) siginfo
.si_addr
;
393 const CORE_ADDR addr_watch
= state
->dr_addr_wp
[i
];
394 if (state
->dr_ref_count_wp
[i
]
395 && DR_CONTROL_ENABLED (state
->dr_ctrl_wp
[i
])
396 && addr_trap
>= addr_watch
397 && addr_trap
< addr_watch
+ len
)
401 return (CORE_ADDR
) 0;
/* Implementation of linux_target_ops method "stopped_by_watchpoint".  */

static int
aarch64_stopped_by_watchpoint (void)
{
  if (aarch64_stopped_data_address () != 0)
    return 1;
  else
    return 0;
}
415 /* Fetch the thread-local storage pointer for libthread_db. */
418 ps_get_thread_area (const struct ps_prochandle
*ph
,
419 lwpid_t lwpid
, int idx
, void **base
)
421 return aarch64_ps_get_thread_area (ph
, lwpid
, idx
, base
,
425 /* Implementation of linux_target_ops method "siginfo_fixup". */
428 aarch64_linux_siginfo_fixup (siginfo_t
*native
, void *inf
, int direction
)
430 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
431 if (!is_64bit_tdesc ())
434 aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo
*) inf
,
437 aarch64_siginfo_from_compat_siginfo (native
,
438 (struct compat_siginfo
*) inf
);
446 /* Implementation of linux_target_ops method "linux_new_process". */
448 static struct arch_process_info
*
449 aarch64_linux_new_process (void)
451 struct arch_process_info
*info
= XCNEW (struct arch_process_info
);
453 aarch64_init_debug_reg_state (&info
->debug_reg_state
);
458 /* Implementation of linux_target_ops method "linux_new_fork". */
461 aarch64_linux_new_fork (struct process_info
*parent
,
462 struct process_info
*child
)
464 /* These are allocated by linux_add_process. */
465 gdb_assert (parent
->priv
!= NULL
466 && parent
->priv
->arch_private
!= NULL
);
467 gdb_assert (child
->priv
!= NULL
468 && child
->priv
->arch_private
!= NULL
);
470 /* Linux kernel before 2.6.33 commit
471 72f674d203cd230426437cdcf7dd6f681dad8b0d
472 will inherit hardware debug registers from parent
473 on fork/vfork/clone. Newer Linux kernels create such tasks with
474 zeroed debug registers.
476 GDB core assumes the child inherits the watchpoints/hw
477 breakpoints of the parent, and will remove them all from the
478 forked off process. Copy the debug registers mirrors into the
479 new process so that all breakpoints and watchpoints can be
480 removed together. The debug registers mirror will become zeroed
481 in the end before detaching the forked off process, thus making
482 this compatible with older Linux kernels too. */
484 *child
->priv
->arch_private
= *parent
->priv
->arch_private
;
487 /* Return the right target description according to the ELF file of
490 static const struct target_desc
*
491 aarch64_linux_read_description (void)
493 unsigned int machine
;
497 tid
= lwpid_of (current_thread
);
499 is_elf64
= linux_pid_exe_is_elf_64_file (tid
, &machine
);
502 return tdesc_aarch64
;
504 return tdesc_arm_with_neon
;
507 /* Implementation of linux_target_ops method "arch_setup". */
510 aarch64_arch_setup (void)
512 current_process ()->tdesc
= aarch64_linux_read_description ();
514 aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread
));
517 static struct regset_info aarch64_regsets
[] =
519 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_PRSTATUS
,
520 sizeof (struct user_pt_regs
), GENERAL_REGS
,
521 aarch64_fill_gregset
, aarch64_store_gregset
},
522 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_FPREGSET
,
523 sizeof (struct user_fpsimd_state
), FP_REGS
,
524 aarch64_fill_fpregset
, aarch64_store_fpregset
526 { 0, 0, 0, -1, -1, NULL
, NULL
}
529 static struct regsets_info aarch64_regsets_info
=
531 aarch64_regsets
, /* regsets */
533 NULL
, /* disabled_regsets */
536 static struct regs_info regs_info_aarch64
=
538 NULL
, /* regset_bitmap */
540 &aarch64_regsets_info
,
543 /* Implementation of linux_target_ops method "regs_info". */
545 static const struct regs_info
*
546 aarch64_regs_info (void)
548 if (is_64bit_tdesc ())
549 return ®s_info_aarch64
;
551 return ®s_info_aarch32
;
554 /* Implementation of linux_target_ops method "supports_tracepoints". */
557 aarch64_supports_tracepoints (void)
559 if (current_thread
== NULL
)
563 /* We don't support tracepoints on aarch32 now. */
564 return is_64bit_tdesc ();
568 /* Implementation of linux_target_ops method "get_thread_area". */
571 aarch64_get_thread_area (int lwpid
, CORE_ADDR
*addrp
)
576 iovec
.iov_base
= ®
;
577 iovec
.iov_len
= sizeof (reg
);
579 if (ptrace (PTRACE_GETREGSET
, lwpid
, NT_ARM_TLS
, &iovec
) != 0)
587 /* List of condition codes that we need. */
589 enum aarch64_condition_codes
600 /* Representation of an operand. At this time, it only supports register
601 and immediate types. */
603 struct aarch64_operand
605 /* Type of the operand. */
611 /* Value of the operand according to the type. */
615 struct aarch64_register reg
;
619 /* List of registers that we are currently using, we can add more here as
620 we need to use them. */
622 /* General purpose scratch registers (64 bit). */
623 static const struct aarch64_register x0
= { 0, 1 };
624 static const struct aarch64_register x1
= { 1, 1 };
625 static const struct aarch64_register x2
= { 2, 1 };
626 static const struct aarch64_register x3
= { 3, 1 };
627 static const struct aarch64_register x4
= { 4, 1 };
629 /* General purpose scratch registers (32 bit). */
630 static const struct aarch64_register w0
= { 0, 0 };
631 static const struct aarch64_register w2
= { 2, 0 };
633 /* Intra-procedure scratch registers. */
634 static const struct aarch64_register ip0
= { 16, 1 };
636 /* Special purpose registers. */
637 static const struct aarch64_register fp
= { 29, 1 };
638 static const struct aarch64_register lr
= { 30, 1 };
639 static const struct aarch64_register sp
= { 31, 1 };
640 static const struct aarch64_register xzr
= { 31, 1 };
642 /* Dynamically allocate a new register. If we know the register
643 statically, we should make it a global as above instead of using this
646 static struct aarch64_register
647 aarch64_register (unsigned num
, int is64
)
649 return (struct aarch64_register
) { num
, is64
};
652 /* Helper function to create a register operand, for instructions with
653 different types of operands.
656 p += emit_mov (p, x0, register_operand (x1)); */
658 static struct aarch64_operand
659 register_operand (struct aarch64_register reg
)
661 struct aarch64_operand operand
;
663 operand
.type
= OPERAND_REGISTER
;
669 /* Helper function to create an immediate operand, for instructions with
670 different types of operands.
673 p += emit_mov (p, x0, immediate_operand (12)); */
675 static struct aarch64_operand
676 immediate_operand (uint32_t imm
)
678 struct aarch64_operand operand
;
680 operand
.type
= OPERAND_IMMEDIATE
;
686 /* Helper function to create an offset memory operand.
689 p += emit_ldr (p, x0, sp, offset_memory_operand (16)); */
691 static struct aarch64_memory_operand
692 offset_memory_operand (int32_t offset
)
694 return (struct aarch64_memory_operand
) { MEMORY_OPERAND_OFFSET
, offset
};
697 /* Helper function to create a pre-index memory operand.
700 p += emit_ldr (p, x0, sp, preindex_memory_operand (16)); */
702 static struct aarch64_memory_operand
703 preindex_memory_operand (int32_t index
)
705 return (struct aarch64_memory_operand
) { MEMORY_OPERAND_PREINDEX
, index
};
708 /* Helper function to create a post-index memory operand.
711 p += emit_ldr (p, x0, sp, postindex_memory_operand (16)); */
713 static struct aarch64_memory_operand
714 postindex_memory_operand (int32_t index
)
716 return (struct aarch64_memory_operand
) { MEMORY_OPERAND_POSTINDEX
, index
};
/* System control registers.  These special registers can be written and
   read with the MRS and MSR instructions.

   - NZCV: Condition flags.  GDB refers to this register under the CPSR
     name.
   - FPSR: Floating-point status register.
   - FPCR: Floating-point control registers.
   - TPIDR_EL0: Software thread ID register.  */

enum aarch64_system_control_registers
{
  /*          op0           op1           crn          crm           op2  */
  NZCV =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
  FPSR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
  FPCR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
  TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
};
737 /* Write a BLR instruction into *BUF.
741 RN is the register to branch to. */
744 emit_blr (uint32_t *buf
, struct aarch64_register rn
)
746 return aarch64_emit_insn (buf
, BLR
| ENCODE (rn
.num
, 5, 5));
749 /* Write a RET instruction into *BUF.
753 RN is the register to branch to. */
756 emit_ret (uint32_t *buf
, struct aarch64_register rn
)
758 return aarch64_emit_insn (buf
, RET
| ENCODE (rn
.num
, 5, 5));
762 emit_load_store_pair (uint32_t *buf
, enum aarch64_opcodes opcode
,
763 struct aarch64_register rt
,
764 struct aarch64_register rt2
,
765 struct aarch64_register rn
,
766 struct aarch64_memory_operand operand
)
773 opc
= ENCODE (2, 2, 30);
775 opc
= ENCODE (0, 2, 30);
777 switch (operand
.type
)
779 case MEMORY_OPERAND_OFFSET
:
781 pre_index
= ENCODE (1, 1, 24);
782 write_back
= ENCODE (0, 1, 23);
785 case MEMORY_OPERAND_POSTINDEX
:
787 pre_index
= ENCODE (0, 1, 24);
788 write_back
= ENCODE (1, 1, 23);
791 case MEMORY_OPERAND_PREINDEX
:
793 pre_index
= ENCODE (1, 1, 24);
794 write_back
= ENCODE (1, 1, 23);
801 return aarch64_emit_insn (buf
, opcode
| opc
| pre_index
| write_back
802 | ENCODE (operand
.index
>> 3, 7, 15)
803 | ENCODE (rt2
.num
, 5, 10)
804 | ENCODE (rn
.num
, 5, 5) | ENCODE (rt
.num
, 5, 0));
807 /* Write a STP instruction into *BUF.
809 STP rt, rt2, [rn, #offset]
810 STP rt, rt2, [rn, #index]!
811 STP rt, rt2, [rn], #index
813 RT and RT2 are the registers to store.
814 RN is the base address register.
815 OFFSET is the immediate to add to the base address. It is limited to a
816 -512 .. 504 range (7 bits << 3). */
819 emit_stp (uint32_t *buf
, struct aarch64_register rt
,
820 struct aarch64_register rt2
, struct aarch64_register rn
,
821 struct aarch64_memory_operand operand
)
823 return emit_load_store_pair (buf
, STP
, rt
, rt2
, rn
, operand
);
826 /* Write a LDP instruction into *BUF.
828 LDP rt, rt2, [rn, #offset]
829 LDP rt, rt2, [rn, #index]!
830 LDP rt, rt2, [rn], #index
832 RT and RT2 are the registers to store.
833 RN is the base address register.
834 OFFSET is the immediate to add to the base address. It is limited to a
835 -512 .. 504 range (7 bits << 3). */
838 emit_ldp (uint32_t *buf
, struct aarch64_register rt
,
839 struct aarch64_register rt2
, struct aarch64_register rn
,
840 struct aarch64_memory_operand operand
)
842 return emit_load_store_pair (buf
, LDP
, rt
, rt2
, rn
, operand
);
845 /* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.
847 LDP qt, qt2, [rn, #offset]
849 RT and RT2 are the Q registers to store.
850 RN is the base address register.
851 OFFSET is the immediate to add to the base address. It is limited to
852 -1024 .. 1008 range (7 bits << 4). */
855 emit_ldp_q_offset (uint32_t *buf
, unsigned rt
, unsigned rt2
,
856 struct aarch64_register rn
, int32_t offset
)
858 uint32_t opc
= ENCODE (2, 2, 30);
859 uint32_t pre_index
= ENCODE (1, 1, 24);
861 return aarch64_emit_insn (buf
, LDP_SIMD_VFP
| opc
| pre_index
862 | ENCODE (offset
>> 4, 7, 15)
863 | ENCODE (rt2
, 5, 10)
864 | ENCODE (rn
.num
, 5, 5) | ENCODE (rt
, 5, 0));
867 /* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.
869 STP qt, qt2, [rn, #offset]
871 RT and RT2 are the Q registers to store.
872 RN is the base address register.
873 OFFSET is the immediate to add to the base address. It is limited to
874 -1024 .. 1008 range (7 bits << 4). */
877 emit_stp_q_offset (uint32_t *buf
, unsigned rt
, unsigned rt2
,
878 struct aarch64_register rn
, int32_t offset
)
880 uint32_t opc
= ENCODE (2, 2, 30);
881 uint32_t pre_index
= ENCODE (1, 1, 24);
883 return aarch64_emit_insn (buf
, STP_SIMD_VFP
| opc
| pre_index
884 | ENCODE (offset
>> 4, 7, 15)
885 | ENCODE (rt2
, 5, 10)
886 | ENCODE (rn
.num
, 5, 5) | ENCODE (rt
, 5, 0));
889 /* Write a LDRH instruction into *BUF.
891 LDRH wt, [xn, #offset]
892 LDRH wt, [xn, #index]!
893 LDRH wt, [xn], #index
895 RT is the register to store.
896 RN is the base address register.
897 OFFSET is the immediate to add to the base address. It is limited to
898 0 .. 32760 range (12 bits << 3). */
901 emit_ldrh (uint32_t *buf
, struct aarch64_register rt
,
902 struct aarch64_register rn
,
903 struct aarch64_memory_operand operand
)
905 return aarch64_emit_load_store (buf
, 1, LDR
, rt
, rn
, operand
);
908 /* Write a LDRB instruction into *BUF.
910 LDRB wt, [xn, #offset]
911 LDRB wt, [xn, #index]!
912 LDRB wt, [xn], #index
914 RT is the register to store.
915 RN is the base address register.
916 OFFSET is the immediate to add to the base address. It is limited to
917 0 .. 32760 range (12 bits << 3). */
920 emit_ldrb (uint32_t *buf
, struct aarch64_register rt
,
921 struct aarch64_register rn
,
922 struct aarch64_memory_operand operand
)
924 return aarch64_emit_load_store (buf
, 0, LDR
, rt
, rn
, operand
);
929 /* Write a STR instruction into *BUF.
931 STR rt, [rn, #offset]
932 STR rt, [rn, #index]!
935 RT is the register to store.
936 RN is the base address register.
937 OFFSET is the immediate to add to the base address. It is limited to
938 0 .. 32760 range (12 bits << 3). */
941 emit_str (uint32_t *buf
, struct aarch64_register rt
,
942 struct aarch64_register rn
,
943 struct aarch64_memory_operand operand
)
945 return aarch64_emit_load_store (buf
, rt
.is64
? 3 : 2, STR
, rt
, rn
, operand
);
948 /* Helper function emitting an exclusive load or store instruction. */
951 emit_load_store_exclusive (uint32_t *buf
, uint32_t size
,
952 enum aarch64_opcodes opcode
,
953 struct aarch64_register rs
,
954 struct aarch64_register rt
,
955 struct aarch64_register rt2
,
956 struct aarch64_register rn
)
958 return aarch64_emit_insn (buf
, opcode
| ENCODE (size
, 2, 30)
959 | ENCODE (rs
.num
, 5, 16) | ENCODE (rt2
.num
, 5, 10)
960 | ENCODE (rn
.num
, 5, 5) | ENCODE (rt
.num
, 5, 0));
963 /* Write a LAXR instruction into *BUF.
967 RT is the destination register.
968 RN is the base address register. */
971 emit_ldaxr (uint32_t *buf
, struct aarch64_register rt
,
972 struct aarch64_register rn
)
974 return emit_load_store_exclusive (buf
, rt
.is64
? 3 : 2, LDAXR
, xzr
, rt
,
978 /* Write a STXR instruction into *BUF.
982 RS is the result register, it indicates if the store succeeded or not.
983 RT is the destination register.
984 RN is the base address register. */
987 emit_stxr (uint32_t *buf
, struct aarch64_register rs
,
988 struct aarch64_register rt
, struct aarch64_register rn
)
990 return emit_load_store_exclusive (buf
, rt
.is64
? 3 : 2, STXR
, rs
, rt
,
994 /* Write a STLR instruction into *BUF.
998 RT is the register to store.
999 RN is the base address register. */
1002 emit_stlr (uint32_t *buf
, struct aarch64_register rt
,
1003 struct aarch64_register rn
)
1005 return emit_load_store_exclusive (buf
, rt
.is64
? 3 : 2, STLR
, xzr
, rt
,
1009 /* Helper function for data processing instructions with register sources. */
1012 emit_data_processing_reg (uint32_t *buf
, enum aarch64_opcodes opcode
,
1013 struct aarch64_register rd
,
1014 struct aarch64_register rn
,
1015 struct aarch64_register rm
)
1017 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1019 return aarch64_emit_insn (buf
, opcode
| size
| ENCODE (rm
.num
, 5, 16)
1020 | ENCODE (rn
.num
, 5, 5) | ENCODE (rd
.num
, 5, 0));
1023 /* Helper function for data processing instructions taking either a register
1027 emit_data_processing (uint32_t *buf
, enum aarch64_opcodes opcode
,
1028 struct aarch64_register rd
,
1029 struct aarch64_register rn
,
1030 struct aarch64_operand operand
)
1032 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1033 /* The opcode is different for register and immediate source operands. */
1034 uint32_t operand_opcode
;
1036 if (operand
.type
== OPERAND_IMMEDIATE
)
1038 /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
1039 operand_opcode
= ENCODE (8, 4, 25);
1041 return aarch64_emit_insn (buf
, opcode
| operand_opcode
| size
1042 | ENCODE (operand
.imm
, 12, 10)
1043 | ENCODE (rn
.num
, 5, 5)
1044 | ENCODE (rd
.num
, 5, 0));
1048 /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
1049 operand_opcode
= ENCODE (5, 4, 25);
1051 return emit_data_processing_reg (buf
, opcode
| operand_opcode
, rd
,
1056 /* Write an ADD instruction into *BUF.
1061 This function handles both an immediate and register add.
1063 RD is the destination register.
1064 RN is the input register.
1065 OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
1066 OPERAND_REGISTER. */
1069 emit_add (uint32_t *buf
, struct aarch64_register rd
,
1070 struct aarch64_register rn
, struct aarch64_operand operand
)
1072 return emit_data_processing (buf
, ADD
, rd
, rn
, operand
);
1075 /* Write a SUB instruction into *BUF.
1080 This function handles both an immediate and register sub.
1082 RD is the destination register.
1083 RN is the input register.
1084 IMM is the immediate to substract to RN. */
1087 emit_sub (uint32_t *buf
, struct aarch64_register rd
,
1088 struct aarch64_register rn
, struct aarch64_operand operand
)
1090 return emit_data_processing (buf
, SUB
, rd
, rn
, operand
);
1093 /* Write a MOV instruction into *BUF.
1098 This function handles both a wide immediate move and a register move,
1099 with the condition that the source register is not xzr. xzr and the
1100 stack pointer share the same encoding and this function only supports
1103 RD is the destination register.
1104 OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
1105 OPERAND_REGISTER. */
1108 emit_mov (uint32_t *buf
, struct aarch64_register rd
,
1109 struct aarch64_operand operand
)
1111 if (operand
.type
== OPERAND_IMMEDIATE
)
1113 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1114 /* Do not shift the immediate. */
1115 uint32_t shift
= ENCODE (0, 2, 21);
1117 return aarch64_emit_insn (buf
, MOV
| size
| shift
1118 | ENCODE (operand
.imm
, 16, 5)
1119 | ENCODE (rd
.num
, 5, 0));
1122 return emit_add (buf
, rd
, operand
.reg
, immediate_operand (0));
1125 /* Write a MOVK instruction into *BUF.
1127 MOVK rd, #imm, lsl #shift
1129 RD is the destination register.
1130 IMM is the immediate.
1131 SHIFT is the logical shift left to apply to IMM. */
1134 emit_movk (uint32_t *buf
, struct aarch64_register rd
, uint32_t imm
,
1137 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1139 return aarch64_emit_insn (buf
, MOVK
| size
| ENCODE (shift
, 2, 21) |
1140 ENCODE (imm
, 16, 5) | ENCODE (rd
.num
, 5, 0));
1143 /* Write instructions into *BUF in order to move ADDR into a register.
1144 ADDR can be a 64-bit value.
1146 This function will emit a series of MOV and MOVK instructions, such as:
1149 MOVK xd, #(addr >> 16), lsl #16
1150 MOVK xd, #(addr >> 32), lsl #32
1151 MOVK xd, #(addr >> 48), lsl #48 */
1154 emit_mov_addr (uint32_t *buf
, struct aarch64_register rd
, CORE_ADDR addr
)
1158 /* The MOV (wide immediate) instruction clears to top bits of the
1160 p
+= emit_mov (p
, rd
, immediate_operand (addr
& 0xffff));
1162 if ((addr
>> 16) != 0)
1163 p
+= emit_movk (p
, rd
, (addr
>> 16) & 0xffff, 1);
1167 if ((addr
>> 32) != 0)
1168 p
+= emit_movk (p
, rd
, (addr
>> 32) & 0xffff, 2);
1172 if ((addr
>> 48) != 0)
1173 p
+= emit_movk (p
, rd
, (addr
>> 48) & 0xffff, 3);
1178 /* Write a SUBS instruction into *BUF.
1182 This instruction update the condition flags.
1184 RD is the destination register.
1185 RN and RM are the source registers. */
1188 emit_subs (uint32_t *buf
, struct aarch64_register rd
,
1189 struct aarch64_register rn
, struct aarch64_operand operand
)
1191 return emit_data_processing (buf
, SUBS
, rd
, rn
, operand
);
1194 /* Write a CMP instruction into *BUF.
1198 This instruction is an alias of SUBS xzr, rn, rm.
1200 RN and RM are the registers to compare. */
1203 emit_cmp (uint32_t *buf
, struct aarch64_register rn
,
1204 struct aarch64_operand operand
)
1206 return emit_subs (buf
, xzr
, rn
, operand
);
1209 /* Write a AND instruction into *BUF.
1213 RD is the destination register.
1214 RN and RM are the source registers. */
1217 emit_and (uint32_t *buf
, struct aarch64_register rd
,
1218 struct aarch64_register rn
, struct aarch64_register rm
)
1220 return emit_data_processing_reg (buf
, AND
, rd
, rn
, rm
);
1223 /* Write a ORR instruction into *BUF.
1227 RD is the destination register.
1228 RN and RM are the source registers. */
1231 emit_orr (uint32_t *buf
, struct aarch64_register rd
,
1232 struct aarch64_register rn
, struct aarch64_register rm
)
1234 return emit_data_processing_reg (buf
, ORR
, rd
, rn
, rm
);
1237 /* Write a ORN instruction into *BUF.
1241 RD is the destination register.
1242 RN and RM are the source registers. */
1245 emit_orn (uint32_t *buf
, struct aarch64_register rd
,
1246 struct aarch64_register rn
, struct aarch64_register rm
)
1248 return emit_data_processing_reg (buf
, ORN
, rd
, rn
, rm
);
1251 /* Write a EOR instruction into *BUF.
1255 RD is the destination register.
1256 RN and RM are the source registers. */
1259 emit_eor (uint32_t *buf
, struct aarch64_register rd
,
1260 struct aarch64_register rn
, struct aarch64_register rm
)
1262 return emit_data_processing_reg (buf
, EOR
, rd
, rn
, rm
);
1265 /* Write a MVN instruction into *BUF.
1269 This is an alias for ORN rd, xzr, rm.
1271 RD is the destination register.
1272 RM is the source register. */
1275 emit_mvn (uint32_t *buf
, struct aarch64_register rd
,
1276 struct aarch64_register rm
)
1278 return emit_orn (buf
, rd
, xzr
, rm
);
1281 /* Write a LSLV instruction into *BUF.
1285 RD is the destination register.
1286 RN and RM are the source registers. */
1289 emit_lslv (uint32_t *buf
, struct aarch64_register rd
,
1290 struct aarch64_register rn
, struct aarch64_register rm
)
1292 return emit_data_processing_reg (buf
, LSLV
, rd
, rn
, rm
);
1295 /* Write a LSRV instruction into *BUF.
1299 RD is the destination register.
1300 RN and RM are the source registers. */
1303 emit_lsrv (uint32_t *buf
, struct aarch64_register rd
,
1304 struct aarch64_register rn
, struct aarch64_register rm
)
1306 return emit_data_processing_reg (buf
, LSRV
, rd
, rn
, rm
);
1309 /* Write a ASRV instruction into *BUF.
1313 RD is the destination register.
1314 RN and RM are the source registers. */
1317 emit_asrv (uint32_t *buf
, struct aarch64_register rd
,
1318 struct aarch64_register rn
, struct aarch64_register rm
)
1320 return emit_data_processing_reg (buf
, ASRV
, rd
, rn
, rm
);
1323 /* Write a MUL instruction into *BUF.
1327 RD is the destination register.
1328 RN and RM are the source registers. */
1331 emit_mul (uint32_t *buf
, struct aarch64_register rd
,
1332 struct aarch64_register rn
, struct aarch64_register rm
)
1334 return emit_data_processing_reg (buf
, MUL
, rd
, rn
, rm
);
1337 /* Write a MRS instruction into *BUF. The register size is 64-bit.
1341 RT is the destination register.
1342 SYSTEM_REG is special purpose register to read. */
1345 emit_mrs (uint32_t *buf
, struct aarch64_register rt
,
1346 enum aarch64_system_control_registers system_reg
)
1348 return aarch64_emit_insn (buf
, MRS
| ENCODE (system_reg
, 15, 5)
1349 | ENCODE (rt
.num
, 5, 0));
1352 /* Write a MSR instruction into *BUF. The register size is 64-bit.
1356 SYSTEM_REG is special purpose register to write.
1357 RT is the input register. */
1360 emit_msr (uint32_t *buf
, enum aarch64_system_control_registers system_reg
,
1361 struct aarch64_register rt
)
1363 return aarch64_emit_insn (buf
, MSR
| ENCODE (system_reg
, 15, 5)
1364 | ENCODE (rt
.num
, 5, 0));
1367 /* Write a SEVL instruction into *BUF.
1369 This is a hint instruction telling the hardware to trigger an event. */
1372 emit_sevl (uint32_t *buf
)
1374 return aarch64_emit_insn (buf
, SEVL
);
1377 /* Write a WFE instruction into *BUF.
1379 This is a hint instruction telling the hardware to wait for an event. */
1382 emit_wfe (uint32_t *buf
)
1384 return aarch64_emit_insn (buf
, WFE
);
1387 /* Write a SBFM instruction into *BUF.
1389 SBFM rd, rn, #immr, #imms
1391 This instruction moves the bits from #immr to #imms into the
1392 destination, sign extending the result.
1394 RD is the destination register.
1395 RN is the source register.
1396 IMMR is the bit number to start at (least significant bit).
1397 IMMS is the bit number to stop at (most significant bit). */
1400 emit_sbfm (uint32_t *buf
, struct aarch64_register rd
,
1401 struct aarch64_register rn
, uint32_t immr
, uint32_t imms
)
1403 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1404 uint32_t n
= ENCODE (rd
.is64
, 1, 22);
1406 return aarch64_emit_insn (buf
, SBFM
| size
| n
| ENCODE (immr
, 6, 16)
1407 | ENCODE (imms
, 6, 10) | ENCODE (rn
.num
, 5, 5)
1408 | ENCODE (rd
.num
, 5, 0));
1411 /* Write a SBFX instruction into *BUF.
1413 SBFX rd, rn, #lsb, #width
1415 This instruction moves #width bits from #lsb into the destination, sign
1416 extending the result. This is an alias for:
1418 SBFM rd, rn, #lsb, #(lsb + width - 1)
1420 RD is the destination register.
1421 RN is the source register.
1422 LSB is the bit number to start at (least significant bit).
1423 WIDTH is the number of bits to move. */
1426 emit_sbfx (uint32_t *buf
, struct aarch64_register rd
,
1427 struct aarch64_register rn
, uint32_t lsb
, uint32_t width
)
1429 return emit_sbfm (buf
, rd
, rn
, lsb
, lsb
+ width
- 1);
1432 /* Write a UBFM instruction into *BUF.
1434 UBFM rd, rn, #immr, #imms
1436 This instruction moves the bits from #immr to #imms into the
1437 destination, extending the result with zeros.
1439 RD is the destination register.
1440 RN is the source register.
1441 IMMR is the bit number to start at (least significant bit).
1442 IMMS is the bit number to stop at (most significant bit). */
1445 emit_ubfm (uint32_t *buf
, struct aarch64_register rd
,
1446 struct aarch64_register rn
, uint32_t immr
, uint32_t imms
)
1448 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1449 uint32_t n
= ENCODE (rd
.is64
, 1, 22);
1451 return aarch64_emit_insn (buf
, UBFM
| size
| n
| ENCODE (immr
, 6, 16)
1452 | ENCODE (imms
, 6, 10) | ENCODE (rn
.num
, 5, 5)
1453 | ENCODE (rd
.num
, 5, 0));
1456 /* Write a UBFX instruction into *BUF.
1458 UBFX rd, rn, #lsb, #width
1460 This instruction moves #width bits from #lsb into the destination,
1461 extending the result with zeros. This is an alias for:
1463 UBFM rd, rn, #lsb, #(lsb + width - 1)
1465 RD is the destination register.
1466 RN is the source register.
1467 LSB is the bit number to start at (least significant bit).
1468 WIDTH is the number of bits to move. */
1471 emit_ubfx (uint32_t *buf
, struct aarch64_register rd
,
1472 struct aarch64_register rn
, uint32_t lsb
, uint32_t width
)
1474 return emit_ubfm (buf
, rd
, rn
, lsb
, lsb
+ width
- 1);
1477 /* Write a CSINC instruction into *BUF.
1479 CSINC rd, rn, rm, cond
1481 This instruction conditionally increments rn or rm and places the result
1482 in rd. rn is chosen is the condition is true.
1484 RD is the destination register.
1485 RN and RM are the source registers.
1486 COND is the encoded condition. */
1489 emit_csinc (uint32_t *buf
, struct aarch64_register rd
,
1490 struct aarch64_register rn
, struct aarch64_register rm
,
1493 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1495 return aarch64_emit_insn (buf
, CSINC
| size
| ENCODE (rm
.num
, 5, 16)
1496 | ENCODE (cond
, 4, 12) | ENCODE (rn
.num
, 5, 5)
1497 | ENCODE (rd
.num
, 5, 0));
1500 /* Write a CSET instruction into *BUF.
1504 This instruction conditionally write 1 or 0 in the destination register.
1505 1 is written if the condition is true. This is an alias for:
1507 CSINC rd, xzr, xzr, !cond
1509 Note that the condition needs to be inverted.
1511 RD is the destination register.
1512 RN and RM are the source registers.
1513 COND is the encoded condition. */
1516 emit_cset (uint32_t *buf
, struct aarch64_register rd
, unsigned cond
)
1518 /* The least significant bit of the condition needs toggling in order to
1520 return emit_csinc (buf
, rd
, xzr
, xzr
, cond
^ 0x1);
1523 /* Write LEN instructions from BUF into the inferior memory at *TO.
1525 Note instructions are always little endian on AArch64, unlike data. */
1528 append_insns (CORE_ADDR
*to
, size_t len
, const uint32_t *buf
)
1530 size_t byte_len
= len
* sizeof (uint32_t);
1531 #if (__BYTE_ORDER == __BIG_ENDIAN)
1532 uint32_t *le_buf
= xmalloc (byte_len
);
1535 for (i
= 0; i
< len
; i
++)
1536 le_buf
[i
] = htole32 (buf
[i
]);
1538 write_inferior_memory (*to
, (const unsigned char *) le_buf
, byte_len
);
1542 write_inferior_memory (*to
, (const unsigned char *) buf
, byte_len
);
1548 /* Sub-class of struct aarch64_insn_data, store information of
1549 instruction relocation for fast tracepoint. Visitor can
1550 relocate an instruction from BASE.INSN_ADDR to NEW_ADDR and save
1551 the relocated instructions in buffer pointed by INSN_PTR. */
1553 struct aarch64_insn_relocation_data
1555 struct aarch64_insn_data base
;
1557 /* The new address the instruction is relocated to. */
1559 /* Pointer to the buffer of relocated instruction(s). */
1563 /* Implementation of aarch64_insn_visitor method "b". */
1566 aarch64_ftrace_insn_reloc_b (const int is_bl
, const int32_t offset
,
1567 struct aarch64_insn_data
*data
)
1569 struct aarch64_insn_relocation_data
*insn_reloc
1570 = (struct aarch64_insn_relocation_data
*) data
;
1572 = insn_reloc
->base
.insn_addr
- insn_reloc
->new_addr
+ offset
;
1574 if (can_encode_int32 (new_offset
, 28))
1575 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, is_bl
, new_offset
);
1578 /* Implementation of aarch64_insn_visitor method "b_cond". */
1581 aarch64_ftrace_insn_reloc_b_cond (const unsigned cond
, const int32_t offset
,
1582 struct aarch64_insn_data
*data
)
1584 struct aarch64_insn_relocation_data
*insn_reloc
1585 = (struct aarch64_insn_relocation_data
*) data
;
1587 = insn_reloc
->base
.insn_addr
- insn_reloc
->new_addr
+ offset
;
1589 if (can_encode_int32 (new_offset
, 21))
1591 insn_reloc
->insn_ptr
+= emit_bcond (insn_reloc
->insn_ptr
, cond
,
1594 else if (can_encode_int32 (new_offset
, 28))
1596 /* The offset is out of range for a conditional branch
1597 instruction but not for a unconditional branch. We can use
1598 the following instructions instead:
1600 B.COND TAKEN ; If cond is true, then jump to TAKEN.
1601 B NOT_TAKEN ; Else jump over TAKEN and continue.
1608 insn_reloc
->insn_ptr
+= emit_bcond (insn_reloc
->insn_ptr
, cond
, 8);
1609 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, 0, 8);
1610 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, 0, new_offset
- 8);
1614 /* Implementation of aarch64_insn_visitor method "cb". */
1617 aarch64_ftrace_insn_reloc_cb (const int32_t offset
, const int is_cbnz
,
1618 const unsigned rn
, int is64
,
1619 struct aarch64_insn_data
*data
)
1621 struct aarch64_insn_relocation_data
*insn_reloc
1622 = (struct aarch64_insn_relocation_data
*) data
;
1624 = insn_reloc
->base
.insn_addr
- insn_reloc
->new_addr
+ offset
;
1626 if (can_encode_int32 (new_offset
, 21))
1628 insn_reloc
->insn_ptr
+= emit_cb (insn_reloc
->insn_ptr
, is_cbnz
,
1629 aarch64_register (rn
, is64
), new_offset
);
1631 else if (can_encode_int32 (new_offset
, 28))
1633 /* The offset is out of range for a compare and branch
1634 instruction but not for a unconditional branch. We can use
1635 the following instructions instead:
1637 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
1638 B NOT_TAKEN ; Else jump over TAKEN and continue.
1644 insn_reloc
->insn_ptr
+= emit_cb (insn_reloc
->insn_ptr
, is_cbnz
,
1645 aarch64_register (rn
, is64
), 8);
1646 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, 0, 8);
1647 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, 0, new_offset
- 8);
1651 /* Implementation of aarch64_insn_visitor method "tb". */
1654 aarch64_ftrace_insn_reloc_tb (const int32_t offset
, int is_tbnz
,
1655 const unsigned rt
, unsigned bit
,
1656 struct aarch64_insn_data
*data
)
1658 struct aarch64_insn_relocation_data
*insn_reloc
1659 = (struct aarch64_insn_relocation_data
*) data
;
1661 = insn_reloc
->base
.insn_addr
- insn_reloc
->new_addr
+ offset
;
1663 if (can_encode_int32 (new_offset
, 16))
1665 insn_reloc
->insn_ptr
+= emit_tb (insn_reloc
->insn_ptr
, is_tbnz
, bit
,
1666 aarch64_register (rt
, 1), new_offset
);
1668 else if (can_encode_int32 (new_offset
, 28))
1670 /* The offset is out of range for a test bit and branch
1671 instruction but not for a unconditional branch. We can use
1672 the following instructions instead:
1674 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
1675 B NOT_TAKEN ; Else jump over TAKEN and continue.
1681 insn_reloc
->insn_ptr
+= emit_tb (insn_reloc
->insn_ptr
, is_tbnz
, bit
,
1682 aarch64_register (rt
, 1), 8);
1683 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, 0, 8);
1684 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, 0,
1689 /* Implementation of aarch64_insn_visitor method "adr". */
1692 aarch64_ftrace_insn_reloc_adr (const int32_t offset
, const unsigned rd
,
1694 struct aarch64_insn_data
*data
)
1696 struct aarch64_insn_relocation_data
*insn_reloc
1697 = (struct aarch64_insn_relocation_data
*) data
;
1698 /* We know exactly the address the ADR{P,} instruction will compute.
1699 We can just write it to the destination register. */
1700 CORE_ADDR address
= data
->insn_addr
+ offset
;
1704 /* Clear the lower 12 bits of the offset to get the 4K page. */
1705 insn_reloc
->insn_ptr
+= emit_mov_addr (insn_reloc
->insn_ptr
,
1706 aarch64_register (rd
, 1),
1710 insn_reloc
->insn_ptr
+= emit_mov_addr (insn_reloc
->insn_ptr
,
1711 aarch64_register (rd
, 1), address
);
1714 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
1717 aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset
, const int is_sw
,
1718 const unsigned rt
, const int is64
,
1719 struct aarch64_insn_data
*data
)
1721 struct aarch64_insn_relocation_data
*insn_reloc
1722 = (struct aarch64_insn_relocation_data
*) data
;
1723 CORE_ADDR address
= data
->insn_addr
+ offset
;
1725 insn_reloc
->insn_ptr
+= emit_mov_addr (insn_reloc
->insn_ptr
,
1726 aarch64_register (rt
, 1), address
);
1728 /* We know exactly what address to load from, and what register we
1731 MOV xd, #(oldloc + offset)
1732 MOVK xd, #((oldloc + offset) >> 16), lsl #16
1735 LDR xd, [xd] ; or LDRSW xd, [xd]
1740 insn_reloc
->insn_ptr
+= emit_ldrsw (insn_reloc
->insn_ptr
,
1741 aarch64_register (rt
, 1),
1742 aarch64_register (rt
, 1),
1743 offset_memory_operand (0));
1745 insn_reloc
->insn_ptr
+= emit_ldr (insn_reloc
->insn_ptr
,
1746 aarch64_register (rt
, is64
),
1747 aarch64_register (rt
, 1),
1748 offset_memory_operand (0));
1751 /* Implementation of aarch64_insn_visitor method "others". */
1754 aarch64_ftrace_insn_reloc_others (const uint32_t insn
,
1755 struct aarch64_insn_data
*data
)
1757 struct aarch64_insn_relocation_data
*insn_reloc
1758 = (struct aarch64_insn_relocation_data
*) data
;
1760 /* The instruction is not PC relative. Just re-emit it at the new
1762 insn_reloc
->insn_ptr
+= aarch64_emit_insn (insn_reloc
->insn_ptr
, insn
);
1765 static const struct aarch64_insn_visitor visitor
=
1767 aarch64_ftrace_insn_reloc_b
,
1768 aarch64_ftrace_insn_reloc_b_cond
,
1769 aarch64_ftrace_insn_reloc_cb
,
1770 aarch64_ftrace_insn_reloc_tb
,
1771 aarch64_ftrace_insn_reloc_adr
,
1772 aarch64_ftrace_insn_reloc_ldr_literal
,
1773 aarch64_ftrace_insn_reloc_others
,
1776 /* Implementation of linux_target_ops method
1777 "install_fast_tracepoint_jump_pad". */
/* Build the jump pad for a fast tracepoint at TPADDR: save the full
   register state on the stack, take the collector spin lock, call
   COLLECTOR, release the lock, restore state, run the relocated
   original instruction, and branch back past the tracepoint.
   NOTE(review): this extract is missing lines relative to the complete
   file (return type, local declarations, braces, sprintf error paths);
   the code below is kept byte-identical to the extract.  */
1780 aarch64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
,
1782 CORE_ADDR collector
,
1785 CORE_ADDR
*jump_entry
,
1786 CORE_ADDR
*trampoline
,
1787 ULONGEST
*trampoline_size
,
1788 unsigned char *jjump_pad_insn
,
1789 ULONGEST
*jjump_pad_insn_size
,
1790 CORE_ADDR
*adjusted_insn_addr
,
1791 CORE_ADDR
*adjusted_insn_addr_end
,
1799 CORE_ADDR buildaddr
= *jump_entry
;
1800 struct aarch64_insn_relocation_data insn_data
;
1802 /* We need to save the current state on the stack both to restore it
1803 later and to collect register values when the tracepoint is hit.
1805 The saved registers are pushed in a layout that needs to be in sync
1806 with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c). Later on
1807 the supply_fast_tracepoint_registers function will fill in the
1808 register cache from a pointer to saved registers on the stack we build
1811 For simplicity, we set the size of each cell on the stack to 16 bytes.
1812 This way one cell can hold any register type, from system registers
1813 to the 128 bit SIMD&FP registers. Furthermore, the stack pointer
1814 has to be 16 bytes aligned anyway.
1816 Note that the CPSR register does not exist on AArch64. Instead we
1817 can access system bits describing the process state with the
1818 MRS/MSR instructions, namely the condition flags. We save them as
1819 if they are part of a CPSR register because that's how GDB
1820 interprets these system bits. At the moment, only the condition
1821 flags are saved in CPSR (NZCV).
1823 Stack layout, each cell is 16 bytes (descending):
1825 High *-------- SIMD&FP registers from 31 down to 0. --------*
1831 *---- General purpose registers from 30 down to 0. ----*
1837 *------------- Special purpose registers. -------------*
1840 | CPSR (NZCV) | 5 cells
1843 *------------- collecting_t object --------------------*
1844 | TPIDR_EL0 | struct tracepoint * |
1845 Low *------------------------------------------------------*
1847 After this stack is set up, we issue a call to the collector, passing
1848 it the saved registers at (SP + 16). */
1850 /* Push SIMD&FP registers on the stack:
1852 SUB sp, sp, #(32 * 16)
1854 STP q30, q31, [sp, #(30 * 16)]
1859 p
+= emit_sub (p
, sp
, sp
, immediate_operand (32 * 16));
1860 for (i
= 30; i
>= 0; i
-= 2)
1861 p
+= emit_stp_q_offset (p
, i
, i
+ 1, sp
, i
* 16);
1863 /* Push general puspose registers on the stack. Note that we do not need
1864 to push x31 as it represents the xzr register and not the stack
1865 pointer in a STR instruction.
1867 SUB sp, sp, #(31 * 16)
1869 STR x30, [sp, #(30 * 16)]
1874 p
+= emit_sub (p
, sp
, sp
, immediate_operand (31 * 16));
1875 for (i
= 30; i
>= 0; i
-= 1)
1876 p
+= emit_str (p
, aarch64_register (i
, 1), sp
,
1877 offset_memory_operand (i
* 16));
1879 /* Make space for 5 more cells.
1881 SUB sp, sp, #(5 * 16)
1884 p
+= emit_sub (p
, sp
, sp
, immediate_operand (5 * 16));
/* Save SP (as it was before the register pushes) into cell 4.  */
1889 ADD x4, sp, #((32 + 31 + 5) * 16)
1890 STR x4, [sp, #(4 * 16)]
1893 p
+= emit_add (p
, x4
, sp
, immediate_operand ((32 + 31 + 5) * 16));
1894 p
+= emit_str (p
, x4
, sp
, offset_memory_operand (4 * 16));
1896 /* Save PC (tracepoint address):
1901 STR x3, [sp, #(3 * 16)]
1905 p
+= emit_mov_addr (p
, x3
, tpaddr
);
1906 p
+= emit_str (p
, x3
, sp
, offset_memory_operand (3 * 16));
1908 /* Save CPSR (NZCV), FPSR and FPCR:
1914 STR x2, [sp, #(2 * 16)]
1915 STR x1, [sp, #(1 * 16)]
1916 STR x0, [sp, #(0 * 16)]
1919 p
+= emit_mrs (p
, x2
, NZCV
);
1920 p
+= emit_mrs (p
, x1
, FPSR
);
1921 p
+= emit_mrs (p
, x0
, FPCR
);
1922 p
+= emit_str (p
, x2
, sp
, offset_memory_operand (2 * 16));
1923 p
+= emit_str (p
, x1
, sp
, offset_memory_operand (1 * 16));
1924 p
+= emit_str (p
, x0
, sp
, offset_memory_operand (0 * 16));
1926 /* Push the collecting_t object. It consist of the address of the
1927 tracepoint and an ID for the current thread. We get the latter by
1928 reading the tpidr_el0 system register. It corresponds to the
1929 NT_ARM_TLS register accessible with ptrace.
1936 STP x0, x1, [sp, #-16]!
1940 p
+= emit_mov_addr (p
, x0
, tpoint
);
1941 p
+= emit_mrs (p
, x1
, TPIDR_EL0
);
1942 p
+= emit_stp (p
, x0
, x1
, sp
, preindex_memory_operand (-16));
/* Spin-lock acquisition: SEVL / WFE / LDAXR / CBNZ / STXR / CBNZ.  */
1946 The shared memory for the lock is at lockaddr. It will hold zero
1947 if no-one is holding the lock, otherwise it contains the address of
1948 the collecting_t object on the stack of the thread which acquired it.
1950 At this stage, the stack pointer points to this thread's collecting_t
1953 We use the following registers:
1954 - x0: Address of the lock.
1955 - x1: Pointer to collecting_t object.
1956 - x2: Scratch register.
1962 ; Trigger an event local to this core. So the following WFE
1963 ; instruction is ignored.
1966 ; Wait for an event. The event is triggered by either the SEVL
1967 ; or STLR instructions (store release).
1970 ; Atomically read at lockaddr. This marks the memory location as
1971 ; exclusive. This instruction also has memory constraints which
1972 ; make sure all previous data reads and writes are done before
1976 ; Try again if another thread holds the lock.
1979 ; We can lock it! Write the address of the collecting_t object.
1980 ; This instruction will fail if the memory location is not marked
1981 ; as exclusive anymore. If it succeeds, it will remove the
1982 ; exclusive mark on the memory location. This way, if another
1983 ; thread executes this instruction before us, we will fail and try
1990 p
+= emit_mov_addr (p
, x0
, lockaddr
);
1991 p
+= emit_mov (p
, x1
, register_operand (sp
));
1995 p
+= emit_ldaxr (p
, x2
, x0
);
1996 p
+= emit_cb (p
, 1, w2
, -2 * 4);
1997 p
+= emit_stxr (p
, w2
, x1
, x0
);
1998 p
+= emit_cb (p
, 1, x2
, -4 * 4);
2000 /* Call collector (struct tracepoint *, unsigned char *):
2005 ; Saved registers start after the collecting_t object.
2008 ; We use an intra-procedure-call scratch register.
2009 MOV ip0, #(collector)
2012 ; And call back to C!
2017 p
+= emit_mov_addr (p
, x0
, tpoint
);
2018 p
+= emit_add (p
, x1
, sp
, immediate_operand (16));
2020 p
+= emit_mov_addr (p
, ip0
, collector
);
2021 p
+= emit_blr (p
, ip0
);
2023 /* Release the lock.
2028 ; This instruction is a normal store with memory ordering
2029 ; constraints. Thanks to this we do not have to put a data
2030 ; barrier instruction to make sure all data read and writes are done
2031 ; before this instruction is executed. Furthermore, this instrucion
2032 ; will trigger an event, letting other threads know they can grab
2037 p
+= emit_mov_addr (p
, x0
, lockaddr
);
2038 p
+= emit_stlr (p
, xzr
, x0
);
2040 /* Free collecting_t object:
2045 p
+= emit_add (p
, sp
, sp
, immediate_operand (16));
2047 /* Restore CPSR (NZCV), FPSR and FPCR. And free all special purpose
2048 registers from the stack.
2050 LDR x2, [sp, #(2 * 16)]
2051 LDR x1, [sp, #(1 * 16)]
2052 LDR x0, [sp, #(0 * 16)]
2058 ADD sp, sp #(5 * 16)
2061 p
+= emit_ldr (p
, x2
, sp
, offset_memory_operand (2 * 16));
2062 p
+= emit_ldr (p
, x1
, sp
, offset_memory_operand (1 * 16));
2063 p
+= emit_ldr (p
, x0
, sp
, offset_memory_operand (0 * 16));
2064 p
+= emit_msr (p
, NZCV
, x2
);
2065 p
+= emit_msr (p
, FPSR
, x1
);
2066 p
+= emit_msr (p
, FPCR
, x0
);
2068 p
+= emit_add (p
, sp
, sp
, immediate_operand (5 * 16));
2070 /* Pop general purpose registers:
2074 LDR x30, [sp, #(30 * 16)]
2076 ADD sp, sp, #(31 * 16)
2079 for (i
= 0; i
<= 30; i
+= 1)
2080 p
+= emit_ldr (p
, aarch64_register (i
, 1), sp
,
2081 offset_memory_operand (i
* 16));
2082 p
+= emit_add (p
, sp
, sp
, immediate_operand (31 * 16));
2084 /* Pop SIMD&FP registers:
2088 LDP q30, q31, [sp, #(30 * 16)]
2090 ADD sp, sp, #(32 * 16)
2093 for (i
= 0; i
<= 30; i
+= 2)
2094 p
+= emit_ldp_q_offset (p
, i
, i
+ 1, sp
, i
* 16);
2095 p
+= emit_add (p
, sp
, sp
, immediate_operand (32 * 16));
2097 /* Write the code into the inferior memory. */
2098 append_insns (&buildaddr
, p
- buf
, buf
);
2100 /* Now emit the relocated instruction. */
2101 *adjusted_insn_addr
= buildaddr
;
2102 target_read_uint32 (tpaddr
, &insn
);
2104 insn_data
.base
.insn_addr
= tpaddr
;
2105 insn_data
.new_addr
= buildaddr
;
2106 insn_data
.insn_ptr
= buf
;
2108 aarch64_relocate_instruction (insn
, &visitor
,
2109 (struct aarch64_insn_data
*) &insn_data
);
2111 /* We may not have been able to relocate the instruction. */
2112 if (insn_data
.insn_ptr
== buf
)
/* NOTE(review): the sprintf/return-error lines around this message are
   missing from this extract — confirm against the complete file.  */
2115 "E.Could not relocate instruction from %s to %s.",
2116 core_addr_to_string_nz (tpaddr
),
2117 core_addr_to_string_nz (buildaddr
));
2121 append_insns (&buildaddr
, insn_data
.insn_ptr
- buf
, buf
);
2122 *adjusted_insn_addr_end
= buildaddr
;
2124 /* Go back to the start of the buffer. */
2127 /* Emit a branch back from the jump pad. */
2128 offset
= (tpaddr
+ orig_size
- buildaddr
);
2129 if (!can_encode_int32 (offset
, 28))
2132 "E.Jump back from jump pad too far from tracepoint "
2133 "(offset 0x%" PRIx32
" cannot be encoded in 28 bits).",
2138 p
+= emit_b (p
, 0, offset
);
2139 append_insns (&buildaddr
, p
- buf
, buf
);
2141 /* Give the caller a branch instruction into the jump pad. */
2142 offset
= (*jump_entry
- tpaddr
);
2143 if (!can_encode_int32 (offset
, 28))
2146 "E.Jump pad too far from tracepoint "
2147 "(offset 0x%" PRIx32
" cannot be encoded in 28 bits).",
2152 emit_b ((uint32_t *) jjump_pad_insn
, 0, offset
);
2153 *jjump_pad_insn_size
= 4;
2155 /* Return the end address of our pad. */
2156 *jump_entry
= buildaddr
;
2161 /* Helper function writing LEN instructions from START into
2162 current_insn_ptr. */
2165 emit_ops_insns (const uint32_t *start
, int len
)
2167 CORE_ADDR buildaddr
= current_insn_ptr
;
2170 debug_printf ("Adding %d instrucions at %s\n",
2171 len
, paddress (buildaddr
));
2173 append_insns (&buildaddr
, len
, start
);
2174 current_insn_ptr
= buildaddr
;
2177 /* Pop a register from the stack. */
2180 emit_pop (uint32_t *buf
, struct aarch64_register rt
)
2182 return emit_ldr (buf
, rt
, sp
, postindex_memory_operand (1 * 16));
2185 /* Push a register on the stack. */
2188 emit_push (uint32_t *buf
, struct aarch64_register rt
)
2190 return emit_str (buf
, rt
, sp
, preindex_memory_operand (-1 * 16));
2193 /* Implementation of emit_ops method "emit_prologue". */
2196 aarch64_emit_prologue (void)
2201 /* This function emit a prologue for the following function prototype:
2203 enum eval_result_type f (unsigned char *regs,
2206 The first argument is a buffer of raw registers. The second
2207 argument is the result of
2208 evaluating the expression, which will be set to whatever is on top of
2209 the stack at the end.
2211 The stack set up by the prologue is as such:
2213 High *------------------------------------------------------*
2216 | x1 (ULONGEST *value) |
2217 | x0 (unsigned char *regs) |
2218 Low *------------------------------------------------------*
2220 As we are implementing a stack machine, each opcode can expand the
2221 stack so we never know how far we are from the data saved by this
2222 prologue. In order to be able refer to value and regs later, we save
2223 the current stack pointer in the frame pointer. This way, it is not
2224 clobbered when calling C functions.
2226 Finally, throughtout every operation, we are using register x0 as the
2227 top of the stack, and x1 as a scratch register. */
2229 p
+= emit_stp (p
, x0
, x1
, sp
, preindex_memory_operand (-2 * 16));
2230 p
+= emit_str (p
, lr
, sp
, offset_memory_operand (3 * 8));
2231 p
+= emit_str (p
, fp
, sp
, offset_memory_operand (2 * 8));
2233 p
+= emit_add (p
, fp
, sp
, immediate_operand (2 * 8));
2236 emit_ops_insns (buf
, p
- buf
);
2239 /* Implementation of emit_ops method "emit_epilogue". */
2242 aarch64_emit_epilogue (void)
2247 /* Store the result of the expression (x0) in *value. */
2248 p
+= emit_sub (p
, x1
, fp
, immediate_operand (1 * 8));
2249 p
+= emit_ldr (p
, x1
, x1
, offset_memory_operand (0));
2250 p
+= emit_str (p
, x0
, x1
, offset_memory_operand (0));
2252 /* Restore the previous state. */
2253 p
+= emit_add (p
, sp
, fp
, immediate_operand (2 * 8));
2254 p
+= emit_ldp (p
, fp
, lr
, fp
, offset_memory_operand (0));
2256 /* Return expr_eval_no_error. */
2257 p
+= emit_mov (p
, x0
, immediate_operand (expr_eval_no_error
));
2258 p
+= emit_ret (p
, lr
);
2260 emit_ops_insns (buf
, p
- buf
);
2263 /* Implementation of emit_ops method "emit_add". */
2266 aarch64_emit_add (void)
2271 p
+= emit_pop (p
, x1
);
2272 p
+= emit_add (p
, x0
, x0
, register_operand (x1
));
2274 emit_ops_insns (buf
, p
- buf
);
2277 /* Implementation of emit_ops method "emit_sub". */
2280 aarch64_emit_sub (void)
2285 p
+= emit_pop (p
, x1
);
2286 p
+= emit_sub (p
, x0
, x0
, register_operand (x1
));
2288 emit_ops_insns (buf
, p
- buf
);
2291 /* Implementation of emit_ops method "emit_mul". */
2294 aarch64_emit_mul (void)
2299 p
+= emit_pop (p
, x1
);
2300 p
+= emit_mul (p
, x0
, x1
, x0
);
2302 emit_ops_insns (buf
, p
- buf
);
2305 /* Implementation of emit_ops method "emit_lsh". */
2308 aarch64_emit_lsh (void)
2313 p
+= emit_pop (p
, x1
);
2314 p
+= emit_lslv (p
, x0
, x1
, x0
);
2316 emit_ops_insns (buf
, p
- buf
);
2319 /* Implementation of emit_ops method "emit_rsh_signed". */
2322 aarch64_emit_rsh_signed (void)
2327 p
+= emit_pop (p
, x1
);
2328 p
+= emit_asrv (p
, x0
, x1
, x0
);
2330 emit_ops_insns (buf
, p
- buf
);
2333 /* Implementation of emit_ops method "emit_rsh_unsigned". */
2336 aarch64_emit_rsh_unsigned (void)
2341 p
+= emit_pop (p
, x1
);
2342 p
+= emit_lsrv (p
, x0
, x1
, x0
);
2344 emit_ops_insns (buf
, p
- buf
);
2347 /* Implementation of emit_ops method "emit_ext". */
2350 aarch64_emit_ext (int arg
)
2355 p
+= emit_sbfx (p
, x0
, x0
, 0, arg
);
2357 emit_ops_insns (buf
, p
- buf
);
2360 /* Implementation of emit_ops method "emit_log_not". */
2363 aarch64_emit_log_not (void)
2368 /* If the top of the stack is 0, replace it with 1. Else replace it with
2371 p
+= emit_cmp (p
, x0
, immediate_operand (0));
2372 p
+= emit_cset (p
, x0
, EQ
);
2374 emit_ops_insns (buf
, p
- buf
);
2377 /* Implementation of emit_ops method "emit_bit_and". */
2380 aarch64_emit_bit_and (void)
2385 p
+= emit_pop (p
, x1
);
2386 p
+= emit_and (p
, x0
, x0
, x1
);
2388 emit_ops_insns (buf
, p
- buf
);
2391 /* Implementation of emit_ops method "emit_bit_or". */
2394 aarch64_emit_bit_or (void)
2399 p
+= emit_pop (p
, x1
);
2400 p
+= emit_orr (p
, x0
, x0
, x1
);
2402 emit_ops_insns (buf
, p
- buf
);
2405 /* Implementation of emit_ops method "emit_bit_xor". */
2408 aarch64_emit_bit_xor (void)
2413 p
+= emit_pop (p
, x1
);
2414 p
+= emit_eor (p
, x0
, x0
, x1
);
2416 emit_ops_insns (buf
, p
- buf
);
2419 /* Implementation of emit_ops method "emit_bit_not". */
2422 aarch64_emit_bit_not (void)
2427 p
+= emit_mvn (p
, x0
, x0
);
2429 emit_ops_insns (buf
, p
- buf
);
2432 /* Implementation of emit_ops method "emit_equal". */
2435 aarch64_emit_equal (void)
2440 p
+= emit_pop (p
, x1
);
2441 p
+= emit_cmp (p
, x0
, register_operand (x1
));
2442 p
+= emit_cset (p
, x0
, EQ
);
2444 emit_ops_insns (buf
, p
- buf
);
2447 /* Implementation of emit_ops method "emit_less_signed". */
2450 aarch64_emit_less_signed (void)
2455 p
+= emit_pop (p
, x1
);
2456 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2457 p
+= emit_cset (p
, x0
, LT
);
2459 emit_ops_insns (buf
, p
- buf
);
2462 /* Implementation of emit_ops method "emit_less_unsigned". */
2465 aarch64_emit_less_unsigned (void)
2470 p
+= emit_pop (p
, x1
);
2471 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2472 p
+= emit_cset (p
, x0
, LO
);
2474 emit_ops_insns (buf
, p
- buf
);
2477 /* Implementation of emit_ops method "emit_ref". */
2480 aarch64_emit_ref (int size
)
2488 p
+= emit_ldrb (p
, w0
, x0
, offset_memory_operand (0));
2491 p
+= emit_ldrh (p
, w0
, x0
, offset_memory_operand (0));
2494 p
+= emit_ldr (p
, w0
, x0
, offset_memory_operand (0));
2497 p
+= emit_ldr (p
, x0
, x0
, offset_memory_operand (0));
2500 /* Unknown size, bail on compilation. */
2505 emit_ops_insns (buf
, p
- buf
);
2508 /* Implementation of emit_ops method "emit_if_goto". */
2511 aarch64_emit_if_goto (int *offset_p
, int *size_p
)
2516 /* The Z flag is set or cleared here. */
2517 p
+= emit_cmp (p
, x0
, immediate_operand (0));
2518 /* This instruction must not change the Z flag. */
2519 p
+= emit_pop (p
, x0
);
2520 /* Branch over the next instruction if x0 == 0. */
2521 p
+= emit_bcond (p
, EQ
, 8);
2523 /* The NOP instruction will be patched with an unconditional branch. */
2525 *offset_p
= (p
- buf
) * 4;
2530 emit_ops_insns (buf
, p
- buf
);
2533 /* Implementation of emit_ops method "emit_goto". */
2536 aarch64_emit_goto (int *offset_p
, int *size_p
)
2541 /* The NOP instruction will be patched with an unconditional branch. */
2548 emit_ops_insns (buf
, p
- buf
);
2551 /* Implementation of emit_ops method "write_goto_address". */
2554 aarch64_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
2558 emit_b (&insn
, 0, to
- from
);
2559 append_insns (&from
, 1, &insn
);
2562 /* Implementation of emit_ops method "emit_const". */
2565 aarch64_emit_const (LONGEST num
)
2570 p
+= emit_mov_addr (p
, x0
, num
);
2572 emit_ops_insns (buf
, p
- buf
);
2575 /* Implementation of emit_ops method "emit_call". */
2578 aarch64_emit_call (CORE_ADDR fn
)
2583 p
+= emit_mov_addr (p
, ip0
, fn
);
2584 p
+= emit_blr (p
, ip0
);
2586 emit_ops_insns (buf
, p
- buf
);
2589 /* Implementation of emit_ops method "emit_reg". */
2592 aarch64_emit_reg (int reg
)
2597 /* Set x0 to unsigned char *regs. */
2598 p
+= emit_sub (p
, x0
, fp
, immediate_operand (2 * 8));
2599 p
+= emit_ldr (p
, x0
, x0
, offset_memory_operand (0));
2600 p
+= emit_mov (p
, x1
, immediate_operand (reg
));
2602 emit_ops_insns (buf
, p
- buf
);
2604 aarch64_emit_call (get_raw_reg_func_addr ());
2607 /* Implementation of emit_ops method "emit_pop". */
2610 aarch64_emit_pop (void)
2615 p
+= emit_pop (p
, x0
);
2617 emit_ops_insns (buf
, p
- buf
);
2620 /* Implementation of emit_ops method "emit_stack_flush". */
2623 aarch64_emit_stack_flush (void)
2628 p
+= emit_push (p
, x0
);
2630 emit_ops_insns (buf
, p
- buf
);
2633 /* Implementation of emit_ops method "emit_zero_ext". */
2636 aarch64_emit_zero_ext (int arg
)
2641 p
+= emit_ubfx (p
, x0
, x0
, 0, arg
);
2643 emit_ops_insns (buf
, p
- buf
);
2646 /* Implementation of emit_ops method "emit_swap". */
2649 aarch64_emit_swap (void)
2654 p
+= emit_ldr (p
, x1
, sp
, offset_memory_operand (0 * 16));
2655 p
+= emit_str (p
, x0
, sp
, offset_memory_operand (0 * 16));
2656 p
+= emit_mov (p
, x0
, register_operand (x1
));
2658 emit_ops_insns (buf
, p
- buf
);
2661 /* Implementation of emit_ops method "emit_stack_adjust". */
2664 aarch64_emit_stack_adjust (int n
)
2666 /* This is not needed with our design. */
2670 p
+= emit_add (p
, sp
, sp
, immediate_operand (n
* 16));
2672 emit_ops_insns (buf
, p
- buf
);
2675 /* Implementation of emit_ops method "emit_int_call_1". */
2678 aarch64_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
2683 p
+= emit_mov (p
, x0
, immediate_operand (arg1
));
2685 emit_ops_insns (buf
, p
- buf
);
2687 aarch64_emit_call (fn
);
2690 /* Implementation of emit_ops method "emit_void_call_2". */
2693 aarch64_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
2698 /* Push x0 on the stack. */
2699 aarch64_emit_stack_flush ();
2701 /* Setup arguments for the function call:
2704 x1: top of the stack
2709 p
+= emit_mov (p
, x1
, register_operand (x0
));
2710 p
+= emit_mov (p
, x0
, immediate_operand (arg1
));
2712 emit_ops_insns (buf
, p
- buf
);
2714 aarch64_emit_call (fn
);
2717 aarch64_emit_pop ();
2720 /* Implementation of emit_ops method "emit_eq_goto". */
2723 aarch64_emit_eq_goto (int *offset_p
, int *size_p
)
2728 p
+= emit_pop (p
, x1
);
2729 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2730 /* Branch over the next instruction if x0 != x1. */
2731 p
+= emit_bcond (p
, NE
, 8);
2732 /* The NOP instruction will be patched with an unconditional branch. */
2734 *offset_p
= (p
- buf
) * 4;
2739 emit_ops_insns (buf
, p
- buf
);
2742 /* Implementation of emit_ops method "emit_ne_goto". */
2745 aarch64_emit_ne_goto (int *offset_p
, int *size_p
)
2750 p
+= emit_pop (p
, x1
);
2751 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2752 /* Branch over the next instruction if x0 == x1. */
2753 p
+= emit_bcond (p
, EQ
, 8);
2754 /* The NOP instruction will be patched with an unconditional branch. */
2756 *offset_p
= (p
- buf
) * 4;
2761 emit_ops_insns (buf
, p
- buf
);
2764 /* Implementation of emit_ops method "emit_lt_goto". */
2767 aarch64_emit_lt_goto (int *offset_p
, int *size_p
)
2772 p
+= emit_pop (p
, x1
);
2773 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2774 /* Branch over the next instruction if x0 >= x1. */
2775 p
+= emit_bcond (p
, GE
, 8);
2776 /* The NOP instruction will be patched with an unconditional branch. */
2778 *offset_p
= (p
- buf
) * 4;
2783 emit_ops_insns (buf
, p
- buf
);
2786 /* Implementation of emit_ops method "emit_le_goto". */
2789 aarch64_emit_le_goto (int *offset_p
, int *size_p
)
2794 p
+= emit_pop (p
, x1
);
2795 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2796 /* Branch over the next instruction if x0 > x1. */
2797 p
+= emit_bcond (p
, GT
, 8);
2798 /* The NOP instruction will be patched with an unconditional branch. */
2800 *offset_p
= (p
- buf
) * 4;
2805 emit_ops_insns (buf
, p
- buf
);
2808 /* Implementation of emit_ops method "emit_gt_goto". */
2811 aarch64_emit_gt_goto (int *offset_p
, int *size_p
)
2816 p
+= emit_pop (p
, x1
);
2817 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2818 /* Branch over the next instruction if x0 <= x1. */
2819 p
+= emit_bcond (p
, LE
, 8);
2820 /* The NOP instruction will be patched with an unconditional branch. */
2822 *offset_p
= (p
- buf
) * 4;
2827 emit_ops_insns (buf
, p
- buf
);
2830 /* Implementation of emit_ops method "emit_ge_got". */
2833 aarch64_emit_ge_got (int *offset_p
, int *size_p
)
2838 p
+= emit_pop (p
, x1
);
2839 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2840 /* Branch over the next instruction if x0 <= x1. */
2841 p
+= emit_bcond (p
, LT
, 8);
2842 /* The NOP instruction will be patched with an unconditional branch. */
2844 *offset_p
= (p
- buf
) * 4;
2849 emit_ops_insns (buf
, p
- buf
);
2852 static struct emit_ops aarch64_emit_ops_impl
=
2854 aarch64_emit_prologue
,
2855 aarch64_emit_epilogue
,
2860 aarch64_emit_rsh_signed
,
2861 aarch64_emit_rsh_unsigned
,
2863 aarch64_emit_log_not
,
2864 aarch64_emit_bit_and
,
2865 aarch64_emit_bit_or
,
2866 aarch64_emit_bit_xor
,
2867 aarch64_emit_bit_not
,
2869 aarch64_emit_less_signed
,
2870 aarch64_emit_less_unsigned
,
2872 aarch64_emit_if_goto
,
2874 aarch64_write_goto_address
,
2879 aarch64_emit_stack_flush
,
2880 aarch64_emit_zero_ext
,
2882 aarch64_emit_stack_adjust
,
2883 aarch64_emit_int_call_1
,
2884 aarch64_emit_void_call_2
,
2885 aarch64_emit_eq_goto
,
2886 aarch64_emit_ne_goto
,
2887 aarch64_emit_lt_goto
,
2888 aarch64_emit_le_goto
,
2889 aarch64_emit_gt_goto
,
2890 aarch64_emit_ge_got
,
2893 /* Implementation of linux_target_ops method "emit_ops". */
2895 static struct emit_ops
*
2896 aarch64_emit_ops (void)
2898 return &aarch64_emit_ops_impl
;
/* Implementation of linux_target_ops method
   "get_min_fast_tracepoint_insn_len".

   A fast tracepoint is implanted with a single branch, and every A64
   instruction is 4 bytes, so that is the minimum instrumented
   length.  */

static int
aarch64_get_min_fast_tracepoint_insn_len (void)
{
  return 4;
}
/* Implementation of linux_target_ops method "supports_range_stepping".

   AArch64 GDBserver supports the vCont;r range-stepping packet.  */

static int
aarch64_supports_range_stepping (void)
{
  return 1;
}
2918 struct linux_target_ops the_low_target
=
2922 aarch64_cannot_fetch_register
,
2923 aarch64_cannot_store_register
,
2924 NULL
, /* fetch_register */
2927 (const unsigned char *) &aarch64_breakpoint
,
2928 aarch64_breakpoint_len
,
2929 NULL
, /* breakpoint_reinsert_addr */
2930 0, /* decr_pc_after_break */
2931 aarch64_breakpoint_at
,
2932 aarch64_supports_z_point_type
,
2933 aarch64_insert_point
,
2934 aarch64_remove_point
,
2935 aarch64_stopped_by_watchpoint
,
2936 aarch64_stopped_data_address
,
2937 NULL
, /* collect_ptrace_register */
2938 NULL
, /* supply_ptrace_register */
2939 aarch64_linux_siginfo_fixup
,
2940 aarch64_linux_new_process
,
2941 aarch64_linux_new_thread
,
2942 aarch64_linux_new_fork
,
2943 aarch64_linux_prepare_to_resume
,
2944 NULL
, /* process_qsupported */
2945 aarch64_supports_tracepoints
,
2946 aarch64_get_thread_area
,
2947 aarch64_install_fast_tracepoint_jump_pad
,
2949 aarch64_get_min_fast_tracepoint_insn_len
,
2950 aarch64_supports_range_stepping
,
2954 initialize_low_arch (void)
2956 init_registers_aarch64 ();
2958 initialize_low_arch_aarch32 ();
2960 initialize_regsets_info (&aarch64_regsets_info
);