gdbserver: make aarch64_write_goto_address static
[deliverable/binutils-gdb.git] / gdb / gdbserver / linux-aarch64-low.c
/* GNU/Linux/AArch64 specific low level interface, for the remote server for
   GDB.

   Copyright (C) 2009-2020 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

22#include "server.h"
23#include "linux-low.h"
db3cb7cb 24#include "nat/aarch64-linux.h"
554717a3 25#include "nat/aarch64-linux-hw-point.h"
bb903df0 26#include "arch/aarch64-insn.h"
3b53ae99 27#include "linux-aarch32-low.h"
176eb98c 28#include "elf/common.h"
afbe19f8
PL
29#include "ax.h"
30#include "tracepoint.h"
f9d949fb 31#include "debug.h"
176eb98c
MS
32
33#include <signal.h>
34#include <sys/user.h>
5826e159 35#include "nat/gdb_ptrace.h"
e9dae05e 36#include <asm/ptrace.h>
bb903df0
PL
37#include <inttypes.h>
38#include <endian.h>
39#include <sys/uio.h>
176eb98c
MS
40
41#include "gdb_proc_service.h"
cc628f3d 42#include "arch/aarch64.h"
7cc17433 43#include "linux-aarch32-tdesc.h"
d6d7ce56 44#include "linux-aarch64-tdesc.h"
fefa175e 45#include "nat/aarch64-sve-linux-ptrace.h"
02895270 46#include "tdesc.h"
176eb98c 47
176eb98c
MS
48#ifdef HAVE_SYS_REG_H
49#include <sys/reg.h>
50#endif
51
176eb98c
MS
/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  /* Hardware breakpoint/watchpoint data.
     The reason for this to be per-process rather than per-thread is
     the lack of information in the gdbserver environment: gdbserver is
     not told whether a requested hardware breakpoint/watchpoint is
     thread specific or not, so it has to set each hw bp/wp for every
     thread in the current process.  The higher level bp/wp management
     in gdb will resume a thread if a hw bp/wp trap is not expected for
     it.  Since the hw bp/wp setting is the same for each thread, it is
     reasonable for the data to live here.  */
  struct aarch64_debug_reg_state debug_reg_state;
};

/* Return true if the size of register 0 is 8 bytes.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

/* Return true if the target description behind the regcache contains
   the SVE feature, i.e. the regcache holds the SVE registers.  */

static bool
is_sve_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return tdesc_contains_feature (regcache->tdesc, "org.gnu.gdb.aarch64.sve");
}

static void
aarch64_fill_gregset (struct regcache *regcache, void *buf)
{
  struct user_pt_regs *regset = (struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    collect_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  collect_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  collect_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  collect_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}

static void
aarch64_store_gregset (struct regcache *regcache, const void *buf)
{
  const struct user_pt_regs *regset = (const struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    supply_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  supply_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  supply_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  supply_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}

static void
aarch64_fill_fpregset (struct regcache *regcache, void *buf)
{
  struct user_fpsimd_state *regset = (struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    collect_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  collect_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  collect_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}

static void
aarch64_store_fpregset (struct regcache *regcache, const void *buf)
{
  const struct user_fpsimd_state *regset
    = (const struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    supply_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  supply_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  supply_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}

/* Store the pauth registers to regcache.  */

static void
aarch64_store_pauthregset (struct regcache *regcache, const void *buf)
{
  uint64_t *pauth_regset = (uint64_t *) buf;
  int pauth_base = find_regno (regcache->tdesc, "pauth_dmask");

  if (pauth_base == 0)
    return;

  supply_register (regcache, AARCH64_PAUTH_DMASK_REGNUM (pauth_base),
		   &pauth_regset[0]);
  supply_register (regcache, AARCH64_PAUTH_CMASK_REGNUM (pauth_base),
		   &pauth_regset[1]);
}

157/* Implementation of linux_target_ops method "get_pc". */
158
176eb98c
MS
159static CORE_ADDR
160aarch64_get_pc (struct regcache *regcache)
161{
8a7e4587 162 if (register_size (regcache->tdesc, 0) == 8)
a5652c21 163 return linux_get_pc_64bit (regcache);
8a7e4587 164 else
a5652c21 165 return linux_get_pc_32bit (regcache);
176eb98c
MS
166}
167
421530db
PL
168/* Implementation of linux_target_ops method "set_pc". */
169
176eb98c
MS
170static void
171aarch64_set_pc (struct regcache *regcache, CORE_ADDR pc)
172{
8a7e4587 173 if (register_size (regcache->tdesc, 0) == 8)
a5652c21 174 linux_set_pc_64bit (regcache, pc);
8a7e4587 175 else
a5652c21 176 linux_set_pc_32bit (regcache, pc);
176eb98c
MS
177}
178
176eb98c
MS
#define aarch64_breakpoint_len 4

/* AArch64 BRK software debug mode instruction.
   This instruction needs to match gdb/aarch64-tdep.c
   (aarch64_default_breakpoint).  */
static const gdb_byte aarch64_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
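
/* For reference (editor's note, not from the upstream file): these
   four bytes are the little-endian encoding of BRK #0 (0xd4200000),
   since AArch64 instructions are always stored little-endian.  */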

/* Implementation of linux_target_ops method "breakpoint_at".  */

static int
aarch64_breakpoint_at (CORE_ADDR where)
{
  if (is_64bit_tdesc ())
    {
      gdb_byte insn[aarch64_breakpoint_len];

      (*the_target->read_memory) (where, (unsigned char *) &insn,
				  aarch64_breakpoint_len);
      if (memcmp (insn, aarch64_breakpoint, aarch64_breakpoint_len) == 0)
	return 1;

      return 0;
    }
  else
    return arm_breakpoint_at (where);
}

static void
aarch64_init_debug_reg_state (struct aarch64_debug_reg_state *state)
{
  int i;

  for (i = 0; i < AARCH64_HBP_MAX_NUM; ++i)
    {
      state->dr_addr_bp[i] = 0;
      state->dr_ctrl_bp[i] = 0;
      state->dr_ref_count_bp[i] = 0;
    }

  for (i = 0; i < AARCH64_HWP_MAX_NUM; ++i)
    {
      state->dr_addr_wp[i] = 0;
      state->dr_ctrl_wp[i] = 0;
      state->dr_ref_count_wp[i] = 0;
    }
}

/* Return the pointer to the debug register state structure in the
   current process' arch-specific data area.  */

struct aarch64_debug_reg_state *
aarch64_get_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}

237/* Implementation of linux_target_ops method "supports_z_point_type". */
238
4ff0d3d8
PA
239static int
240aarch64_supports_z_point_type (char z_type)
241{
242 switch (z_type)
243 {
96c97461 244 case Z_PACKET_SW_BP:
4ff0d3d8
PA
245 case Z_PACKET_HW_BP:
246 case Z_PACKET_WRITE_WP:
247 case Z_PACKET_READ_WP:
248 case Z_PACKET_ACCESS_WP:
249 return 1;
250 default:
4ff0d3d8
PA
251 return 0;
252 }
253}
254
/* Implementation of linux_target_ops method "insert_point".

   This only records the information for the to-be-inserted bp/wp;
   the actual insertion happens when threads are resumed.  */

static int
aarch64_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
		      int len, struct raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "insert_point on entry (addr=0x%08lx, len=%d)\n",
	     (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  if (targ_type != hw_execute)
    {
      if (aarch64_linux_region_ok_for_watchpoint (addr, len))
	ret = aarch64_handle_watchpoint (targ_type, addr, len,
					 1 /* is_insert */, state);
      else
	ret = -1;
    }
  else
    {
      if (len == 3)
	{
	  /* A LEN of 3 means the breakpoint is set on a 32-bit Thumb
	     instruction.  Set it to 2 to correctly encode the length
	     bit mask in the hardware/watchpoint control register.  */
	  len = 2;
	}
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
				       1 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "insert_point", addr, len,
				  targ_type);

  return ret;
}

/* Implementation of linux_target_ops method "remove_point".

   This only records the information for the to-be-removed bp/wp;
   the actual removal happens when threads are resumed.  */

static int
aarch64_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
		      int len, struct raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "remove_point on entry (addr=0x%08lx, len=%d)\n",
	     (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  /* Set up state pointers.  */
  if (targ_type != hw_execute)
    ret =
      aarch64_handle_watchpoint (targ_type, addr, len, 0 /* is_insert */,
				 state);
  else
    {
      if (len == 3)
	{
	  /* A LEN of 3 means the breakpoint is set on a 32-bit Thumb
	     instruction.  Set it to 2 to correctly encode the length
	     bit mask in the hardware/watchpoint control register.  */
	  len = 2;
	}
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
				       0 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "remove_point", addr, len,
				  targ_type);

  return ret;
}

/* Implementation of linux_target_ops method "stopped_data_address".  */

static CORE_ADDR
aarch64_stopped_data_address (void)
{
  siginfo_t siginfo;
  int pid, i;
  struct aarch64_debug_reg_state *state;

  pid = lwpid_of (current_thread);

  /* Get the siginfo.  */
  if (ptrace (PTRACE_GETSIGINFO, pid, NULL, &siginfo) != 0)
    return (CORE_ADDR) 0;

  /* Need to be a hardware breakpoint/watchpoint trap.  */
  if (siginfo.si_signo != SIGTRAP
      || (siginfo.si_code & 0xffff) != 0x0004 /* TRAP_HWBKPT */)
    return (CORE_ADDR) 0;

  /* Check if the address matches any watched address.  */
  state = aarch64_get_debug_reg_state (pid_of (current_thread));
  for (i = aarch64_num_wp_regs - 1; i >= 0; --i)
    {
      const unsigned int offset
	= aarch64_watchpoint_offset (state->dr_ctrl_wp[i]);
      const unsigned int len = aarch64_watchpoint_length (state->dr_ctrl_wp[i]);
      const CORE_ADDR addr_trap = (CORE_ADDR) siginfo.si_addr;
      const CORE_ADDR addr_watch = state->dr_addr_wp[i] + offset;
      const CORE_ADDR addr_watch_aligned = align_down (state->dr_addr_wp[i], 8);
      const CORE_ADDR addr_orig = state->dr_addr_orig_wp[i];

      if (state->dr_ref_count_wp[i]
	  && DR_CONTROL_ENABLED (state->dr_ctrl_wp[i])
	  && addr_trap >= addr_watch_aligned
	  && addr_trap < addr_watch + len)
	{
	  /* ADDR_TRAP reports the first address of the memory range
	     accessed by the CPU, regardless of what was the memory
	     range watched.  Thus, a large CPU access that straddles
	     the ADDR_WATCH..ADDR_WATCH+LEN range may result in an
	     ADDR_TRAP that is lower than the
	     ADDR_WATCH..ADDR_WATCH+LEN range.  E.g.:

	     addr: |   4   |   5   |   6   |   7   |   8   |
		   |---- range watched ----|
		   |----------- range accessed ------------|

	     In this case, ADDR_TRAP will be 4.

	     To match a watchpoint known to GDB core, we must never
	     report *ADDR_P outside of any ADDR_WATCH..ADDR_WATCH+LEN
	     range.  ADDR_WATCH <= ADDR_TRAP < ADDR_ORIG is a false
	     positive on kernels older than 4.10.  See PR
	     external/20207.  */
	  return addr_orig;
	}
    }

  return (CORE_ADDR) 0;
}

/* Implementation of linux_target_ops method "stopped_by_watchpoint".  */

static int
aarch64_stopped_by_watchpoint (void)
{
  if (aarch64_stopped_data_address () != 0)
    return 1;
  else
    return 0;
}

/* Fetch the thread-local storage pointer for libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
  return aarch64_ps_get_thread_area (ph, lwpid, idx, base,
				     is_64bit_tdesc ());
}

433/* Implementation of linux_target_ops method "siginfo_fixup". */
434
435static int
8adce034 436aarch64_linux_siginfo_fixup (siginfo_t *native, gdb_byte *inf, int direction)
ade90bde
YQ
437{
438 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
439 if (!is_64bit_tdesc ())
440 {
441 if (direction == 0)
442 aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo *) inf,
443 native);
444 else
445 aarch64_siginfo_from_compat_siginfo (native,
446 (struct compat_siginfo *) inf);
447
448 return 1;
449 }
450
451 return 0;
452}
453
/* Implementation of linux_target_ops method "new_process".  */

static struct arch_process_info *
aarch64_linux_new_process (void)
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  aarch64_init_debug_reg_state (&info->debug_reg_state);

  return info;
}

/* Implementation of linux_target_ops method "delete_process".  */

static void
aarch64_linux_delete_process (struct arch_process_info *info)
{
  xfree (info);
}

474/* Implementation of linux_target_ops method "linux_new_fork". */
475
3a8a0396
DB
476static void
477aarch64_linux_new_fork (struct process_info *parent,
478 struct process_info *child)
479{
480 /* These are allocated by linux_add_process. */
61a7418c
DB
481 gdb_assert (parent->priv != NULL
482 && parent->priv->arch_private != NULL);
483 gdb_assert (child->priv != NULL
484 && child->priv->arch_private != NULL);
3a8a0396
DB
485
486 /* Linux kernel before 2.6.33 commit
487 72f674d203cd230426437cdcf7dd6f681dad8b0d
488 will inherit hardware debug registers from parent
489 on fork/vfork/clone. Newer Linux kernels create such tasks with
490 zeroed debug registers.
491
492 GDB core assumes the child inherits the watchpoints/hw
493 breakpoints of the parent, and will remove them all from the
494 forked off process. Copy the debug registers mirrors into the
495 new process so that all breakpoints and watchpoints can be
496 removed together. The debug registers mirror will become zeroed
497 in the end before detaching the forked off process, thus making
498 this compatible with older Linux kernels too. */
499
61a7418c 500 *child->priv->arch_private = *parent->priv->arch_private;
3a8a0396
DB
501}
502
ee4fbcfa
AH
/* Matches HWCAP_PACA in kernel header arch/arm64/include/uapi/asm/hwcap.h.  */
#define AARCH64_HWCAP_PACA (1 << 30)

/* Implementation of linux_target_ops method "arch_setup".  */

static void
aarch64_arch_setup (void)
{
  unsigned int machine;
  int is_elf64;
  int tid;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (is_elf64)
    {
      uint64_t vq = aarch64_sve_get_vq (tid);
      unsigned long hwcap = linux_get_hwcap (8);
      bool pauth_p = hwcap & AARCH64_HWCAP_PACA;

      current_process ()->tdesc = aarch64_linux_read_description (vq, pauth_p);
    }
  else
    current_process ()->tdesc = aarch32_linux_read_description ();

  aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread));
}

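/* A note on the 64-bit path above (editor's note, not from the
   upstream file): VQ is the SVE vector length in quadwords (128-bit
   units) and is zero when the kernel reports no SVE support, so it
   doubles as the SVE-present flag; the HWCAP bit selects a target
   description that additionally exposes the pointer authentication
   mask registers.  */
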
/* Wrapper for aarch64_sve_regs_copy_to_reg_buf.  */

static void
aarch64_sve_regs_copy_to_regcache (struct regcache *regcache, const void *buf)
{
  return aarch64_sve_regs_copy_to_reg_buf (regcache, buf);
}

/* Wrapper for aarch64_sve_regs_copy_from_reg_buf.  */

static void
aarch64_sve_regs_copy_from_regcache (struct regcache *regcache, void *buf)
{
  return aarch64_sve_regs_copy_from_reg_buf (regcache, buf);
}

static struct regset_info aarch64_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_FPREGSET,
    sizeof (struct user_fpsimd_state), FP_REGS,
    aarch64_fill_fpregset, aarch64_store_fpregset
  },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
    AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
    NULL, aarch64_store_pauthregset },
  NULL_REGSET
};

static struct regsets_info aarch64_regsets_info =
  {
    aarch64_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

static struct regs_info regs_info_aarch64 =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs */
    &aarch64_regsets_info,
  };

static struct regset_info aarch64_sve_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_SVE,
    SVE_PT_SIZE (AARCH64_MAX_SVE_VQ, SVE_PT_REGS_SVE), EXTENDED_REGS,
    aarch64_sve_regs_copy_from_regcache, aarch64_sve_regs_copy_to_regcache
  },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
    AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
    NULL, aarch64_store_pauthregset },
  NULL_REGSET
};

static struct regsets_info aarch64_sve_regsets_info =
  {
    aarch64_sve_regsets, /* regsets.  */
    0, /* num_regsets.  */
    NULL, /* disabled_regsets.  */
  };

static struct regs_info regs_info_aarch64_sve =
  {
    NULL, /* regset_bitmap.  */
    NULL, /* usrregs.  */
    &aarch64_sve_regsets_info,
  };

/* Implementation of linux_target_ops method "regs_info".  */

static const struct regs_info *
aarch64_regs_info (void)
{
  if (!is_64bit_tdesc ())
    return &regs_info_aarch32;

  if (is_sve_tdesc ())
    return &regs_info_aarch64_sve;

  return &regs_info_aarch64;
}

/* Implementation of linux_target_ops method "supports_tracepoints".  */

static int
aarch64_supports_tracepoints (void)
{
  if (current_thread == NULL)
    return 1;
  else
    {
      /* We don't support tracepoints on aarch32 now.  */
      return is_64bit_tdesc ();
    }
}

/* Implementation of linux_target_ops method "get_thread_area".  */

static int
aarch64_get_thread_area (int lwpid, CORE_ADDR *addrp)
{
  struct iovec iovec;
  uint64_t reg;

  iovec.iov_base = &reg;
  iovec.iov_len = sizeof (reg);

  if (ptrace (PTRACE_GETREGSET, lwpid, NT_ARM_TLS, &iovec) != 0)
    return -1;

  *addrp = reg;

  return 0;
}

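/* For reference (editor's note, not from the upstream file): the
   NT_ARM_TLS regset holds TPIDR_EL0, the software thread ID register,
   so *ADDRP receives the thread's TLS base as libthread_db expects.  */
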
/* Implementation of linux_target_ops method "get_syscall_trapinfo".  */

static void
aarch64_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      collect_register_by_name (regcache, "x8", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "r7", sysno);
}

/* List of condition codes that we need.  */

enum aarch64_condition_codes
{
  EQ = 0x0,
  NE = 0x1,
  LO = 0x3,
  GE = 0xa,
  LT = 0xb,
  GT = 0xc,
  LE = 0xd,
};

enum aarch64_operand_type
{
  OPERAND_IMMEDIATE,
  OPERAND_REGISTER,
};

/* Representation of an operand.  At this time, it only supports register
   and immediate types.  */

struct aarch64_operand
{
  /* Type of the operand.  */
  enum aarch64_operand_type type;

  /* Value of the operand according to the type.  */
  union
    {
      uint32_t imm;
      struct aarch64_register reg;
    };
};

/* List of registers that we are currently using, we can add more here as
   we need to use them.  */

/* General purpose scratch registers (64 bit).  */
static const struct aarch64_register x0 = { 0, 1 };
static const struct aarch64_register x1 = { 1, 1 };
static const struct aarch64_register x2 = { 2, 1 };
static const struct aarch64_register x3 = { 3, 1 };
static const struct aarch64_register x4 = { 4, 1 };

/* General purpose scratch registers (32 bit).  */
static const struct aarch64_register w0 = { 0, 0 };
static const struct aarch64_register w2 = { 2, 0 };

/* Intra-procedure scratch registers.  */
static const struct aarch64_register ip0 = { 16, 1 };

/* Special purpose registers.  */
static const struct aarch64_register fp = { 29, 1 };
static const struct aarch64_register lr = { 30, 1 };
static const struct aarch64_register sp = { 31, 1 };
static const struct aarch64_register xzr = { 31, 1 };

/* Dynamically allocate a new register.  If we know the register
   statically, we should make it a global as above instead of using this
   helper function.  */

static struct aarch64_register
aarch64_register (unsigned num, int is64)
{
  return (struct aarch64_register) { num, is64 };
}

/* Helper function to create a register operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, register_operand (x1)); */

static struct aarch64_operand
register_operand (struct aarch64_register reg)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_REGISTER;
  operand.reg = reg;

  return operand;
}

/* Helper function to create an immediate operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, immediate_operand (12)); */

static struct aarch64_operand
immediate_operand (uint32_t imm)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_IMMEDIATE;
  operand.imm = imm;

  return operand;
}

/* Helper function to create an offset memory operand.

   For example:
   p += emit_ldr (p, x0, sp, offset_memory_operand (16)); */

static struct aarch64_memory_operand
offset_memory_operand (int32_t offset)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_OFFSET, offset };
}

/* Helper function to create a pre-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, preindex_memory_operand (16)); */

static struct aarch64_memory_operand
preindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_PREINDEX, index };
}

/* Helper function to create a post-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, postindex_memory_operand (16)); */

static struct aarch64_memory_operand
postindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_POSTINDEX, index };
}

/* System control registers.  These special registers can be written and
   read with the MRS and MSR instructions.

   - NZCV: Condition flags.  GDB refers to this register under the CPSR
     name.
   - FPSR: Floating-point status register.
   - FPCR: Floating-point control register.
   - TPIDR_EL0: Software thread ID register.  */

enum aarch64_system_control_registers
{
  /*            op0           op1           crn          crm          op2  */
  NZCV =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
  FPSR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
  FPCR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
  TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
};

/* Write a BLR instruction into *BUF.

   BLR rn

   RN is the register to branch to.  */

static int
emit_blr (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, BLR | ENCODE (rn.num, 5, 5));
}

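/* For example (editor's illustration, not from the upstream file):
   emit_blr (p, ip0) encodes BLR x16; ENCODE places the register number
   in bits 5..9 of the opcode, giving the instruction word 0xd63f0200.  */
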
/* Write a RET instruction into *BUF.

   RET xn

   RN is the register to branch to.  */

static int
emit_ret (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, RET | ENCODE (rn.num, 5, 5));
}

static int
emit_load_store_pair (uint32_t *buf, enum aarch64_opcodes opcode,
		      struct aarch64_register rt,
		      struct aarch64_register rt2,
		      struct aarch64_register rn,
		      struct aarch64_memory_operand operand)
{
  uint32_t opc;
  uint32_t pre_index;
  uint32_t write_back;

  if (rt.is64)
    opc = ENCODE (2, 2, 30);
  else
    opc = ENCODE (0, 2, 30);

  switch (operand.type)
    {
    case MEMORY_OPERAND_OFFSET:
      {
	pre_index = ENCODE (1, 1, 24);
	write_back = ENCODE (0, 1, 23);
	break;
      }
    case MEMORY_OPERAND_POSTINDEX:
      {
	pre_index = ENCODE (0, 1, 24);
	write_back = ENCODE (1, 1, 23);
	break;
      }
    case MEMORY_OPERAND_PREINDEX:
      {
	pre_index = ENCODE (1, 1, 24);
	write_back = ENCODE (1, 1, 23);
	break;
      }
    default:
      return 0;
    }

  return aarch64_emit_insn (buf, opcode | opc | pre_index | write_back
			    | ENCODE (operand.index >> 3, 7, 15)
			    | ENCODE (rt2.num, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}

/* Write a STP instruction into *BUF.

   STP rt, rt2, [rn, #offset]
   STP rt, rt2, [rn, #index]!
   STP rt, rt2, [rn], #index

   RT and RT2 are the registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_stp (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rt2, struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, STP, rt, rt2, rn, operand);
}

/* Write a LDP instruction into *BUF.

   LDP rt, rt2, [rn, #offset]
   LDP rt, rt2, [rn, #index]!
   LDP rt, rt2, [rn], #index

   RT and RT2 are the registers to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_ldp (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rt2, struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, LDP, rt, rt2, rn, operand);
}

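/* For example (editor's illustration, not from the upstream file):

     p += emit_stp (p, fp, lr, sp, preindex_memory_operand (-16));

   emits STP x29, x30, [sp, #-16]! (0xa9bf7bfd), the conventional
   frame push.  */
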
/* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.

   LDP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_ldp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
		   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, LDP_SIMD_VFP | opc | pre_index
			    | ENCODE (offset >> 4, 7, 15)
			    | ENCODE (rt2, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

/* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.

   STP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_stp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
		   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, STP_SIMD_VFP | opc | pre_index
			    | ENCODE (offset >> 4, 7, 15)
			    | ENCODE (rt2, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

/* Write a LDRH instruction into *BUF.

   LDRH wt, [xn, #offset]
   LDRH wt, [xn, #index]!
   LDRH wt, [xn], #index

   RT is the register to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_ldrh (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn,
	   struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 1, LDR, rt, rn, operand);
}

/* Write a LDRB instruction into *BUF.

   LDRB wt, [xn, #offset]
   LDRB wt, [xn, #index]!
   LDRB wt, [xn], #index

   RT is the register to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_ldrb (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn,
	   struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 0, LDR, rt, rn, operand);
}

/* Write a STR instruction into *BUF.

   STR rt, [rn, #offset]
   STR rt, [rn, #index]!
   STR rt, [rn], #index

   RT is the register to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_str (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, rt.is64 ? 3 : 2, STR, rt, rn, operand);
}

/* Helper function emitting an exclusive load or store instruction.  */

static int
emit_load_store_exclusive (uint32_t *buf, uint32_t size,
			   enum aarch64_opcodes opcode,
			   struct aarch64_register rs,
			   struct aarch64_register rt,
			   struct aarch64_register rt2,
			   struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, opcode | ENCODE (size, 2, 30)
			    | ENCODE (rs.num, 5, 16) | ENCODE (rt2.num, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}

/* Write a LDAXR instruction into *BUF.

   LDAXR rt, [xn]

   RT is the destination register.
   RN is the base address register.  */

static int
emit_ldaxr (uint32_t *buf, struct aarch64_register rt,
	    struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, LDAXR, xzr, rt,
				    xzr, rn);
}

/* Write a STXR instruction into *BUF.

   STXR ws, rt, [xn]

   RS is the result register; it indicates if the store succeeded or not.
   RT is the destination register.
   RN is the base address register.  */

static int
emit_stxr (uint32_t *buf, struct aarch64_register rs,
	   struct aarch64_register rt, struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STXR, rs, rt,
				    xzr, rn);
}

/* Write a STLR instruction into *BUF.

   STLR rt, [xn]

   RT is the register to store.
   RN is the base address register.  */

static int
emit_stlr (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STLR, xzr, rt,
				    xzr, rn);
}

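/* Taken together, these emitters express the usual exclusive-monitor
   retry loop; a sketch of the pattern (editor's illustration, not code
   from this file):

     again:
       LDAXR x1, [x0]     ; load-acquire exclusive from the lock word
       CBNZ  x1, again    ; already locked, spin
       STXR  w2, x3, [x0] ; try to claim it; w2 != 0 on failure
       CBNZ  w2, again    ; lost the exclusive monitor, retry
       ...                ; critical section
       STLR  xzr, [x0]    ; store-release to unlock  */
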
/* Helper function for data processing instructions with register sources.  */

static int
emit_data_processing_reg (uint32_t *buf, uint32_t opcode,
			  struct aarch64_register rd,
			  struct aarch64_register rn,
			  struct aarch64_register rm)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, opcode | size | ENCODE (rm.num, 5, 16)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rd.num, 5, 0));
}

/* Helper function for data processing instructions taking either a register
   or an immediate.  */

static int
emit_data_processing (uint32_t *buf, enum aarch64_opcodes opcode,
		      struct aarch64_register rd,
		      struct aarch64_register rn,
		      struct aarch64_operand operand)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  /* The opcode is different for register and immediate source operands.  */
  uint32_t operand_opcode;

  if (operand.type == OPERAND_IMMEDIATE)
    {
      /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (8, 4, 25);

      return aarch64_emit_insn (buf, opcode | operand_opcode | size
				| ENCODE (operand.imm, 12, 10)
				| ENCODE (rn.num, 5, 5)
				| ENCODE (rd.num, 5, 0));
    }
  else
    {
      /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (5, 4, 25);

      return emit_data_processing_reg (buf, opcode | operand_opcode, rd,
				       rn, operand.reg);
    }
}

/* Write an ADD instruction into *BUF.

   ADD rd, rn, #imm
   ADD rd, rn, rm

   This function handles both an immediate and register add.

   RD is the destination register.
   RN is the input register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_add (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, ADD, rd, rn, operand);
}

/* Write a SUB instruction into *BUF.

   SUB rd, rn, #imm
   SUB rd, rn, rm

   This function handles both an immediate and register sub.

   RD is the destination register.
   RN is the input register.
   OPERAND is the source operand to subtract from RN, either of type
   OPERAND_IMMEDIATE or OPERAND_REGISTER.  */

static int
emit_sub (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUB, rd, rn, operand);
}

/* Write a MOV instruction into *BUF.

   MOV rd, #imm
   MOV rd, rm

   This function handles both a wide immediate move and a register move,
   with the condition that the source register is not xzr.  xzr and the
   stack pointer share the same encoding and this function only supports
   the stack pointer.

   RD is the destination register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_mov (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_operand operand)
{
  if (operand.type == OPERAND_IMMEDIATE)
    {
      uint32_t size = ENCODE (rd.is64, 1, 31);
      /* Do not shift the immediate.  */
      uint32_t shift = ENCODE (0, 2, 21);

      return aarch64_emit_insn (buf, MOV | size | shift
				| ENCODE (operand.imm, 16, 5)
				| ENCODE (rd.num, 5, 0));
    }
  else
    return emit_add (buf, rd, operand.reg, immediate_operand (0));
}

/* Write a MOVK instruction into *BUF.

   MOVK rd, #imm, lsl #shift

   RD is the destination register.
   IMM is the immediate.
   SHIFT is the logical shift left to apply to IMM.  */

static int
emit_movk (uint32_t *buf, struct aarch64_register rd, uint32_t imm,
	   unsigned shift)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, MOVK | size | ENCODE (shift, 2, 21) |
			    ENCODE (imm, 16, 5) | ENCODE (rd.num, 5, 0));
}

/* Write instructions into *BUF in order to move ADDR into a register.
   ADDR can be a 64-bit value.

   This function will emit a series of MOV and MOVK instructions, such as:

     MOV  xd, #(addr)
     MOVK xd, #(addr >> 16), lsl #16
     MOVK xd, #(addr >> 32), lsl #32
     MOVK xd, #(addr >> 48), lsl #48 */

static int
emit_mov_addr (uint32_t *buf, struct aarch64_register rd, CORE_ADDR addr)
{
  uint32_t *p = buf;

  /* The MOV (wide immediate) instruction clears the top bits of the
     register.  */
  p += emit_mov (p, rd, immediate_operand (addr & 0xffff));

  if ((addr >> 16) != 0)
    p += emit_movk (p, rd, (addr >> 16) & 0xffff, 1);
  else
    return p - buf;

  if ((addr >> 32) != 0)
    p += emit_movk (p, rd, (addr >> 32) & 0xffff, 2);
  else
    return p - buf;

  if ((addr >> 48) != 0)
    p += emit_movk (p, rd, (addr >> 48) & 0xffff, 3);

  return p - buf;
}

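/* For instance (editor's illustration, not from the upstream file), a
   typical Linux user-space address such as 0x0000ffffb7dd0148 needs
   only three instructions:

     MOV  x0, #0x0148
     MOVK x0, #0xb7dd, lsl #16
     MOVK x0, #0xffff, lsl #32

   The final MOVK is skipped because bits 48..63 are zero.  */
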
/* Write a SUBS instruction into *BUF.

   SUBS rd, rn, rm

   This instruction updates the condition flags.

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_subs (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUBS, rd, rn, operand);
}

/* Write a CMP instruction into *BUF.

   CMP rn, rm

   This instruction is an alias of SUBS xzr, rn, rm.

   RN and RM are the registers to compare.  */

static int
emit_cmp (uint32_t *buf, struct aarch64_register rn,
	  struct aarch64_operand operand)
{
  return emit_subs (buf, xzr, rn, operand);
}

/* Write an AND instruction into *BUF.

   AND rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_and (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, AND, rd, rn, rm);
}

/* Write an ORR instruction into *BUF.

   ORR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orr (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORR, rd, rn, rm);
}

/* Write an ORN instruction into *BUF.

   ORN rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orn (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORN, rd, rn, rm);
}

/* Write an EOR instruction into *BUF.

   EOR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_eor (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, EOR, rd, rn, rm);
}

/* Write a MVN instruction into *BUF.

   MVN rd, rm

   This is an alias for ORN rd, xzr, rm.

   RD is the destination register.
   RM is the source register.  */

static int
emit_mvn (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rm)
{
  return emit_orn (buf, rd, xzr, rm);
}

/* Write a LSLV instruction into *BUF.

   LSLV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lslv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSLV, rd, rn, rm);
}

/* Write a LSRV instruction into *BUF.

   LSRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lsrv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSRV, rd, rn, rm);
}

/* Write an ASRV instruction into *BUF.

   ASRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_asrv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ASRV, rd, rn, rm);
}

/* Write a MUL instruction into *BUF.

   MUL rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_mul (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, MUL, rd, rn, rm);
}

/* Write a MRS instruction into *BUF.  The register size is 64-bit.

   MRS xt, system_reg

   RT is the destination register.
   SYSTEM_REG is the special purpose register to read.  */

static int
emit_mrs (uint32_t *buf, struct aarch64_register rt,
	  enum aarch64_system_control_registers system_reg)
{
  return aarch64_emit_insn (buf, MRS | ENCODE (system_reg, 15, 5)
			    | ENCODE (rt.num, 5, 0));
}

/* Write a MSR instruction into *BUF.  The register size is 64-bit.

   MSR system_reg, xt

   SYSTEM_REG is the special purpose register to write.
   RT is the input register.  */

static int
emit_msr (uint32_t *buf, enum aarch64_system_control_registers system_reg,
	  struct aarch64_register rt)
{
  return aarch64_emit_insn (buf, MSR | ENCODE (system_reg, 15, 5)
			    | ENCODE (rt.num, 5, 0));
}

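/* For example (editor's illustration, not from the upstream file):
   emit_mrs (p, x0, NZCV) encodes MRS x0, NZCV (0xd53b4200); the 15-bit
   value from enum aarch64_system_control_registers lands in bits 5..19
   of the instruction word.  */
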
/* Write a SEVL instruction into *BUF.

   This is a hint instruction telling the hardware to trigger an event.  */

static int
emit_sevl (uint32_t *buf)
{
  return aarch64_emit_insn (buf, SEVL);
}

/* Write a WFE instruction into *BUF.

   This is a hint instruction telling the hardware to wait for an event.  */

static int
emit_wfe (uint32_t *buf)
{
  return aarch64_emit_insn (buf, WFE);
}

/* Write a SBFM instruction into *BUF.

   SBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, sign extending the result.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_sbfm (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, SBFM | size | n | ENCODE (immr, 6, 16)
			    | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}

/* Write a SBFX instruction into *BUF.

   SBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination, sign
   extending the result.  This is an alias for:

     SBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_sbfx (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_sbfm (buf, rd, rn, lsb, lsb + width - 1);
}

/* Write a UBFM instruction into *BUF.

   UBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, extending the result with zeros.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_ubfm (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, UBFM | size | n | ENCODE (immr, 6, 16)
			    | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}

/* Write a UBFX instruction into *BUF.

   UBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination,
   extending the result with zeros.  This is an alias for:

     UBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_ubfx (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_ubfm (buf, rd, rn, lsb, lsb + width - 1);
}

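/* For example (editor's illustration, not from the upstream file):
   emit_ubfx (p, x0, x1, 8, 8) extracts bits 8..15 of x1 into x0,
   zero-extended, i.e. UBFX x0, x1, #8, #8, which is the alias of
   UBFM x0, x1, #8, #15.  */
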
/* Write a CSINC instruction into *BUF.

   CSINC rd, rn, rm, cond

   This instruction writes RN to RD if the condition is true, and
   RM + 1 otherwise.

   RD is the destination register.
   RN and RM are the source registers.
   COND is the encoded condition.  */

static int
emit_csinc (uint32_t *buf, struct aarch64_register rd,
	    struct aarch64_register rn, struct aarch64_register rm,
	    unsigned cond)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, CSINC | size | ENCODE (rm.num, 5, 16)
			    | ENCODE (cond, 4, 12) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}

/* Write a CSET instruction into *BUF.

   CSET rd, cond

   This instruction conditionally writes 1 or 0 to the destination
   register: 1 is written if the condition is true.  This is an alias for:

     CSINC rd, xzr, xzr, !cond

   Note that the condition needs to be inverted.

   RD is the destination register.
   COND is the encoded condition.  */

static int
emit_cset (uint32_t *buf, struct aarch64_register rd, unsigned cond)
{
  /* The least significant bit of the condition needs toggling in order to
     invert it.  */
  return emit_csinc (buf, rd, xzr, xzr, cond ^ 0x1);
}

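/* For example (editor's illustration, not from the upstream file):
   emit_cset (p, x0, EQ) emits CSINC x0, xzr, xzr, NE (EQ ^ 1 == NE),
   so x0 becomes 1 when the Z flag is set and 0 otherwise.  */
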
/* Write LEN instructions from BUF into the inferior memory at *TO.

   Note instructions are always little endian on AArch64, unlike data.  */

static void
append_insns (CORE_ADDR *to, size_t len, const uint32_t *buf)
{
  size_t byte_len = len * sizeof (uint32_t);
#if (__BYTE_ORDER == __BIG_ENDIAN)
  uint32_t *le_buf = (uint32_t *) xmalloc (byte_len);
  size_t i;

  for (i = 0; i < len; i++)
    le_buf[i] = htole32 (buf[i]);

  target_write_memory (*to, (const unsigned char *) le_buf, byte_len);

  xfree (le_buf);
#else
  target_write_memory (*to, (const unsigned char *) buf, byte_len);
#endif

  *to += byte_len;
}

/* Sub-class of struct aarch64_insn_data, storing information for
   instruction relocation for fast tracepoints.  The visitor relocates
   an instruction from BASE.INSN_ADDR to NEW_ADDR and saves the
   relocated instructions in the buffer pointed to by INSN_PTR.  */

struct aarch64_insn_relocation_data
{
  struct aarch64_insn_data base;

  /* The new address the instruction is relocated to.  */
  CORE_ADDR new_addr;
  /* Pointer to the buffer of relocated instruction(s).  */
  uint32_t *insn_ptr;
};

1651/* Implementation of aarch64_insn_visitor method "b". */
1652
1653static void
1654aarch64_ftrace_insn_reloc_b (const int is_bl, const int32_t offset,
1655 struct aarch64_insn_data *data)
1656{
1657 struct aarch64_insn_relocation_data *insn_reloc
1658 = (struct aarch64_insn_relocation_data *) data;
2ac09a5b 1659 int64_t new_offset
0badd99f
YQ
1660 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1661
1662 if (can_encode_int32 (new_offset, 28))
1663 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, is_bl, new_offset);
1664}
1665
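/* A note on the arithmetic above and in the visitors below (editor's
   note, not from the upstream file): the branch target
   INSN_ADDR + OFFSET is an absolute address that must be preserved, so
   seen from NEW_ADDR the displacement becomes
   (INSN_ADDR + OFFSET) - NEW_ADDR, which is exactly NEW_OFFSET.  */
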
1666/* Implementation of aarch64_insn_visitor method "b_cond". */
1667
1668static void
1669aarch64_ftrace_insn_reloc_b_cond (const unsigned cond, const int32_t offset,
1670 struct aarch64_insn_data *data)
1671{
1672 struct aarch64_insn_relocation_data *insn_reloc
1673 = (struct aarch64_insn_relocation_data *) data;
2ac09a5b 1674 int64_t new_offset
0badd99f
YQ
1675 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1676
1677 if (can_encode_int32 (new_offset, 21))
1678 {
1679 insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond,
1680 new_offset);
bb903df0 1681 }
0badd99f 1682 else if (can_encode_int32 (new_offset, 28))
bb903df0 1683 {
0badd99f
YQ
1684 /* The offset is out of range for a conditional branch
1685 instruction but not for a unconditional branch. We can use
1686 the following instructions instead:
bb903df0 1687
0badd99f
YQ
1688 B.COND TAKEN ; If cond is true, then jump to TAKEN.
1689 B NOT_TAKEN ; Else jump over TAKEN and continue.
1690 TAKEN:
1691 B #(offset - 8)
1692 NOT_TAKEN:
1693
1694 */
1695
1696 insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond, 8);
1697 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1698 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
bb903df0 1699 }
0badd99f 1700}
bb903df0 1701
/* Implementation of aarch64_insn_visitor method "cb".  */

static void
aarch64_ftrace_insn_reloc_cb (const int32_t offset, const int is_cbnz,
			      const unsigned rn, int is64,
			      struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 21))
    {
      insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
				       aarch64_register (rn, is64), new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a compare and branch
	 instruction but not for an unconditional branch.  We can use
	 the following instructions instead:

	 CBZ xn, TAKEN   ; xn == 0, then jump to TAKEN.
	 B NOT_TAKEN     ; Else jump over TAKEN and continue.
	 TAKEN:
	 B #(offset - 8)
	 NOT_TAKEN:

      */
      insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
				       aarch64_register (rn, is64), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
    }
}

/* Implementation of aarch64_insn_visitor method "tb".  */

static void
aarch64_ftrace_insn_reloc_tb (const int32_t offset, int is_tbnz,
			      const unsigned rt, unsigned bit,
			      struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 16))
    {
      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
				       aarch64_register (rt, 1), new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a test bit and branch
	 instruction but not for an unconditional branch.  We can use
	 the following instructions instead:

	 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
	 B NOT_TAKEN         ; Else jump over TAKEN and continue.
	 TAKEN:
	 B #(offset - 8)
	 NOT_TAKEN:

      */
      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
				       aarch64_register (rt, 1), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
				      new_offset - 8);
    }
}

/* Implementation of aarch64_insn_visitor method "adr".  */

static void
aarch64_ftrace_insn_reloc_adr (const int32_t offset, const unsigned rd,
			       const int is_adrp,
			       struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  /* We know exactly the address the ADR{P,} instruction will compute.
     We can just write it to the destination register.  */
  CORE_ADDR address = data->insn_addr + offset;

  if (is_adrp)
    {
      /* Clear the lower 12 bits of the offset to get the 4K page.  */
      insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
					     aarch64_register (rd, 1),
					     address & ~0xfff);
    }
  else
    insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
					   aarch64_register (rd, 1), address);
}

0badd99f 1802/* Implementation of aarch64_insn_visitor method "ldr_literal". */
bb903df0 1803
0badd99f
YQ
1804static void
1805aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset, const int is_sw,
1806 const unsigned rt, const int is64,
1807 struct aarch64_insn_data *data)
1808{
1809 struct aarch64_insn_relocation_data *insn_reloc
1810 = (struct aarch64_insn_relocation_data *) data;
1811 CORE_ADDR address = data->insn_addr + offset;
1812
1813 insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
1814 aarch64_register (rt, 1), address);
1815
1816 /* We know exactly what address to load from, and what register we
1817 can use:
1818
1819 MOV xd, #(oldloc + offset)
1820 MOVK xd, #((oldloc + offset) >> 16), lsl #16
1821 ...
1822
1823 LDR xd, [xd] ; or LDRSW xd, [xd]
1824
1825 */
1826
1827 if (is_sw)
1828 insn_reloc->insn_ptr += emit_ldrsw (insn_reloc->insn_ptr,
1829 aarch64_register (rt, 1),
1830 aarch64_register (rt, 1),
1831 offset_memory_operand (0));
bb903df0 1832 else
1833 insn_reloc->insn_ptr += emit_ldr (insn_reloc->insn_ptr,
1834 aarch64_register (rt, is64),
1835 aarch64_register (rt, 1),
1836 offset_memory_operand (0));
1837}
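/* Note that rt doubles as the base register of the final load, so
   no scratch register is needed; and in the LDRSW case the 32-bit
   value read from the literal's address is sign-extended into the
   full 64-bit rt.  */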
1838
1839/* Implementation of aarch64_insn_visitor method "others". */
1840
1841static void
1842aarch64_ftrace_insn_reloc_others (const uint32_t insn,
1843 struct aarch64_insn_data *data)
1844{
1845 struct aarch64_insn_relocation_data *insn_reloc
1846 = (struct aarch64_insn_relocation_data *) data;
bb903df0 1847
1848 /* The instruction is not PC relative. Just re-emit it at the new
1849 location. */
e1c587c3 1850 insn_reloc->insn_ptr += aarch64_emit_insn (insn_reloc->insn_ptr, insn);
1851}
1852
1853static const struct aarch64_insn_visitor visitor =
1854{
1855 aarch64_ftrace_insn_reloc_b,
1856 aarch64_ftrace_insn_reloc_b_cond,
1857 aarch64_ftrace_insn_reloc_cb,
1858 aarch64_ftrace_insn_reloc_tb,
1859 aarch64_ftrace_insn_reloc_adr,
1860 aarch64_ftrace_insn_reloc_ldr_literal,
1861 aarch64_ftrace_insn_reloc_others,
1862};
1863
1864/* Implementation of linux_target_ops method
1865 "install_fast_tracepoint_jump_pad". */
1866
1867static int
1868aarch64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
1869 CORE_ADDR tpaddr,
1870 CORE_ADDR collector,
1871 CORE_ADDR lockaddr,
1872 ULONGEST orig_size,
1873 CORE_ADDR *jump_entry,
1874 CORE_ADDR *trampoline,
1875 ULONGEST *trampoline_size,
1876 unsigned char *jjump_pad_insn,
1877 ULONGEST *jjump_pad_insn_size,
1878 CORE_ADDR *adjusted_insn_addr,
1879 CORE_ADDR *adjusted_insn_addr_end,
1880 char *err)
1881{
1882 uint32_t buf[256];
1883 uint32_t *p = buf;
2ac09a5b 1884 int64_t offset;
bb903df0 1885 int i;
70b439f0 1886 uint32_t insn;
bb903df0 1887 CORE_ADDR buildaddr = *jump_entry;
0badd99f 1888 struct aarch64_insn_relocation_data insn_data;
1889
1890 /* We need to save the current state on the stack both to restore it
1891 later and to collect register values when the tracepoint is hit.
1892
1893 The saved registers are pushed in a layout that needs to be in sync
1894 with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c). Later on
1895 the supply_fast_tracepoint_registers function will fill in the
1896 register cache from a pointer to saved registers on the stack we build
1897 here.
1898
1899 For simplicity, we set the size of each cell on the stack to 16 bytes.
1900 This way one cell can hold any register type, from system registers
1901	 to the 128-bit SIMD&FP registers. Furthermore, the stack pointer
1902	 has to be 16-byte aligned anyway.
1903
1904	 Note that the CPSR register does not exist on AArch64. Instead,
1905	 system bits describing the process state, such as the condition
1906	 flags, are accessed through the MRS/MSR instructions. We save
1907	 them as if they were part of a CPSR register because that is how
1908	 GDB interprets these system bits. At the moment, only the
1909	 condition flags (NZCV) are saved.
1910
1911 Stack layout, each cell is 16 bytes (descending):
1912
1913 High *-------- SIMD&FP registers from 31 down to 0. --------*
1914 | q31 |
1915 . .
1916 . . 32 cells
1917 . .
1918 | q0 |
1919 *---- General purpose registers from 30 down to 0. ----*
1920 | x30 |
1921 . .
1922 . . 31 cells
1923 . .
1924 | x0 |
1925 *------------- Special purpose registers. -------------*
1926 | SP |
1927 | PC |
1928 | CPSR (NZCV) | 5 cells
1929 | FPSR |
1930 | FPCR | <- SP + 16
1931 *------------- collecting_t object --------------------*
1932 | TPIDR_EL0 | struct tracepoint * |
1933 Low *------------------------------------------------------*
1934
1935 After this stack is set up, we issue a call to the collector, passing
1936 it the saved registers at (SP + 16). */
1937
1938 /* Push SIMD&FP registers on the stack:
1939
1940 SUB sp, sp, #(32 * 16)
1941
1942 STP q30, q31, [sp, #(30 * 16)]
1943 ...
1944 STP q0, q1, [sp]
1945
1946 */
1947 p += emit_sub (p, sp, sp, immediate_operand (32 * 16));
1948 for (i = 30; i >= 0; i -= 2)
1949 p += emit_stp_q_offset (p, i, i + 1, sp, i * 16);
1950
30baf67b 1951 /* Push general purpose registers on the stack. Note that we do not need
1952 to push x31 as it represents the xzr register and not the stack
1953 pointer in a STR instruction.
1954
1955 SUB sp, sp, #(31 * 16)
1956
1957 STR x30, [sp, #(30 * 16)]
1958 ...
1959 STR x0, [sp]
1960
1961 */
1962 p += emit_sub (p, sp, sp, immediate_operand (31 * 16));
1963 for (i = 30; i >= 0; i -= 1)
1964 p += emit_str (p, aarch64_register (i, 1), sp,
1965 offset_memory_operand (i * 16));
1966
1967 /* Make space for 5 more cells.
1968
1969 SUB sp, sp, #(5 * 16)
1970
1971 */
1972 p += emit_sub (p, sp, sp, immediate_operand (5 * 16));
1973
1974
1975 /* Save SP:
1976
1977 ADD x4, sp, #((32 + 31 + 5) * 16)
1978 STR x4, [sp, #(4 * 16)]
1979
1980 */
1981 p += emit_add (p, x4, sp, immediate_operand ((32 + 31 + 5) * 16));
1982 p += emit_str (p, x4, sp, offset_memory_operand (4 * 16));
1983
1984 /* Save PC (tracepoint address):
1985
1986 MOV x3, #(tpaddr)
1987 ...
1988
1989 STR x3, [sp, #(3 * 16)]
1990
1991 */
1992
1993 p += emit_mov_addr (p, x3, tpaddr);
1994 p += emit_str (p, x3, sp, offset_memory_operand (3 * 16));
1995
1996 /* Save CPSR (NZCV), FPSR and FPCR:
1997
1998 MRS x2, nzcv
1999 MRS x1, fpsr
2000 MRS x0, fpcr
2001
2002 STR x2, [sp, #(2 * 16)]
2003 STR x1, [sp, #(1 * 16)]
2004 STR x0, [sp, #(0 * 16)]
2005
2006 */
2007 p += emit_mrs (p, x2, NZCV);
2008 p += emit_mrs (p, x1, FPSR);
2009 p += emit_mrs (p, x0, FPCR);
2010 p += emit_str (p, x2, sp, offset_memory_operand (2 * 16));
2011 p += emit_str (p, x1, sp, offset_memory_operand (1 * 16));
2012 p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
2013
2014	 /* Push the collecting_t object. It consists of the address of the
2015 tracepoint and an ID for the current thread. We get the latter by
2016 reading the tpidr_el0 system register. It corresponds to the
2017 NT_ARM_TLS register accessible with ptrace.
2018
2019 MOV x0, #(tpoint)
2020 ...
2021
2022 MRS x1, tpidr_el0
2023
2024 STP x0, x1, [sp, #-16]!
2025
2026 */
2027
2028 p += emit_mov_addr (p, x0, tpoint);
2029 p += emit_mrs (p, x1, TPIDR_EL0);
2030 p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-16));
2031
2032 /* Spin-lock:
2033
2034 The shared memory for the lock is at lockaddr. It will hold zero
2035 if no-one is holding the lock, otherwise it contains the address of
2036 the collecting_t object on the stack of the thread which acquired it.
2037
2038 At this stage, the stack pointer points to this thread's collecting_t
2039 object.
2040
2041 We use the following registers:
2042 - x0: Address of the lock.
2043 - x1: Pointer to collecting_t object.
2044 - x2: Scratch register.
2045
2046 MOV x0, #(lockaddr)
2047 ...
2048 MOV x1, sp
2049
2050 ; Trigger an event local to this core. So the following WFE
2051 ; instruction is ignored.
2052 SEVL
2053 again:
2054 ; Wait for an event. The event is triggered by either the SEVL
2055 ; or STLR instructions (store release).
2056 WFE
2057
2058 ; Atomically read at lockaddr. This marks the memory location as
2059 ; exclusive. This instruction also has memory constraints which
2060 ; make sure all previous data reads and writes are done before
2061 ; executing it.
2062 LDAXR x2, [x0]
2063
2064 ; Try again if another thread holds the lock.
2065 CBNZ x2, again
2066
2067 ; We can lock it! Write the address of the collecting_t object.
2068 ; This instruction will fail if the memory location is not marked
2069 ; as exclusive anymore. If it succeeds, it will remove the
2070 ; exclusive mark on the memory location. This way, if another
2071 ; thread executes this instruction before us, we will fail and try
2072 ; all over again.
2073 STXR w2, x1, [x0]
2074 CBNZ w2, again
2075
2076 */
2077
2078 p += emit_mov_addr (p, x0, lockaddr);
2079 p += emit_mov (p, x1, register_operand (sp));
2080
2081 p += emit_sevl (p);
2082 p += emit_wfe (p);
2083 p += emit_ldaxr (p, x2, x0);
2084	 p += emit_cb (p, 1, x2, -2 * 4);
2085	 p += emit_stxr (p, w2, x1, x0);
2086	 p += emit_cb (p, 1, w2, -4 * 4);
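/* In rough C11 terms the loop above behaves like this sketch
   (illustrative only; "lock" and "coll" stand for lockaddr and the
   on-stack collecting_t object):

     uintptr_t expected;
     do
       expected = 0;
     while (!atomic_compare_exchange_weak_explicit
              (lock, &expected, (uintptr_t) coll,
               memory_order_acquire, memory_order_relaxed));  */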
2087
2088 /* Call collector (struct tracepoint *, unsigned char *):
2089
2090 MOV x0, #(tpoint)
2091 ...
2092
2093 ; Saved registers start after the collecting_t object.
2094 ADD x1, sp, #16
2095
2096 ; We use an intra-procedure-call scratch register.
2097 MOV ip0, #(collector)
2098 ...
2099
2100 ; And call back to C!
2101 BLR ip0
2102
2103 */
2104
2105 p += emit_mov_addr (p, x0, tpoint);
2106 p += emit_add (p, x1, sp, immediate_operand (16));
2107
2108 p += emit_mov_addr (p, ip0, collector);
2109 p += emit_blr (p, ip0);
2110
2111 /* Release the lock.
2112
2113 MOV x0, #(lockaddr)
2114 ...
2115
2116 ; This instruction is a normal store with memory ordering
2117 ; constraints. Thanks to this we do not have to put a data
2118	 ; barrier instruction to make sure all data reads and writes are done
30baf67b 2119 ; before this instruction is executed. Furthermore, this instruction
2120 ; will trigger an event, letting other threads know they can grab
2121 ; the lock.
2122 STLR xzr, [x0]
2123
2124 */
2125 p += emit_mov_addr (p, x0, lockaddr);
2126 p += emit_stlr (p, xzr, x0);
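/* In C11 terms this is roughly
   atomic_store_explicit (lock, 0, memory_order_release); the
   release ordering is what makes the collected data visible to the
   next thread that takes the lock.  */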
2127
2128 /* Free collecting_t object:
2129
2130 ADD sp, sp, #16
2131
2132 */
2133 p += emit_add (p, sp, sp, immediate_operand (16));
2134
2135 /* Restore CPSR (NZCV), FPSR and FPCR. And free all special purpose
2136 registers from the stack.
2137
2138 LDR x2, [sp, #(2 * 16)]
2139 LDR x1, [sp, #(1 * 16)]
2140 LDR x0, [sp, #(0 * 16)]
2141
2142 MSR NZCV, x2
2143 MSR FPSR, x1
2144 MSR FPCR, x0
2145
2146	 ADD sp, sp, #(5 * 16)
2147
2148 */
2149 p += emit_ldr (p, x2, sp, offset_memory_operand (2 * 16));
2150 p += emit_ldr (p, x1, sp, offset_memory_operand (1 * 16));
2151 p += emit_ldr (p, x0, sp, offset_memory_operand (0 * 16));
2152 p += emit_msr (p, NZCV, x2);
2153 p += emit_msr (p, FPSR, x1);
2154 p += emit_msr (p, FPCR, x0);
2155
2156 p += emit_add (p, sp, sp, immediate_operand (5 * 16));
2157
2158 /* Pop general purpose registers:
2159
2160 LDR x0, [sp]
2161 ...
2162 LDR x30, [sp, #(30 * 16)]
2163
2164 ADD sp, sp, #(31 * 16)
2165
2166 */
2167 for (i = 0; i <= 30; i += 1)
2168 p += emit_ldr (p, aarch64_register (i, 1), sp,
2169 offset_memory_operand (i * 16));
2170 p += emit_add (p, sp, sp, immediate_operand (31 * 16));
2171
2172 /* Pop SIMD&FP registers:
2173
2174 LDP q0, q1, [sp]
2175 ...
2176 LDP q30, q31, [sp, #(30 * 16)]
2177
2178 ADD sp, sp, #(32 * 16)
2179
2180 */
2181 for (i = 0; i <= 30; i += 2)
2182 p += emit_ldp_q_offset (p, i, i + 1, sp, i * 16);
2183 p += emit_add (p, sp, sp, immediate_operand (32 * 16));
2184
2185 /* Write the code into the inferior memory. */
2186 append_insns (&buildaddr, p - buf, buf);
2187
2188 /* Now emit the relocated instruction. */
2189 *adjusted_insn_addr = buildaddr;
70b439f0 2190 target_read_uint32 (tpaddr, &insn);
2191
2192 insn_data.base.insn_addr = tpaddr;
2193 insn_data.new_addr = buildaddr;
2194 insn_data.insn_ptr = buf;
2195
2196 aarch64_relocate_instruction (insn, &visitor,
2197 (struct aarch64_insn_data *) &insn_data);
2198
bb903df0 2199 /* We may not have been able to relocate the instruction. */
0badd99f 2200 if (insn_data.insn_ptr == buf)
2201 {
2202 sprintf (err,
2203 "E.Could not relocate instruction from %s to %s.",
2204 core_addr_to_string_nz (tpaddr),
2205 core_addr_to_string_nz (buildaddr));
2206 return 1;
2207 }
dfaffe9d 2208 else
0badd99f 2209 append_insns (&buildaddr, insn_data.insn_ptr - buf, buf);
dfaffe9d 2210 *adjusted_insn_addr_end = buildaddr;
2211
2212 /* Go back to the start of the buffer. */
2213 p = buf;
2214
2215 /* Emit a branch back from the jump pad. */
2216 offset = (tpaddr + orig_size - buildaddr);
2217 if (!can_encode_int32 (offset, 28))
2218 {
2219 sprintf (err,
2220 "E.Jump back from jump pad too far from tracepoint "
2ac09a5b 2221 "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
2222 offset);
2223 return 1;
2224 }
2225
2226 p += emit_b (p, 0, offset);
2227 append_insns (&buildaddr, p - buf, buf);
2228
2229 /* Give the caller a branch instruction into the jump pad. */
2230 offset = (*jump_entry - tpaddr);
2231 if (!can_encode_int32 (offset, 28))
2232 {
2233 sprintf (err,
2234 "E.Jump pad too far from tracepoint "
2ac09a5b 2235 "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
2236 offset);
2237 return 1;
2238 }
2239
2240 emit_b ((uint32_t *) jjump_pad_insn, 0, offset);
2241 *jjump_pad_insn_size = 4;
2242
2243 /* Return the end address of our pad. */
2244 *jump_entry = buildaddr;
2245
2246 return 0;
2247}
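/* To summarize, the generated jump pad is laid out as: save the
   GP and SIMD&FP state, build the collecting_t object, take the
   spin lock, call the collector, release the lock, restore the
   state, execute the relocated original instruction, then branch
   back to the instruction following the tracepoint.  */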
2248
2249/* Helper function writing LEN instructions from START into
2250 current_insn_ptr. */
2251
2252static void
2253emit_ops_insns (const uint32_t *start, int len)
2254{
2255 CORE_ADDR buildaddr = current_insn_ptr;
2256
2257 if (debug_threads)
2258	 debug_printf ("Adding %d instructions at %s\n",
2259 len, paddress (buildaddr));
2260
2261 append_insns (&buildaddr, len, start);
2262 current_insn_ptr = buildaddr;
2263}
2264
2265/* Pop a register from the stack. */
2266
2267static int
2268emit_pop (uint32_t *buf, struct aarch64_register rt)
2269{
2270 return emit_ldr (buf, rt, sp, postindex_memory_operand (1 * 16));
2271}
2272
2273/* Push a register on the stack. */
2274
2275static int
2276emit_push (uint32_t *buf, struct aarch64_register rt)
2277{
2278 return emit_str (buf, rt, sp, preindex_memory_operand (-1 * 16));
2279}
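/* Both helpers use a full 16-byte slot for a single 8-byte value:
   AArch64 requires the stack pointer to stay 16-byte aligned
   whenever it is used as the base of a memory access, so smaller
   slots are not an option here.  */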
2280
2281/* Implementation of emit_ops method "emit_prologue". */
2282
2283static void
2284aarch64_emit_prologue (void)
2285{
2286 uint32_t buf[16];
2287 uint32_t *p = buf;
2288
2289	 /* This function emits a prologue for the following function prototype:
2290
2291 enum eval_result_type f (unsigned char *regs,
2292 ULONGEST *value);
2293
2294	 The first argument is a buffer of raw registers. The second
2295	 argument points to where the result of evaluating the expression
2296	 is stored; it will be set to whatever is on top of the stack when
2297	 the expression finishes.
2298
2299 The stack set up by the prologue is as such:
2300
2301 High *------------------------------------------------------*
2302 | LR |
2303 | FP | <- FP
2304 | x1 (ULONGEST *value) |
2305 | x0 (unsigned char *regs) |
2306 Low *------------------------------------------------------*
2307
2308 As we are implementing a stack machine, each opcode can expand the
2309 stack so we never know how far we are from the data saved by this
2310	 prologue. In order to be able to refer to value and regs later, we save
2311 the current stack pointer in the frame pointer. This way, it is not
2312 clobbered when calling C functions.
2313
30baf67b 2314 Finally, throughout every operation, we are using register x0 as the
2315 top of the stack, and x1 as a scratch register. */
2316
2317 p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-2 * 16));
2318 p += emit_str (p, lr, sp, offset_memory_operand (3 * 8));
2319 p += emit_str (p, fp, sp, offset_memory_operand (2 * 8));
2320
2321 p += emit_add (p, fp, sp, immediate_operand (2 * 8));
2322
2323
2324 emit_ops_insns (buf, p - buf);
2325}
2326
2327/* Implementation of emit_ops method "emit_epilogue". */
2328
2329static void
2330aarch64_emit_epilogue (void)
2331{
2332 uint32_t buf[16];
2333 uint32_t *p = buf;
2334
2335 /* Store the result of the expression (x0) in *value. */
2336 p += emit_sub (p, x1, fp, immediate_operand (1 * 8));
2337 p += emit_ldr (p, x1, x1, offset_memory_operand (0));
2338 p += emit_str (p, x0, x1, offset_memory_operand (0));
2339
2340 /* Restore the previous state. */
2341 p += emit_add (p, sp, fp, immediate_operand (2 * 8));
2342 p += emit_ldp (p, fp, lr, fp, offset_memory_operand (0));
2343
2344 /* Return expr_eval_no_error. */
2345 p += emit_mov (p, x0, immediate_operand (expr_eval_no_error));
2346 p += emit_ret (p, lr);
2347
2348 emit_ops_insns (buf, p - buf);
2349}
2350
2351/* Implementation of emit_ops method "emit_add". */
2352
2353static void
2354aarch64_emit_add (void)
2355{
2356 uint32_t buf[16];
2357 uint32_t *p = buf;
2358
2359 p += emit_pop (p, x1);
45e3745e 2360 p += emit_add (p, x0, x1, register_operand (x0));
2361
2362 emit_ops_insns (buf, p - buf);
2363}
2364
2365/* Implementation of emit_ops method "emit_sub". */
2366
2367static void
2368aarch64_emit_sub (void)
2369{
2370 uint32_t buf[16];
2371 uint32_t *p = buf;
2372
2373 p += emit_pop (p, x1);
45e3745e 2374 p += emit_sub (p, x0, x1, register_operand (x0));
2375
2376 emit_ops_insns (buf, p - buf);
2377}
2378
2379/* Implementation of emit_ops method "emit_mul". */
2380
2381static void
2382aarch64_emit_mul (void)
2383{
2384 uint32_t buf[16];
2385 uint32_t *p = buf;
2386
2387 p += emit_pop (p, x1);
2388 p += emit_mul (p, x0, x1, x0);
2389
2390 emit_ops_insns (buf, p - buf);
2391}
2392
2393/* Implementation of emit_ops method "emit_lsh". */
2394
2395static void
2396aarch64_emit_lsh (void)
2397{
2398 uint32_t buf[16];
2399 uint32_t *p = buf;
2400
2401 p += emit_pop (p, x1);
2402 p += emit_lslv (p, x0, x1, x0);
2403
2404 emit_ops_insns (buf, p - buf);
2405}
2406
2407/* Implementation of emit_ops method "emit_rsh_signed". */
2408
2409static void
2410aarch64_emit_rsh_signed (void)
2411{
2412 uint32_t buf[16];
2413 uint32_t *p = buf;
2414
2415 p += emit_pop (p, x1);
2416 p += emit_asrv (p, x0, x1, x0);
2417
2418 emit_ops_insns (buf, p - buf);
2419}
2420
2421/* Implementation of emit_ops method "emit_rsh_unsigned". */
2422
2423static void
2424aarch64_emit_rsh_unsigned (void)
2425{
2426 uint32_t buf[16];
2427 uint32_t *p = buf;
2428
2429 p += emit_pop (p, x1);
2430 p += emit_lsrv (p, x0, x1, x0);
2431
2432 emit_ops_insns (buf, p - buf);
2433}
2434
2435/* Implementation of emit_ops method "emit_ext". */
2436
2437static void
2438aarch64_emit_ext (int arg)
2439{
2440 uint32_t buf[16];
2441 uint32_t *p = buf;
2442
2443 p += emit_sbfx (p, x0, x0, 0, arg);
2444
2445 emit_ops_insns (buf, p - buf);
2446}
2447
2448/* Implementation of emit_ops method "emit_log_not". */
2449
2450static void
2451aarch64_emit_log_not (void)
2452{
2453 uint32_t buf[16];
2454 uint32_t *p = buf;
2455
2456 /* If the top of the stack is 0, replace it with 1. Else replace it with
2457 0. */
2458
2459 p += emit_cmp (p, x0, immediate_operand (0));
2460 p += emit_cset (p, x0, EQ);
2461
2462 emit_ops_insns (buf, p - buf);
2463}
2464
2465/* Implementation of emit_ops method "emit_bit_and". */
2466
2467static void
2468aarch64_emit_bit_and (void)
2469{
2470 uint32_t buf[16];
2471 uint32_t *p = buf;
2472
2473 p += emit_pop (p, x1);
2474 p += emit_and (p, x0, x0, x1);
2475
2476 emit_ops_insns (buf, p - buf);
2477}
2478
2479/* Implementation of emit_ops method "emit_bit_or". */
2480
2481static void
2482aarch64_emit_bit_or (void)
2483{
2484 uint32_t buf[16];
2485 uint32_t *p = buf;
2486
2487 p += emit_pop (p, x1);
2488 p += emit_orr (p, x0, x0, x1);
2489
2490 emit_ops_insns (buf, p - buf);
2491}
2492
2493/* Implementation of emit_ops method "emit_bit_xor". */
2494
2495static void
2496aarch64_emit_bit_xor (void)
2497{
2498 uint32_t buf[16];
2499 uint32_t *p = buf;
2500
2501 p += emit_pop (p, x1);
2502 p += emit_eor (p, x0, x0, x1);
2503
2504 emit_ops_insns (buf, p - buf);
2505}
2506
2507/* Implementation of emit_ops method "emit_bit_not". */
2508
2509static void
2510aarch64_emit_bit_not (void)
2511{
2512 uint32_t buf[16];
2513 uint32_t *p = buf;
2514
2515 p += emit_mvn (p, x0, x0);
2516
2517 emit_ops_insns (buf, p - buf);
2518}
2519
2520/* Implementation of emit_ops method "emit_equal". */
2521
2522static void
2523aarch64_emit_equal (void)
2524{
2525 uint32_t buf[16];
2526 uint32_t *p = buf;
2527
2528 p += emit_pop (p, x1);
2529 p += emit_cmp (p, x0, register_operand (x1));
2530 p += emit_cset (p, x0, EQ);
2531
2532 emit_ops_insns (buf, p - buf);
2533}
2534
2535/* Implementation of emit_ops method "emit_less_signed". */
2536
2537static void
2538aarch64_emit_less_signed (void)
2539{
2540 uint32_t buf[16];
2541 uint32_t *p = buf;
2542
2543 p += emit_pop (p, x1);
2544 p += emit_cmp (p, x1, register_operand (x0));
2545 p += emit_cset (p, x0, LT);
2546
2547 emit_ops_insns (buf, p - buf);
2548}
2549
2550/* Implementation of emit_ops method "emit_less_unsigned". */
2551
2552static void
2553aarch64_emit_less_unsigned (void)
2554{
2555 uint32_t buf[16];
2556 uint32_t *p = buf;
2557
2558 p += emit_pop (p, x1);
2559 p += emit_cmp (p, x1, register_operand (x0));
2560 p += emit_cset (p, x0, LO);
2561
2562 emit_ops_insns (buf, p - buf);
2563}
2564
2565/* Implementation of emit_ops method "emit_ref". */
2566
2567static void
2568aarch64_emit_ref (int size)
2569{
2570 uint32_t buf[16];
2571 uint32_t *p = buf;
2572
2573 switch (size)
2574 {
2575 case 1:
2576 p += emit_ldrb (p, w0, x0, offset_memory_operand (0));
2577 break;
2578 case 2:
2579 p += emit_ldrh (p, w0, x0, offset_memory_operand (0));
2580 break;
2581 case 4:
2582 p += emit_ldr (p, w0, x0, offset_memory_operand (0));
2583 break;
2584 case 8:
2585 p += emit_ldr (p, x0, x0, offset_memory_operand (0));
2586 break;
2587 default:
2588 /* Unknown size, bail on compilation. */
2589 emit_error = 1;
2590 break;
2591 }
2592
2593 emit_ops_insns (buf, p - buf);
2594}
2595
2596/* Implementation of emit_ops method "emit_if_goto". */
2597
2598static void
2599aarch64_emit_if_goto (int *offset_p, int *size_p)
2600{
2601 uint32_t buf[16];
2602 uint32_t *p = buf;
2603
2604 /* The Z flag is set or cleared here. */
2605 p += emit_cmp (p, x0, immediate_operand (0));
2606 /* This instruction must not change the Z flag. */
2607 p += emit_pop (p, x0);
2608 /* Branch over the next instruction if x0 == 0. */
2609 p += emit_bcond (p, EQ, 8);
2610
2611 /* The NOP instruction will be patched with an unconditional branch. */
2612 if (offset_p)
2613 *offset_p = (p - buf) * 4;
2614 if (size_p)
2615 *size_p = 4;
2616 p += emit_nop (p);
2617
2618 emit_ops_insns (buf, p - buf);
2619}
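/* The caller records *OFFSET_P and *SIZE_P and, once the
   destination of the goto is known, patches the placeholder NOP
   through the "write_goto_address" method below.  */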
2620
2621/* Implementation of emit_ops method "emit_goto". */
2622
2623static void
2624aarch64_emit_goto (int *offset_p, int *size_p)
2625{
2626 uint32_t buf[16];
2627 uint32_t *p = buf;
2628
2629 /* The NOP instruction will be patched with an unconditional branch. */
2630 if (offset_p)
2631 *offset_p = 0;
2632 if (size_p)
2633 *size_p = 4;
2634 p += emit_nop (p);
2635
2636 emit_ops_insns (buf, p - buf);
2637}
2638
2639/* Implementation of emit_ops method "write_goto_address". */
2640
bb1183e2 2641static void
2642aarch64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2643{
2644 uint32_t insn;
2645
2646 emit_b (&insn, 0, to - from);
2647 append_insns (&from, 1, &insn);
2648}
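/* SIZE is the value recorded by aarch64_emit_goto or
   aarch64_emit_if_goto; on this target it is always 4, the length
   of the B instruction written over the placeholder NOP.  */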
2649
2650/* Implementation of emit_ops method "emit_const". */
2651
2652static void
2653aarch64_emit_const (LONGEST num)
2654{
2655 uint32_t buf[16];
2656 uint32_t *p = buf;
2657
2658 p += emit_mov_addr (p, x0, num);
2659
2660 emit_ops_insns (buf, p - buf);
2661}
2662
2663/* Implementation of emit_ops method "emit_call". */
2664
2665static void
2666aarch64_emit_call (CORE_ADDR fn)
2667{
2668 uint32_t buf[16];
2669 uint32_t *p = buf;
2670
2671 p += emit_mov_addr (p, ip0, fn);
2672 p += emit_blr (p, ip0);
2673
2674 emit_ops_insns (buf, p - buf);
2675}
2676
2677/* Implementation of emit_ops method "emit_reg". */
2678
2679static void
2680aarch64_emit_reg (int reg)
2681{
2682 uint32_t buf[16];
2683 uint32_t *p = buf;
2684
2685 /* Set x0 to unsigned char *regs. */
2686 p += emit_sub (p, x0, fp, immediate_operand (2 * 8));
2687 p += emit_ldr (p, x0, x0, offset_memory_operand (0));
2688 p += emit_mov (p, x1, immediate_operand (reg));
2689
2690 emit_ops_insns (buf, p - buf);
2691
2692 aarch64_emit_call (get_raw_reg_func_addr ());
2693}
2694
2695/* Implementation of emit_ops method "emit_pop". */
2696
2697static void
2698aarch64_emit_pop (void)
2699{
2700 uint32_t buf[16];
2701 uint32_t *p = buf;
2702
2703 p += emit_pop (p, x0);
2704
2705 emit_ops_insns (buf, p - buf);
2706}
2707
2708/* Implementation of emit_ops method "emit_stack_flush". */
2709
2710static void
2711aarch64_emit_stack_flush (void)
2712{
2713 uint32_t buf[16];
2714 uint32_t *p = buf;
2715
2716 p += emit_push (p, x0);
2717
2718 emit_ops_insns (buf, p - buf);
2719}
2720
2721/* Implementation of emit_ops method "emit_zero_ext". */
2722
2723static void
2724aarch64_emit_zero_ext (int arg)
2725{
2726 uint32_t buf[16];
2727 uint32_t *p = buf;
2728
2729 p += emit_ubfx (p, x0, x0, 0, arg);
2730
2731 emit_ops_insns (buf, p - buf);
2732}
2733
2734/* Implementation of emit_ops method "emit_swap". */
2735
2736static void
2737aarch64_emit_swap (void)
2738{
2739 uint32_t buf[16];
2740 uint32_t *p = buf;
2741
2742 p += emit_ldr (p, x1, sp, offset_memory_operand (0 * 16));
2743 p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
2744 p += emit_mov (p, x0, register_operand (x1));
2745
2746 emit_ops_insns (buf, p - buf);
2747}
2748
2749/* Implementation of emit_ops method "emit_stack_adjust". */
2750
2751static void
2752aarch64_emit_stack_adjust (int n)
2753{
2754 /* This is not needed with our design. */
2755 uint32_t buf[16];
2756 uint32_t *p = buf;
2757
2758 p += emit_add (p, sp, sp, immediate_operand (n * 16));
2759
2760 emit_ops_insns (buf, p - buf);
2761}
2762
2763/* Implementation of emit_ops method "emit_int_call_1". */
2764
2765static void
2766aarch64_emit_int_call_1 (CORE_ADDR fn, int arg1)
2767{
2768 uint32_t buf[16];
2769 uint32_t *p = buf;
2770
2771 p += emit_mov (p, x0, immediate_operand (arg1));
2772
2773 emit_ops_insns (buf, p - buf);
2774
2775 aarch64_emit_call (fn);
2776}
2777
2778/* Implementation of emit_ops method "emit_void_call_2". */
2779
2780static void
2781aarch64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2782{
2783 uint32_t buf[16];
2784 uint32_t *p = buf;
2785
2786 /* Push x0 on the stack. */
2787 aarch64_emit_stack_flush ();
2788
2789 /* Setup arguments for the function call:
2790
2791 x0: arg1
2792 x1: top of the stack
2793
2794 MOV x1, x0
2795 MOV x0, #arg1 */
2796
2797 p += emit_mov (p, x1, register_operand (x0));
2798 p += emit_mov (p, x0, immediate_operand (arg1));
2799
2800 emit_ops_insns (buf, p - buf);
2801
2802 aarch64_emit_call (fn);
2803
2804 /* Restore x0. */
2805 aarch64_emit_pop ();
2806}
2807
2808/* Implementation of emit_ops method "emit_eq_goto". */
2809
2810static void
2811aarch64_emit_eq_goto (int *offset_p, int *size_p)
2812{
2813 uint32_t buf[16];
2814 uint32_t *p = buf;
2815
2816 p += emit_pop (p, x1);
2817 p += emit_cmp (p, x1, register_operand (x0));
2818 /* Branch over the next instruction if x0 != x1. */
2819 p += emit_bcond (p, NE, 8);
2820 /* The NOP instruction will be patched with an unconditional branch. */
2821 if (offset_p)
2822 *offset_p = (p - buf) * 4;
2823 if (size_p)
2824 *size_p = 4;
2825 p += emit_nop (p);
2826
2827 emit_ops_insns (buf, p - buf);
2828}
2829
2830/* Implementation of emit_ops method "emit_ne_goto". */
2831
2832static void
2833aarch64_emit_ne_goto (int *offset_p, int *size_p)
2834{
2835 uint32_t buf[16];
2836 uint32_t *p = buf;
2837
2838 p += emit_pop (p, x1);
2839 p += emit_cmp (p, x1, register_operand (x0));
2840 /* Branch over the next instruction if x0 == x1. */
2841 p += emit_bcond (p, EQ, 8);
2842 /* The NOP instruction will be patched with an unconditional branch. */
2843 if (offset_p)
2844 *offset_p = (p - buf) * 4;
2845 if (size_p)
2846 *size_p = 4;
2847 p += emit_nop (p);
2848
2849 emit_ops_insns (buf, p - buf);
2850}
2851
2852/* Implementation of emit_ops method "emit_lt_goto". */
2853
2854static void
2855aarch64_emit_lt_goto (int *offset_p, int *size_p)
2856{
2857 uint32_t buf[16];
2858 uint32_t *p = buf;
2859
2860 p += emit_pop (p, x1);
2861 p += emit_cmp (p, x1, register_operand (x0));
2862	 /* Branch over the next instruction if x1 >= x0. */
2863 p += emit_bcond (p, GE, 8);
2864 /* The NOP instruction will be patched with an unconditional branch. */
2865 if (offset_p)
2866 *offset_p = (p - buf) * 4;
2867 if (size_p)
2868 *size_p = 4;
2869 p += emit_nop (p);
2870
2871 emit_ops_insns (buf, p - buf);
2872}
2873
2874/* Implementation of emit_ops method "emit_le_goto". */
2875
2876static void
2877aarch64_emit_le_goto (int *offset_p, int *size_p)
2878{
2879 uint32_t buf[16];
2880 uint32_t *p = buf;
2881
2882 p += emit_pop (p, x1);
2883 p += emit_cmp (p, x1, register_operand (x0));
2884	 /* Branch over the next instruction if x1 > x0. */
2885 p += emit_bcond (p, GT, 8);
2886 /* The NOP instruction will be patched with an unconditional branch. */
2887 if (offset_p)
2888 *offset_p = (p - buf) * 4;
2889 if (size_p)
2890 *size_p = 4;
2891 p += emit_nop (p);
2892
2893 emit_ops_insns (buf, p - buf);
2894}
2895
2896/* Implementation of emit_ops method "emit_gt_goto". */
2897
2898static void
2899aarch64_emit_gt_goto (int *offset_p, int *size_p)
2900{
2901 uint32_t buf[16];
2902 uint32_t *p = buf;
2903
2904 p += emit_pop (p, x1);
2905 p += emit_cmp (p, x1, register_operand (x0));
2906	 /* Branch over the next instruction if x1 <= x0. */
2907 p += emit_bcond (p, LE, 8);
2908 /* The NOP instruction will be patched with an unconditional branch. */
2909 if (offset_p)
2910 *offset_p = (p - buf) * 4;
2911 if (size_p)
2912 *size_p = 4;
2913 p += emit_nop (p);
2914
2915 emit_ops_insns (buf, p - buf);
2916}
2917
2918	/* Implementation of emit_ops method "emit_ge_goto". */
2919
2920static void
2921aarch64_emit_ge_got (int *offset_p, int *size_p)
2922{
2923 uint32_t buf[16];
2924 uint32_t *p = buf;
2925
2926 p += emit_pop (p, x1);
2927 p += emit_cmp (p, x1, register_operand (x0));
2928	 /* Branch over the next instruction if x1 < x0. */
2929 p += emit_bcond (p, LT, 8);
2930 /* The NOP instruction will be patched with an unconditional branch. */
2931 if (offset_p)
2932 *offset_p = (p - buf) * 4;
2933 if (size_p)
2934 *size_p = 4;
2935 p += emit_nop (p);
2936
2937 emit_ops_insns (buf, p - buf);
2938}
2939
2940static struct emit_ops aarch64_emit_ops_impl =
2941{
2942 aarch64_emit_prologue,
2943 aarch64_emit_epilogue,
2944 aarch64_emit_add,
2945 aarch64_emit_sub,
2946 aarch64_emit_mul,
2947 aarch64_emit_lsh,
2948 aarch64_emit_rsh_signed,
2949 aarch64_emit_rsh_unsigned,
2950 aarch64_emit_ext,
2951 aarch64_emit_log_not,
2952 aarch64_emit_bit_and,
2953 aarch64_emit_bit_or,
2954 aarch64_emit_bit_xor,
2955 aarch64_emit_bit_not,
2956 aarch64_emit_equal,
2957 aarch64_emit_less_signed,
2958 aarch64_emit_less_unsigned,
2959 aarch64_emit_ref,
2960 aarch64_emit_if_goto,
2961 aarch64_emit_goto,
2962 aarch64_write_goto_address,
2963 aarch64_emit_const,
2964 aarch64_emit_call,
2965 aarch64_emit_reg,
2966 aarch64_emit_pop,
2967 aarch64_emit_stack_flush,
2968 aarch64_emit_zero_ext,
2969 aarch64_emit_swap,
2970 aarch64_emit_stack_adjust,
2971 aarch64_emit_int_call_1,
2972 aarch64_emit_void_call_2,
2973 aarch64_emit_eq_goto,
2974 aarch64_emit_ne_goto,
2975 aarch64_emit_lt_goto,
2976 aarch64_emit_le_goto,
2977 aarch64_emit_gt_goto,
2978 aarch64_emit_ge_got,
2979};
2980
2981/* Implementation of linux_target_ops method "emit_ops". */
2982
2983static struct emit_ops *
2984aarch64_emit_ops (void)
2985{
2986 return &aarch64_emit_ops_impl;
2987}
2988
2989/* Implementation of linux_target_ops method
2990 "get_min_fast_tracepoint_insn_len". */
2991
2992static int
2993aarch64_get_min_fast_tracepoint_insn_len (void)
2994{
2995 return 4;
2996}
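/* Installing a fast tracepoint overwrites exactly one 4-byte B
   instruction at the tracepoint address (see
   aarch64_install_fast_tracepoint_jump_pad above), hence the
   4-byte minimum.  */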
2997
2998/* Implementation of linux_target_ops method "supports_range_stepping". */
2999
3000static int
3001aarch64_supports_range_stepping (void)
3002{
3003 return 1;
3004}
3005
3006/* Implementation of linux_target_ops method "sw_breakpoint_from_kind". */
3007
3008static const gdb_byte *
3009aarch64_sw_breakpoint_from_kind (int kind, int *size)
3010{
3011 if (is_64bit_tdesc ())
3012 {
3013 *size = aarch64_breakpoint_len;
3014 return aarch64_breakpoint;
3015 }
3016 else
3017 return arm_sw_breakpoint_from_kind (kind, size);
3018}
3019
3020/* Implementation of linux_target_ops method "breakpoint_kind_from_pc". */
3021
3022static int
3023aarch64_breakpoint_kind_from_pc (CORE_ADDR *pcptr)
3024{
3025 if (is_64bit_tdesc ())
3026 return aarch64_breakpoint_len;
3027 else
3028 return arm_breakpoint_kind_from_pc (pcptr);
3029}
3030
3031/* Implementation of the linux_target_ops method
3032 "breakpoint_kind_from_current_state". */
3033
3034static int
3035aarch64_breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
3036{
3037 if (is_64bit_tdesc ())
3038 return aarch64_breakpoint_len;
3039 else
3040 return arm_breakpoint_kind_from_current_state (pcptr);
3041}
3042
3043/* Support for hardware single step. */
3044
3045static int
3046aarch64_supports_hardware_single_step (void)
3047{
3048 return 1;
3049}
3050
3051struct linux_target_ops the_low_target =
3052{
3053 aarch64_arch_setup,
3aee8918 3054 aarch64_regs_info,
3055 NULL, /* cannot_fetch_register */
3056 NULL, /* cannot_store_register */
421530db 3057 NULL, /* fetch_register */
3058 aarch64_get_pc,
3059 aarch64_set_pc,
17b1509a 3060 aarch64_breakpoint_kind_from_pc,
dd373349 3061 aarch64_sw_breakpoint_from_kind,
fa5308bd 3062 NULL, /* get_next_pcs */
421530db 3063 0, /* decr_pc_after_break */
176eb98c 3064 aarch64_breakpoint_at,
802e8e6d 3065 aarch64_supports_z_point_type,
3066 aarch64_insert_point,
3067 aarch64_remove_point,
3068 aarch64_stopped_by_watchpoint,
3069 aarch64_stopped_data_address,
3070 NULL, /* collect_ptrace_register */
3071 NULL, /* supply_ptrace_register */
ade90bde 3072 aarch64_linux_siginfo_fixup,
176eb98c 3073 aarch64_linux_new_process,
04ec7890 3074 aarch64_linux_delete_process,
176eb98c 3075 aarch64_linux_new_thread,
466eecee 3076 aarch64_linux_delete_thread,
3a8a0396 3077 aarch64_linux_new_fork,
176eb98c 3078 aarch64_linux_prepare_to_resume,
421530db 3079 NULL, /* process_qsupported */
7671bf47 3080 aarch64_supports_tracepoints,
3081 aarch64_get_thread_area,
3082 aarch64_install_fast_tracepoint_jump_pad,
afbe19f8 3083 aarch64_emit_ops,
bb903df0 3084 aarch64_get_min_fast_tracepoint_insn_len,
d1d0aea1 3085 aarch64_supports_range_stepping,
17b1509a 3086 aarch64_breakpoint_kind_from_current_state,
7d00775e 3087 aarch64_supports_hardware_single_step,
061fc021 3088 aarch64_get_syscall_trapinfo,
176eb98c 3089};
3090
3091void
3092initialize_low_arch (void)
3093{
3094 initialize_low_arch_aarch32 ();
3095
3aee8918 3096 initialize_regsets_info (&aarch64_regsets_info);
02895270 3097 initialize_regsets_info (&aarch64_sve_regsets_info);
3aee8918 3098}