/* GNU/Linux/AArch64 specific low level interface, for the remote server for
   GDB.

   Copyright (C) 2009-2020 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/aarch64-linux.h"
#include "nat/aarch64-linux-hw-point.h"
#include "arch/aarch64-insn.h"
#include "linux-aarch32-low.h"
#include "elf/common.h"
#include "ax.h"
#include "tracepoint.h"
#include "debug.h"

#include <signal.h>
#include <sys/user.h>
#include "nat/gdb_ptrace.h"
#include <asm/ptrace.h>
#include <inttypes.h>
#include <endian.h>
#include <sys/uio.h>

#include "gdb_proc_service.h"
#include "arch/aarch64.h"
#include "linux-aarch32-tdesc.h"
#include "linux-aarch64-tdesc.h"
#include "nat/aarch64-sve-linux-ptrace.h"
#include "tdesc.h"

#ifdef HAVE_SYS_REG_H
#include <sys/reg.h>
#endif

/* Linux target op definitions for the AArch64 architecture.  */

class aarch64_target : public linux_process_target
{
public:

protected:

  void low_arch_setup () override;
};

/* The singleton target ops object.  */

static aarch64_target the_aarch64_target;

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  /* Hardware breakpoint/watchpoint data.
     The reason for them to be per-process rather than per-thread is
     due to the lack of information in the gdbserver environment;
     gdbserver is not told whether a requested hardware
     breakpoint/watchpoint is thread specific or not, so it has to set
     each hw bp/wp for every thread in the current process.  The
     higher level bp/wp management in gdb will resume a thread if a hw
     bp/wp trap is not expected for it.  Since the hw bp/wp setting is
     the same for each thread, it is reasonable for the data to live
     here.  */
  struct aarch64_debug_reg_state debug_reg_state;
};

/* Return true if the size of register 0 is 8 bytes.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

/* Return true if the regcache contains the SVE register set.  */

static bool
is_sve_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return tdesc_contains_feature (regcache->tdesc, "org.gnu.gdb.aarch64.sve");
}

static void
aarch64_fill_gregset (struct regcache *regcache, void *buf)
{
  struct user_pt_regs *regset = (struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    collect_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  collect_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  collect_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  collect_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}

static void
aarch64_store_gregset (struct regcache *regcache, const void *buf)
{
  const struct user_pt_regs *regset = (const struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    supply_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  supply_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  supply_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  supply_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}

static void
aarch64_fill_fpregset (struct regcache *regcache, void *buf)
{
  struct user_fpsimd_state *regset = (struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    collect_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  collect_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  collect_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}

static void
aarch64_store_fpregset (struct regcache *regcache, const void *buf)
{
  const struct user_fpsimd_state *regset
    = (const struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    supply_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  supply_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  supply_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}

/* Store the pauth registers to regcache.  */

static void
aarch64_store_pauthregset (struct regcache *regcache, const void *buf)
{
  uint64_t *pauth_regset = (uint64_t *) buf;
  int pauth_base = find_regno (regcache->tdesc, "pauth_dmask");

  if (pauth_base == 0)
    return;

  supply_register (regcache, AARCH64_PAUTH_DMASK_REGNUM (pauth_base),
                   &pauth_regset[0]);
  supply_register (regcache, AARCH64_PAUTH_CMASK_REGNUM (pauth_base),
                   &pauth_regset[1]);
}

/* Implementation of linux_target_ops method "get_pc".  */

static CORE_ADDR
aarch64_get_pc (struct regcache *regcache)
{
  if (register_size (regcache->tdesc, 0) == 8)
    return linux_get_pc_64bit (regcache);
  else
    return linux_get_pc_32bit (regcache);
}

/* Implementation of linux_target_ops method "set_pc".  */

static void
aarch64_set_pc (struct regcache *regcache, CORE_ADDR pc)
{
  if (register_size (regcache->tdesc, 0) == 8)
    linux_set_pc_64bit (regcache, pc);
  else
    linux_set_pc_32bit (regcache, pc);
}

#define aarch64_breakpoint_len 4

/* AArch64 BRK software debug mode instruction.
   This instruction needs to match gdb/aarch64-tdep.c
   (aarch64_default_breakpoint).  */
static const gdb_byte aarch64_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};

/* Implementation of linux_target_ops method "breakpoint_at".  */

static int
aarch64_breakpoint_at (CORE_ADDR where)
{
  if (is_64bit_tdesc ())
    {
      gdb_byte insn[aarch64_breakpoint_len];

      the_target->read_memory (where, (unsigned char *) &insn,
                               aarch64_breakpoint_len);
      if (memcmp (insn, aarch64_breakpoint, aarch64_breakpoint_len) == 0)
        return 1;

      return 0;
    }
  else
    return arm_breakpoint_at (where);
}

static void
aarch64_init_debug_reg_state (struct aarch64_debug_reg_state *state)
{
  int i;

  for (i = 0; i < AARCH64_HBP_MAX_NUM; ++i)
    {
      state->dr_addr_bp[i] = 0;
      state->dr_ctrl_bp[i] = 0;
      state->dr_ref_count_bp[i] = 0;
    }

  for (i = 0; i < AARCH64_HWP_MAX_NUM; ++i)
    {
      state->dr_addr_wp[i] = 0;
      state->dr_ctrl_wp[i] = 0;
      state->dr_ref_count_wp[i] = 0;
    }
}

/* Return the pointer to the debug register state structure in the
   current process' arch-specific data area.  */

struct aarch64_debug_reg_state *
aarch64_get_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}

/* Implementation of linux_target_ops method "supports_z_point_type".  */

static int
aarch64_supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_READ_WP:
    case Z_PACKET_ACCESS_WP:
      return 1;
    default:
      return 0;
    }
}

/* Implementation of linux_target_ops method "insert_point".

   It actually only records the info of the to-be-inserted bp/wp;
   the actual insertion will happen when threads are resumed.  */

static int
aarch64_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
                      int len, struct raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "insert_point on entry (addr=0x%08lx, len=%d)\n",
             (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  if (targ_type != hw_execute)
    {
      if (aarch64_linux_region_ok_for_watchpoint (addr, len))
        ret = aarch64_handle_watchpoint (targ_type, addr, len,
                                         1 /* is_insert */, state);
      else
        ret = -1;
    }
  else
    {
      if (len == 3)
        {
          /* A LEN of 3 means the breakpoint is set on a 32-bit thumb
             instruction.  Set it to 2 to correctly encode the length bit
             mask in the hardware breakpoint/watchpoint control register.  */
          len = 2;
        }
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
                                       1 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "insert_point", addr, len,
                                  targ_type);

  return ret;
}

/* Implementation of linux_target_ops method "remove_point".

   It actually only records the info of the to-be-removed bp/wp;
   the actual removal will be done when threads are resumed.  */

static int
aarch64_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
                      int len, struct raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "remove_point on entry (addr=0x%08lx, len=%d)\n",
             (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  /* Set up state pointers.  */
  if (targ_type != hw_execute)
    ret =
      aarch64_handle_watchpoint (targ_type, addr, len, 0 /* is_insert */,
                                 state);
  else
    {
      if (len == 3)
        {
          /* A LEN of 3 means the breakpoint is set on a 32-bit thumb
             instruction.  Set it to 2 to correctly encode the length bit
             mask in the hardware breakpoint/watchpoint control register.  */
          len = 2;
        }
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
                                       0 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "remove_point", addr, len,
                                  targ_type);

  return ret;
}

/* Implementation of linux_target_ops method "stopped_data_address".  */

static CORE_ADDR
aarch64_stopped_data_address (void)
{
  siginfo_t siginfo;
  int pid, i;
  struct aarch64_debug_reg_state *state;

  pid = lwpid_of (current_thread);

  /* Get the siginfo.  */
  if (ptrace (PTRACE_GETSIGINFO, pid, NULL, &siginfo) != 0)
    return (CORE_ADDR) 0;

  /* Need to be a hardware breakpoint/watchpoint trap.  */
  if (siginfo.si_signo != SIGTRAP
      || (siginfo.si_code & 0xffff) != 0x0004 /* TRAP_HWBKPT */)
    return (CORE_ADDR) 0;

  /* Check if the address matches any watched address.  */
  state = aarch64_get_debug_reg_state (pid_of (current_thread));
  for (i = aarch64_num_wp_regs - 1; i >= 0; --i)
    {
      const unsigned int offset
        = aarch64_watchpoint_offset (state->dr_ctrl_wp[i]);
      const unsigned int len = aarch64_watchpoint_length (state->dr_ctrl_wp[i]);
      const CORE_ADDR addr_trap = (CORE_ADDR) siginfo.si_addr;
      const CORE_ADDR addr_watch = state->dr_addr_wp[i] + offset;
      const CORE_ADDR addr_watch_aligned = align_down (state->dr_addr_wp[i], 8);
      const CORE_ADDR addr_orig = state->dr_addr_orig_wp[i];

      if (state->dr_ref_count_wp[i]
          && DR_CONTROL_ENABLED (state->dr_ctrl_wp[i])
          && addr_trap >= addr_watch_aligned
          && addr_trap < addr_watch + len)
        {
          /* ADDR_TRAP reports the first address of the memory range
             accessed by the CPU, regardless of what memory range was
             watched.  Thus, a large CPU access that straddles
             the ADDR_WATCH..ADDR_WATCH+LEN range may result in an
             ADDR_TRAP that is lower than the
             ADDR_WATCH..ADDR_WATCH+LEN range.  E.g.:

             addr: |   4   |   5   |   6   |   7   |   8   |
                   |---- range watched ----|
                   |----------- range accessed ------------|

             In this case, ADDR_TRAP will be 4.

             To match a watchpoint known to GDB core, we must never
             report *ADDR_P outside of any ADDR_WATCH..ADDR_WATCH+LEN
             range.  ADDR_WATCH <= ADDR_TRAP < ADDR_ORIG is a false
             positive on kernels older than 4.10.  See PR
             external/20207.  */
          return addr_orig;
        }
    }

  return (CORE_ADDR) 0;
}

/* Implementation of linux_target_ops method "stopped_by_watchpoint".  */

static int
aarch64_stopped_by_watchpoint (void)
{
  if (aarch64_stopped_data_address () != 0)
    return 1;
  else
    return 0;
}

/* Fetch the thread-local storage pointer for libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
                    lwpid_t lwpid, int idx, void **base)
{
  return aarch64_ps_get_thread_area (ph, lwpid, idx, base,
                                     is_64bit_tdesc ());
}

/* Implementation of linux_target_ops method "siginfo_fixup".  */

static int
aarch64_linux_siginfo_fixup (siginfo_t *native, gdb_byte *inf, int direction)
{
  /* Is the inferior 32-bit?  If so, then fix up the siginfo object.  */
  if (!is_64bit_tdesc ())
    {
      if (direction == 0)
        aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo *) inf,
                                             native);
      else
        aarch64_siginfo_from_compat_siginfo (native,
                                             (struct compat_siginfo *) inf);

      return 1;
    }

  return 0;
}

/* Implementation of linux_target_ops method "new_process".  */

static struct arch_process_info *
aarch64_linux_new_process (void)
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  aarch64_init_debug_reg_state (&info->debug_reg_state);

  return info;
}

/* Implementation of linux_target_ops method "delete_process".  */

static void
aarch64_linux_delete_process (struct arch_process_info *info)
{
  xfree (info);
}

489/* Implementation of linux_target_ops method "linux_new_fork". */
490
3a8a0396
DB
491static void
492aarch64_linux_new_fork (struct process_info *parent,
493 struct process_info *child)
494{
495 /* These are allocated by linux_add_process. */
61a7418c
DB
496 gdb_assert (parent->priv != NULL
497 && parent->priv->arch_private != NULL);
498 gdb_assert (child->priv != NULL
499 && child->priv->arch_private != NULL);
3a8a0396
DB
500
501 /* Linux kernel before 2.6.33 commit
502 72f674d203cd230426437cdcf7dd6f681dad8b0d
503 will inherit hardware debug registers from parent
504 on fork/vfork/clone. Newer Linux kernels create such tasks with
505 zeroed debug registers.
506
507 GDB core assumes the child inherits the watchpoints/hw
508 breakpoints of the parent, and will remove them all from the
509 forked off process. Copy the debug registers mirrors into the
510 new process so that all breakpoints and watchpoints can be
511 removed together. The debug registers mirror will become zeroed
512 in the end before detaching the forked off process, thus making
513 this compatible with older Linux kernels too. */
514
61a7418c 515 *child->priv->arch_private = *parent->priv->arch_private;
3a8a0396
DB
516}
517
/* Matches HWCAP_PACA in kernel header arch/arm64/include/uapi/asm/hwcap.h.  */
#define AARCH64_HWCAP_PACA (1 << 30)

/* Implementation of linux target ops method "low_arch_setup".  */

void
aarch64_target::low_arch_setup ()
{
  unsigned int machine;
  int is_elf64;
  int tid;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (is_elf64)
    {
      uint64_t vq = aarch64_sve_get_vq (tid);
      unsigned long hwcap = linux_get_hwcap (8);
      bool pauth_p = hwcap & AARCH64_HWCAP_PACA;

      current_process ()->tdesc = aarch64_linux_read_description (vq, pauth_p);
    }
  else
    current_process ()->tdesc = aarch32_linux_read_description ();

  aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread));
}

/* Wrapper for aarch64_sve_regs_copy_to_reg_buf.  */

static void
aarch64_sve_regs_copy_to_regcache (struct regcache *regcache, const void *buf)
{
  return aarch64_sve_regs_copy_to_reg_buf (regcache, buf);
}

/* Wrapper for aarch64_sve_regs_copy_from_reg_buf.  */

static void
aarch64_sve_regs_copy_from_regcache (struct regcache *regcache, void *buf)
{
  return aarch64_sve_regs_copy_from_reg_buf (regcache, buf);
}

static struct regset_info aarch64_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_FPREGSET,
    sizeof (struct user_fpsimd_state), FP_REGS,
    aarch64_fill_fpregset, aarch64_store_fpregset
  },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
    AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
    NULL, aarch64_store_pauthregset },
  NULL_REGSET
};

static struct regsets_info aarch64_regsets_info =
  {
    aarch64_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

static struct regs_info regs_info_aarch64 =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs */
    &aarch64_regsets_info,
  };

static struct regset_info aarch64_sve_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_SVE,
    SVE_PT_SIZE (AARCH64_MAX_SVE_VQ, SVE_PT_REGS_SVE), EXTENDED_REGS,
    aarch64_sve_regs_copy_from_regcache, aarch64_sve_regs_copy_to_regcache
  },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
    AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
    NULL, aarch64_store_pauthregset },
  NULL_REGSET
};

static struct regsets_info aarch64_sve_regsets_info =
  {
    aarch64_sve_regsets, /* regsets.  */
    0, /* num_regsets.  */
    NULL, /* disabled_regsets.  */
  };

static struct regs_info regs_info_aarch64_sve =
  {
    NULL, /* regset_bitmap.  */
    NULL, /* usrregs.  */
    &aarch64_sve_regsets_info,
  };

/* Implementation of linux_target_ops method "regs_info".  */

static const struct regs_info *
aarch64_regs_info (void)
{
  if (!is_64bit_tdesc ())
    return &regs_info_aarch32;

  if (is_sve_tdesc ())
    return &regs_info_aarch64_sve;

  return &regs_info_aarch64;
}

/* Implementation of linux_target_ops method "supports_tracepoints".  */

static int
aarch64_supports_tracepoints (void)
{
  if (current_thread == NULL)
    return 1;
  else
    {
      /* We don't support tracepoints on aarch32 now.  */
      return is_64bit_tdesc ();
    }
}

/* Implementation of linux_target_ops method "get_thread_area".  */

static int
aarch64_get_thread_area (int lwpid, CORE_ADDR *addrp)
{
  struct iovec iovec;
  uint64_t reg;

  iovec.iov_base = &reg;
  iovec.iov_len = sizeof (reg);

  if (ptrace (PTRACE_GETREGSET, lwpid, NT_ARM_TLS, &iovec) != 0)
    return -1;

  *addrp = reg;

  return 0;
}

/* Implementation of linux_target_ops method "get_syscall_trapinfo".  */

static void
aarch64_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      collect_register_by_name (regcache, "x8", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "r7", sysno);
}

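/* Illustrative note (added; not part of the original source): the
   registers read above follow the Linux syscall calling conventions.
   On AArch64 the syscall number is passed in X8, while on 32-bit ARM
   (the aarch32 case) it is passed in R7.  For example, a 64-bit
   inferior stopped at an SVC for write(2) would read back 64 in "x8",
   which is __NR_write in the arm64 (asm-generic) syscall table.  */
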
/* List of condition codes that we need.  */

enum aarch64_condition_codes
{
  EQ = 0x0,
  NE = 0x1,
  LO = 0x3,
  GE = 0xa,
  LT = 0xb,
  GT = 0xc,
  LE = 0xd,
};

enum aarch64_operand_type
{
  OPERAND_IMMEDIATE,
  OPERAND_REGISTER,
};

/* Representation of an operand.  At this time, it only supports register
   and immediate types.  */

struct aarch64_operand
{
  /* Type of the operand.  */
  enum aarch64_operand_type type;

  /* Value of the operand according to the type.  */
  union
    {
      uint32_t imm;
      struct aarch64_register reg;
    };
};

/* List of registers that we are currently using; we can add more here as
   we need to use them.  */

/* General purpose scratch registers (64 bit).  */
static const struct aarch64_register x0 = { 0, 1 };
static const struct aarch64_register x1 = { 1, 1 };
static const struct aarch64_register x2 = { 2, 1 };
static const struct aarch64_register x3 = { 3, 1 };
static const struct aarch64_register x4 = { 4, 1 };

/* General purpose scratch registers (32 bit).  */
static const struct aarch64_register w0 = { 0, 0 };
static const struct aarch64_register w2 = { 2, 0 };

/* Intra-procedure scratch registers.  */
static const struct aarch64_register ip0 = { 16, 1 };

/* Special purpose registers.  */
static const struct aarch64_register fp = { 29, 1 };
static const struct aarch64_register lr = { 30, 1 };
static const struct aarch64_register sp = { 31, 1 };
static const struct aarch64_register xzr = { 31, 1 };

/* Dynamically allocate a new register.  If we know the register
   statically, we should make it a global as above instead of using this
   helper function.  */

static struct aarch64_register
aarch64_register (unsigned num, int is64)
{
  return (struct aarch64_register) { num, is64 };
}

/* Helper function to create a register operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, register_operand (x1));  */

static struct aarch64_operand
register_operand (struct aarch64_register reg)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_REGISTER;
  operand.reg = reg;

  return operand;
}

/* Helper function to create an immediate operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, immediate_operand (12));  */

static struct aarch64_operand
immediate_operand (uint32_t imm)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_IMMEDIATE;
  operand.imm = imm;

  return operand;
}

/* Helper function to create an offset memory operand.

   For example:
   p += emit_ldr (p, x0, sp, offset_memory_operand (16));  */

static struct aarch64_memory_operand
offset_memory_operand (int32_t offset)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_OFFSET, offset };
}

/* Helper function to create a pre-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, preindex_memory_operand (16));  */

static struct aarch64_memory_operand
preindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_PREINDEX, index };
}

/* Helper function to create a post-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, postindex_memory_operand (16));  */

static struct aarch64_memory_operand
postindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_POSTINDEX, index };
}

/* System control registers.  These special registers can be written and
   read with the MRS and MSR instructions.

   - NZCV: Condition flags.  GDB refers to this register under the CPSR
     name.
   - FPSR: Floating-point status register.
   - FPCR: Floating-point control registers.
   - TPIDR_EL0: Software thread ID register.  */

enum aarch64_system_control_registers
{
  /*          op0           op1           crn          crm          op2  */
  NZCV      = (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
  FPSR      = (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
  FPCR      = (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
  TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
};

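/* Illustrative note (added): the values above pack the system-register
   name fields (roughly o0:op1:CRn:CRm:op2) into the bit layout consumed
   by emit_mrs/emit_msr below.  E.g. TPIDR_EL0 encodes o0=1 (i.e. op0=3),
   op1=3, CRn=13, CRm=0, op2=2, matching the architectural S3_3_C13_C0_2
   name of the software thread ID register.  */
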
/* Write a BLR instruction into *BUF.

   BLR rn

   RN is the register to branch to.  */

static int
emit_blr (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, BLR | ENCODE (rn.num, 5, 5));
}

/* Write a RET instruction into *BUF.

   RET xn

   RN is the register to branch to.  */

static int
emit_ret (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, RET | ENCODE (rn.num, 5, 5));
}

static int
emit_load_store_pair (uint32_t *buf, enum aarch64_opcodes opcode,
                      struct aarch64_register rt,
                      struct aarch64_register rt2,
                      struct aarch64_register rn,
                      struct aarch64_memory_operand operand)
{
  uint32_t opc;
  uint32_t pre_index;
  uint32_t write_back;

  if (rt.is64)
    opc = ENCODE (2, 2, 30);
  else
    opc = ENCODE (0, 2, 30);

  switch (operand.type)
    {
    case MEMORY_OPERAND_OFFSET:
      {
        pre_index = ENCODE (1, 1, 24);
        write_back = ENCODE (0, 1, 23);
        break;
      }
    case MEMORY_OPERAND_POSTINDEX:
      {
        pre_index = ENCODE (0, 1, 24);
        write_back = ENCODE (1, 1, 23);
        break;
      }
    case MEMORY_OPERAND_PREINDEX:
      {
        pre_index = ENCODE (1, 1, 24);
        write_back = ENCODE (1, 1, 23);
        break;
      }
    default:
      return 0;
    }

  return aarch64_emit_insn (buf, opcode | opc | pre_index | write_back
                            | ENCODE (operand.index >> 3, 7, 15)
                            | ENCODE (rt2.num, 5, 10)
                            | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}

/* Write a STP instruction into *BUF.

   STP rt, rt2, [rn, #offset]
   STP rt, rt2, [rn, #index]!
   STP rt, rt2, [rn], #index

   RT and RT2 are the registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_stp (uint32_t *buf, struct aarch64_register rt,
          struct aarch64_register rt2, struct aarch64_register rn,
          struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, STP, rt, rt2, rn, operand);
}

/* Write a LDP instruction into *BUF.

   LDP rt, rt2, [rn, #offset]
   LDP rt, rt2, [rn, #index]!
   LDP rt, rt2, [rn], #index

   RT and RT2 are the registers to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_ldp (uint32_t *buf, struct aarch64_register rt,
          struct aarch64_register rt2, struct aarch64_register rn,
          struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, LDP, rt, rt2, rn, operand);
}

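/* Illustrative note (added): with the helpers above,
   emit_stp (p, fp, lr, sp, preindex_memory_operand (-16)) emits the
   64-bit form "stp x29, x30, [sp, #-16]!".  The byte offset is scaled
   by 8 before encoding, so ENCODE (-16 >> 3, 7, 15) places the 7-bit
   two's-complement immediate -2 at bits 15..21, while pre_index = 1
   and write_back = 1 select the pre-index (writeback) addressing
   mode.  */
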
/* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.

   LDP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_ldp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
                   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, LDP_SIMD_VFP | opc | pre_index
                            | ENCODE (offset >> 4, 7, 15)
                            | ENCODE (rt2, 5, 10)
                            | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

/* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.

   STP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_stp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
                   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, STP_SIMD_VFP | opc | pre_index
                            | ENCODE (offset >> 4, 7, 15)
                            | ENCODE (rt2, 5, 10)
                            | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

/* Write a LDRH instruction into *BUF.

   LDRH wt, [xn, #offset]
   LDRH wt, [xn, #index]!
   LDRH wt, [xn], #index

   RT is the register to load into.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_ldrh (uint32_t *buf, struct aarch64_register rt,
           struct aarch64_register rn,
           struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 1, LDR, rt, rn, operand);
}

/* Write a LDRB instruction into *BUF.

   LDRB wt, [xn, #offset]
   LDRB wt, [xn, #index]!
   LDRB wt, [xn], #index

   RT is the register to load into.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_ldrb (uint32_t *buf, struct aarch64_register rt,
           struct aarch64_register rn,
           struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 0, LDR, rt, rn, operand);
}

/* Write a STR instruction into *BUF.

   STR rt, [rn, #offset]
   STR rt, [rn, #index]!
   STR rt, [rn], #index

   RT is the register to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_str (uint32_t *buf, struct aarch64_register rt,
          struct aarch64_register rn,
          struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, rt.is64 ? 3 : 2, STR, rt, rn, operand);
}

/* Helper function emitting an exclusive load or store instruction.  */

static int
emit_load_store_exclusive (uint32_t *buf, uint32_t size,
                           enum aarch64_opcodes opcode,
                           struct aarch64_register rs,
                           struct aarch64_register rt,
                           struct aarch64_register rt2,
                           struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, opcode | ENCODE (size, 2, 30)
                            | ENCODE (rs.num, 5, 16) | ENCODE (rt2.num, 5, 10)
                            | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}

/* Write a LDAXR instruction into *BUF.

   LDAXR rt, [xn]

   RT is the destination register.
   RN is the base address register.  */

static int
emit_ldaxr (uint32_t *buf, struct aarch64_register rt,
            struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, LDAXR, xzr, rt,
                                    xzr, rn);
}

/* Write a STXR instruction into *BUF.

   STXR ws, rt, [xn]

   RS is the result register; it indicates if the store succeeded or not.
   RT is the destination register.
   RN is the base address register.  */

static int
emit_stxr (uint32_t *buf, struct aarch64_register rs,
           struct aarch64_register rt, struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STXR, rs, rt,
                                    xzr, rn);
}

/* Write a STLR instruction into *BUF.

   STLR rt, [xn]

   RT is the register to store.
   RN is the base address register.  */

static int
emit_stlr (uint32_t *buf, struct aarch64_register rt,
           struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STLR, xzr, rt,
                                    xzr, rn);
}

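/* Illustrative sketch (added): these exclusive-access helpers are the
   building blocks of a spin lock, such as the jump pad's collecting
   lock.  A minimal take-lock loop built from them would look like:

     again:
       LDAXR x0, [x1]      ; p += emit_ldaxr (p, x0, x1);
       CBNZ  x0, again     ; lock word non-zero: already held
       STXR  w2, x3, [x1]  ; p += emit_stxr (p, w2, x3, x1);
       CBNZ  w2, again     ; exclusive store failed: retry

   where x1 holds the lock address and x3 the ownership token.  */
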
/* Helper function for data processing instructions with register sources.  */

static int
emit_data_processing_reg (uint32_t *buf, uint32_t opcode,
                          struct aarch64_register rd,
                          struct aarch64_register rn,
                          struct aarch64_register rm)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, opcode | size | ENCODE (rm.num, 5, 16)
                            | ENCODE (rn.num, 5, 5) | ENCODE (rd.num, 5, 0));
}

/* Helper function for data processing instructions taking either a register
   or an immediate.  */

static int
emit_data_processing (uint32_t *buf, enum aarch64_opcodes opcode,
                      struct aarch64_register rd,
                      struct aarch64_register rn,
                      struct aarch64_operand operand)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  /* The opcode is different for register and immediate source operands.  */
  uint32_t operand_opcode;

  if (operand.type == OPERAND_IMMEDIATE)
    {
      /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (8, 4, 25);

      return aarch64_emit_insn (buf, opcode | operand_opcode | size
                                | ENCODE (operand.imm, 12, 10)
                                | ENCODE (rn.num, 5, 5)
                                | ENCODE (rd.num, 5, 0));
    }
  else
    {
      /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (5, 4, 25);

      return emit_data_processing_reg (buf, opcode | operand_opcode, rd,
                                       rn, operand.reg);
    }
}

/* Write an ADD instruction into *BUF.

   ADD rd, rn, #imm
   ADD rd, rn, rm

   This function handles both an immediate and register add.

   RD is the destination register.
   RN is the input register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_add (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, ADD, rd, rn, operand);
}

/* Write a SUB instruction into *BUF.

   SUB rd, rn, #imm
   SUB rd, rn, rm

   This function handles both an immediate and register sub.

   RD is the destination register.
   RN is the input register.
   IMM is the immediate to subtract from RN.  */

static int
emit_sub (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUB, rd, rn, operand);
}

/* Write a MOV instruction into *BUF.

   MOV rd, #imm
   MOV rd, rm

   This function handles both a wide immediate move and a register move,
   with the condition that the source register is not xzr.  xzr and the
   stack pointer share the same encoding and this function only supports
   the stack pointer.

   RD is the destination register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_mov (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_operand operand)
{
  if (operand.type == OPERAND_IMMEDIATE)
    {
      uint32_t size = ENCODE (rd.is64, 1, 31);
      /* Do not shift the immediate.  */
      uint32_t shift = ENCODE (0, 2, 21);

      return aarch64_emit_insn (buf, MOV | size | shift
                                | ENCODE (operand.imm, 16, 5)
                                | ENCODE (rd.num, 5, 0));
    }
  else
    return emit_add (buf, rd, operand.reg, immediate_operand (0));
}

/* Write a MOVK instruction into *BUF.

   MOVK rd, #imm, lsl #shift

   RD is the destination register.
   IMM is the immediate.
   SHIFT is the logical shift left to apply to IMM.  */

static int
emit_movk (uint32_t *buf, struct aarch64_register rd, uint32_t imm,
           unsigned shift)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, MOVK | size | ENCODE (shift, 2, 21)
                            | ENCODE (imm, 16, 5) | ENCODE (rd.num, 5, 0));
}

/* Write instructions into *BUF in order to move ADDR into a register.
   ADDR can be a 64-bit value.

   This function will emit a series of MOV and MOVK instructions, such as:

     MOV  xd, #(addr)
     MOVK xd, #(addr >> 16), lsl #16
     MOVK xd, #(addr >> 32), lsl #32
     MOVK xd, #(addr >> 48), lsl #48  */

static int
emit_mov_addr (uint32_t *buf, struct aarch64_register rd, CORE_ADDR addr)
{
  uint32_t *p = buf;

  /* The MOV (wide immediate) instruction clears the top bits of the
     register.  */
  p += emit_mov (p, rd, immediate_operand (addr & 0xffff));

  if ((addr >> 16) != 0)
    p += emit_movk (p, rd, (addr >> 16) & 0xffff, 1);
  else
    return p - buf;

  if ((addr >> 32) != 0)
    p += emit_movk (p, rd, (addr >> 32) & 0xffff, 2);
  else
    return p - buf;

  if ((addr >> 48) != 0)
    p += emit_movk (p, rd, (addr >> 48) & 0xffff, 3);

  return p - buf;
}

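/* Illustrative example (added): for ADDR == 0x0000004812345678,
   emit_mov_addr emits

     MOV  xd, #0x5678
     MOVK xd, #0x1234, lsl #16
     MOVK xd, #0x0048, lsl #32

   and then returns 3, since (addr >> 48) == 0 makes the final MOVK
   unnecessary.  The return value is the number of emitted
   instructions, which callers add to their buffer pointer.  */
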
/* Write a SUBS instruction into *BUF.

   SUBS rd, rn, rm

   This instruction updates the condition flags.

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_subs (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUBS, rd, rn, operand);
}

/* Write a CMP instruction into *BUF.

   CMP rn, rm

   This instruction is an alias of SUBS xzr, rn, rm.

   RN and RM are the registers to compare.  */

static int
emit_cmp (uint32_t *buf, struct aarch64_register rn,
          struct aarch64_operand operand)
{
  return emit_subs (buf, xzr, rn, operand);
}

/* Write an AND instruction into *BUF.

   AND rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_and (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, AND, rd, rn, rm);
}

/* Write an ORR instruction into *BUF.

   ORR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orr (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORR, rd, rn, rm);
}

/* Write an ORN instruction into *BUF.

   ORN rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orn (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORN, rd, rn, rm);
}

/* Write an EOR instruction into *BUF.

   EOR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_eor (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, EOR, rd, rn, rm);
}

/* Write a MVN instruction into *BUF.

   MVN rd, rm

   This is an alias for ORN rd, xzr, rm.

   RD is the destination register.
   RM is the source register.  */

static int
emit_mvn (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rm)
{
  return emit_orn (buf, rd, xzr, rm);
}

/* Write a LSLV instruction into *BUF.

   LSLV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lslv (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSLV, rd, rn, rm);
}

/* Write a LSRV instruction into *BUF.

   LSRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lsrv (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSRV, rd, rn, rm);
}

/* Write an ASRV instruction into *BUF.

   ASRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_asrv (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ASRV, rd, rn, rm);
}

/* Write a MUL instruction into *BUF.

   MUL rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_mul (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, MUL, rd, rn, rm);
}

/* Write a MRS instruction into *BUF.  The register size is 64-bit.

   MRS xt, system_reg

   RT is the destination register.
   SYSTEM_REG is the special purpose register to read.  */

static int
emit_mrs (uint32_t *buf, struct aarch64_register rt,
          enum aarch64_system_control_registers system_reg)
{
  return aarch64_emit_insn (buf, MRS | ENCODE (system_reg, 15, 5)
                            | ENCODE (rt.num, 5, 0));
}

/* Write a MSR instruction into *BUF.  The register size is 64-bit.

   MSR system_reg, xt

   SYSTEM_REG is the special purpose register to write.
   RT is the input register.  */

static int
emit_msr (uint32_t *buf, enum aarch64_system_control_registers system_reg,
          struct aarch64_register rt)
{
  return aarch64_emit_insn (buf, MSR | ENCODE (system_reg, 15, 5)
                            | ENCODE (rt.num, 5, 0));
}

/* Write a SEVL instruction into *BUF.

   This is a hint instruction telling the hardware to trigger an event.  */

static int
emit_sevl (uint32_t *buf)
{
  return aarch64_emit_insn (buf, SEVL);
}

/* Write a WFE instruction into *BUF.

   This is a hint instruction telling the hardware to wait for an event.  */

static int
emit_wfe (uint32_t *buf)
{
  return aarch64_emit_insn (buf, WFE);
}

/* Write a SBFM instruction into *BUF.

   SBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, sign extending the result.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_sbfm (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, SBFM | size | n | ENCODE (immr, 6, 16)
                            | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
                            | ENCODE (rd.num, 5, 0));
}

/* Write a SBFX instruction into *BUF.

   SBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination, sign
   extending the result.  This is an alias for:

     SBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_sbfx (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_sbfm (buf, rd, rn, lsb, lsb + width - 1);
}

/* Write a UBFM instruction into *BUF.

   UBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, extending the result with zeros.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_ubfm (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, UBFM | size | n | ENCODE (immr, 6, 16)
                            | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
                            | ENCODE (rd.num, 5, 0));
}

/* Write a UBFX instruction into *BUF.

   UBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination,
   extending the result with zeros.  This is an alias for:

     UBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_ubfx (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_ubfm (buf, rd, rn, lsb, lsb + width - 1);
}

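/* Illustrative example (added): emit_ubfx (p, w0, w1, 8, 8) expands to
   UBFM w0, w1, #8, #15, i.e. "ubfx w0, w1, #8, #8": it extracts bits
   8..15 of w1 (the second byte) into the low bits of w0 and zeroes the
   rest of the register.  */
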
/* Write a CSINC instruction into *BUF.

   CSINC rd, rn, rm, cond

   This instruction conditionally selects a source: RD is set to RN if
   the condition is true, and to RM incremented by one otherwise.

   RD is the destination register.
   RN and RM are the source registers.
   COND is the encoded condition.  */

static int
emit_csinc (uint32_t *buf, struct aarch64_register rd,
            struct aarch64_register rn, struct aarch64_register rm,
            unsigned cond)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, CSINC | size | ENCODE (rm.num, 5, 16)
                            | ENCODE (cond, 4, 12) | ENCODE (rn.num, 5, 5)
                            | ENCODE (rd.num, 5, 0));
}

/* Write a CSET instruction into *BUF.

   CSET rd, cond

   This instruction conditionally writes 1 or 0 in the destination register.
   1 is written if the condition is true.  This is an alias for:

     CSINC rd, xzr, xzr, !cond

   Note that the condition needs to be inverted.

   RD is the destination register.
   COND is the encoded condition.  */

static int
emit_cset (uint32_t *buf, struct aarch64_register rd, unsigned cond)
{
  /* The least significant bit of the condition needs toggling in order to
     invert it.  */
  return emit_csinc (buf, rd, xzr, xzr, cond ^ 0x1);
}

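/* Illustrative example (added): emit_cset (p, x0, EQ) emits
   CSINC x0, xzr, xzr, NE (EQ is 0x0, and 0x0 ^ 0x1 is NE), so x0
   becomes xzr (0) when NE holds and xzr + 1 (1) when EQ holds,
   which is exactly "cset x0, eq".  */
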
/* Write LEN instructions from BUF into the inferior memory at *TO.

   Note instructions are always little endian on AArch64, unlike data.  */

static void
append_insns (CORE_ADDR *to, size_t len, const uint32_t *buf)
{
  size_t byte_len = len * sizeof (uint32_t);
#if (__BYTE_ORDER == __BIG_ENDIAN)
  uint32_t *le_buf = (uint32_t *) xmalloc (byte_len);
  size_t i;

  for (i = 0; i < len; i++)
    le_buf[i] = htole32 (buf[i]);

  target_write_memory (*to, (const unsigned char *) le_buf, byte_len);

  xfree (le_buf);
#else
  target_write_memory (*to, (const unsigned char *) buf, byte_len);
#endif

  *to += byte_len;
}

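/* Illustrative usage sketch (added): the emit_* helpers fill a local
   uint32_t buffer and return instruction counts, and append_insns then
   copies the result into the inferior while advancing the write
   address, e.g.:

     uint32_t buf[16];
     uint32_t *p = buf;

     p += emit_mov_addr (p, x0, addr);
     p += emit_blr (p, x0);
     append_insns (&buildaddr, p - buf, buf);

   This is the pattern used by the jump pad builder below.  */
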
/* Sub-class of struct aarch64_insn_data, storing information for
   instruction relocation during fast tracepoint insertion.  The visitor
   can relocate an instruction from BASE.INSN_ADDR to NEW_ADDR and save
   the relocated instructions in the buffer pointed to by INSN_PTR.  */

struct aarch64_insn_relocation_data
{
  struct aarch64_insn_data base;

  /* The new address the instruction is relocated to.  */
  CORE_ADDR new_addr;
  /* Pointer to the buffer of relocated instruction(s).  */
  uint32_t *insn_ptr;
};

/* Implementation of aarch64_insn_visitor method "b".  */

static void
aarch64_ftrace_insn_reloc_b (const int is_bl, const int32_t offset,
                             struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 28))
    insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, is_bl, new_offset);
}

/* Implementation of aarch64_insn_visitor method "b_cond".  */

static void
aarch64_ftrace_insn_reloc_b_cond (const unsigned cond, const int32_t offset,
                                  struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 21))
    {
      insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond,
                                          new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a conditional branch
         instruction but not for an unconditional branch.  We can use
         the following instructions instead:

           B.COND TAKEN    ; If cond is true, then jump to TAKEN.
           B NOT_TAKEN     ; Else jump over TAKEN and continue.
         TAKEN:
           B #(offset - 8)
         NOT_TAKEN:

      */

      insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
    }
}

/* Implementation of aarch64_insn_visitor method "cb".  */

static void
aarch64_ftrace_insn_reloc_cb (const int32_t offset, const int is_cbnz,
                              const unsigned rn, int is64,
                              struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 21))
    {
      insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
                                       aarch64_register (rn, is64), new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a compare and branch
         instruction but not for an unconditional branch.  We can use
         the following instructions instead:

           CBZ xn, TAKEN   ; xn == 0, then jump to TAKEN.
           B NOT_TAKEN     ; Else jump over TAKEN and continue.
         TAKEN:
           B #(offset - 8)
         NOT_TAKEN:

      */
      insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
                                       aarch64_register (rn, is64), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
    }
}

/* Implementation of aarch64_insn_visitor method "tb".  */

static void
aarch64_ftrace_insn_reloc_tb (const int32_t offset, int is_tbnz,
                              const unsigned rt, unsigned bit,
                              struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 16))
    {
      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
                                       aarch64_register (rt, 1), new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a test bit and branch
         instruction but not for an unconditional branch.  We can use
         the following instructions instead:

           TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
           B NOT_TAKEN         ; Else jump over TAKEN and continue.
         TAKEN:
           B #(offset - 8)
         NOT_TAKEN:

      */
      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
                                       aarch64_register (rt, 1), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
                                      new_offset - 8);
    }
}

0badd99f 1792/* Implementation of aarch64_insn_visitor method "adr". */
bb903df0 1793
0badd99f
YQ
1794static void
1795aarch64_ftrace_insn_reloc_adr (const int32_t offset, const unsigned rd,
1796 const int is_adrp,
1797 struct aarch64_insn_data *data)
1798{
1799 struct aarch64_insn_relocation_data *insn_reloc
1800 = (struct aarch64_insn_relocation_data *) data;
1801 /* We know exactly the address the ADR{P,} instruction will compute.
1802 We can just write it to the destination register. */
1803 CORE_ADDR address = data->insn_addr + offset;
bb903df0 1804
0badd99f
YQ
1805 if (is_adrp)
1806 {
1807 /* Clear the lower 12 bits of the offset to get the 4K page. */
1808 insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
1809 aarch64_register (rd, 1),
1810 address & ~0xfff);
1811 }
1812 else
1813 insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
1814 aarch64_register (rd, 1), address);
1815}
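
/* For example (illustrative numbers only), an ADRP at 0x400123 with
   OFFSET 0x3000 gives ADDRESS = 0x403123; masking with ~0xfff
   materializes the page address 0x403000 in RD, the same value the
   original ADRP would have produced at its old location.  */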

/* Implementation of aarch64_insn_visitor method "ldr_literal".  */

static void
aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset, const int is_sw,
				       const unsigned rt, const int is64,
				       struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  CORE_ADDR address = data->insn_addr + offset;

  insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
					 aarch64_register (rt, 1), address);

  /* We know exactly what address to load from, and what register we
     can use:

     MOV xd, #(oldloc + offset)
     MOVK xd, #((oldloc + offset) >> 16), lsl #16
     ...

     LDR xd, [xd] ; or LDRSW xd, [xd]

  */

  if (is_sw)
    insn_reloc->insn_ptr += emit_ldrsw (insn_reloc->insn_ptr,
					aarch64_register (rt, 1),
					aarch64_register (rt, 1),
					offset_memory_operand (0));
  else
    insn_reloc->insn_ptr += emit_ldr (insn_reloc->insn_ptr,
				      aarch64_register (rt, is64),
				      aarch64_register (rt, 1),
				      offset_memory_operand (0));
}

/* Implementation of aarch64_insn_visitor method "others".  */

static void
aarch64_ftrace_insn_reloc_others (const uint32_t insn,
				  struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;

  /* The instruction is not PC relative.  Just re-emit it at the new
     location.  */
  insn_reloc->insn_ptr += aarch64_emit_insn (insn_reloc->insn_ptr, insn);
}

static const struct aarch64_insn_visitor visitor =
{
  aarch64_ftrace_insn_reloc_b,
  aarch64_ftrace_insn_reloc_b_cond,
  aarch64_ftrace_insn_reloc_cb,
  aarch64_ftrace_insn_reloc_tb,
  aarch64_ftrace_insn_reloc_adr,
  aarch64_ftrace_insn_reloc_ldr_literal,
  aarch64_ftrace_insn_reloc_others,
};
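
/* aarch64_relocate_instruction (arch/aarch64-insn.c) decodes a single
   instruction and dispatches to the matching callback above; anything
   that is not PC-relative falls through to the "others" hook and is
   copied verbatim.  */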

/* Implementation of linux_target_ops method
   "install_fast_tracepoint_jump_pad".  */

static int
aarch64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
					  CORE_ADDR tpaddr,
					  CORE_ADDR collector,
					  CORE_ADDR lockaddr,
					  ULONGEST orig_size,
					  CORE_ADDR *jump_entry,
					  CORE_ADDR *trampoline,
					  ULONGEST *trampoline_size,
					  unsigned char *jjump_pad_insn,
					  ULONGEST *jjump_pad_insn_size,
					  CORE_ADDR *adjusted_insn_addr,
					  CORE_ADDR *adjusted_insn_addr_end,
					  char *err)
{
  uint32_t buf[256];
  uint32_t *p = buf;
  int64_t offset;
  int i;
  uint32_t insn;
  CORE_ADDR buildaddr = *jump_entry;
  struct aarch64_insn_relocation_data insn_data;

  /* We need to save the current state on the stack both to restore it
     later and to collect register values when the tracepoint is hit.

     The saved registers are pushed in a layout that needs to be in sync
     with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c).  Later on
     the supply_fast_tracepoint_registers function will fill in the
     register cache from a pointer to saved registers on the stack we build
     here.

     For simplicity, we set the size of each cell on the stack to 16 bytes.
     This way one cell can hold any register type, from system registers
     to the 128 bit SIMD&FP registers.  Furthermore, the stack pointer
     has to be 16-byte aligned anyway.

     Note that the CPSR register does not exist on AArch64.  Instead we
     can access system bits describing the process state with the
     MRS/MSR instructions, namely the condition flags.  We save them as
     if they are part of a CPSR register because that's how GDB
     interprets these system bits.  At the moment, only the condition
     flags are saved in CPSR (NZCV).

     Stack layout, each cell is 16 bytes (descending):

     High *-------- SIMD&FP registers from 31 down to 0. --------*
	  | q31                                                  |
	  .                                                      .
	  .                                                      . 32 cells
	  .                                                      .
	  | q0                                                   |
	  *---- General purpose registers from 30 down to 0. ----*
	  | x30                                                  |
	  .                                                      .
	  .                                                      . 31 cells
	  .                                                      .
	  | x0                                                   |
	  *------------- Special purpose registers. -------------*
	  | SP                                                   |
	  | PC                                                   |
	  | CPSR (NZCV)                                          | 5 cells
	  | FPSR                                                 |
	  | FPCR                                                 | <- SP + 16
	  *------------- collecting_t object --------------------*
	  | TPIDR_EL0                  | struct tracepoint *     |
     Low *------------------------------------------------------*

     After this stack is set up, we issue a call to the collector, passing
     it the saved registers at (SP + 16).  */

  /* Push SIMD&FP registers on the stack:

     SUB sp, sp, #(32 * 16)

     STP q30, q31, [sp, #(30 * 16)]
     ...
     STP q0, q1, [sp]

  */
  p += emit_sub (p, sp, sp, immediate_operand (32 * 16));
  for (i = 30; i >= 0; i -= 2)
    p += emit_stp_q_offset (p, i, i + 1, sp, i * 16);

  /* Push general purpose registers on the stack.  Note that we do not need
     to push x31 as it represents the xzr register and not the stack
     pointer in a STR instruction.

     SUB sp, sp, #(31 * 16)

     STR x30, [sp, #(30 * 16)]
     ...
     STR x0, [sp]

  */
  p += emit_sub (p, sp, sp, immediate_operand (31 * 16));
  for (i = 30; i >= 0; i -= 1)
    p += emit_str (p, aarch64_register (i, 1), sp,
		   offset_memory_operand (i * 16));

  /* Make space for 5 more cells.

     SUB sp, sp, #(5 * 16)

  */
  p += emit_sub (p, sp, sp, immediate_operand (5 * 16));
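
  /* The five cells just reserved hold, from high to low offset:
     [sp + 4*16] SP, [sp + 3*16] PC, [sp + 2*16] CPSR (NZCV),
     [sp + 1*16] FPSR and [sp + 0*16] FPCR, matching the stores
     below.  */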

  /* Save SP:

     ADD x4, sp, #((32 + 31 + 5) * 16)
     STR x4, [sp, #(4 * 16)]

  */
  p += emit_add (p, x4, sp, immediate_operand ((32 + 31 + 5) * 16));
  p += emit_str (p, x4, sp, offset_memory_operand (4 * 16));

  /* Save PC (tracepoint address):

     MOV x3, #(tpaddr)
     ...

     STR x3, [sp, #(3 * 16)]

  */

  p += emit_mov_addr (p, x3, tpaddr);
  p += emit_str (p, x3, sp, offset_memory_operand (3 * 16));

  /* Save CPSR (NZCV), FPSR and FPCR:

     MRS x2, nzcv
     MRS x1, fpsr
     MRS x0, fpcr

     STR x2, [sp, #(2 * 16)]
     STR x1, [sp, #(1 * 16)]
     STR x0, [sp, #(0 * 16)]

  */
  p += emit_mrs (p, x2, NZCV);
  p += emit_mrs (p, x1, FPSR);
  p += emit_mrs (p, x0, FPCR);
  p += emit_str (p, x2, sp, offset_memory_operand (2 * 16));
  p += emit_str (p, x1, sp, offset_memory_operand (1 * 16));
  p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));

  /* Push the collecting_t object.  It consists of the address of the
     tracepoint and an ID for the current thread.  We get the latter by
     reading the tpidr_el0 system register.  It corresponds to the
     NT_ARM_TLS register accessible with ptrace.

     MOV x0, #(tpoint)
     ...

     MRS x1, tpidr_el0

     STP x0, x1, [sp, #-16]!

  */

  p += emit_mov_addr (p, x0, tpoint);
  p += emit_mrs (p, x1, TPIDR_EL0);
  p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-16));

  /* Spin-lock:

     The shared memory for the lock is at lockaddr.  It will hold zero
     if no-one is holding the lock, otherwise it contains the address of
     the collecting_t object on the stack of the thread which acquired it.

     At this stage, the stack pointer points to this thread's collecting_t
     object.

     We use the following registers:
     - x0: Address of the lock.
     - x1: Pointer to collecting_t object.
     - x2: Scratch register.

     MOV x0, #(lockaddr)
     ...
     MOV x1, sp

     ; Trigger an event local to this core.  So the following WFE
     ; instruction is ignored.
     SEVL
     again:
     ; Wait for an event.  The event is triggered by either the SEVL
     ; or STLR instructions (store release).
     WFE

     ; Atomically read at lockaddr.  This marks the memory location as
     ; exclusive.  This instruction also has memory constraints which
     ; make sure all previous data reads and writes are done before
     ; executing it.
     LDAXR x2, [x0]

     ; Try again if another thread holds the lock.
     CBNZ x2, again

     ; We can lock it!  Write the address of the collecting_t object.
     ; This instruction will fail if the memory location is not marked
     ; as exclusive anymore.  If it succeeds, it will remove the
     ; exclusive mark on the memory location.  This way, if another
     ; thread executes this instruction before us, we will fail and try
     ; all over again.
     STXR w2, x1, [x0]
     CBNZ w2, again

  */

  p += emit_mov_addr (p, x0, lockaddr);
  p += emit_mov (p, x1, register_operand (sp));

  p += emit_sevl (p);
  p += emit_wfe (p);
  p += emit_ldaxr (p, x2, x0);
  p += emit_cb (p, 1, w2, -2 * 4);
  p += emit_stxr (p, w2, x1, x0);
  p += emit_cb (p, 1, x2, -4 * 4);
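
  /* The negative offsets are byte displacements back to the WFE above:
     -2 * 4 from the first CBNZ and -4 * 4 from the second both land on
     the "again" label, two and four instructions earlier
     respectively.  */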

  /* Call collector (struct tracepoint *, unsigned char *):

     MOV x0, #(tpoint)
     ...

     ; Saved registers start after the collecting_t object.
     ADD x1, sp, #16

     ; We use an intra-procedure-call scratch register.
     MOV ip0, #(collector)
     ...

     ; And call back to C!
     BLR ip0

  */

  p += emit_mov_addr (p, x0, tpoint);
  p += emit_add (p, x1, sp, immediate_operand (16));

  p += emit_mov_addr (p, ip0, collector);
  p += emit_blr (p, ip0);

  /* Release the lock.

     MOV x0, #(lockaddr)
     ...

     ; This instruction is a normal store with memory ordering
     ; constraints.  Thanks to this we do not have to put a data
     ; barrier instruction to make sure all data reads and writes are
     ; done before this instruction is executed.  Furthermore, this
     ; instruction will trigger an event, letting other threads know
     ; they can grab the lock.
     STLR xzr, [x0]

  */
  p += emit_mov_addr (p, x0, lockaddr);
  p += emit_stlr (p, xzr, x0);

  /* Free collecting_t object:

     ADD sp, sp, #16

  */
  p += emit_add (p, sp, sp, immediate_operand (16));

  /* Restore CPSR (NZCV), FPSR and FPCR.  And free all special purpose
     registers from the stack.

     LDR x2, [sp, #(2 * 16)]
     LDR x1, [sp, #(1 * 16)]
     LDR x0, [sp, #(0 * 16)]

     MSR NZCV, x2
     MSR FPSR, x1
     MSR FPCR, x0

     ADD sp, sp, #(5 * 16)

  */
  p += emit_ldr (p, x2, sp, offset_memory_operand (2 * 16));
  p += emit_ldr (p, x1, sp, offset_memory_operand (1 * 16));
  p += emit_ldr (p, x0, sp, offset_memory_operand (0 * 16));
  p += emit_msr (p, NZCV, x2);
  p += emit_msr (p, FPSR, x1);
  p += emit_msr (p, FPCR, x0);

  p += emit_add (p, sp, sp, immediate_operand (5 * 16));

  /* Pop general purpose registers:

     LDR x0, [sp]
     ...
     LDR x30, [sp, #(30 * 16)]

     ADD sp, sp, #(31 * 16)

  */
  for (i = 0; i <= 30; i += 1)
    p += emit_ldr (p, aarch64_register (i, 1), sp,
		   offset_memory_operand (i * 16));
  p += emit_add (p, sp, sp, immediate_operand (31 * 16));

  /* Pop SIMD&FP registers:

     LDP q0, q1, [sp]
     ...
     LDP q30, q31, [sp, #(30 * 16)]

     ADD sp, sp, #(32 * 16)

  */
  for (i = 0; i <= 30; i += 2)
    p += emit_ldp_q_offset (p, i, i + 1, sp, i * 16);
  p += emit_add (p, sp, sp, immediate_operand (32 * 16));

  /* Write the code into the inferior memory.  */
  append_insns (&buildaddr, p - buf, buf);

  /* Now emit the relocated instruction.  */
  *adjusted_insn_addr = buildaddr;
  target_read_uint32 (tpaddr, &insn);

  insn_data.base.insn_addr = tpaddr;
  insn_data.new_addr = buildaddr;
  insn_data.insn_ptr = buf;

  aarch64_relocate_instruction (insn, &visitor,
				(struct aarch64_insn_data *) &insn_data);

  /* We may not have been able to relocate the instruction.  */
  if (insn_data.insn_ptr == buf)
    {
      sprintf (err,
	       "E.Could not relocate instruction from %s to %s.",
	       core_addr_to_string_nz (tpaddr),
	       core_addr_to_string_nz (buildaddr));
      return 1;
    }
  else
    append_insns (&buildaddr, insn_data.insn_ptr - buf, buf);
  *adjusted_insn_addr_end = buildaddr;

  /* Go back to the start of the buffer.  */
  p = buf;

  /* Emit a branch back from the jump pad.  */
  offset = (tpaddr + orig_size - buildaddr);
  if (!can_encode_int32 (offset, 28))
    {
      sprintf (err,
	       "E.Jump back from jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
	       offset);
      return 1;
    }

  p += emit_b (p, 0, offset);
  append_insns (&buildaddr, p - buf, buf);

  /* Give the caller a branch instruction into the jump pad.  */
  offset = (*jump_entry - tpaddr);
  if (!can_encode_int32 (offset, 28))
    {
      sprintf (err,
	       "E.Jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
	       offset);
      return 1;
    }

  emit_b ((uint32_t *) jjump_pad_insn, 0, offset);
  *jjump_pad_insn_size = 4;

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

/* Helper function writing LEN instructions from START into
   current_insn_ptr.  */

static void
emit_ops_insns (const uint32_t *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d instructions at %s\n",
		  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}

/* Pop a register from the stack.  */

static int
emit_pop (uint32_t *buf, struct aarch64_register rt)
{
  return emit_ldr (buf, rt, sp, postindex_memory_operand (1 * 16));
}

/* Push a register on the stack.  */

static int
emit_push (uint32_t *buf, struct aarch64_register rt)
{
  return emit_str (buf, rt, sp, preindex_memory_operand (-1 * 16));
}
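
/* Each push and pop moves SP by a full 16 bytes even though only one
   8-byte register is transferred: AArch64 requires the stack pointer
   to stay 16-byte aligned whenever it is used as a base register.  */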

/* Implementation of emit_ops method "emit_prologue".  */

static void
aarch64_emit_prologue (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* This function emits a prologue for the following function prototype:

     enum eval_result_type f (unsigned char *regs,
			      ULONGEST *value);

     The first argument is a buffer of raw registers.  The second
     argument is the result of evaluating the expression, which will be
     set to whatever is on top of the stack at the end.

     The stack set up by the prologue is as such:

     High *------------------------------------------------------*
	  | LR                                                   |
	  | FP                                                   | <- FP
	  | x1  (ULONGEST *value)                                |
	  | x0  (unsigned char *regs)                            |
     Low  *------------------------------------------------------*

     As we are implementing a stack machine, each opcode can expand the
     stack so we never know how far we are from the data saved by this
     prologue.  In order to be able to refer to value and regs later,
     we save the current stack pointer in the frame pointer.  This way,
     it is not clobbered when calling C functions.

     Finally, throughout every operation, we are using register x0 as the
     top of the stack, and x1 as a scratch register.  */

  p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-2 * 16));
  p += emit_str (p, lr, sp, offset_memory_operand (3 * 8));
  p += emit_str (p, fp, sp, offset_memory_operand (2 * 8));

  p += emit_add (p, fp, sp, immediate_operand (2 * 8));

  emit_ops_insns (buf, p - buf);
}
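
/* With this layout, the regs buffer pointer lives at fp - 16 and the
   value pointer at fp - 8; aarch64_emit_reg and aarch64_emit_epilogue
   below reload them from those slots.  */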

/* Implementation of emit_ops method "emit_epilogue".  */

static void
aarch64_emit_epilogue (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Store the result of the expression (x0) in *value.  */
  p += emit_sub (p, x1, fp, immediate_operand (1 * 8));
  p += emit_ldr (p, x1, x1, offset_memory_operand (0));
  p += emit_str (p, x0, x1, offset_memory_operand (0));

  /* Restore the previous state.  */
  p += emit_add (p, sp, fp, immediate_operand (2 * 8));
  p += emit_ldp (p, fp, lr, fp, offset_memory_operand (0));

  /* Return expr_eval_no_error.  */
  p += emit_mov (p, x0, immediate_operand (expr_eval_no_error));
  p += emit_ret (p, lr);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_add".  */

static void
aarch64_emit_add (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_add (p, x0, x1, register_operand (x0));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_sub".  */

static void
aarch64_emit_sub (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_sub (p, x0, x1, register_operand (x0));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_mul".  */

static void
aarch64_emit_mul (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_mul (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_lsh".  */

static void
aarch64_emit_lsh (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_lslv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_rsh_signed".  */

static void
aarch64_emit_rsh_signed (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_asrv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_rsh_unsigned".  */

static void
aarch64_emit_rsh_unsigned (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_lsrv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ext".  */

static void
aarch64_emit_ext (int arg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_sbfx (p, x0, x0, 0, arg);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_log_not".  */

static void
aarch64_emit_log_not (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* If the top of the stack is 0, replace it with 1.  Else replace it with
     0.  */

  p += emit_cmp (p, x0, immediate_operand (0));
  p += emit_cset (p, x0, EQ);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_and".  */

static void
aarch64_emit_bit_and (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_and (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_or".  */

static void
aarch64_emit_bit_or (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_orr (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_xor".  */

static void
aarch64_emit_bit_xor (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_eor (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_not".  */

static void
aarch64_emit_bit_not (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mvn (p, x0, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_equal".  */

static void
aarch64_emit_equal (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x0, register_operand (x1));
  p += emit_cset (p, x0, EQ);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_less_signed".  */

static void
aarch64_emit_less_signed (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  p += emit_cset (p, x0, LT);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_less_unsigned".  */

static void
aarch64_emit_less_unsigned (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  p += emit_cset (p, x0, LO);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ref".  */

static void
aarch64_emit_ref (int size)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  switch (size)
    {
    case 1:
      p += emit_ldrb (p, w0, x0, offset_memory_operand (0));
      break;
    case 2:
      p += emit_ldrh (p, w0, x0, offset_memory_operand (0));
      break;
    case 4:
      p += emit_ldr (p, w0, x0, offset_memory_operand (0));
      break;
    case 8:
      p += emit_ldr (p, x0, x0, offset_memory_operand (0));
      break;
    default:
      /* Unknown size, bail on compilation.  */
      emit_error = 1;
      break;
    }

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_if_goto".  */

static void
aarch64_emit_if_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* The Z flag is set or cleared here.  */
  p += emit_cmp (p, x0, immediate_operand (0));
  /* This instruction must not change the Z flag.  */
  p += emit_pop (p, x0);
  /* Branch over the next instruction if x0 == 0.  */
  p += emit_bcond (p, EQ, 8);

  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
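
/* OFFSET_P and SIZE_P report the position and size of the NOP
   placeholder back to the bytecode compiler, which patches it into a
   real branch later, through the "write_goto_address" method below,
   once the destination is known.  */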

/* Implementation of emit_ops method "emit_goto".  */

static void
aarch64_emit_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = 0;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "write_goto_address".  */

static void
aarch64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  uint32_t insn;

  emit_b (&insn, 0, to - from);
  append_insns (&from, 1, &insn);
}
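
/* SIZE is unused here: on AArch64 every goto placeholder emitted by
   the methods above is a single 4-byte NOP, so the patched branch is
   always one B instruction.  */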

/* Implementation of emit_ops method "emit_const".  */

static void
aarch64_emit_const (LONGEST num)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov_addr (p, x0, num);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_call".  */

static void
aarch64_emit_call (CORE_ADDR fn)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov_addr (p, ip0, fn);
  p += emit_blr (p, ip0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_reg".  */

static void
aarch64_emit_reg (int reg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Set x0 to unsigned char *regs.  */
  p += emit_sub (p, x0, fp, immediate_operand (2 * 8));
  p += emit_ldr (p, x0, x0, offset_memory_operand (0));
  p += emit_mov (p, x1, immediate_operand (reg));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (get_raw_reg_func_addr ());
}
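
/* The compiled sequence thus calls out with x0 pointing at the raw
   register buffer and x1 holding the register number;
   get_raw_reg_func_addr returns the address of the agent-side helper
   (get_raw_reg in the in-process agent) that performs the actual
   fetch.  */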

/* Implementation of emit_ops method "emit_pop".  */

static void
aarch64_emit_pop (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_stack_flush".  */

static void
aarch64_emit_stack_flush (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_push (p, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_zero_ext".  */

static void
aarch64_emit_zero_ext (int arg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_ubfx (p, x0, x0, 0, arg);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_swap".  */

static void
aarch64_emit_swap (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_ldr (p, x1, sp, offset_memory_operand (0 * 16));
  p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
  p += emit_mov (p, x0, register_operand (x1));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_stack_adjust".  */

static void
aarch64_emit_stack_adjust (int n)
{
  /* This is not needed with our design.  */
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_add (p, sp, sp, immediate_operand (n * 16));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_int_call_1".  */

static void
aarch64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov (p, x0, immediate_operand (arg1));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (fn);
}

/* Implementation of emit_ops method "emit_void_call_2".  */

static void
aarch64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Push x0 on the stack.  */
  aarch64_emit_stack_flush ();

  /* Setup arguments for the function call:

     x0: arg1
     x1: top of the stack

     MOV x1, x0
     MOV x0, #arg1  */

  p += emit_mov (p, x1, register_operand (x0));
  p += emit_mov (p, x0, immediate_operand (arg1));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (fn);

  /* Restore x0.  */
  aarch64_emit_pop ();
}

/* Implementation of emit_ops method "emit_eq_goto".  */

static void
aarch64_emit_eq_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 != x1.  */
  p += emit_bcond (p, NE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ne_goto".  */

static void
aarch64_emit_ne_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 == x1.  */
  p += emit_bcond (p, EQ, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_lt_goto".  */

static void
aarch64_emit_lt_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 >= x1.  */
  p += emit_bcond (p, GE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_le_goto".  */

static void
aarch64_emit_le_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 > x1.  */
  p += emit_bcond (p, GT, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_gt_goto".  */

static void
aarch64_emit_gt_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 <= x1.  */
  p += emit_bcond (p, LE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ge_goto".  */

static void
aarch64_emit_ge_got (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 < x1.  */
  p += emit_bcond (p, LT, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

static struct emit_ops aarch64_emit_ops_impl =
{
  aarch64_emit_prologue,
  aarch64_emit_epilogue,
  aarch64_emit_add,
  aarch64_emit_sub,
  aarch64_emit_mul,
  aarch64_emit_lsh,
  aarch64_emit_rsh_signed,
  aarch64_emit_rsh_unsigned,
  aarch64_emit_ext,
  aarch64_emit_log_not,
  aarch64_emit_bit_and,
  aarch64_emit_bit_or,
  aarch64_emit_bit_xor,
  aarch64_emit_bit_not,
  aarch64_emit_equal,
  aarch64_emit_less_signed,
  aarch64_emit_less_unsigned,
  aarch64_emit_ref,
  aarch64_emit_if_goto,
  aarch64_emit_goto,
  aarch64_write_goto_address,
  aarch64_emit_const,
  aarch64_emit_call,
  aarch64_emit_reg,
  aarch64_emit_pop,
  aarch64_emit_stack_flush,
  aarch64_emit_zero_ext,
  aarch64_emit_swap,
  aarch64_emit_stack_adjust,
  aarch64_emit_int_call_1,
  aarch64_emit_void_call_2,
  aarch64_emit_eq_goto,
  aarch64_emit_ne_goto,
  aarch64_emit_lt_goto,
  aarch64_emit_le_goto,
  aarch64_emit_gt_goto,
  aarch64_emit_ge_got,
};

/* Implementation of linux_target_ops method "emit_ops".  */

static struct emit_ops *
aarch64_emit_ops (void)
{
  return &aarch64_emit_ops_impl;
}

/* Implementation of linux_target_ops method
   "get_min_fast_tracepoint_insn_len".  */

static int
aarch64_get_min_fast_tracepoint_insn_len (void)
{
  return 4;
}

/* Implementation of linux_target_ops method "supports_range_stepping".  */

static int
aarch64_supports_range_stepping (void)
{
  return 1;
}

/* Implementation of linux_target_ops method "sw_breakpoint_from_kind".  */

static const gdb_byte *
aarch64_sw_breakpoint_from_kind (int kind, int *size)
{
  if (is_64bit_tdesc ())
    {
      *size = aarch64_breakpoint_len;
      return aarch64_breakpoint;
    }
  else
    return arm_sw_breakpoint_from_kind (kind, size);
}

/* Implementation of linux_target_ops method "breakpoint_kind_from_pc".  */

static int
aarch64_breakpoint_kind_from_pc (CORE_ADDR *pcptr)
{
  if (is_64bit_tdesc ())
    return aarch64_breakpoint_len;
  else
    return arm_breakpoint_kind_from_pc (pcptr);
}

/* Implementation of the linux_target_ops method
   "breakpoint_kind_from_current_state".  */

static int
aarch64_breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
{
  if (is_64bit_tdesc ())
    return aarch64_breakpoint_len;
  else
    return arm_breakpoint_kind_from_current_state (pcptr);
}

/* Support for hardware single step.  */

static int
aarch64_supports_hardware_single_step (void)
{
  return 1;
}

struct linux_target_ops the_low_target =
{
  aarch64_regs_info,
  NULL, /* cannot_fetch_register */
  NULL, /* cannot_store_register */
  NULL, /* fetch_register */
  aarch64_get_pc,
  aarch64_set_pc,
  aarch64_breakpoint_kind_from_pc,
  aarch64_sw_breakpoint_from_kind,
  NULL, /* get_next_pcs */
  0,    /* decr_pc_after_break */
  aarch64_breakpoint_at,
  aarch64_supports_z_point_type,
  aarch64_insert_point,
  aarch64_remove_point,
  aarch64_stopped_by_watchpoint,
  aarch64_stopped_data_address,
  NULL, /* collect_ptrace_register */
  NULL, /* supply_ptrace_register */
  aarch64_linux_siginfo_fixup,
  aarch64_linux_new_process,
  aarch64_linux_delete_process,
  aarch64_linux_new_thread,
  aarch64_linux_delete_thread,
  aarch64_linux_new_fork,
  aarch64_linux_prepare_to_resume,
  NULL, /* process_qsupported */
  aarch64_supports_tracepoints,
  aarch64_get_thread_area,
  aarch64_install_fast_tracepoint_jump_pad,
  aarch64_emit_ops,
  aarch64_get_min_fast_tracepoint_insn_len,
  aarch64_supports_range_stepping,
  aarch64_breakpoint_kind_from_current_state,
  aarch64_supports_hardware_single_step,
  aarch64_get_syscall_trapinfo,
};

/* The linux target ops object.  */

linux_process_target *the_linux_target = &the_aarch64_target;

void
initialize_low_arch (void)
{
  initialize_low_arch_aarch32 ();

  initialize_regsets_info (&aarch64_regsets_info);
  initialize_regsets_info (&aarch64_sve_regsets_info);
}