/* GNU/Linux/AArch64 specific low level interface, for the remote server for
   GDB.

   Copyright (C) 2009-2019 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/aarch64-linux.h"
#include "nat/aarch64-linux-hw-point.h"
#include "arch/aarch64-insn.h"
#include "linux-aarch32-low.h"
#include "elf/common.h"
#include "ax.h"
#include "tracepoint.h"

#include <signal.h>
#include <sys/user.h>
#include "nat/gdb_ptrace.h"
#include <asm/ptrace.h>
#include <inttypes.h>
#include <endian.h>
#include <sys/uio.h>

#include "gdb_proc_service.h"
#include "arch/aarch64.h"
#include "linux-aarch64-tdesc.h"
#include "nat/aarch64-sve-linux-ptrace.h"
#include "tdesc.h"

#ifdef HAVE_SYS_REG_H
#include <sys/reg.h>
#endif

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  /* Hardware breakpoint/watchpoint data.
     The reason for them to be per-process rather than per-thread is
     due to the lack of information in the gdbserver environment;
     gdbserver is not told whether a requested hardware
     breakpoint/watchpoint is thread specific or not, so it has to set
     each hw bp/wp for every thread in the current process.  The
     higher level bp/wp management in gdb will resume a thread if a hw
     bp/wp trap is not expected for it.  Since the hw bp/wp setting is
     the same for each thread, it is reasonable for the data to live
     here.  */
  struct aarch64_debug_reg_state debug_reg_state;
};

/* Return true if the size of register 0 is 8 bytes.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

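/* Register 0 is X0 (8 bytes) in the AArch64 target descriptions and R0
   (4 bytes) in the 32-bit ARM ones, which is how gdbserver tells a
   64-bit inferior apart from a 32-bit one.  */
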
/* Return true if the regcache contains the expected number of
   registers for an SVE target description.  */

static bool
is_sve_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return regcache->tdesc->reg_defs.size () == AARCH64_SVE_NUM_REGS;
}

static void
aarch64_fill_gregset (struct regcache *regcache, void *buf)
{
  struct user_pt_regs *regset = (struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    collect_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  collect_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  collect_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  collect_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}

static void
aarch64_store_gregset (struct regcache *regcache, const void *buf)
{
  const struct user_pt_regs *regset = (const struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    supply_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  supply_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  supply_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  supply_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}

static void
aarch64_fill_fpregset (struct regcache *regcache, void *buf)
{
  struct user_fpsimd_state *regset = (struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    collect_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  collect_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  collect_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}

static void
aarch64_store_fpregset (struct regcache *regcache, const void *buf)
{
  const struct user_fpsimd_state *regset
    = (const struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    supply_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  supply_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  supply_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}

/* Store the pauth registers to regcache.  */

static void
aarch64_store_pauthregset (struct regcache *regcache, const void *buf)
{
  uint64_t *pauth_regset = (uint64_t *) buf;
  int pauth_base = find_regno (regcache->tdesc, "pauth_dmask");

  if (pauth_base == 0)
    return;

  supply_register (regcache, AARCH64_PAUTH_DMASK_REGNUM (pauth_base),
                   &pauth_regset[0]);
  supply_register (regcache, AARCH64_PAUTH_CMASK_REGNUM (pauth_base),
                   &pauth_regset[1]);
}

/* Enable miscellaneous debugging output.  The name is historical - it
   was originally used to debug LinuxThreads support.  */
extern int debug_threads;

/* Implementation of linux_target_ops method "get_pc".  */

static CORE_ADDR
aarch64_get_pc (struct regcache *regcache)
{
  if (register_size (regcache->tdesc, 0) == 8)
    return linux_get_pc_64bit (regcache);
  else
    return linux_get_pc_32bit (regcache);
}

/* Implementation of linux_target_ops method "set_pc".  */

static void
aarch64_set_pc (struct regcache *regcache, CORE_ADDR pc)
{
  if (register_size (regcache->tdesc, 0) == 8)
    linux_set_pc_64bit (regcache, pc);
  else
    linux_set_pc_32bit (regcache, pc);
}

#define aarch64_breakpoint_len 4

/* AArch64 BRK software debug mode instruction.
   This instruction needs to match gdb/aarch64-tdep.c
   (aarch64_default_breakpoint).  */
static const gdb_byte aarch64_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};

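/* Read as a little-endian 32-bit word, the bytes above are 0xd4200000,
   which disassembles as BRK #0.  */
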
/* Implementation of linux_target_ops method "breakpoint_at".  */

static int
aarch64_breakpoint_at (CORE_ADDR where)
{
  if (is_64bit_tdesc ())
    {
      gdb_byte insn[aarch64_breakpoint_len];

      (*the_target->read_memory) (where, (unsigned char *) &insn,
                                  aarch64_breakpoint_len);
      if (memcmp (insn, aarch64_breakpoint, aarch64_breakpoint_len) == 0)
        return 1;

      return 0;
    }
  else
    return arm_breakpoint_at (where);
}

static void
aarch64_init_debug_reg_state (struct aarch64_debug_reg_state *state)
{
  int i;

  for (i = 0; i < AARCH64_HBP_MAX_NUM; ++i)
    {
      state->dr_addr_bp[i] = 0;
      state->dr_ctrl_bp[i] = 0;
      state->dr_ref_count_bp[i] = 0;
    }

  for (i = 0; i < AARCH64_HWP_MAX_NUM; ++i)
    {
      state->dr_addr_wp[i] = 0;
      state->dr_ctrl_wp[i] = 0;
      state->dr_ref_count_wp[i] = 0;
    }
}

/* Return the pointer to the debug register state structure in the
   current process' arch-specific data area.  */

struct aarch64_debug_reg_state *
aarch64_get_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}

/* Implementation of linux_target_ops method "supports_z_point_type".  */

static int
aarch64_supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_READ_WP:
    case Z_PACKET_ACCESS_WP:
      return 1;
    default:
      return 0;
    }
}

/* Implementation of linux_target_ops method "insert_point".

   It actually only records the info of the to-be-inserted bp/wp;
   the actual insertion will happen when threads are resumed.  */

static int
aarch64_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
                      int len, struct raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "insert_point on entry (addr=0x%08lx, len=%d)\n",
             (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  if (targ_type != hw_execute)
    {
      if (aarch64_linux_region_ok_for_watchpoint (addr, len))
        ret = aarch64_handle_watchpoint (targ_type, addr, len,
                                         1 /* is_insert */, state);
      else
        ret = -1;
    }
  else
    {
      if (len == 3)
        {
          /* A LEN of 3 means the breakpoint is set on a 32-bit Thumb
             instruction.  Set it to 2 to correctly encode the length
             bit mask in the hardware/watchpoint control register.  */
          len = 2;
        }
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
                                       1 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "insert_point", addr, len,
                                  targ_type);

  return ret;
}

/* Implementation of linux_target_ops method "remove_point".

   It actually only records the info of the to-be-removed bp/wp;
   the actual removal will be done when threads are resumed.  */

static int
aarch64_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
                      int len, struct raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "remove_point on entry (addr=0x%08lx, len=%d)\n",
             (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  /* Set up state pointers.  */
  if (targ_type != hw_execute)
    ret =
      aarch64_handle_watchpoint (targ_type, addr, len, 0 /* is_insert */,
                                 state);
  else
    {
      if (len == 3)
        {
          /* A LEN of 3 means the breakpoint is set on a 32-bit Thumb
             instruction.  Set it to 2 to correctly encode the length
             bit mask in the hardware/watchpoint control register.  */
          len = 2;
        }
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
                                       0 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "remove_point", addr, len,
                                  targ_type);

  return ret;
}

/* Implementation of linux_target_ops method "stopped_data_address".  */

static CORE_ADDR
aarch64_stopped_data_address (void)
{
  siginfo_t siginfo;
  int pid, i;
  struct aarch64_debug_reg_state *state;

  pid = lwpid_of (current_thread);

  /* Get the siginfo.  */
  if (ptrace (PTRACE_GETSIGINFO, pid, NULL, &siginfo) != 0)
    return (CORE_ADDR) 0;

  /* Need to be a hardware breakpoint/watchpoint trap.  */
  if (siginfo.si_signo != SIGTRAP
      || (siginfo.si_code & 0xffff) != 0x0004 /* TRAP_HWBKPT */)
    return (CORE_ADDR) 0;

  /* Check if the address matches any watched address.  */
  state = aarch64_get_debug_reg_state (pid_of (current_thread));
  for (i = aarch64_num_wp_regs - 1; i >= 0; --i)
    {
      const unsigned int offset
        = aarch64_watchpoint_offset (state->dr_ctrl_wp[i]);
      const unsigned int len = aarch64_watchpoint_length (state->dr_ctrl_wp[i]);
      const CORE_ADDR addr_trap = (CORE_ADDR) siginfo.si_addr;
      const CORE_ADDR addr_watch = state->dr_addr_wp[i] + offset;
      const CORE_ADDR addr_watch_aligned = align_down (state->dr_addr_wp[i], 8);
      const CORE_ADDR addr_orig = state->dr_addr_orig_wp[i];

      if (state->dr_ref_count_wp[i]
          && DR_CONTROL_ENABLED (state->dr_ctrl_wp[i])
          && addr_trap >= addr_watch_aligned
          && addr_trap < addr_watch + len)
        {
          /* ADDR_TRAP reports the first address of the memory range
             accessed by the CPU, regardless of what was the memory
             range watched.  Thus, a large CPU access that straddles
             the ADDR_WATCH..ADDR_WATCH+LEN range may result in an
             ADDR_TRAP that is lower than the
             ADDR_WATCH..ADDR_WATCH+LEN range.  E.g.:

             addr: |   4   |   5   |   6   |   7   |   8   |
                           |---- range watched ----|
                   |----------- range accessed ------------|

             In this case, ADDR_TRAP will be 4.

             To match a watchpoint known to GDB core, we must never
             report *ADDR_P outside of any ADDR_WATCH..ADDR_WATCH+LEN
             range.  ADDR_WATCH <= ADDR_TRAP < ADDR_ORIG is a false
             positive on kernels older than 4.10.  See PR
             external/20207.  */
          return addr_orig;
        }
    }

  return (CORE_ADDR) 0;
}

/* Implementation of linux_target_ops method "stopped_by_watchpoint".  */

static int
aarch64_stopped_by_watchpoint (void)
{
  if (aarch64_stopped_data_address () != 0)
    return 1;
  else
    return 0;
}

/* Fetch the thread-local storage pointer for libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
                    lwpid_t lwpid, int idx, void **base)
{
  return aarch64_ps_get_thread_area (ph, lwpid, idx, base,
                                     is_64bit_tdesc ());
}

/* Implementation of linux_target_ops method "siginfo_fixup".  */

static int
aarch64_linux_siginfo_fixup (siginfo_t *native, gdb_byte *inf, int direction)
{
  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    {
      if (direction == 0)
        aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo *) inf,
                                             native);
      else
        aarch64_siginfo_from_compat_siginfo (native,
                                             (struct compat_siginfo *) inf);

      return 1;
    }

  return 0;
}

/* Implementation of linux_target_ops method "new_process".  */

static struct arch_process_info *
aarch64_linux_new_process (void)
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  aarch64_init_debug_reg_state (&info->debug_reg_state);

  return info;
}

/* Implementation of linux_target_ops method "delete_process".  */

static void
aarch64_linux_delete_process (struct arch_process_info *info)
{
  xfree (info);
}

/* Implementation of linux_target_ops method "linux_new_fork".  */

static void
aarch64_linux_new_fork (struct process_info *parent,
                        struct process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
              && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
              && child->priv->arch_private != NULL);

  /* Linux kernels before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     inherit the hardware debug registers from the parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug register mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug register mirrors will be zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}

/* Matches HWCAP_PACA in kernel header arch/arm64/include/uapi/asm/hwcap.h.  */
#define AARCH64_HWCAP_PACA (1 << 30)

/* Implementation of linux_target_ops method "arch_setup".  */

static void
aarch64_arch_setup (void)
{
  unsigned int machine;
  int is_elf64;
  int tid;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (is_elf64)
    {
      uint64_t vq = aarch64_sve_get_vq (tid);
      unsigned long hwcap = linux_get_hwcap (8);
      bool pauth_p = hwcap & AARCH64_HWCAP_PACA;

      current_process ()->tdesc = aarch64_linux_read_description (vq, pauth_p);
    }
  else
    current_process ()->tdesc = tdesc_arm_with_neon;

  aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread));
}

/* Wrapper for aarch64_sve_regs_copy_to_reg_buf.  */

static void
aarch64_sve_regs_copy_to_regcache (struct regcache *regcache, const void *buf)
{
  return aarch64_sve_regs_copy_to_reg_buf (regcache, buf);
}

/* Wrapper for aarch64_sve_regs_copy_from_reg_buf.  */

static void
aarch64_sve_regs_copy_from_regcache (struct regcache *regcache, void *buf)
{
  return aarch64_sve_regs_copy_from_reg_buf (regcache, buf);
}

static struct regset_info aarch64_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_FPREGSET,
    sizeof (struct user_fpsimd_state), FP_REGS,
    aarch64_fill_fpregset, aarch64_store_fpregset
  },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
    AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
    NULL, aarch64_store_pauthregset },
  NULL_REGSET
};

static struct regsets_info aarch64_regsets_info =
  {
    aarch64_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

static struct regs_info regs_info_aarch64 =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs */
    &aarch64_regsets_info,
  };

static struct regset_info aarch64_sve_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_SVE,
    SVE_PT_SIZE (AARCH64_MAX_SVE_VQ, SVE_PT_REGS_SVE), EXTENDED_REGS,
    aarch64_sve_regs_copy_from_regcache, aarch64_sve_regs_copy_to_regcache
  },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
    AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
    NULL, aarch64_store_pauthregset },
  NULL_REGSET
};

static struct regsets_info aarch64_sve_regsets_info =
  {
    aarch64_sve_regsets, /* regsets.  */
    0, /* num_regsets.  */
    NULL, /* disabled_regsets.  */
  };

static struct regs_info regs_info_aarch64_sve =
  {
    NULL, /* regset_bitmap.  */
    NULL, /* usrregs.  */
    &aarch64_sve_regsets_info,
  };

/* Implementation of linux_target_ops method "regs_info".  */

static const struct regs_info *
aarch64_regs_info (void)
{
  if (!is_64bit_tdesc ())
    return &regs_info_aarch32;

  if (is_sve_tdesc ())
    return &regs_info_aarch64_sve;

  return &regs_info_aarch64;
}

/* Implementation of linux_target_ops method "supports_tracepoints".  */

static int
aarch64_supports_tracepoints (void)
{
  if (current_thread == NULL)
    return 1;
  else
    {
      /* We don't support tracepoints on aarch32 now.  */
      return is_64bit_tdesc ();
    }
}

/* Implementation of linux_target_ops method "get_thread_area".  */

static int
aarch64_get_thread_area (int lwpid, CORE_ADDR *addrp)
{
  struct iovec iovec;
  uint64_t reg;

  iovec.iov_base = &reg;
  iovec.iov_len = sizeof (reg);

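  /* The NT_ARM_TLS regset holds the thread's TPIDR_EL0 software
     thread ID register, i.e. the thread pointer.  */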
  if (ptrace (PTRACE_GETREGSET, lwpid, NT_ARM_TLS, &iovec) != 0)
    return -1;

  *addrp = reg;

  return 0;
}

/* Implementation of linux_target_ops method "get_syscall_trapinfo".  */

static void
aarch64_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      collect_register_by_name (regcache, "x8", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "r7", sysno);
}

/* List of condition codes that we need.  */

enum aarch64_condition_codes
{
  EQ = 0x0,
  NE = 0x1,
  LO = 0x3,
  GE = 0xa,
  LT = 0xb,
  GT = 0xc,
  LE = 0xd,
};

enum aarch64_operand_type
{
  OPERAND_IMMEDIATE,
  OPERAND_REGISTER,
};

/* Representation of an operand.  At this time, it only supports register
   and immediate types.  */

struct aarch64_operand
{
  /* Type of the operand.  */
  enum aarch64_operand_type type;

  /* Value of the operand according to the type.  */
  union
    {
      uint32_t imm;
      struct aarch64_register reg;
    };
};

/* List of registers that we are currently using; we can add more here as
   we need to use them.  */

/* General purpose scratch registers (64 bit).  */
static const struct aarch64_register x0 = { 0, 1 };
static const struct aarch64_register x1 = { 1, 1 };
static const struct aarch64_register x2 = { 2, 1 };
static const struct aarch64_register x3 = { 3, 1 };
static const struct aarch64_register x4 = { 4, 1 };

/* General purpose scratch registers (32 bit).  */
static const struct aarch64_register w0 = { 0, 0 };
static const struct aarch64_register w2 = { 2, 0 };

/* Intra-procedure scratch registers.  */
static const struct aarch64_register ip0 = { 16, 1 };

/* Special purpose registers.  */
static const struct aarch64_register fp = { 29, 1 };
static const struct aarch64_register lr = { 30, 1 };
static const struct aarch64_register sp = { 31, 1 };
static const struct aarch64_register xzr = { 31, 1 };

/* Dynamically allocate a new register.  If we know the register
   statically, we should make it a global as above instead of using this
   helper function.  */

static struct aarch64_register
aarch64_register (unsigned num, int is64)
{
  return (struct aarch64_register) { num, is64 };
}

/* Helper function to create a register operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, register_operand (x1));  */

static struct aarch64_operand
register_operand (struct aarch64_register reg)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_REGISTER;
  operand.reg = reg;

  return operand;
}

/* Helper function to create an immediate operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, immediate_operand (12));  */

static struct aarch64_operand
immediate_operand (uint32_t imm)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_IMMEDIATE;
  operand.imm = imm;

  return operand;
}

/* Helper function to create an offset memory operand.

   For example:
   p += emit_ldr (p, x0, sp, offset_memory_operand (16));  */

static struct aarch64_memory_operand
offset_memory_operand (int32_t offset)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_OFFSET, offset };
}

/* Helper function to create a pre-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, preindex_memory_operand (16));  */

static struct aarch64_memory_operand
preindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_PREINDEX, index };
}

/* Helper function to create a post-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, postindex_memory_operand (16));  */

static struct aarch64_memory_operand
postindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_POSTINDEX, index };
}

/* System control registers.  These special registers can be written and
   read with the MRS and MSR instructions.

   - NZCV: Condition flags.  GDB refers to this register under the CPSR
     name.
   - FPSR: Floating-point status register.
   - FPCR: Floating-point control register.
   - TPIDR_EL0: Software thread ID register.  */

enum aarch64_system_control_registers
{
  /*          op0           op1           crn          crm          op2  */
  NZCV =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
  FPSR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
  FPCR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
  TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
};

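/* As a worked example, TPIDR_EL0 above packs o0=1 (i.e. op0=3),
   op1=3, CRn=13, CRm=0, op2=2, which matches the architectural
   S3_3_C13_C0_2 encoding of that register.  */
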
/* Write a BLR instruction into *BUF.

   BLR rn

   RN is the register to branch to.  */

static int
emit_blr (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, BLR | ENCODE (rn.num, 5, 5));
}

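/* A note on ENCODE, which the emitters below use heavily:
   ENCODE (value, size, offset), from arch/aarch64-insn.h, places
   VALUE, truncated to SIZE bits, at bit position OFFSET.  In emit_blr
   above, ENCODE (rn.num, 5, 5) thus puts the register number into
   bits 5..9 of the BLR opcode (0xd63f0000).  */
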
/* Write a RET instruction into *BUF.

   RET xn

   RN is the register to branch to.  */

static int
emit_ret (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, RET | ENCODE (rn.num, 5, 5));
}

static int
emit_load_store_pair (uint32_t *buf, enum aarch64_opcodes opcode,
                      struct aarch64_register rt,
                      struct aarch64_register rt2,
                      struct aarch64_register rn,
                      struct aarch64_memory_operand operand)
{
  uint32_t opc;
  uint32_t pre_index;
  uint32_t write_back;

  if (rt.is64)
    opc = ENCODE (2, 2, 30);
  else
    opc = ENCODE (0, 2, 30);

  switch (operand.type)
    {
    case MEMORY_OPERAND_OFFSET:
      {
        pre_index = ENCODE (1, 1, 24);
        write_back = ENCODE (0, 1, 23);
        break;
      }
    case MEMORY_OPERAND_POSTINDEX:
      {
        pre_index = ENCODE (0, 1, 24);
        write_back = ENCODE (1, 1, 23);
        break;
      }
    case MEMORY_OPERAND_PREINDEX:
      {
        pre_index = ENCODE (1, 1, 24);
        write_back = ENCODE (1, 1, 23);
        break;
      }
    default:
      return 0;
    }

  return aarch64_emit_insn (buf, opcode | opc | pre_index | write_back
                            | ENCODE (operand.index >> 3, 7, 15)
                            | ENCODE (rt2.num, 5, 10)
                            | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}

/* Write a STP instruction into *BUF.

   STP rt, rt2, [rn, #offset]
   STP rt, rt2, [rn, #index]!
   STP rt, rt2, [rn], #index

   RT and RT2 are the registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_stp (uint32_t *buf, struct aarch64_register rt,
          struct aarch64_register rt2, struct aarch64_register rn,
          struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, STP, rt, rt2, rn, operand);
}

/* Write a LDP instruction into *BUF.

   LDP rt, rt2, [rn, #offset]
   LDP rt, rt2, [rn, #index]!
   LDP rt, rt2, [rn], #index

   RT and RT2 are the registers to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_ldp (uint32_t *buf, struct aarch64_register rt,
          struct aarch64_register rt2, struct aarch64_register rn,
          struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, LDP, rt, rt2, rn, operand);
}

/* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.

   LDP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_ldp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
                   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, LDP_SIMD_VFP | opc | pre_index
                            | ENCODE (offset >> 4, 7, 15)
                            | ENCODE (rt2, 5, 10)
                            | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

/* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.

   STP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_stp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
                   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, STP_SIMD_VFP | opc | pre_index
                            | ENCODE (offset >> 4, 7, 15)
                            | ENCODE (rt2, 5, 10)
                            | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

/* Write a LDRH instruction into *BUF.

   LDRH wt, [xn, #offset]
   LDRH wt, [xn, #index]!
   LDRH wt, [xn], #index

   RT is the register to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_ldrh (uint32_t *buf, struct aarch64_register rt,
           struct aarch64_register rn,
           struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 1, LDR, rt, rn, operand);
}

/* Write a LDRB instruction into *BUF.

   LDRB wt, [xn, #offset]
   LDRB wt, [xn, #index]!
   LDRB wt, [xn], #index

   RT is the register to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_ldrb (uint32_t *buf, struct aarch64_register rt,
           struct aarch64_register rn,
           struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 0, LDR, rt, rn, operand);
}

/* Write a STR instruction into *BUF.

   STR rt, [rn, #offset]
   STR rt, [rn, #index]!
   STR rt, [rn], #index

   RT is the register to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_str (uint32_t *buf, struct aarch64_register rt,
          struct aarch64_register rn,
          struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, rt.is64 ? 3 : 2, STR, rt, rn, operand);
}

/* Helper function emitting an exclusive load or store instruction.  */

static int
emit_load_store_exclusive (uint32_t *buf, uint32_t size,
                           enum aarch64_opcodes opcode,
                           struct aarch64_register rs,
                           struct aarch64_register rt,
                           struct aarch64_register rt2,
                           struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, opcode | ENCODE (size, 2, 30)
                            | ENCODE (rs.num, 5, 16) | ENCODE (rt2.num, 5, 10)
                            | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}

/* Write a LDAXR instruction into *BUF.

   LDAXR rt, [xn]

   RT is the destination register.
   RN is the base address register.  */

static int
emit_ldaxr (uint32_t *buf, struct aarch64_register rt,
            struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, LDAXR, xzr, rt,
                                    xzr, rn);
}

/* Write a STXR instruction into *BUF.

   STXR ws, rt, [xn]

   RS is the result register; it indicates whether the store succeeded
   or not.
   RT is the destination register.
   RN is the base address register.  */

static int
emit_stxr (uint32_t *buf, struct aarch64_register rs,
           struct aarch64_register rt, struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STXR, rs, rt,
                                    xzr, rn);
}

/* Write a STLR instruction into *BUF.

   STLR rt, [xn]

   RT is the register to store.
   RN is the base address register.  */

static int
emit_stlr (uint32_t *buf, struct aarch64_register rt,
           struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STLR, xzr, rt,
                                    xzr, rn);
}

/* Helper function for data processing instructions with register sources.  */

static int
emit_data_processing_reg (uint32_t *buf, uint32_t opcode,
                          struct aarch64_register rd,
                          struct aarch64_register rn,
                          struct aarch64_register rm)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, opcode | size | ENCODE (rm.num, 5, 16)
                            | ENCODE (rn.num, 5, 5) | ENCODE (rd.num, 5, 0));
}

/* Helper function for data processing instructions taking either a register
   or an immediate.  */

static int
emit_data_processing (uint32_t *buf, enum aarch64_opcodes opcode,
                      struct aarch64_register rd,
                      struct aarch64_register rn,
                      struct aarch64_operand operand)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  /* The opcode is different for register and immediate source operands.  */
  uint32_t operand_opcode;

  if (operand.type == OPERAND_IMMEDIATE)
    {
      /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (8, 4, 25);

      return aarch64_emit_insn (buf, opcode | operand_opcode | size
                                | ENCODE (operand.imm, 12, 10)
                                | ENCODE (rn.num, 5, 5)
                                | ENCODE (rd.num, 5, 0));
    }
  else
    {
      /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (5, 4, 25);

      return emit_data_processing_reg (buf, opcode | operand_opcode, rd,
                                       rn, operand.reg);
    }
}

/* Write an ADD instruction into *BUF.

   ADD rd, rn, #imm
   ADD rd, rn, rm

   This function handles both an immediate and register add.

   RD is the destination register.
   RN is the input register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_add (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, ADD, rd, rn, operand);
}

/* Write a SUB instruction into *BUF.

   SUB rd, rn, #imm
   SUB rd, rn, rm

   This function handles both an immediate and register sub.

   RD is the destination register.
   RN is the input register.
   OPERAND is the source operand to subtract from RN, either of type
   OPERAND_IMMEDIATE or OPERAND_REGISTER.  */

static int
emit_sub (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUB, rd, rn, operand);
}

/* Write a MOV instruction into *BUF.

   MOV rd, #imm
   MOV rd, rm

   This function handles both a wide immediate move and a register move,
   with the condition that the source register is not xzr.  xzr and the
   stack pointer share the same encoding and this function only supports
   the stack pointer.

   RD is the destination register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_mov (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_operand operand)
{
  if (operand.type == OPERAND_IMMEDIATE)
    {
      uint32_t size = ENCODE (rd.is64, 1, 31);
      /* Do not shift the immediate.  */
      uint32_t shift = ENCODE (0, 2, 21);

      return aarch64_emit_insn (buf, MOV | size | shift
                                | ENCODE (operand.imm, 16, 5)
                                | ENCODE (rd.num, 5, 0));
    }
  else
    return emit_add (buf, rd, operand.reg, immediate_operand (0));
}

/* Write a MOVK instruction into *BUF.

   MOVK rd, #imm, lsl #shift

   RD is the destination register.
   IMM is the immediate.
   SHIFT is the logical shift left to apply to IMM.  */

static int
emit_movk (uint32_t *buf, struct aarch64_register rd, uint32_t imm,
           unsigned shift)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, MOVK | size | ENCODE (shift, 2, 21)
                            | ENCODE (imm, 16, 5) | ENCODE (rd.num, 5, 0));
}

/* Write instructions into *BUF in order to move ADDR into a register.
   ADDR can be a 64-bit value.

   This function will emit a series of MOV and MOVK instructions, such as:

     MOV  xd, #(addr)
     MOVK xd, #(addr >> 16), lsl #16
     MOVK xd, #(addr >> 32), lsl #32
     MOVK xd, #(addr >> 48), lsl #48  */

static int
emit_mov_addr (uint32_t *buf, struct aarch64_register rd, CORE_ADDR addr)
{
  uint32_t *p = buf;

  /* The MOV (wide immediate) instruction clears the top bits of the
     register.  */
  p += emit_mov (p, rd, immediate_operand (addr & 0xffff));

  if ((addr >> 16) != 0)
    p += emit_movk (p, rd, (addr >> 16) & 0xffff, 1);
  else
    return p - buf;

  if ((addr >> 32) != 0)
    p += emit_movk (p, rd, (addr >> 32) & 0xffff, 2);
  else
    return p - buf;

  if ((addr >> 48) != 0)
    p += emit_movk (p, rd, (addr >> 48) & 0xffff, 3);

  return p - buf;
}

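/* As a worked example, emit_mov_addr with ADDR == 0x123456789abc emits

     MOV  xd, #0x9abc
     MOVK xd, #0x5678, lsl #16
     MOVK xd, #0x1234, lsl #32

   and stops there, since bits 48..63 of ADDR are zero.  */
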
/* Write a SUBS instruction into *BUF.

   SUBS rd, rn, rm

   This instruction updates the condition flags.

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_subs (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUBS, rd, rn, operand);
}

/* Write a CMP instruction into *BUF.

   CMP rn, rm

   This instruction is an alias of SUBS xzr, rn, rm.

   RN and RM are the registers to compare.  */

static int
emit_cmp (uint32_t *buf, struct aarch64_register rn,
          struct aarch64_operand operand)
{
  return emit_subs (buf, xzr, rn, operand);
}

/* Write an AND instruction into *BUF.

   AND rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_and (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, AND, rd, rn, rm);
}

/* Write an ORR instruction into *BUF.

   ORR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orr (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORR, rd, rn, rm);
}

/* Write an ORN instruction into *BUF.

   ORN rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orn (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORN, rd, rn, rm);
}

/* Write an EOR instruction into *BUF.

   EOR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_eor (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, EOR, rd, rn, rm);
}

/* Write an MVN instruction into *BUF.

   MVN rd, rm

   This is an alias for ORN rd, xzr, rm.

   RD is the destination register.
   RM is the source register.  */

static int
emit_mvn (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rm)
{
  return emit_orn (buf, rd, xzr, rm);
}

/* Write an LSLV instruction into *BUF.

   LSLV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lslv (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSLV, rd, rn, rm);
}

/* Write an LSRV instruction into *BUF.

   LSRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lsrv (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSRV, rd, rn, rm);
}

/* Write an ASRV instruction into *BUF.

   ASRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_asrv (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ASRV, rd, rn, rm);
}

/* Write a MUL instruction into *BUF.

   MUL rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_mul (uint32_t *buf, struct aarch64_register rd,
          struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, MUL, rd, rn, rm);
}

/* Write an MRS instruction into *BUF.  The register size is 64-bit.

   MRS xt, system_reg

   RT is the destination register.
   SYSTEM_REG is the special purpose register to read.  */

static int
emit_mrs (uint32_t *buf, struct aarch64_register rt,
          enum aarch64_system_control_registers system_reg)
{
  return aarch64_emit_insn (buf, MRS | ENCODE (system_reg, 15, 5)
                            | ENCODE (rt.num, 5, 0));
}

/* Write an MSR instruction into *BUF.  The register size is 64-bit.

   MSR system_reg, xt

   SYSTEM_REG is the special purpose register to write.
   RT is the input register.  */

static int
emit_msr (uint32_t *buf, enum aarch64_system_control_registers system_reg,
          struct aarch64_register rt)
{
  return aarch64_emit_insn (buf, MSR | ENCODE (system_reg, 15, 5)
                            | ENCODE (rt.num, 5, 0));
}

/* Write a SEVL instruction into *BUF.

   This is a hint instruction telling the hardware to trigger an event.  */

static int
emit_sevl (uint32_t *buf)
{
  return aarch64_emit_insn (buf, SEVL);
}

/* Write a WFE instruction into *BUF.

   This is a hint instruction telling the hardware to wait for an event.  */

static int
emit_wfe (uint32_t *buf)
{
  return aarch64_emit_insn (buf, WFE);
}

/* Write a SBFM instruction into *BUF.

   SBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, sign extending the result.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_sbfm (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, SBFM | size | n | ENCODE (immr, 6, 16)
                            | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
                            | ENCODE (rd.num, 5, 0));
}

/* Write a SBFX instruction into *BUF.

   SBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination, sign
   extending the result.  This is an alias for:

     SBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_sbfx (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_sbfm (buf, rd, rn, lsb, lsb + width - 1);
}

/* Write a UBFM instruction into *BUF.

   UBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, extending the result with zeros.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_ubfm (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, UBFM | size | n | ENCODE (immr, 6, 16)
                            | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
                            | ENCODE (rd.num, 5, 0));
}

/* Write a UBFX instruction into *BUF.

   UBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination,
   extending the result with zeros.  This is an alias for:

     UBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_ubfx (uint32_t *buf, struct aarch64_register rd,
           struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_ubfm (buf, rd, rn, lsb, lsb + width - 1);
}

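/* For example, emit_ubfx (p, w0, w2, 8, 4) encodes
   UBFX w0, w2, #8, #4 (i.e. UBFM w0, w2, #8, #11), extracting bits
   8..11 of w2 into the low bits of w0, zero-extended.  */
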
/* Write a CSINC instruction into *BUF.

   CSINC rd, rn, rm, cond

   This instruction writes RN to RD if the condition is true, and
   RM + 1 to RD otherwise.

   RD is the destination register.
   RN and RM are the source registers.
   COND is the encoded condition.  */

static int
emit_csinc (uint32_t *buf, struct aarch64_register rd,
            struct aarch64_register rn, struct aarch64_register rm,
            unsigned cond)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, CSINC | size | ENCODE (rm.num, 5, 16)
                            | ENCODE (cond, 4, 12) | ENCODE (rn.num, 5, 5)
                            | ENCODE (rd.num, 5, 0));
}

/* Write a CSET instruction into *BUF.

   CSET rd, cond

   This instruction conditionally writes 1 or 0 to the destination
   register.  1 is written if the condition is true.  This is an alias
   for:

     CSINC rd, xzr, xzr, !cond

   Note that the condition needs to be inverted.

   RD is the destination register.
   COND is the encoded condition.  */

static int
emit_cset (uint32_t *buf, struct aarch64_register rd, unsigned cond)
{
  /* The least significant bit of the condition needs toggling in order to
     invert it.  */
  return emit_csinc (buf, rd, xzr, xzr, cond ^ 0x1);
}

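/* For example, emit_cset (p, x0, EQ) emits CSINC x0, xzr, xzr, NE
   (the preferred CSET x0, EQ form): x0 becomes 1 if the Z flag is
   set, 0 otherwise.  */
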
/* Write LEN instructions from BUF into the inferior memory at *TO.

   Note instructions are always little endian on AArch64, unlike data.  */

static void
append_insns (CORE_ADDR *to, size_t len, const uint32_t *buf)
{
  size_t byte_len = len * sizeof (uint32_t);
#if (__BYTE_ORDER == __BIG_ENDIAN)
  uint32_t *le_buf = (uint32_t *) xmalloc (byte_len);
  size_t i;

  for (i = 0; i < len; i++)
    le_buf[i] = htole32 (buf[i]);

  write_inferior_memory (*to, (const unsigned char *) le_buf, byte_len);

  xfree (le_buf);
#else
  write_inferior_memory (*to, (const unsigned char *) buf, byte_len);
#endif

  *to += byte_len;
}

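/* A minimal sketch of the usage pattern (the jump pad builder below
   does the same at a larger scale):

     uint32_t buf[16];
     uint32_t *p = buf;

     p += emit_mov_addr (p, x0, addr);
     p += emit_blr (p, x0);
     append_insns (&buildaddr, p - buf, buf);
 */
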
/* Sub-class of struct aarch64_insn_data; stores information about
   instruction relocation for fast tracepoints.  The visitor can
   relocate an instruction from BASE.INSN_ADDR to NEW_ADDR and save
   the relocated instructions in the buffer pointed to by INSN_PTR.  */

struct aarch64_insn_relocation_data
{
  struct aarch64_insn_data base;

  /* The new address the instruction is relocated to.  */
  CORE_ADDR new_addr;
  /* Pointer to the buffer of relocated instruction(s).  */
  uint32_t *insn_ptr;
};

/* Implementation of aarch64_insn_visitor method "b".  */

static void
aarch64_ftrace_insn_reloc_b (const int is_bl, const int32_t offset,
                             struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 28))
    insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, is_bl, new_offset);
}

/* Implementation of aarch64_insn_visitor method "b_cond".  */

static void
aarch64_ftrace_insn_reloc_b_cond (const unsigned cond, const int32_t offset,
                                  struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 21))
    {
      insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond,
                                          new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a conditional branch
         instruction but not for an unconditional branch.  We can use
         the following instructions instead:

           B.COND TAKEN    ; If cond is true, then jump to TAKEN.
           B NOT_TAKEN     ; Else jump over TAKEN and continue.
         TAKEN:
           B #(offset - 8)
         NOT_TAKEN:

       */

      insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
    }
}

/* Implementation of aarch64_insn_visitor method "cb".  */

static void
aarch64_ftrace_insn_reloc_cb (const int32_t offset, const int is_cbnz,
                              const unsigned rn, int is64,
                              struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 21))
    {
      insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
                                       aarch64_register (rn, is64), new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a compare and branch
         instruction but not for an unconditional branch.  We can use
         the following instructions instead:

           CBZ xn, TAKEN   ; xn == 0, then jump to TAKEN.
           B NOT_TAKEN     ; Else jump over TAKEN and continue.
         TAKEN:
           B #(offset - 8)
         NOT_TAKEN:

       */
      insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
                                       aarch64_register (rn, is64), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
    }
}

/* Implementation of aarch64_insn_visitor method "tb".  */

static void
aarch64_ftrace_insn_reloc_tb (const int32_t offset, int is_tbnz,
                              const unsigned rt, unsigned bit,
                              struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 16))
    {
      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
                                       aarch64_register (rt, 1), new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a test bit and branch
         instruction but not for an unconditional branch.  We can use
         the following instructions instead:

           TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
           B NOT_TAKEN         ; Else jump over TAKEN and continue.
         TAKEN:
           B #(offset - 8)
         NOT_TAKEN:

       */
      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
                                       aarch64_register (rt, 1), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
                                      new_offset - 8);
    }
}

/* Implementation of aarch64_insn_visitor method "adr".  */

static void
aarch64_ftrace_insn_reloc_adr (const int32_t offset, const unsigned rd,
                               const int is_adrp,
                               struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  /* We know exactly the address the ADR{P,} instruction will compute.
     We can just write it to the destination register.  */
  CORE_ADDR address = data->insn_addr + offset;

  if (is_adrp)
    {
      /* Clear the lower 12 bits of the offset to get the 4K page.  */
      insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
                                             aarch64_register (rd, 1),
                                             address & ~0xfff);
    }
  else
    insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
                                           aarch64_register (rd, 1), address);
}

0badd99f 1804/* Implementation of aarch64_insn_visitor method "ldr_literal". */
bb903df0 1805
0badd99f
YQ
1806static void
1807aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset, const int is_sw,
1808 const unsigned rt, const int is64,
1809 struct aarch64_insn_data *data)
1810{
1811 struct aarch64_insn_relocation_data *insn_reloc
1812 = (struct aarch64_insn_relocation_data *) data;
1813 CORE_ADDR address = data->insn_addr + offset;
1814
1815 insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
1816 aarch64_register (rt, 1), address);
1817
1818 /* We know exactly what address to load from, and what register we
1819 can use:
1820
1821 MOV xd, #(oldloc + offset)
1822 MOVK xd, #((oldloc + offset) >> 16), lsl #16
1823 ...
1824
1825 LDR xd, [xd] ; or LDRSW xd, [xd]
1826
1827 */
1828
1829 if (is_sw)
1830 insn_reloc->insn_ptr += emit_ldrsw (insn_reloc->insn_ptr,
1831 aarch64_register (rt, 1),
1832 aarch64_register (rt, 1),
1833 offset_memory_operand (0));
 1834 else
1835 insn_reloc->insn_ptr += emit_ldr (insn_reloc->insn_ptr,
1836 aarch64_register (rt, is64),
1837 aarch64_register (rt, 1),
1838 offset_memory_operand (0));
1839}
1840
1841/* Implementation of aarch64_insn_visitor method "others". */
1842
1843static void
1844aarch64_ftrace_insn_reloc_others (const uint32_t insn,
1845 struct aarch64_insn_data *data)
1846{
1847 struct aarch64_insn_relocation_data *insn_reloc
1848 = (struct aarch64_insn_relocation_data *) data;
 1849
1850 /* The instruction is not PC relative. Just re-emit it at the new
1851 location. */
 1852 insn_reloc->insn_ptr += aarch64_emit_insn (insn_reloc->insn_ptr, insn);
1853}
1854
1855static const struct aarch64_insn_visitor visitor =
1856{
1857 aarch64_ftrace_insn_reloc_b,
1858 aarch64_ftrace_insn_reloc_b_cond,
1859 aarch64_ftrace_insn_reloc_cb,
1860 aarch64_ftrace_insn_reloc_tb,
1861 aarch64_ftrace_insn_reloc_adr,
1862 aarch64_ftrace_insn_reloc_ldr_literal,
1863 aarch64_ftrace_insn_reloc_others,
1864};
1865
1866/* Implementation of linux_target_ops method
1867 "install_fast_tracepoint_jump_pad". */
1868
1869static int
1870aarch64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
1871 CORE_ADDR tpaddr,
1872 CORE_ADDR collector,
1873 CORE_ADDR lockaddr,
1874 ULONGEST orig_size,
1875 CORE_ADDR *jump_entry,
1876 CORE_ADDR *trampoline,
1877 ULONGEST *trampoline_size,
1878 unsigned char *jjump_pad_insn,
1879 ULONGEST *jjump_pad_insn_size,
1880 CORE_ADDR *adjusted_insn_addr,
1881 CORE_ADDR *adjusted_insn_addr_end,
1882 char *err)
1883{
1884 uint32_t buf[256];
1885 uint32_t *p = buf;
 1886 int64_t offset;
 1887 int i;
 1888 uint32_t insn;
 1889 CORE_ADDR buildaddr = *jump_entry;
 1890 struct aarch64_insn_relocation_data insn_data;
1891
1892 /* We need to save the current state on the stack both to restore it
1893 later and to collect register values when the tracepoint is hit.
1894
1895 The saved registers are pushed in a layout that needs to be in sync
1896 with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c). Later on
1897 the supply_fast_tracepoint_registers function will fill in the
1898 register cache from a pointer to saved registers on the stack we build
1899 here.
1900
1901 For simplicity, we set the size of each cell on the stack to 16 bytes.
1902 This way one cell can hold any register type, from system registers
1903 to the 128 bit SIMD&FP registers. Furthermore, the stack pointer
1904 has to be 16 bytes aligned anyway.
1905
1906 Note that the CPSR register does not exist on AArch64. Instead we
1907 can access system bits describing the process state with the
1908 MRS/MSR instructions, namely the condition flags. We save them as
1909 if they are part of a CPSR register because that's how GDB
1910 interprets these system bits. At the moment, only the condition
1911 flags are saved in CPSR (NZCV).
1912
1913 Stack layout, each cell is 16 bytes (descending):
1914
1915 High *-------- SIMD&FP registers from 31 down to 0. --------*
1916 | q31 |
1917 . .
1918 . . 32 cells
1919 . .
1920 | q0 |
1921 *---- General purpose registers from 30 down to 0. ----*
1922 | x30 |
1923 . .
1924 . . 31 cells
1925 . .
1926 | x0 |
1927 *------------- Special purpose registers. -------------*
1928 | SP |
1929 | PC |
1930 | CPSR (NZCV) | 5 cells
1931 | FPSR |
1932 | FPCR | <- SP + 16
1933 *------------- collecting_t object --------------------*
1934 | TPIDR_EL0 | struct tracepoint * |
1935 Low *------------------------------------------------------*
1936
1937 After this stack is set up, we issue a call to the collector, passing
1938 it the saved registers at (SP + 16). */
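/* Seen from the collector, REGS == SP + 16 points at the FPCR cell and
   every register sits in a fixed 16-byte cell.  Sketch of the resulting
   offsets (the authoritative mapping is aarch64_ft_collect_regmap in
   linux-aarch64-ipa.c):

   fpcr: regs + 0 * 16    fpsr: regs + 1 * 16    cpsr: regs + 2 * 16
   pc: regs + 3 * 16      sp: regs + 4 * 16
   x0 ... x30: regs + (5 + i) * 16
   q0 ... q31: regs + (36 + i) * 16  */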
1939
1940 /* Push SIMD&FP registers on the stack:
1941
1942 SUB sp, sp, #(32 * 16)
1943
1944 STP q30, q31, [sp, #(30 * 16)]
1945 ...
1946 STP q0, q1, [sp]
1947
1948 */
1949 p += emit_sub (p, sp, sp, immediate_operand (32 * 16));
1950 for (i = 30; i >= 0; i -= 2)
1951 p += emit_stp_q_offset (p, i, i + 1, sp, i * 16);
1952
 1953 /* Push general purpose registers on the stack. Note that we do not need
1954 to push x31 as it represents the xzr register and not the stack
1955 pointer in a STR instruction.
1956
1957 SUB sp, sp, #(31 * 16)
1958
1959 STR x30, [sp, #(30 * 16)]
1960 ...
1961 STR x0, [sp]
1962
1963 */
1964 p += emit_sub (p, sp, sp, immediate_operand (31 * 16));
1965 for (i = 30; i >= 0; i -= 1)
1966 p += emit_str (p, aarch64_register (i, 1), sp,
1967 offset_memory_operand (i * 16));
1968
1969 /* Make space for 5 more cells.
1970
1971 SUB sp, sp, #(5 * 16)
1972
1973 */
1974 p += emit_sub (p, sp, sp, immediate_operand (5 * 16));
1975
1976
1977 /* Save SP:
1978
1979 ADD x4, sp, #((32 + 31 + 5) * 16)
1980 STR x4, [sp, #(4 * 16)]
1981
1982 */
1983 p += emit_add (p, x4, sp, immediate_operand ((32 + 31 + 5) * 16));
1984 p += emit_str (p, x4, sp, offset_memory_operand (4 * 16));
1985
1986 /* Save PC (tracepoint address):
1987
1988 MOV x3, #(tpaddr)
1989 ...
1990
1991 STR x3, [sp, #(3 * 16)]
1992
1993 */
1994
1995 p += emit_mov_addr (p, x3, tpaddr);
1996 p += emit_str (p, x3, sp, offset_memory_operand (3 * 16));
1997
1998 /* Save CPSR (NZCV), FPSR and FPCR:
1999
2000 MRS x2, nzcv
2001 MRS x1, fpsr
2002 MRS x0, fpcr
2003
2004 STR x2, [sp, #(2 * 16)]
2005 STR x1, [sp, #(1 * 16)]
2006 STR x0, [sp, #(0 * 16)]
2007
2008 */
2009 p += emit_mrs (p, x2, NZCV);
2010 p += emit_mrs (p, x1, FPSR);
2011 p += emit_mrs (p, x0, FPCR);
2012 p += emit_str (p, x2, sp, offset_memory_operand (2 * 16));
2013 p += emit_str (p, x1, sp, offset_memory_operand (1 * 16));
2014 p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
2015
 2016 /* Push the collecting_t object. It consists of the address of the
2017 tracepoint and an ID for the current thread. We get the latter by
2018 reading the tpidr_el0 system register. It corresponds to the
2019 NT_ARM_TLS register accessible with ptrace.
2020
2021 MOV x0, #(tpoint)
2022 ...
2023
2024 MRS x1, tpidr_el0
2025
2026 STP x0, x1, [sp, #-16]!
2027
2028 */
2029
2030 p += emit_mov_addr (p, x0, tpoint);
2031 p += emit_mrs (p, x1, TPIDR_EL0);
2032 p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-16));
2033
2034 /* Spin-lock:
2035
2036 The shared memory for the lock is at lockaddr. It will hold zero
2037 if no-one is holding the lock, otherwise it contains the address of
2038 the collecting_t object on the stack of the thread which acquired it.
2039
2040 At this stage, the stack pointer points to this thread's collecting_t
2041 object.
2042
2043 We use the following registers:
2044 - x0: Address of the lock.
2045 - x1: Pointer to collecting_t object.
2046 - x2: Scratch register.
2047
2048 MOV x0, #(lockaddr)
2049 ...
2050 MOV x1, sp
2051
2052 ; Trigger an event local to this core. So the following WFE
2053 ; instruction is ignored.
2054 SEVL
2055 again:
2056 ; Wait for an event. The event is triggered by either the SEVL
2057 ; or STLR instructions (store release).
2058 WFE
2059
2060 ; Atomically read at lockaddr. This marks the memory location as
2061 ; exclusive. This instruction also has memory constraints which
2062 ; make sure all previous data reads and writes are done before
2063 ; executing it.
2064 LDAXR x2, [x0]
2065
2066 ; Try again if another thread holds the lock.
2067 CBNZ x2, again
2068
2069 ; We can lock it! Write the address of the collecting_t object.
2070 ; This instruction will fail if the memory location is not marked
2071 ; as exclusive anymore. If it succeeds, it will remove the
2072 ; exclusive mark on the memory location. This way, if another
2073 ; thread executes this instruction before us, we will fail and try
2074 ; all over again.
2075 STXR w2, x1, [x0]
2076 CBNZ w2, again
2077
2078 */
2079
2080 p += emit_mov_addr (p, x0, lockaddr);
2081 p += emit_mov (p, x1, register_operand (sp));
2082
2083 p += emit_sevl (p);
2084 p += emit_wfe (p);
2085 p += emit_ldaxr (p, x2, x0);
2086 p += emit_cb (p, 1, w2, -2 * 4);
2087 p += emit_stxr (p, w2, x1, x0);
2088 p += emit_cb (p, 1, x2, -4 * 4);
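/* In C terms, the sequence just emitted behaves roughly like the
   acquire loop below (a sketch using GCC atomic builtins; nothing of
   the sort is compiled here, the instructions above are emitted
   directly):

   uintptr_t expected = 0;
   while (!__atomic_compare_exchange_n ((uintptr_t *) lockaddr, &expected,
                                        (uintptr_t) collecting_t_object,
                                        0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
     expected = 0;  // lock held elsewhere; WFE lets the core nap meanwhile

   The matching release further down is the STLR of xzr, i.e.
   __atomic_store_n ((uintptr_t *) lockaddr, 0, __ATOMIC_RELEASE).  */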
2089
2090 /* Call collector (struct tracepoint *, unsigned char *):
2091
2092 MOV x0, #(tpoint)
2093 ...
2094
2095 ; Saved registers start after the collecting_t object.
2096 ADD x1, sp, #16
2097
2098 ; We use an intra-procedure-call scratch register.
2099 MOV ip0, #(collector)
2100 ...
2101
2102 ; And call back to C!
2103 BLR ip0
2104
2105 */
2106
2107 p += emit_mov_addr (p, x0, tpoint);
2108 p += emit_add (p, x1, sp, immediate_operand (16));
2109
2110 p += emit_mov_addr (p, ip0, collector);
2111 p += emit_blr (p, ip0);
2112
2113 /* Release the lock.
2114
2115 MOV x0, #(lockaddr)
2116 ...
2117
2118 ; This instruction is a normal store with memory ordering
2119 ; constraints. Thanks to this we do not have to put a data
 2120 ; barrier instruction to make sure all data reads and writes are done
 2121 ; before this instruction is executed. Furthermore, this instruction
2122 ; will trigger an event, letting other threads know they can grab
2123 ; the lock.
2124 STLR xzr, [x0]
2125
2126 */
2127 p += emit_mov_addr (p, x0, lockaddr);
2128 p += emit_stlr (p, xzr, x0);
2129
2130 /* Free collecting_t object:
2131
2132 ADD sp, sp, #16
2133
2134 */
2135 p += emit_add (p, sp, sp, immediate_operand (16));
2136
2137 /* Restore CPSR (NZCV), FPSR and FPCR. And free all special purpose
2138 registers from the stack.
2139
2140 LDR x2, [sp, #(2 * 16)]
2141 LDR x1, [sp, #(1 * 16)]
2142 LDR x0, [sp, #(0 * 16)]
2143
2144 MSR NZCV, x2
2145 MSR FPSR, x1
2146 MSR FPCR, x0
2147
 2148 ADD sp, sp, #(5 * 16)
2149
2150 */
2151 p += emit_ldr (p, x2, sp, offset_memory_operand (2 * 16));
2152 p += emit_ldr (p, x1, sp, offset_memory_operand (1 * 16));
2153 p += emit_ldr (p, x0, sp, offset_memory_operand (0 * 16));
2154 p += emit_msr (p, NZCV, x2);
2155 p += emit_msr (p, FPSR, x1);
2156 p += emit_msr (p, FPCR, x0);
2157
2158 p += emit_add (p, sp, sp, immediate_operand (5 * 16));
2159
2160 /* Pop general purpose registers:
2161
2162 LDR x0, [sp]
2163 ...
2164 LDR x30, [sp, #(30 * 16)]
2165
2166 ADD sp, sp, #(31 * 16)
2167
2168 */
2169 for (i = 0; i <= 30; i += 1)
2170 p += emit_ldr (p, aarch64_register (i, 1), sp,
2171 offset_memory_operand (i * 16));
2172 p += emit_add (p, sp, sp, immediate_operand (31 * 16));
2173
2174 /* Pop SIMD&FP registers:
2175
2176 LDP q0, q1, [sp]
2177 ...
2178 LDP q30, q31, [sp, #(30 * 16)]
2179
2180 ADD sp, sp, #(32 * 16)
2181
2182 */
2183 for (i = 0; i <= 30; i += 2)
2184 p += emit_ldp_q_offset (p, i, i + 1, sp, i * 16);
2185 p += emit_add (p, sp, sp, immediate_operand (32 * 16));
2186
2187 /* Write the code into the inferior memory. */
2188 append_insns (&buildaddr, p - buf, buf);
2189
2190 /* Now emit the relocated instruction. */
2191 *adjusted_insn_addr = buildaddr;
 2192 target_read_uint32 (tpaddr, &insn);
2193
2194 insn_data.base.insn_addr = tpaddr;
2195 insn_data.new_addr = buildaddr;
2196 insn_data.insn_ptr = buf;
2197
2198 aarch64_relocate_instruction (insn, &visitor,
2199 (struct aarch64_insn_data *) &insn_data);
2200
 2201 /* We may not have been able to relocate the instruction. */
 2202 if (insn_data.insn_ptr == buf)
2203 {
2204 sprintf (err,
2205 "E.Could not relocate instruction from %s to %s.",
2206 core_addr_to_string_nz (tpaddr),
2207 core_addr_to_string_nz (buildaddr));
2208 return 1;
2209 }
 2210 else
 2211 append_insns (&buildaddr, insn_data.insn_ptr - buf, buf);
 2212 *adjusted_insn_addr_end = buildaddr;
2213
2214 /* Go back to the start of the buffer. */
2215 p = buf;
2216
2217 /* Emit a branch back from the jump pad. */
2218 offset = (tpaddr + orig_size - buildaddr);
2219 if (!can_encode_int32 (offset, 28))
2220 {
2221 sprintf (err,
2222 "E.Jump back from jump pad too far from tracepoint "
 2223 "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
2224 offset);
2225 return 1;
2226 }
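/* B encodes a 26-bit immediate scaled by 4, i.e. a signed 28-bit byte
   offset, so both branches (jump pad to tracepoint here, and tracepoint
   to jump pad below) must land within +/- 2^27 bytes = 128 MiB.  */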
2227
2228 p += emit_b (p, 0, offset);
2229 append_insns (&buildaddr, p - buf, buf);
2230
2231 /* Give the caller a branch instruction into the jump pad. */
2232 offset = (*jump_entry - tpaddr);
2233 if (!can_encode_int32 (offset, 28))
2234 {
2235 sprintf (err,
2236 "E.Jump pad too far from tracepoint "
 2237 "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
2238 offset);
2239 return 1;
2240 }
2241
2242 emit_b ((uint32_t *) jjump_pad_insn, 0, offset);
2243 *jjump_pad_insn_size = 4;
2244
2245 /* Return the end address of our pad. */
2246 *jump_entry = buildaddr;
2247
2248 return 0;
2249}
2250
2251/* Helper function writing LEN instructions from START into
2252 current_insn_ptr. */
2253
2254static void
2255emit_ops_insns (const uint32_t *start, int len)
2256{
2257 CORE_ADDR buildaddr = current_insn_ptr;
2258
2259 if (debug_threads)
 2260 debug_printf ("Adding %d instructions at %s\n",
2261 len, paddress (buildaddr));
2262
2263 append_insns (&buildaddr, len, start);
2264 current_insn_ptr = buildaddr;
2265}
2266
2267/* Pop a register from the stack. */
2268
2269static int
2270emit_pop (uint32_t *buf, struct aarch64_register rt)
2271{
2272 return emit_ldr (buf, rt, sp, postindex_memory_operand (1 * 16));
2273}
2274
2275/* Push a register on the stack. */
2276
2277static int
2278emit_push (uint32_t *buf, struct aarch64_register rt)
2279{
2280 return emit_str (buf, rt, sp, preindex_memory_operand (-1 * 16));
2281}
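/* Both helpers move SP by a full 16-byte cell even though only 8 bytes
   are used: AArch64's SP alignment checking (normally enabled for
   Linux user space) faults loads and stores that use a misaligned SP
   as base, so keeping SP 16-byte aligned at all times is the simplest
   safe choice.  A push is therefore "STR rt, [sp, #-16]!" and a pop
   "LDR rt, [sp], #16".  */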
2282
2283/* Implementation of emit_ops method "emit_prologue". */
2284
2285static void
2286aarch64_emit_prologue (void)
2287{
2288 uint32_t buf[16];
2289 uint32_t *p = buf;
2290
 2291 /* This function emits a prologue for the following function prototype:
2292
2293 enum eval_result_type f (unsigned char *regs,
2294 ULONGEST *value);
2295
 2296 The first argument is a buffer of raw registers. The second
 2297 argument points to where the result of evaluating the expression
 2298 is stored; it will be set to whatever is on top of the stack at
 2299 the end.
2300
2301 The stack set up by the prologue is as such:
2302
2303 High *------------------------------------------------------*
2304 | LR |
2305 | FP | <- FP
2306 | x1 (ULONGEST *value) |
2307 | x0 (unsigned char *regs) |
2308 Low *------------------------------------------------------*
2309
2310 As we are implementing a stack machine, each opcode can expand the
2311 stack so we never know how far we are from the data saved by this
 2312 prologue. In order to be able to refer to value and regs later, we save
2313 the current stack pointer in the frame pointer. This way, it is not
2314 clobbered when calling C functions.
2315
 2316 Finally, throughout every operation, we are using register x0 as the
2317 top of the stack, and x1 as a scratch register. */
2318
2319 p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-2 * 16));
2320 p += emit_str (p, lr, sp, offset_memory_operand (3 * 8));
2321 p += emit_str (p, fp, sp, offset_memory_operand (2 * 8));
2322
2323 p += emit_add (p, fp, sp, immediate_operand (2 * 8));
2324
2325
2326 emit_ops_insns (buf, p - buf);
2327}
2328
2329/* Implementation of emit_ops method "emit_epilogue". */
2330
2331static void
2332aarch64_emit_epilogue (void)
2333{
2334 uint32_t buf[16];
2335 uint32_t *p = buf;
2336
2337 /* Store the result of the expression (x0) in *value. */
2338 p += emit_sub (p, x1, fp, immediate_operand (1 * 8));
2339 p += emit_ldr (p, x1, x1, offset_memory_operand (0));
2340 p += emit_str (p, x0, x1, offset_memory_operand (0));
2341
2342 /* Restore the previous state. */
2343 p += emit_add (p, sp, fp, immediate_operand (2 * 8));
2344 p += emit_ldp (p, fp, lr, fp, offset_memory_operand (0));
2345
2346 /* Return expr_eval_no_error. */
2347 p += emit_mov (p, x0, immediate_operand (expr_eval_no_error));
2348 p += emit_ret (p, lr);
2349
2350 emit_ops_insns (buf, p - buf);
2351}
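/* Taken together, prologue and epilogue make each compiled expression
   behave like the C function below (a conceptual sketch, not code that
   exists anywhere):

   enum eval_result_type
   compiled_expr (unsigned char *regs, ULONGEST *value)
   {
     ULONGEST top;
     // ... emitted opcodes keep TOP in x0, spilling deeper entries ...
     *value = top;
     return expr_eval_no_error;
   }
*/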
2352
2353/* Implementation of emit_ops method "emit_add". */
2354
2355static void
2356aarch64_emit_add (void)
2357{
2358 uint32_t buf[16];
2359 uint32_t *p = buf;
2360
2361 p += emit_pop (p, x1);
 2362 p += emit_add (p, x0, x1, register_operand (x0));
2363
2364 emit_ops_insns (buf, p - buf);
2365}
2366
2367/* Implementation of emit_ops method "emit_sub". */
2368
2369static void
2370aarch64_emit_sub (void)
2371{
2372 uint32_t buf[16];
2373 uint32_t *p = buf;
2374
2375 p += emit_pop (p, x1);
 2376 p += emit_sub (p, x0, x1, register_operand (x0));
2377
2378 emit_ops_insns (buf, p - buf);
2379}
2380
2381/* Implementation of emit_ops method "emit_mul". */
2382
2383static void
2384aarch64_emit_mul (void)
2385{
2386 uint32_t buf[16];
2387 uint32_t *p = buf;
2388
2389 p += emit_pop (p, x1);
2390 p += emit_mul (p, x0, x1, x0);
2391
2392 emit_ops_insns (buf, p - buf);
2393}
2394
2395/* Implementation of emit_ops method "emit_lsh". */
2396
2397static void
2398aarch64_emit_lsh (void)
2399{
2400 uint32_t buf[16];
2401 uint32_t *p = buf;
2402
2403 p += emit_pop (p, x1);
2404 p += emit_lslv (p, x0, x1, x0);
2405
2406 emit_ops_insns (buf, p - buf);
2407}
2408
2409/* Implementation of emit_ops method "emit_rsh_signed". */
2410
2411static void
2412aarch64_emit_rsh_signed (void)
2413{
2414 uint32_t buf[16];
2415 uint32_t *p = buf;
2416
2417 p += emit_pop (p, x1);
2418 p += emit_asrv (p, x0, x1, x0);
2419
2420 emit_ops_insns (buf, p - buf);
2421}
2422
2423/* Implementation of emit_ops method "emit_rsh_unsigned". */
2424
2425static void
2426aarch64_emit_rsh_unsigned (void)
2427{
2428 uint32_t buf[16];
2429 uint32_t *p = buf;
2430
2431 p += emit_pop (p, x1);
2432 p += emit_lsrv (p, x0, x1, x0);
2433
2434 emit_ops_insns (buf, p - buf);
2435}
2436
2437/* Implementation of emit_ops method "emit_ext". */
2438
2439static void
2440aarch64_emit_ext (int arg)
2441{
2442 uint32_t buf[16];
2443 uint32_t *p = buf;
2444
2445 p += emit_sbfx (p, x0, x0, 0, arg);
2446
2447 emit_ops_insns (buf, p - buf);
2448}
2449
2450/* Implementation of emit_ops method "emit_log_not". */
2451
2452static void
2453aarch64_emit_log_not (void)
2454{
2455 uint32_t buf[16];
2456 uint32_t *p = buf;
2457
2458 /* If the top of the stack is 0, replace it with 1. Else replace it with
2459 0. */
2460
2461 p += emit_cmp (p, x0, immediate_operand (0));
2462 p += emit_cset (p, x0, EQ);
2463
2464 emit_ops_insns (buf, p - buf);
2465}
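/* The CMP/CSET pair is the standard AArch64 idiom for materializing a
   boolean: after "CMP x0, #0", "CSET x0, EQ" writes 1 to x0 if the
   flags satisfy EQ and 0 otherwise, i.e. it computes x0 = (x0 == 0).
   The comparison methods below reuse the same idiom with other
   condition codes.  */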
2466
2467/* Implementation of emit_ops method "emit_bit_and". */
2468
2469static void
2470aarch64_emit_bit_and (void)
2471{
2472 uint32_t buf[16];
2473 uint32_t *p = buf;
2474
2475 p += emit_pop (p, x1);
2476 p += emit_and (p, x0, x0, x1);
2477
2478 emit_ops_insns (buf, p - buf);
2479}
2480
2481/* Implementation of emit_ops method "emit_bit_or". */
2482
2483static void
2484aarch64_emit_bit_or (void)
2485{
2486 uint32_t buf[16];
2487 uint32_t *p = buf;
2488
2489 p += emit_pop (p, x1);
2490 p += emit_orr (p, x0, x0, x1);
2491
2492 emit_ops_insns (buf, p - buf);
2493}
2494
2495/* Implementation of emit_ops method "emit_bit_xor". */
2496
2497static void
2498aarch64_emit_bit_xor (void)
2499{
2500 uint32_t buf[16];
2501 uint32_t *p = buf;
2502
2503 p += emit_pop (p, x1);
2504 p += emit_eor (p, x0, x0, x1);
2505
2506 emit_ops_insns (buf, p - buf);
2507}
2508
2509/* Implementation of emit_ops method "emit_bit_not". */
2510
2511static void
2512aarch64_emit_bit_not (void)
2513{
2514 uint32_t buf[16];
2515 uint32_t *p = buf;
2516
2517 p += emit_mvn (p, x0, x0);
2518
2519 emit_ops_insns (buf, p - buf);
2520}
2521
2522/* Implementation of emit_ops method "emit_equal". */
2523
2524static void
2525aarch64_emit_equal (void)
2526{
2527 uint32_t buf[16];
2528 uint32_t *p = buf;
2529
2530 p += emit_pop (p, x1);
2531 p += emit_cmp (p, x0, register_operand (x1));
2532 p += emit_cset (p, x0, EQ);
2533
2534 emit_ops_insns (buf, p - buf);
2535}
2536
2537/* Implementation of emit_ops method "emit_less_signed". */
2538
2539static void
2540aarch64_emit_less_signed (void)
2541{
2542 uint32_t buf[16];
2543 uint32_t *p = buf;
2544
2545 p += emit_pop (p, x1);
2546 p += emit_cmp (p, x1, register_operand (x0));
2547 p += emit_cset (p, x0, LT);
2548
2549 emit_ops_insns (buf, p - buf);
2550}
2551
2552/* Implementation of emit_ops method "emit_less_unsigned". */
2553
2554static void
2555aarch64_emit_less_unsigned (void)
2556{
2557 uint32_t buf[16];
2558 uint32_t *p = buf;
2559
2560 p += emit_pop (p, x1);
2561 p += emit_cmp (p, x1, register_operand (x0));
2562 p += emit_cset (p, x0, LO);
2563
2564 emit_ops_insns (buf, p - buf);
2565}
2566
2567/* Implementation of emit_ops method "emit_ref". */
2568
2569static void
2570aarch64_emit_ref (int size)
2571{
2572 uint32_t buf[16];
2573 uint32_t *p = buf;
2574
2575 switch (size)
2576 {
2577 case 1:
2578 p += emit_ldrb (p, w0, x0, offset_memory_operand (0));
2579 break;
2580 case 2:
2581 p += emit_ldrh (p, w0, x0, offset_memory_operand (0));
2582 break;
2583 case 4:
2584 p += emit_ldr (p, w0, x0, offset_memory_operand (0));
2585 break;
2586 case 8:
2587 p += emit_ldr (p, x0, x0, offset_memory_operand (0));
2588 break;
2589 default:
2590 /* Unknown size, bail on compilation. */
2591 emit_error = 1;
2592 break;
2593 }
2594
2595 emit_ops_insns (buf, p - buf);
2596}
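/* In effect, the address on top of the stack is replaced by the value
   it points to, zero-extended to 64 bits (a load into w0 clears the
   upper half of x0).  Roughly: size 1 performs x0 = *(uint8_t *) x0,
   size 2 performs x0 = *(uint16_t *) x0, and so on.  */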
2597
2598/* Implementation of emit_ops method "emit_if_goto". */
2599
2600static void
2601aarch64_emit_if_goto (int *offset_p, int *size_p)
2602{
2603 uint32_t buf[16];
2604 uint32_t *p = buf;
2605
2606 /* The Z flag is set or cleared here. */
2607 p += emit_cmp (p, x0, immediate_operand (0));
2608 /* This instruction must not change the Z flag. */
2609 p += emit_pop (p, x0);
2610 /* Branch over the next instruction if x0 == 0. */
2611 p += emit_bcond (p, EQ, 8);
2612
2613 /* The NOP instruction will be patched with an unconditional branch. */
2614 if (offset_p)
2615 *offset_p = (p - buf) * 4;
2616 if (size_p)
2617 *size_p = 4;
2618 p += emit_nop (p);
2619
2620 emit_ops_insns (buf, p - buf);
2621}
2622
2623/* Implementation of emit_ops method "emit_goto". */
2624
2625static void
2626aarch64_emit_goto (int *offset_p, int *size_p)
2627{
2628 uint32_t buf[16];
2629 uint32_t *p = buf;
2630
2631 /* The NOP instruction will be patched with an unconditional branch. */
2632 if (offset_p)
2633 *offset_p = 0;
2634 if (size_p)
2635 *size_p = 4;
2636 p += emit_nop (p);
2637
2638 emit_ops_insns (buf, p - buf);
2639}
2640
2641/* Implementation of emit_ops method "write_goto_address". */
2642
2643void
2644aarch64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2645{
2646 uint32_t insn;
2647
2648 emit_b (&insn, 0, to - from);
2649 append_insns (&from, 1, &insn);
2650}
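/* The NOP emitted by the goto methods above is later rewritten through
   this hook once the destination is known.  A sketch of how a caller
   can use the offset/size pair reported by aarch64_emit_goto (variable
   names are illustrative):

   int off, size;
   CORE_ADDR opcode_start = current_insn_ptr;
   aarch64_emit_goto (&off, &size);
   // ... destination becomes known later ...
   aarch64_write_goto_address (opcode_start + off, dest_addr, size);
*/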
2651
2652/* Implementation of emit_ops method "emit_const". */
2653
2654static void
2655aarch64_emit_const (LONGEST num)
2656{
2657 uint32_t buf[16];
2658 uint32_t *p = buf;
2659
2660 p += emit_mov_addr (p, x0, num);
2661
2662 emit_ops_insns (buf, p - buf);
2663}
2664
2665/* Implementation of emit_ops method "emit_call". */
2666
2667static void
2668aarch64_emit_call (CORE_ADDR fn)
2669{
2670 uint32_t buf[16];
2671 uint32_t *p = buf;
2672
2673 p += emit_mov_addr (p, ip0, fn);
2674 p += emit_blr (p, ip0);
2675
2676 emit_ops_insns (buf, p - buf);
2677}
2678
2679/* Implementation of emit_ops method "emit_reg". */
2680
2681static void
2682aarch64_emit_reg (int reg)
2683{
2684 uint32_t buf[16];
2685 uint32_t *p = buf;
2686
2687 /* Set x0 to unsigned char *regs. */
2688 p += emit_sub (p, x0, fp, immediate_operand (2 * 8));
2689 p += emit_ldr (p, x0, x0, offset_memory_operand (0));
2690 p += emit_mov (p, x1, immediate_operand (reg));
2691
2692 emit_ops_insns (buf, p - buf);
2693
2694 aarch64_emit_call (get_raw_reg_func_addr ());
2695}
2696
2697/* Implementation of emit_ops method "emit_pop". */
2698
2699static void
2700aarch64_emit_pop (void)
2701{
2702 uint32_t buf[16];
2703 uint32_t *p = buf;
2704
2705 p += emit_pop (p, x0);
2706
2707 emit_ops_insns (buf, p - buf);
2708}
2709
2710/* Implementation of emit_ops method "emit_stack_flush". */
2711
2712static void
2713aarch64_emit_stack_flush (void)
2714{
2715 uint32_t buf[16];
2716 uint32_t *p = buf;
2717
2718 p += emit_push (p, x0);
2719
2720 emit_ops_insns (buf, p - buf);
2721}
2722
2723/* Implementation of emit_ops method "emit_zero_ext". */
2724
2725static void
2726aarch64_emit_zero_ext (int arg)
2727{
2728 uint32_t buf[16];
2729 uint32_t *p = buf;
2730
2731 p += emit_ubfx (p, x0, x0, 0, arg);
2732
2733 emit_ops_insns (buf, p - buf);
2734}
2735
2736/* Implementation of emit_ops method "emit_swap". */
2737
2738static void
2739aarch64_emit_swap (void)
2740{
2741 uint32_t buf[16];
2742 uint32_t *p = buf;
2743
2744 p += emit_ldr (p, x1, sp, offset_memory_operand (0 * 16));
2745 p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
2746 p += emit_mov (p, x0, register_operand (x1));
2747
2748 emit_ops_insns (buf, p - buf);
2749}
2750
2751/* Implementation of emit_ops method "emit_stack_adjust". */
2752
2753static void
2754aarch64_emit_stack_adjust (int n)
2755{
2756 /* This is not needed with our design. */
2757 uint32_t buf[16];
2758 uint32_t *p = buf;
2759
2760 p += emit_add (p, sp, sp, immediate_operand (n * 16));
2761
2762 emit_ops_insns (buf, p - buf);
2763}
2764
2765/* Implementation of emit_ops method "emit_int_call_1". */
2766
2767static void
2768aarch64_emit_int_call_1 (CORE_ADDR fn, int arg1)
2769{
2770 uint32_t buf[16];
2771 uint32_t *p = buf;
2772
2773 p += emit_mov (p, x0, immediate_operand (arg1));
2774
2775 emit_ops_insns (buf, p - buf);
2776
2777 aarch64_emit_call (fn);
2778}
2779
2780/* Implementation of emit_ops method "emit_void_call_2". */
2781
2782static void
2783aarch64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2784{
2785 uint32_t buf[16];
2786 uint32_t *p = buf;
2787
2788 /* Push x0 on the stack. */
2789 aarch64_emit_stack_flush ();
2790
2791 /* Setup arguments for the function call:
2792
2793 x0: arg1
2794 x1: top of the stack
2795
2796 MOV x1, x0
2797 MOV x0, #arg1 */
2798
2799 p += emit_mov (p, x1, register_operand (x0));
2800 p += emit_mov (p, x0, immediate_operand (arg1));
2801
2802 emit_ops_insns (buf, p - buf);
2803
2804 aarch64_emit_call (fn);
2805
2806 /* Restore x0. */
2807 aarch64_emit_pop ();
2808}
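/* Net effect: the top-of-stack value survives in x0 across the call
   and the helper is invoked as fn (arg1, top), the shape used by
   helpers that take an (identifier, value) pair.  */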
2809
2810/* Implementation of emit_ops method "emit_eq_goto". */
2811
2812static void
2813aarch64_emit_eq_goto (int *offset_p, int *size_p)
2814{
2815 uint32_t buf[16];
2816 uint32_t *p = buf;
2817
2818 p += emit_pop (p, x1);
2819 p += emit_cmp (p, x1, register_operand (x0));
2820 /* Branch over the next instruction if x0 != x1. */
2821 p += emit_bcond (p, NE, 8);
2822 /* The NOP instruction will be patched with an unconditional branch. */
2823 if (offset_p)
2824 *offset_p = (p - buf) * 4;
2825 if (size_p)
2826 *size_p = 4;
2827 p += emit_nop (p);
2828
2829 emit_ops_insns (buf, p - buf);
2830}
2831
2832/* Implementation of emit_ops method "emit_ne_goto". */
2833
2834static void
2835aarch64_emit_ne_goto (int *offset_p, int *size_p)
2836{
2837 uint32_t buf[16];
2838 uint32_t *p = buf;
2839
2840 p += emit_pop (p, x1);
2841 p += emit_cmp (p, x1, register_operand (x0));
2842 /* Branch over the next instruction if x0 == x1. */
2843 p += emit_bcond (p, EQ, 8);
2844 /* The NOP instruction will be patched with an unconditional branch. */
2845 if (offset_p)
2846 *offset_p = (p - buf) * 4;
2847 if (size_p)
2848 *size_p = 4;
2849 p += emit_nop (p);
2850
2851 emit_ops_insns (buf, p - buf);
2852}
2853
2854/* Implementation of emit_ops method "emit_lt_goto". */
2855
2856static void
2857aarch64_emit_lt_goto (int *offset_p, int *size_p)
2858{
2859 uint32_t buf[16];
2860 uint32_t *p = buf;
2861
2862 p += emit_pop (p, x1);
2863 p += emit_cmp (p, x1, register_operand (x0));
 2864 /* Branch over the next instruction if x1 >= x0. */
2865 p += emit_bcond (p, GE, 8);
2866 /* The NOP instruction will be patched with an unconditional branch. */
2867 if (offset_p)
2868 *offset_p = (p - buf) * 4;
2869 if (size_p)
2870 *size_p = 4;
2871 p += emit_nop (p);
2872
2873 emit_ops_insns (buf, p - buf);
2874}
2875
2876/* Implementation of emit_ops method "emit_le_goto". */
2877
2878static void
2879aarch64_emit_le_goto (int *offset_p, int *size_p)
2880{
2881 uint32_t buf[16];
2882 uint32_t *p = buf;
2883
2884 p += emit_pop (p, x1);
2885 p += emit_cmp (p, x1, register_operand (x0));
 2886 /* Branch over the next instruction if x1 > x0. */
2887 p += emit_bcond (p, GT, 8);
2888 /* The NOP instruction will be patched with an unconditional branch. */
2889 if (offset_p)
2890 *offset_p = (p - buf) * 4;
2891 if (size_p)
2892 *size_p = 4;
2893 p += emit_nop (p);
2894
2895 emit_ops_insns (buf, p - buf);
2896}
2897
2898/* Implementation of emit_ops method "emit_gt_goto". */
2899
2900static void
2901aarch64_emit_gt_goto (int *offset_p, int *size_p)
2902{
2903 uint32_t buf[16];
2904 uint32_t *p = buf;
2905
2906 p += emit_pop (p, x1);
2907 p += emit_cmp (p, x1, register_operand (x0));
 2908 /* Branch over the next instruction if x1 <= x0. */
2909 p += emit_bcond (p, LE, 8);
2910 /* The NOP instruction will be patched with an unconditional branch. */
2911 if (offset_p)
2912 *offset_p = (p - buf) * 4;
2913 if (size_p)
2914 *size_p = 4;
2915 p += emit_nop (p);
2916
2917 emit_ops_insns (buf, p - buf);
2918}
2919
 2920/* Implementation of emit_ops method "emit_ge_goto". */
2921
2922static void
2923aarch64_emit_ge_got (int *offset_p, int *size_p)
2924{
2925 uint32_t buf[16];
2926 uint32_t *p = buf;
2927
2928 p += emit_pop (p, x1);
2929 p += emit_cmp (p, x1, register_operand (x0));
 2930 /* Branch over the next instruction if x1 < x0. */
2931 p += emit_bcond (p, LT, 8);
2932 /* The NOP instruction will be patched with an unconditional branch. */
2933 if (offset_p)
2934 *offset_p = (p - buf) * 4;
2935 if (size_p)
2936 *size_p = 4;
2937 p += emit_nop (p);
2938
2939 emit_ops_insns (buf, p - buf);
2940}
2941
2942static struct emit_ops aarch64_emit_ops_impl =
2943{
2944 aarch64_emit_prologue,
2945 aarch64_emit_epilogue,
2946 aarch64_emit_add,
2947 aarch64_emit_sub,
2948 aarch64_emit_mul,
2949 aarch64_emit_lsh,
2950 aarch64_emit_rsh_signed,
2951 aarch64_emit_rsh_unsigned,
2952 aarch64_emit_ext,
2953 aarch64_emit_log_not,
2954 aarch64_emit_bit_and,
2955 aarch64_emit_bit_or,
2956 aarch64_emit_bit_xor,
2957 aarch64_emit_bit_not,
2958 aarch64_emit_equal,
2959 aarch64_emit_less_signed,
2960 aarch64_emit_less_unsigned,
2961 aarch64_emit_ref,
2962 aarch64_emit_if_goto,
2963 aarch64_emit_goto,
2964 aarch64_write_goto_address,
2965 aarch64_emit_const,
2966 aarch64_emit_call,
2967 aarch64_emit_reg,
2968 aarch64_emit_pop,
2969 aarch64_emit_stack_flush,
2970 aarch64_emit_zero_ext,
2971 aarch64_emit_swap,
2972 aarch64_emit_stack_adjust,
2973 aarch64_emit_int_call_1,
2974 aarch64_emit_void_call_2,
2975 aarch64_emit_eq_goto,
2976 aarch64_emit_ne_goto,
2977 aarch64_emit_lt_goto,
2978 aarch64_emit_le_goto,
2979 aarch64_emit_gt_goto,
2980 aarch64_emit_ge_got,
2981};
2982
2983/* Implementation of linux_target_ops method "emit_ops". */
2984
2985static struct emit_ops *
2986aarch64_emit_ops (void)
2987{
2988 return &aarch64_emit_ops_impl;
2989}
2990
2991/* Implementation of linux_target_ops method
2992 "get_min_fast_tracepoint_insn_len". */
2993
2994static int
2995aarch64_get_min_fast_tracepoint_insn_len (void)
2996{
2997 return 4;
2998}
2999
3000/* Implementation of linux_target_ops method "supports_range_stepping". */
3001
3002static int
3003aarch64_supports_range_stepping (void)
3004{
3005 return 1;
3006}
3007
3008/* Implementation of linux_target_ops method "sw_breakpoint_from_kind". */
3009
3010static const gdb_byte *
3011aarch64_sw_breakpoint_from_kind (int kind, int *size)
3012{
3013 if (is_64bit_tdesc ())
3014 {
3015 *size = aarch64_breakpoint_len;
3016 return aarch64_breakpoint;
3017 }
3018 else
3019 return arm_sw_breakpoint_from_kind (kind, size);
3020}
3021
3022/* Implementation of linux_target_ops method "breakpoint_kind_from_pc". */
3023
3024static int
3025aarch64_breakpoint_kind_from_pc (CORE_ADDR *pcptr)
3026{
3027 if (is_64bit_tdesc ())
3028 return aarch64_breakpoint_len;
3029 else
3030 return arm_breakpoint_kind_from_pc (pcptr);
3031}
3032
3033/* Implementation of the linux_target_ops method
3034 "breakpoint_kind_from_current_state". */
3035
3036static int
3037aarch64_breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
3038{
3039 if (is_64bit_tdesc ())
3040 return aarch64_breakpoint_len;
3041 else
3042 return arm_breakpoint_kind_from_current_state (pcptr);
3043}
3044
3045/* Support for hardware single step. */
3046
3047static int
3048aarch64_supports_hardware_single_step (void)
3049{
3050 return 1;
3051}
3052
3053struct linux_target_ops the_low_target =
3054{
3055 aarch64_arch_setup,
 3056 aarch64_regs_info,
3057 NULL, /* cannot_fetch_register */
3058 NULL, /* cannot_store_register */
 3059 NULL, /* fetch_register */
3060 aarch64_get_pc,
3061 aarch64_set_pc,
 3062 aarch64_breakpoint_kind_from_pc,
 3063 aarch64_sw_breakpoint_from_kind,
 3064 NULL, /* get_next_pcs */
 3065 0, /* decr_pc_after_break */
 3066 aarch64_breakpoint_at,
 3067 aarch64_supports_z_point_type,
3068 aarch64_insert_point,
3069 aarch64_remove_point,
3070 aarch64_stopped_by_watchpoint,
3071 aarch64_stopped_data_address,
3072 NULL, /* collect_ptrace_register */
3073 NULL, /* supply_ptrace_register */
 3074 aarch64_linux_siginfo_fixup,
 3075 aarch64_linux_new_process,
 3076 aarch64_linux_delete_process,
 3077 aarch64_linux_new_thread,
 3078 aarch64_linux_delete_thread,
 3079 aarch64_linux_new_fork,
 3080 aarch64_linux_prepare_to_resume,
 3081 NULL, /* process_qsupported */
 3082 aarch64_supports_tracepoints,
3083 aarch64_get_thread_area,
3084 aarch64_install_fast_tracepoint_jump_pad,
 3085 aarch64_emit_ops,
 3086 aarch64_get_min_fast_tracepoint_insn_len,
 3087 aarch64_supports_range_stepping,
 3088 aarch64_breakpoint_kind_from_current_state,
 3089 aarch64_supports_hardware_single_step,
 3090 aarch64_get_syscall_trapinfo,
 3091};
3092
3093void
3094initialize_low_arch (void)
3095{
3096 initialize_low_arch_aarch32 ();
3097
 3098 initialize_regsets_info (&aarch64_regsets_info);
 3099 initialize_regsets_info (&aarch64_sve_regsets_info);
 3100}